diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000000000000000000000000000000000000..854d7013e0d6d47cc361aceb795eb4d85a9c9c81
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,17 @@
+{
+  "version": "0.2.0",
+  "configurations": [
+    {
+      "name": "Launch from Harmony Root",
+      "type": "go",
+      "request": "launch",
+      "mode": "debug",
+      "program": "${workspaceFolder}/src/cmd/web/main.go",
+      "cwd": "${workspaceFolder}",
+      "env": {
+        "PORT": "8080"
+      },
+      "args": []
+    }
+  ]
+}
diff --git a/config/auth.toml b/config/auth.toml
index 009ba718d19bc0ee8c4fed84b58635b294225f2a..4a738ec7f1dd766d4f5bdd0135908c40b7f70356 100644
--- a/config/auth.toml
+++ b/config/auth.toml
@@ -1,14 +1,14 @@
 enable_oauth2 = true
 
 [provider.github]
-enabled = false
+enabled = true
 name = "github"
 display_name = "GitHub"
 authorize_uri = "https://github.com/login/oauth/authorize"
 access_token_uri = "https://github.com/login/oauth/access_token"
 userinfo_uri = "https://api.github.com/user"
-client_id = "[client_id]"
-client_secret = "[client_secret]"
+client_id = "[client_id]"         # do not commit real OAuth credentials
+client_secret = "[client_secret]" # inject real values via environment-specific config
 scopes = ["read:user", "user:email"]
 
 [provider.google]
diff --git a/go.mod b/go.mod
index 3f0f7c2a780fd0579a8f0eed552182aa2900f1f4..ad3b03c1ac7876fd92b893c1174165d02039ad61 100644
--- a/go.mod
+++ b/go.mod
@@ -8,22 +8,31 @@ require (
 	github.com/jackc/pgx/v5 v5.4.3
 	github.com/pelletier/go-toml/v2 v2.1.0
 	github.com/stretchr/testify v1.8.4
+	go.mongodb.org/mongo-driver v1.17.1
 	golang.org/x/oauth2 v0.13.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
+	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
 	github.com/jackc/puddle/v2 v2.2.1 // indirect
+	github.com/klauspost/compress v1.13.6 // indirect
 	github.com/kr/text v0.2.0 // indirect
+	github.com/montanaflynn/stats v0.7.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/rogpeppe/go-internal v1.11.0 // indirect
-	golang.org/x/crypto v0.16.0 // indirect
-	golang.org/x/net v0.19.0 // indirect
-	golang.org/x/sync v0.5.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
+	github.com/xdg-go/scram v1.1.2 // indirect
+	github.com/xdg-go/stringprep v1.0.4 // indirect
+	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
+	golang.org/x/crypto v0.26.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/sync v0.8.0 // indirect
+	golang.org/x/text v0.17.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index d01bdf892fd9353e092f23771555e6e481b38f40..4d4b771a7d1b83d872a54524232d01cad9c66d00 100644
--- a/go.sum
+++ b/go.sum
@@ -8,11 +8,15 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
 github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@@ -21,10 +25,14 @@ github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY=
 github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
 github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
 github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
 github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
+github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
 github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
 github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -40,22 +48,52 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
+github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
+go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
 golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
diff --git a/requirements.json b/requirements.json
new file mode 100644
index 0000000000000000000000000000000000000000..2797bd8dd7ed64a2127c5790a9d1a3dee7f347bd
--- /dev/null
+++ b/requirements.json
@@ -0,0 +1 @@
+[{"_id":"673b9a543c547c9012cab78c","created_at":"2024-11-18T19:49:40.559Z","parsing_result":{"errors":null,"notices":null,"requirement":"test muss test als relevant betrachten, dass test test.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"test","bedingung":"test","begruendung":"test","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"test"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"673b9a9b3c547c9012cab78d","created_at":"2024-11-18T19:50:51.469Z","parsing_result":{"errors":null,"notices":null,"requirement":"test muss test als relevant betrachten, dass test test.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"test","bedingung":"test","begruendung":"test","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"test"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"673bcf662d6ca63eb5638dd2","created_at":"2024-11-18T23:36:06.836Z","parsing_result":{"errors":null,"notices":null,"requirement":"testing muss testing als relevant betrachten, dass testing testing.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"testing","bedingung":"testing","begruendung":"testing","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"testing"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"673bd0ed2d6ca63eb5638dd3","created_at":"2024-11-18T23:42:37.769Z","parsing_result":{"errors":null,"notices":null,"requirement":"testtt muss testtt als relevant betrachten, dass testtt testtt.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"testtt","bedingung":"testtt","begruendung":"testtt","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"testtt"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"673c370f67f12bdd37995229","created_at":"2024-11-19T06:58:23.88Z","parsing_result":{"errors":null,"notices":null,"requirement":"testttttttttt muss testttttttttt als relevant betrachten, dass testttttttttt.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"testttttttttt","bedingung":"testttttttttt","begruendung":"","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"testttttttttt"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"6746983f934773bedf28c1d2","created_at":"2024-11-27T03:55:43.267Z","parsing_result":{"errors":null,"notices":null,"requirement":"ameltest muss ameltest als relevant betrachten, dass ameltest ameltest.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"ameltest","bedingung":"ameltest","begruendung":"ameltest","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"ameltest"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"6746c7151a6cae65b5dde03e","created_at":"2024-11-27T07:15:33.204Z","parsing_result":{"errors":null,"notices":null,"requirement":"Falls das Smartphone als Outdoor-Gerät bezeichnet wird, muss die Komponente Außenhülle als relevant betrachten, dass das System Smartphone bei einer Umgebungstemperatur von -20°C bis 40°C betrieben wird.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"das System Smartphone bei einer Umgebungstemperatur von -20°C bis 40°C betrieben wird","bedingung":"Falls das Smartphone als Outdoor-Gerät bezeichnet wird,","begruendung":"","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"die Komponente Außenhülle"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"6746c7761a6cae65b5dde03f","created_at":"2024-11-27T07:17:10.769Z","parsing_result":{"errors":null,"notices":null,"requirement":"test muss test technisch die Eigenschaft haben, test test.","templateid":"technische-eigenschaft","templatename":"Technische Eigenschaft","templatetype":"ebt","templateversion":"0.1.0","variantname":"Technische Eigenschaft mit Bedingung","warnings":null},"segment_map":{"bedingung":"test","begruendung":"","modalitaet":"muss","name-der-eigenschaft":"test","objektbeschreibung":"test","punkt":".","system":"test","technisch-die-eigenschaft-haben":"technisch die Eigenschaft haben,"},"template_id":"59cda3e1-87cc-406f-b504-34059ec297e4","variant_key":"technische-eigenschaft-mit-bedingung"},{"_id":"674adbf103066ad475dee8f5","created_at":"2024-11-30T09:33:37.733Z","parsing_result":{"errors":[{"downgrade":false,"extra":null,"level":0,"message":"eiffel.parser.equals-any.error","segment":{"name":"modalitaet","value":"tset"},"translationargs":["expected","\"muss\", \"soll\", \"sollte\", \"kann\", \"wird\"","actual","tset"]}],"notices":null,"requirement":"test tset test als relevant betrachten, dass test test.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"test","bedingung":"test","begruendung":"test","modalitaet":"tset","punkt":".","relevanz":"als relevant betrachten, dass","system":"test"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"674adbf703066ad475dee8f6","created_at":"2024-11-30T09:33:43.19Z","parsing_result":{"errors":null,"notices":null,"requirement":"test muss test als relevant betrachten, dass test test.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"test","bedingung":"test","begruendung":"test","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"test"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"674adc2f03066ad475dee8f7","created_at":"2024-11-30T09:34:39.271Z","parsing_result":{"errors":[{"downgrade":false,"extra":null,"level":0,"message":"eiffel.parser.equals-any.error","segment":{"name":"modalitaet","value":"test"},"translationargs":["expected","\"muss\", \"soll\", \"sollte\", \"kann\", \"wird\"","actual","test"]},{"downgrade":false,"extra":null,"level":0,"message":"eiffel.parser.equals-any.error","segment":{"name":"ein-kein","value":"test"},"translationargs":["expected","\"ein\", \"kein\"","actual","test"]},{"downgrade":false,"extra":null,"level":0,"message":"eiffel.parser.equals-any.error","segment":{"name":"des-der","value":"test"},"translationargs":["expected","\"des\", \"der\"","actual","test"]}],"notices":[{"downgrade":true,"extra":null,"level":2,"message":"eiffel.parser.error.missing-segment","segment":{"name":"eigenname","value":""},"translationargs":["name","Eigenname","technicalName","eigenname"]}],"requirement":"test test es test test test test sein, test test.","templateid":"ziel","templatename":"Ziel","templatetype":"ebt","templateversion":"0.1.0","variantname":"Ziel mit Bedingung","warnings":null},"segment_map":{"bedingung":"test","begruendung":"test","bezug":"test","des-der":"test","eigenname":"","ein-kein":"test","es-klein":"es","modalitaet":"test","punkt":".","sein":"sein,","zielart":"test","zu-erreichender-zustand":"test"},"template_id":"35b9e593-dc5a-4ad9-85f4-c13d0703f89c","variant_key":"ziel-mit-bedingung"},{"_id":"674adc3803066ad475dee8f8","created_at":"2024-11-30T09:34:48.828Z","parsing_result":{"errors":null,"notices":[{"downgrade":true,"extra":null,"level":2,"message":"eiffel.parser.error.missing-segment","segment":{"name":"eigenname","value":""},"translationargs":["name","Eigenname","technicalName","eigenname"]}],"requirement":"test muss es ein test des test sein, test test.","templateid":"ziel","templatename":"Ziel","templatetype":"ebt","templateversion":"0.1.0","variantname":"Ziel mit Bedingung","warnings":null},"segment_map":{"bedingung":"test","begruendung":"test","bezug":"test","des-der":"des","eigenname":"","ein-kein":"ein","es-klein":"es","modalitaet":"muss","punkt":".","sein":"sein,","zielart":"test","zu-erreichender-zustand":"test"},"template_id":"35b9e593-dc5a-4ad9-85f4-c13d0703f89c","variant_key":"ziel-mit-bedingung"},{"_id":"674adde903066ad475dee8f9","created_at":"2024-11-30T09:42:01.378Z","parsing_result":{"errors":null,"notices":null,"requirement":"Robin muss Robin als relevant betrachten, dass Robin Robin.","templateid":"kontext","templatename":"Kontext","templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"segment_map":{"abgrenzung":"Robin","bedingung":"Robin","begruendung":"Robin","modalitaet":"muss","punkt":".","relevanz":"als relevant betrachten, dass","system":"Robin"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"},{"_id":"674adfd903066ad475dee8fa","created_at":"2024-11-30T09:50:17.498Z","parsing_result":{"errors":null,"notices":null,"requirement":"Robin3 must possess the quality feature Robin3 Robin3.","templateid":"esqua","templatename":"Extended Template for Quality Requirements (ESQUA)","templatetype":"ebt","templateversion":"0.1.0","variantname":"Quality Feature without Condition","warnings":null},"segment_map":{"dot":".","justification":"Robin3","modality-medium":"must","possess-quality-feature-medium":"possess the quality feature","quality-feature":"Robin3","who-or-what-full":"Robin3"},"template_id":"55ae0045-7c80-48b4-ac54-d76e2a9da667","variant_key":"quality-feature-without-condition"}]
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4e2723fee5815bc39501df08e69c885b6b3c2770
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,14 @@
+{"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"modalitaet":"muss","system":"test","relevanz":"als relevant betrachten, dass","abgrenzung":"test","begruendung":"test","punkt":".","bedingung":"test"},"parsing_result":{"templateid":"kontext","templatetype":"ebt","templatename":"Kontext","requirement":"test muss test als relevant betrachten, dass test test.","errors":null,"notices":null,"templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null},"created_at":{"$date":{"$numberLong":"1731959380559"}},"_id":{"$oid":"673b9a543c547c9012cab78c"}}
+{"variant_key":"kontext-mit-bedingung","_id":{"$oid":"673b9a9b3c547c9012cab78d"},"segment_map":{"abgrenzung":"test","begruendung":"test","punkt":".","bedingung":"test","modalitaet":"muss","system":"test","relevanz":"als relevant betrachten, dass"},"parsing_result":{"variantname":"Kontext mit Bedingung","notices":null,"errors":null,"warnings":null,"templateid":"kontext","templatetype":"ebt","templateversion":"0.1.0","templatename":"Kontext","requirement":"test muss test als relevant betrachten, dass test test."},"created_at":{"$date":{"$numberLong":"1731959451469"}},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8"}
+{"segment_map":{"relevanz":"als relevant betrachten, dass","abgrenzung":"testing","begruendung":"testing","punkt":".","bedingung":"testing","modalitaet":"muss","system":"testing"},"parsing_result":{"templateid":"kontext","templatetype":"ebt","requirement":"testing muss testing als relevant betrachten, dass testing testing.","warnings":null,"templateversion":"0.1.0","templatename":"Kontext","variantname":"Kontext mit Bedingung","errors":null,"notices":null},"created_at":{"$date":{"$numberLong":"1731972966836"}},"_id":{"$oid":"673bcf662d6ca63eb5638dd2"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung"}
+{"_id":{"$oid":"673bd0ed2d6ca63eb5638dd3"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"punkt":".","bedingung":"testtt","modalitaet":"muss","system":"testtt","relevanz":"als relevant betrachten, dass","abgrenzung":"testtt","begruendung":"testtt"},"parsing_result":{"templateid":"kontext","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","warnings":null,"notices":null,"templatetype":"ebt","templatename":"Kontext","requirement":"testtt muss testtt als relevant betrachten, dass testtt testtt.","errors":null},"created_at":{"$date":{"$numberLong":"1731973357769"}}}
+{"_id":{"$oid":"673c370f67f12bdd37995229"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"abgrenzung":"testttttttttt","begruendung":"","punkt":".","bedingung":"testttttttttt","modalitaet":"muss","system":"testttttttttt","relevanz":"als relevant betrachten, dass"},"parsing_result":{"variantname":"Kontext mit Bedingung","requirement":"testttttttttt muss testttttttttt als relevant betrachten, dass testttttttttt.","errors":null,"warnings":null,"templatetype":"ebt","templatename":"Kontext","notices":null,"templateid":"kontext","templateversion":"0.1.0"},"created_at":{"$date":{"$numberLong":"1731999503880"}}}
+{"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"modalitaet":"muss","system":"ameltest","relevanz":"als relevant betrachten, dass","abgrenzung":"ameltest","begruendung":"ameltest","punkt":".","bedingung":"ameltest"},"parsing_result":{"errors":null,"warnings":null,"requirement":"ameltest muss ameltest als relevant betrachten, dass ameltest ameltest.","templatetype":"ebt","templateversion":"0.1.0","templatename":"Kontext","variantname":"Kontext mit Bedingung","notices":null,"templateid":"kontext"},"_id":{"$oid":"6746983f934773bedf28c1d2"},"created_at":{"$date":{"$numberLong":"1732679743267"}}}
+{"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"relevanz":"als relevant betrachten, dass","abgrenzung":"das System Smartphone bei einer Umgebungstemperatur von -20°C bis 40°C betrieben wird","begruendung":"","punkt":".","bedingung":"Falls das Smartphone als Outdoor-Gerät bezeichnet wird,","modalitaet":"muss","system":"die Komponente Außenhülle"},"parsing_result":{"templateid":"kontext","templatename":"Kontext","warnings":null,"notices":null,"templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","requirement":"Falls das Smartphone als Outdoor-Gerät bezeichnet wird, muss die Komponente Außenhülle als relevant betrachten, dass das System Smartphone bei einer Umgebungstemperatur von -20°C bis 40°C betrieben wird.","errors":null},"created_at":{"$date":{"$numberLong":"1732691733204"}},"_id":{"$oid":"6746c7151a6cae65b5dde03e"}}
+{"_id":{"$oid":"6746c7761a6cae65b5dde03f"},"segment_map":{"technisch-die-eigenschaft-haben":"technisch die Eigenschaft haben,","objektbeschreibung":"test","name-der-eigenschaft":"test","begruendung":"","punkt":".","bedingung":"test","modalitaet":"muss","system":"test"},"parsing_result":{"templateid":"technische-eigenschaft","templatetype":"ebt","templatename":"Technische Eigenschaft","variantname":"Technische Eigenschaft mit Bedingung","warnings":null,"templateversion":"0.1.0","requirement":"test muss test technisch die Eigenschaft haben, test test.","errors":null,"notices":null},"created_at":{"$date":{"$numberLong":"1732691830769"}},"template_id":"59cda3e1-87cc-406f-b504-34059ec297e4","variant_key":"technische-eigenschaft-mit-bedingung"}
+{"_id":{"$oid":"674adbf103066ad475dee8f5"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"abgrenzung":"test","begruendung":"test","punkt":".","bedingung":"test","modalitaet":"tset","system":"test","relevanz":"als relevant betrachten, dass"},"parsing_result":{"templateversion":"0.1.0","templatename":"Kontext","variantname":"Kontext mit Bedingung","errors":[{"level":{"$numberInt":"0"},"message":"eiffel.parser.equals-any.error","translationargs":["expected","\"muss\", \"soll\", \"sollte\", \"kann\", \"wird\"","actual","tset"],"extra":null,"downgrade":false,"segment":{"name":"modalitaet","value":"tset"}}],"warnings":null,"notices":null,"templateid":"kontext","requirement":"test tset test als relevant betrachten, dass test test.","templatetype":"ebt"},"created_at":{"$date":{"$numberLong":"1732959217733"}}}
+{"_id":{"$oid":"674adbf703066ad475dee8f6"},"parsing_result":{"templateversion":"0.1.0","templatename":"Kontext","variantname":"Kontext mit Bedingung","errors":null,"notices":null,"templateid":"kontext","templatetype":"ebt","requirement":"test muss test als relevant betrachten, dass test test.","warnings":null},"created_at":{"$date":{"$numberLong":"1732959223190"}},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"bedingung":"test","modalitaet":"muss","system":"test","relevanz":"als relevant betrachten, dass","abgrenzung":"test","begruendung":"test","punkt":"."}}
+{"segment_map":{"zielart":"test","bedingung":"test","zu-erreichender-zustand":"test","bezug":"test","eigenname":"","modalitaet":"test","punkt":".","des-der":"test","es-klein":"es","begruendung":"test","sein":"sein,","ein-kein":"test"},"parsing_result":{"requirement":"test test es test test test test sein, test test.","notices":[{"level":{"$numberInt":"2"},"message":"eiffel.parser.error.missing-segment","translationargs":["name","Eigenname","technicalName","eigenname"],"extra":null,"downgrade":true,"segment":{"name":"eigenname","value":""}}],"templatetype":"ebt","templatename":"Ziel","variantname":"Ziel mit Bedingung","errors":[{"level":{"$numberInt":"0"},"message":"eiffel.parser.equals-any.error","translationargs":["expected","\"muss\", \"soll\", \"sollte\", \"kann\", \"wird\"","actual","test"],"extra":null,"downgrade":false,"segment":{"name":"modalitaet","value":"test"}},{"segment":{"name":"ein-kein","value":"test"},"level":{"$numberInt":"0"},"message":"eiffel.parser.equals-any.error","translationargs":["expected","\"ein\", \"kein\"","actual","test"],"extra":null,"downgrade":false},{"segment":{"name":"des-der","value":"test"},"level":{"$numberInt":"0"},"message":"eiffel.parser.equals-any.error","translationargs":["expected","\"des\", \"der\"","actual","test"],"extra":null,"downgrade":false}],"warnings":null,"templateid":"ziel","templateversion":"0.1.0"},"created_at":{"$date":{"$numberLong":"1732959279271"}},"_id":{"$oid":"674adc2f03066ad475dee8f7"},"template_id":"35b9e593-dc5a-4ad9-85f4-c13d0703f89c","variant_key":"ziel-mit-bedingung"}
+{"parsing_result":{"templatename":"Ziel","variantname":"Ziel mit Bedingung","requirement":"test muss es ein test des test sein, test test.","notices":[{"segment":{"name":"eigenname","value":""},"level":{"$numberInt":"2"},"message":"eiffel.parser.error.missing-segment","translationargs":["name","Eigenname","technicalName","eigenname"],"extra":null,"downgrade":true}],"templateid":"ziel","templatetype":"ebt","templateversion":"0.1.0","errors":null,"warnings":null},"created_at":{"$date":{"$numberLong":"1732959288828"}},"_id":{"$oid":"674adc3803066ad475dee8f8"},"template_id":"35b9e593-dc5a-4ad9-85f4-c13d0703f89c","variant_key":"ziel-mit-bedingung","segment_map":{"zu-erreichender-zustand":"test","des-der":"des","punkt":".","sein":"sein,","es-klein":"es","bezug":"test","bedingung":"test","begruendung":"test","modalitaet":"muss","zielart":"test","eigenname":"","ein-kein":"ein"}}
+{"parsing_result":{"errors":null,"notices":null,"templatetype":"ebt","templateversion":"0.1.0","variantname":"Kontext mit Bedingung","requirement":"Robin muss Robin als relevant betrachten, dass Robin Robin.","warnings":null,"templateid":"kontext","templatename":"Kontext"},"created_at":{"$date":{"$numberLong":"1732959721378"}},"_id":{"$oid":"674adde903066ad475dee8f9"},"template_id":"f9c0636d-e8a4-46e9-be24-89d48d4762f8","variant_key":"kontext-mit-bedingung","segment_map":{"begruendung":"Robin","punkt":".","bedingung":"Robin","modalitaet":"muss","system":"Robin","relevanz":"als relevant betrachten, dass","abgrenzung":"Robin"}}
+{"parsing_result":{"templateversion":"0.1.0","templatename":"Extended Template for Quality Requirements (ESQUA)","errors":null,"templateid":"esqua","templatetype":"ebt","warnings":null,"notices":null,"variantname":"Quality Feature without Condition","requirement":"Robin3 must possess the quality feature Robin3 Robin3."},"created_at":{"$date":{"$numberLong":"1732960217498"}},"template_id":"55ae0045-7c80-48b4-ac54-d76e2a9da667","variant_key":"quality-feature-without-condition","segment_map":{"dot":".","who-or-what-full":"Robin3","modality-medium":"must","possess-quality-feature-medium":"possess the quality feature","quality-feature":"Robin3","justification":"Robin3"},"_id":{"$oid":"674adfd903066ad475dee8fa"}}
diff --git a/src/app/eiffel/web.go b/src/app/eiffel/web.go
index a14b12e42aa9130210396901881507bcb6374859..938aa89df2c83ec44f3eaab6b455f74a48299e40 100644
--- a/src/app/eiffel/web.go
+++ b/src/app/eiffel/web.go
@@ -1,10 +1,17 @@
 package eiffel
 
 import (
+	"context"
 	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
 	"github.com/google/uuid"
 	"github.com/org-harmony/harmony/src/app/template"
 	"github.com/org-harmony/harmony/src/app/template/parser"
@@ -15,8 +22,9 @@ import (
 	"github.com/org-harmony/harmony/src/core/persistence"
 	"github.com/org-harmony/harmony/src/core/util"
 	"github.com/org-harmony/harmony/src/core/web"
-	"net/http"
-	"strings"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
 )
 
 const (
@@ -76,6 +84,231 @@ type HTMXTriggerParsingSuccessEvent struct {
 	ParsingSuccessEvent *parser.ParsingResult `json:"parsingSuccessEvent"`
 }
 
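+// ExportToJSON fetches every document from the requirements collection and
+// writes them to filename as a single JSON array.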
+func ExportToJSON(filename string) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	log.Println("Fetching data from MongoDB...")
+	cursor, err := mongoCollection.Find(ctx, bson.M{})
+	if err != nil {
+		log.Printf("Failed to fetch data: %v", err)
+		return fmt.Errorf("failed to fetch data from MongoDB: %w", err)
+	}
+	defer cursor.Close(ctx)
+
+	var requirements []bson.M
+	if err := cursor.All(ctx, &requirements); err != nil {
+		log.Printf("Failed to parse data: %v", err)
+		return fmt.Errorf("failed to parse MongoDB data: %w", err)
+	}
+	log.Printf("Fetched %d documents from MongoDB", len(requirements))
+
+	log.Println("Creating JSON file...")
+	file, err := os.Create(filename)
+	if err != nil {
+		log.Printf("Failed to create file: %v", err)
+		return fmt.Errorf("failed to create JSON file: %w", err)
+	}
+	defer file.Close()
+
+	log.Println("Writing data to JSON file...")
+	if err := json.NewEncoder(file).Encode(requirements); err != nil {
+		log.Printf("Failed to write JSON data: %v", err)
+		return fmt.Errorf("failed to write JSON data to file: %w", err)
+	}
+
+	log.Printf("Exported data to JSON file: %s", filename)
+	return nil
+}
+
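+// exportJSONHandler runs a fresh export and serves the resulting file to the
+// client as a download.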
+func exportJSONHandler(cfg Cfg) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		filename := "requirements.json"
+
+		if err := ExportToJSON(filename); err != nil {
+			log.Printf("Error exporting to JSON: %v", err)
+			http.Error(w, fmt.Sprintf("Failed to export JSON: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filename))
+		w.Header().Set("Content-Type", "application/json")
+		http.ServeFile(w, r, filename)
+	})
+}
+
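+// ExportToTXT writes the requirements collection to filename with one
+// canonical Extended JSON document per line.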
+func ExportToTXT(filename string) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	log.Println("Fetching data from MongoDB...")
+	cursor, err := mongoCollection.Find(ctx, bson.M{})
+	if err != nil {
+		log.Printf("Failed to fetch data: %v", err)
+		return fmt.Errorf("failed to fetch data from MongoDB: %w", err)
+	}
+	defer cursor.Close(ctx)
+
+	var requirements []bson.M
+	if err := cursor.All(ctx, &requirements); err != nil {
+		log.Printf("Failed to parse data: %v", err)
+		return fmt.Errorf("failed to parse MongoDB data: %w", err)
+	}
+	log.Printf("Fetched %d documents from MongoDB", len(requirements))
+
+	log.Println("Creating TXT file...")
+	file, err := os.Create(filename)
+	if err != nil {
+		log.Printf("Failed to create file: %v", err)
+		return fmt.Errorf("failed to create TXT file: %w", err)
+	}
+	defer file.Close()
+
+	log.Println("Writing data to TXT file...")
+	for _, requirement := range requirements {
+		// MarshalExtJSON emits canonical Extended JSON, so types such as
+		// ObjectID and dates survive the round trip.
+		line, err := bson.MarshalExtJSON(requirement, true, true)
+		if err != nil {
+			log.Printf("Failed to marshal document to text: %v", err)
+			return fmt.Errorf("failed to convert document to text: %w", err)
+		}
+		_, err = file.WriteString(string(line) + "\n")
+		if err != nil {
+			log.Printf("Failed to write line to TXT file: %v", err)
+			return fmt.Errorf("failed to write line to TXT file: %w", err)
+		}
+	}
+
+	log.Printf("Exported data to TXT file: %s", filename)
+	return nil
+}
+
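+// exportTXTHandler runs a fresh export and serves the resulting text file to
+// the client as a download.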
+func exportTXTHandler(cfg Cfg) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		filename := "requirements.txt"
+
+		if err := ExportToTXT(filename); err != nil {
+			log.Printf("Error exporting to TXT: %v", err)
+			http.Error(w, fmt.Sprintf("Failed to export TXT: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filename))
+		w.Header().Set("Content-Type", "text/plain")
+		http.ServeFile(w, r, filename)
+	})
+}
+
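+// createRequirementHandler inserts the JSON request body as a new document
+// into the requirements collection.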
+func createRequirementHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		log.Println("POST /eiffel/requirements called")
+		var requirement bson.M
+		if err := json.NewDecoder(r.Body).Decode(&requirement); err != nil {
+			log.Printf("Failed to decode request body: %v", err)
+			http.Error(w, "Invalid request body", http.StatusBadRequest)
+			return
+		}
+		log.Printf("Requirement received: %+v", requirement)
+
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		result, err := mongoCollection.InsertOne(ctx, requirement)
+		if err != nil {
+			log.Printf("Failed to insert requirement: %v", err)
+			http.Error(w, fmt.Sprintf("Failed to create requirement: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		log.Printf("Requirement created with ID: %v", result.InsertedID)
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusCreated)
+		json.NewEncoder(w).Encode(requirement)
+	})
+}
+
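+// getAllRequirementsHandler returns all stored requirements as a JSON array.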
+func getAllRequirementsHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		cursor, err := mongoCollection.Find(ctx, bson.M{})
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Failed to fetch requirements: %v", err), http.StatusInternalServerError)
+			return
+		}
+		defer cursor.Close(ctx)
+
+		var requirements []bson.M
+		if err := cursor.All(ctx, &requirements); err != nil {
+			http.Error(w, fmt.Sprintf("Failed to parse requirements: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		w.Header().Set("Content-Type", "application/json")
+		json.NewEncoder(w).Encode(requirements)
+	})
+}
+
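+// getRequirementByIDHandler looks up a single requirement by its ObjectID.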
+func getRequirementByIDHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		id := web.URLParam(r, "id")
+		// Documents are stored with ObjectID keys, so the hex string from the URL
+		// must be converted before it can match anything.
+		objID, err := primitive.ObjectIDFromHex(id)
+		if err != nil {
+			http.Error(w, "Invalid requirement ID", http.StatusBadRequest)
+			return
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		var requirement bson.M
+		if err := mongoCollection.FindOne(ctx, bson.M{"_id": objID}).Decode(&requirement); err != nil {
+			http.Error(w, fmt.Sprintf("Requirement not found: %v", err), http.StatusNotFound)
+			return
+		}
+
+		w.Header().Set("Content-Type", "application/json")
+		json.NewEncoder(w).Encode(requirement)
+	})
+}
+
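+// updateRequirementHandler applies the JSON request body as a $set update to
+// the requirement identified by the URL parameter.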
+func updateRequirementHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		id := web.URLParam(r, "id")
+		objID, err := primitive.ObjectIDFromHex(id)
+		if err != nil {
+			http.Error(w, "Invalid requirement ID", http.StatusBadRequest)
+			return
+		}
+
+		var update bson.M
+		if err := json.NewDecoder(r.Body).Decode(&update); err != nil {
+			http.Error(w, "Invalid request body", http.StatusBadRequest)
+			return
+		}
+		// The _id field is immutable in MongoDB; drop it so a client-supplied
+		// value cannot make the $set update fail.
+		delete(update, "_id")
+
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		if _, err := mongoCollection.UpdateOne(ctx, bson.M{"_id": objID}, bson.M{"$set": update}); err != nil {
+			http.Error(w, fmt.Sprintf("Failed to update requirement: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		w.Header().Set("Content-Type", "application/json")
+		json.NewEncoder(w).Encode(update)
+	})
+}
+
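+// deleteRequirementHandler removes the requirement identified by the URL
+// parameter.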
+func deleteRequirementHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		id := web.URLParam(r, "id")
+		objID, err := primitive.ObjectIDFromHex(id)
+		if err != nil {
+			http.Error(w, "Invalid requirement ID", http.StatusBadRequest)
+			return
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		if _, err := mongoCollection.DeleteOne(ctx, bson.M{"_id": objID}); err != nil {
+			http.Error(w, fmt.Sprintf("Failed to delete requirement: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
+
 // RegisterController registers the controllers as well as the navigation and event listeners.
 func RegisterController(appCtx *hctx.AppCtx, webCtx *web.Ctx) {
 	cfg := Cfg{}
@@ -96,6 +329,15 @@ func RegisterController(appCtx *hctx.AppCtx, webCtx *web.Ctx) {
 	router.Get("/eiffel/elicitation/{templateID}", elicitationTemplate(cfg, appCtx, webCtx, true).ServeHTTP)
 	router.Get("/eiffel/elicitation/{templateID}/{variant}", elicitationTemplate(cfg, appCtx, webCtx, false).ServeHTTP)
 	router.Post("/eiffel/elicitation/{templateID}/{variant}", parseRequirement(cfg, appCtx, webCtx).ServeHTTP)
+	// Export endpoints
+	router.Get("/eiffel/export/json", exportJSONHandler(cfg).ServeHTTP)
+	router.Get("/eiffel/export/txt", exportTXTHandler(cfg).ServeHTTP)
+	// TODO: the CRUD endpoints below still need to be reworked
+	router.Post("/eiffel/requirements", createRequirementHandler().ServeHTTP)
+	router.Get("/eiffel/requirements", getAllRequirementsHandler().ServeHTTP)
+	router.Get("/eiffel/requirements/{id}", getRequirementByIDHandler().ServeHTTP)
+	router.Put("/eiffel/requirements/{id}", updateRequirementHandler().ServeHTTP)
+	router.Delete("/eiffel/requirements/{id}", deleteRequirementHandler().ServeHTTP)
 }
 
 func subscribeEvents(appCtx *hctx.AppCtx) {
@@ -264,11 +506,48 @@ func elicitationTemplate(cfg Cfg, appCtx *hctx.AppCtx, webCtx *web.Ctx, defaultF
 	})
 }
 
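+// Package-level MongoDB handles, initialized once by InitMongoDB.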
+var mongoClient *mongo.Client
+var mongoCollection *mongo.Collection
+
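+// InitMongoDB connects to MongoDB, verifies the connection with a ping and
+// initializes the package-level client and collection handles. It must be
+// called before any of the requirement handlers are used.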
+func InitMongoDB() error {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	// The connection string may be overridden via the MONGODB_URI environment
+	// variable (a convention assumed here); default to a local instance.
+	uri := os.Getenv("MONGODB_URI")
+	if uri == "" {
+		uri = "mongodb://localhost:27017"
+	}
+
+	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
+	if err != nil {
+		return fmt.Errorf("failed to connect to MongoDB: %w", err)
+	}
+
+	if err := client.Ping(ctx, nil); err != nil {
+		return fmt.Errorf("failed to ping MongoDB: %w", err)
+	}
+
+	mongoClient = client
+	mongoCollection = client.Database("harmony").Collection("requirements")
+	log.Println("Connected to MongoDB")
+	return nil
+}
+
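+// saveToMongoDB persists the elicitation form data together with its parsing
+// result as a single document.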
+func saveToMongoDB(formData *TemplateFormData) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	document := bson.M{
+		"template_id":    formData.TemplateID.String(),
+		"variant_key":    formData.VariantKey,
+		"segment_map":    formData.SegmentMap,
+		"parsing_result": formData.ParsingResult,
+		"created_at":     time.Now(),
+	}
+
+	_, err := mongoCollection.InsertOne(ctx, document)
+	return err
+}
+
 func parseRequirement(cfg Cfg, appCtx *hctx.AppCtx, webCtx *web.Ctx) http.Handler {
 	templateRepository := util.UnwrapType[template.Repository](appCtx.Repository(template.RepositoryName))
 	sessionStore := util.UnwrapType[user.SessionRepository](appCtx.Repository(user.SessionRepositoryName))
 
 	return web.NewController(appCtx, webCtx, func(io web.IO) error {
+		log.Println("parseRequirement called")
 		request := io.Request()
 		ctx := request.Context()
 		parsers := RuleParsers()
@@ -297,6 +576,13 @@ func parseRequirement(cfg Cfg, appCtx *hctx.AppCtx, webCtx *web.Ctx) http.Handle
 
 		parsingResult, err := formData.Template.Parse(ctx, parsers, formData.VariantKey, SegmentMapToSegments(segmentMap)...)
 		formData.ParsingResult = &parsingResult
+		log.Printf("Captured formData: %+v", formData)
+
+		// Persist the elicitation result to MongoDB. Use a separate error variable
+		// so the parser error in err is not clobbered before it is handled below.
+		if saveErr := saveToMongoDB(&formData); saveErr != nil {
+			log.Printf("Failed to save formData to MongoDB: %v", saveErr)
+		}
 
 		var s []string
 		if parsingResult.Flawless() {
diff --git a/src/cmd/web/main.go b/src/cmd/web/main.go
index cf0a97dfdc5f99aae4aabbccf40619067f0254bb..69d561939125ef93ed71cb1ba6c6cbca19986b97 100644
--- a/src/cmd/web/main.go
+++ b/src/cmd/web/main.go
@@ -1,6 +1,8 @@
 package main
 
 import (
+	"log"
+
 	"github.com/jackc/pgx/v5/pgxpool"
 	"github.com/org-harmony/harmony/src/app/eiffel"
 	homeWeb "github.com/org-harmony/harmony/src/app/home"
@@ -38,6 +40,10 @@ func main() {
 	validator := initValidator()
 	eventManager := event.NewManager(logger)
 
+	if err := eiffel.InitMongoDB(); err != nil {
+		log.Fatalf("Error initializing MongoDB: %v", err)
+	}
+
 	provider, db := initDB(validator)
 	defer db.Close()
 
diff --git a/templates/base/index.go.html b/templates/base/index.go.html
index b8ab670d5efb271feaf5bad96544caf95e368ff8..bf314bb3039c33a585c0ba8f0b752caf82204f39 100644
--- a/templates/base/index.go.html
+++ b/templates/base/index.go.html
@@ -10,7 +10,7 @@
                 {{ end }}
 
                 {{ block "favicon" . }}
-                    <link rel="icon" href="{{ asset "img/harmony-logo.jpg" }}">
+                    <link rel="icon" href="/assets/img/harmony-logo.jpg">
                 {{ end }}
 
                 {{ block "title-container" . }}
@@ -18,15 +18,15 @@
                 {{ end }}
 
                 {{ block "styles" . }}
-                    <link rel="stylesheet" href="{{ asset "css/styles.css" }}">
+                    <link rel="stylesheet" href="/assets/css/styles.css">
                 {{ end }}
 
                 {{ block "scripts" . }}
-                    <script defer src="{{ asset "js/htmx.min.js" }}"></script>
-                    <script defer src="{{ asset "js/bootstrap.min.js" }}"></script>
-                    {{/* TODO build plugin system and js plugins */}}
-                    <script defer src="{{ asset "js/eiffel.js" }}"></script>
-                    <script defer src="{{ asset "js/htmx-extra.js" }}"></script>
+                    <script defer src="/assets/js/htmx.min.js"></script>
+                    <script defer src="/assets/js/bootstrap.min.js"></script>
+                    {{/* TODO build plugin system and js plugins */}}
+                    <script defer src="/assets/js/eiffel.js"></script>
+                    <script defer src="/assets/js/htmx-extra.js"></script>
                 {{ end }}
             {{ end }}
         </head>
diff --git a/templates/base/layout.go.html b/templates/base/layout.go.html
index 9c3b1fad56de91cf9db7d6dce850cbf398c3b3fe..5b0f5a82c2baff73339bbc076792e1d27adc876a 100644
--- a/templates/base/layout.go.html
+++ b/templates/base/layout.go.html
@@ -5,7 +5,7 @@
                 <nav class="navbar navbar-expand-lg">
                     <div class="container-fluid">
                         <a class="navbar-brand" href="#">
-                            <img class="img-fluid rounded border-light" width="70rem" src="{{ asset "img/harmony-logo.jpg" }}" alt="HARMONY Logo" />
+                            <img class="img-fluid rounded border-light" width="70rem" src="/assets/img/harmony-logo.jpg" alt="HARMONY Logo" />
                         </a>
                         <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbar" aria-controls="navbar" aria-expanded="false" aria-label="Toggle navigation">
                             <span class="navbar-toggler-icon"></span>
diff --git a/templates/eiffel/_elicitation-template.go.html b/templates/eiffel/_elicitation-template.go.html
index 27fd2dbe62e25cf57eb852c1fab06ec865d67ea5..e57907ed9f9e58d60ae7e41d7d18e91cdd805cd8 100644
--- a/templates/eiffel/_elicitation-template.go.html
+++ b/templates/eiffel/_elicitation-template.go.html
@@ -7,7 +7,7 @@
                         data-bs-toggle="modal"
                         data-bs-target="#eiffelTemplateSearch"
                         class="btn w-100 h-100 p-0">
-                    <img class="m-auto align-baseline w-25 d-block" src="{{ asset "icons/search.svg" }}" alt="{{ t "harmony.generic.search" }}">
+                    <img class="m-auto align-baseline w-25 d-block" src="/assets/icons/search.svg" alt="{{ t "harmony.generic.search" }}">
                     <span class="badge shadow rounded-pill text-bg-secondary mt-3">{{ t "eiffel.elicitation.template.search.shortcut" }}</span>
                 </button>
 
diff --git a/templates/template/_list-set.go.html b/templates/template/_list-set.go.html
index 72d7cdb8d63ef432a6f9df6d71349dca79a55fcd..c46755fa1522d518ade3f5801ad4bb694203f2c9 100644
--- a/templates/template/_list-set.go.html
+++ b/templates/template/_list-set.go.html
@@ -39,7 +39,7 @@
                         <td>
                             {{/* edit button + modal */}}
                             <span hx-get="/template-set/edit/{{ .ID }}" hx-target="#edit-form-for-{{ .ID }}" hx-swap="outerHTML" data-bs-toggle="modal" data-bs-target="#edit-modal-for-{{ .ID }}" class="edit-icon mx-2" role="button">
-                                <img src="{{ asset "icons/edit.svg" }}" alt="{{ "template.set.action.edit" | t }}" title="{{ "template.set.action.edit" | t }}" class="align-baseline" />
+                                <img src="/assets/icons/edit.svg" alt="{{ "template.set.action.edit" | t }}" title="{{ "template.set.action.edit" | t }}" class="align-baseline" />
                             </span>
                             <div class="modal fade" id="edit-modal-for-{{ .ID }}" tabindex="-1" role="dialog" aria-labelledby="edit-modal-for-{{ .ID }}-label" aria-hidden="true">
                                 <div class="modal-dialog" role="document">
@@ -63,7 +63,7 @@
 
                             {{/* delete button + modal */}}
                             <span data-bs-toggle="modal" data-bs-target="#delete-modal-for-{{ .ID }}" class="delete-icon" role="button">
-                                <img src="{{ asset "icons/x.svg" }}" alt="{{ "template.set.action.delete" | t }}" title="{{ "template.set.action.delete" | t }}" class="align-baseline" />
+                                <img src="/assets/icons/x.svg" alt="{{ "template.set.action.delete" | t }}" title="{{ "template.set.action.delete" | t }}" class="align-baseline" />
                             </span>
                             <div class="modal fade" id="delete-modal-for-{{ .ID }}" tabindex="-1" role="dialog" aria-labelledby="delete-modal-for-{{ .ID }}-label" aria-hidden="true">
                                 <div class="modal-dialog" role="document">
diff --git a/templates/template/_list.go.html b/templates/template/_list.go.html
index 92a3f97234ae850d032e4d251260a47b2e22821e..9ec3de89e2b6f9df3af0f09762acdc36aae2d7f3 100644
--- a/templates/template/_list.go.html
+++ b/templates/template/_list.go.html
@@ -89,7 +89,7 @@
                     {{ end }}
                     <td>
                         <a hx-boost="true" href="/template/{{ .ID }}/edit" hx-target="body" class="edit-icon mx-2 text-decoration-none" role="button">
-                            <img src="{{ asset "icons/edit.svg" }}" alt="{{ "template.set.action.edit" | t }}" title="{{ "template.set.action.edit" | t }}" class="align-baseline" />
+                            <img src="/assets/icons/edit.svg" alt="{{ "template.set.action.edit" | t }}" title="{{ "template.set.action.edit" | t }}" class="align-baseline" />
                         </a>
 
                         {{/* copy button + modal */}}
@@ -98,7 +98,7 @@
                             data-bs-toggle="modal"
                             data-bs-target="#copyModalFor-{{ .ID }}"
                             class="copy-icon bg-transparent border-0 me-2 p-0">
-                            <img src="{{ asset "icons/copy.svg" }}" alt="{{ "template.copy.title" | t }}" title="{{ "template.copy.title" | t }}" class="align-baseline" />
+                            <img src="/assets/icons/copy.svg" alt="{{ "template.copy.title" | t }}" title="{{ "template.copy.title" | t }}" class="align-baseline" />
                         </button>
                         <div id="copyModalFor-{{ .ID }}"
                              class="modal fade" tabindex="-1"
@@ -113,7 +113,7 @@
 
                         {{/* delete button + modal */}}
                         <span data-bs-toggle="modal" data-bs-target="#delete-modal-for-{{ .ID }}" class="delete-icon" role="button">
-                            <img src="{{ asset "icons/x.svg" }}" alt="{{ "template.action.delete" | t }}" title="{{ "template.action.delete" | t }}" class="align-baseline" />
+                            <img src="/assets/icons/x.svg" alt="{{ "template.action.delete" | t }}" title="{{ "template.action.delete" | t }}" class="align-baseline" />
                         </span>
                         <div class="modal fade" id="delete-modal-for-{{ .ID }}" tabindex="-1" role="dialog" aria-labelledby="delete-modal-for-{{ .ID }}-label" aria-hidden="true">
                             <div class="modal-dialog" role="document">
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..042091d9b3b0d93b7070e05e11a35b4131c826f7
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..52ccb5a934d19bdf6fcbd22b0ab24313e4affb3d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,18 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Amazon.com, Inc
+Damian Gryski <dgryski@gmail.com>
+Eric Buth <eric@topos.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Klaus Post <klauspost@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000000000000000000000000000000000000..ea6524ddd02ff658c1dd7dddd5f2f0b28cc90dbd
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,41 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Alex Legg <alexlegg@google.com>
+Damian Gryski <dgryski@gmail.com>
+Eric Buth <eric@topos.com>
+Jan Mercl <0xjnml@gmail.com>
+Jonathan Swinney <jswinney@amazon.com>
+Kai Backman <kaib@golang.org>
+Klaus Post <klauspost@gmail.com>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6050c10f4c8b4c22f50c83715f44f12419f763be
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000000000000000000000000000000000000..cea12879a0eae937f6ecdb6243f64591c5217fef
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8         2.19GB/s ± 0%  html
+_UFlat1-8         1.41GB/s ± 0%  urls
+_UFlat2-8         23.5GB/s ± 2%  jpg
+_UFlat3-8         1.91GB/s ± 0%  jpg_200
+_UFlat4-8         14.0GB/s ± 1%  pdf
+_UFlat5-8         1.97GB/s ± 0%  html4
+_UFlat6-8          814MB/s ± 0%  txt1
+_UFlat7-8          785MB/s ± 0%  txt2
+_UFlat8-8          857MB/s ± 0%  txt3
+_UFlat9-8          719MB/s ± 1%  txt4
+_UFlat10-8        2.84GB/s ± 0%  pb
+_UFlat11-8        1.05GB/s ± 0%  gaviota
+
+_ZFlat0-8         1.04GB/s ± 0%  html
+_ZFlat1-8          534MB/s ± 0%  urls
+_ZFlat2-8         15.7GB/s ± 1%  jpg
+_ZFlat3-8          740MB/s ± 3%  jpg_200
+_ZFlat4-8         9.20GB/s ± 1%  pdf
+_ZFlat5-8          991MB/s ± 0%  html4
+_ZFlat6-8          379MB/s ± 0%  txt1
+_ZFlat7-8          352MB/s ± 0%  txt2
+_ZFlat8-8          396MB/s ± 1%  txt3
+_ZFlat9-8          327MB/s ± 1%  txt4
+_ZFlat10-8        1.33GB/s ± 1%  pb
+_ZFlat11-8         605MB/s ± 1%  gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8          621MB/s ± 2%  html
+_UFlat1-8          494MB/s ± 1%  urls
+_UFlat2-8         23.2GB/s ± 1%  jpg
+_UFlat3-8         1.12GB/s ± 1%  jpg_200
+_UFlat4-8         4.35GB/s ± 1%  pdf
+_UFlat5-8          609MB/s ± 0%  html4
+_UFlat6-8          296MB/s ± 0%  txt1
+_UFlat7-8          288MB/s ± 0%  txt2
+_UFlat8-8          309MB/s ± 1%  txt3
+_UFlat9-8          280MB/s ± 1%  txt4
+_UFlat10-8         753MB/s ± 0%  pb
+_UFlat11-8         400MB/s ± 0%  gaviota
+
+_ZFlat0-8          409MB/s ± 1%  html
+_ZFlat1-8          250MB/s ± 1%  urls
+_ZFlat2-8         12.3GB/s ± 1%  jpg
+_ZFlat3-8          132MB/s ± 0%  jpg_200
+_ZFlat4-8         2.92GB/s ± 0%  pdf
+_ZFlat5-8          405MB/s ± 1%  html4
+_ZFlat6-8          179MB/s ± 1%  txt1
+_ZFlat7-8          170MB/s ± 1%  txt2
+_ZFlat8-8          189MB/s ± 1%  txt3
+_ZFlat9-8          164MB/s ± 1%  txt4
+_ZFlat10-8         479MB/s ± 1%  pb
+_ZFlat11-8         270MB/s ± 1%  gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0     2.4GB/s  html
+BM_UFlat/1     1.4GB/s  urls
+BM_UFlat/2    21.8GB/s  jpg
+BM_UFlat/3     1.5GB/s  jpg_200
+BM_UFlat/4    13.3GB/s  pdf
+BM_UFlat/5     2.1GB/s  html4
+BM_UFlat/6     1.0GB/s  txt1
+BM_UFlat/7   959.4MB/s  txt2
+BM_UFlat/8     1.0GB/s  txt3
+BM_UFlat/9   864.5MB/s  txt4
+BM_UFlat/10    2.9GB/s  pb
+BM_UFlat/11    1.2GB/s  gaviota
+
+BM_ZFlat/0   944.3MB/s  html (22.31 %)
+BM_ZFlat/1   501.6MB/s  urls (47.78 %)
+BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
+BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
+BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
+BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
+BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
+BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
+BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
+BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
+BM_ZFlat/10    1.2GB/s  pb (19.68 %)
+BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
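For reviewers of this vendor drop, the block-format API the README describes boils down to two calls. A minimal round-trip sketch (the Encode/Decode signatures are taken from the vendored sources below; the throwaway main wrapper is illustrative only):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello, hello, hello, snappy")

	// Block format: Encode prepends the varint-encoded decompressed
	// length, then emits literal and copy tags.
	compressed := snappy.Encode(nil, src)

	// A nil dst asks Decode to allocate the output itself.
	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(compressed), bytes.Equal(src, decompressed))
}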
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000000000000000000000000000000000000..23c6e26c6b9b345d10a67713177010bc359ee64a
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,264 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Decode handles the Snappy block format, not the Snappy stream format.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+//
+// Reader handles the Snappy stream format, not the Snappy block format.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+func (r *Reader) fill() error {
+	for r.i >= r.j {
+		if !r.readFull(r.buf[:4], true) {
+			return r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return r.err
+		}
+	}
+
+	return nil
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+
+	if err := r.fill(); err != nil {
+		return 0, err
+	}
+
+	n := copy(p, r.decoded[r.i:r.j])
+	r.i += n
+	return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+
+	if err := r.fill(); err != nil {
+		return 0, err
+	}
+
+	c := r.decoded[r.i]
+	r.i++
+	return c, nil
+}
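decode.go above covers both formats: Decode for single blocks and Reader for the checksummed chunk stream that fill parses. A sketch of the stream side, pairing Reader with the NewBufferedWriter defined in encode.go later in this diff (standard-library calls only, wrapped in an illustrative main):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// The writer emits the stream identifier chunk first, then data
	// chunks, each carrying the 4-byte checksum that Reader.fill verifies.
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("stream framing example")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes the final chunk.
		panic(err)
	}

	// Corrupt framing or a bad checksum would surface as snappy.ErrCorrupt.
	r := snappy.NewReader(&buf)
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "stream framing example"
}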
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000000000000000000000000000000000000..e6179f65e3511d6da76e25c749c6d781c5e337a7
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- AX	scratch
+//	- BX	scratch
+//	- CX	length or x
+//	- DX	offset
+//	- SI	&src[s]
+//	- DI	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+	// Initialize SI, DI and R8-R13.
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, DI
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, SI
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+loop:
+	// for s < len(src)
+	CMPQ SI, R13
+	JEQ  end
+
+	// CX = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBLZX (SI), CX
+	MOVL    CX, BX
+	ANDL    $3, BX
+	CMPL    BX, $1
+	JAE     tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	SHRL $2, CX
+	CMPL CX, $60
+	JAE  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	INCQ SI
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that CX == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// CX can hold 64 bits, so the increment cannot overflow.
+	INCQ CX
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// AX = len(dst) - d
+	// BX = len(src) - s
+	MOVQ R10, AX
+	SUBQ DI, AX
+	MOVQ R13, BX
+	SUBQ SI, BX
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMPQ CX, $16
+	JGT  callMemmove
+	CMPQ AX, $16
+	JLT  callMemmove
+	CMPQ BX, $16
+	JLT  callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(SI), X0
+	MOVOU X0, 0(DI)
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMPQ CX, AX
+	JGT  errCorrupt
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADDQ CX, SI
+	SUBQ $58, SI
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// case x == 60:
+	CMPL CX, $61
+	JEQ  tagLit61
+	JA   tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBLZX -1(SI), CX
+	JMP     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVWLZX -2(SI), CX
+	JMP     doLit
+
+tagLit62Plus:
+	CMPL CX, $62
+	JA   tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVWLZX -3(SI), CX
+	MOVBLZX -1(SI), BX
+	SHLL    $16, BX
+	ORL     BX, CX
+	JMP     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVL -4(SI), CX
+	JMP  doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADDQ $5, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVLQZX -4(SI), DX
+	JMP     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADDQ $3, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVWQZX -2(SI), DX
+	JMP     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- BX == src[s] & 0x03
+	//	- CX == src[s]
+	CMPQ BX, $2
+	JEQ  tagCopy2
+	JA   tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVQ    CX, DX
+	ANDQ    $0xe0, DX
+	SHLQ    $3, DX
+	MOVBQZX -1(SI), BX
+	ORQ     BX, DX
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	SHRQ $2, CX
+	ANDQ $7, CX
+	ADDQ $4, CX
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ  verySlowForwardCopy
+	JMP  loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE  errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
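The slowForwardCopy commentary above is easier to follow in Go. A minimal sketch of the overlap-repeating forward copy that verySlowForwardCopy implements byte by byte (forwardCopy is the name the asm comments themselves use; the demo values are illustrative):

package main

import "fmt"

// forwardCopy copies length bytes within dst from position d-offset to d,
// running strictly forwards. When offset < length the two ranges overlap
// and the pattern repeats; makeOffsetAtLeast8 reproduces the same result
// with doubling 8-byte load/stores until offset >= 8.
func forwardCopy(dst []byte, d, offset, length int) {
	for i := 0; i < length; i++ {
		dst[d+i] = dst[d+i-offset]
	}
}

func main() {
	dst := make([]byte, 12)
	copy(dst, "ab")
	forwardCopy(dst, 2, 2, 10) // offset 2 < length 10: the pattern repeats
	fmt.Println(string(dst))   // "abababababab"
}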
diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s
new file mode 100644
index 0000000000000000000000000000000000000000..7a3ead17eacfe3add2fb2387c40e3682bda4641f
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_arm64.s
@@ -0,0 +1,494 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- R2	scratch
+//	- R3	scratch
+//	- R4	length or x
+//	- R5	offset
+//	- R6	&src[s]
+//	- R7	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly R7 - R8,  and len(dst)-d is R10 - R7.
+// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6.
+TEXT ·decode(SB), NOSPLIT, $56-56
+	// Initialize R6, R7 and R8-R13.
+	MOVD dst_base+0(FP), R8
+	MOVD dst_len+8(FP), R9
+	MOVD R8, R7
+	MOVD R8, R10
+	ADD  R9, R10, R10
+	MOVD src_base+24(FP), R11
+	MOVD src_len+32(FP), R12
+	MOVD R11, R6
+	MOVD R11, R13
+	ADD  R12, R13, R13
+
+loop:
+	// for s < len(src)
+	CMP R13, R6
+	BEQ end
+
+	// R4 = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBU (R6), R4
+	MOVW  R4, R3
+	ANDW  $3, R3
+	MOVW  $1, R1
+	CMPW  R1, R3
+	BGE   tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	MOVW $60, R1
+	LSRW $2, R4, R4
+	CMPW R4, R1
+	BLS  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	ADD $1, R6, R6
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that R4 == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// R4 can hold 64 bits, so the increment cannot overflow.
+	ADD $1, R4, R4
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// R2 = len(dst) - d
+	// R3 = len(src) - s
+	MOVD R10, R2
+	SUB  R7, R2, R2
+	MOVD R13, R3
+	SUB  R6, R3, R3
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMP $16, R4
+	BGT callMemmove
+	CMP $16, R2
+	BLT callMemmove
+	CMP $16, R3
+	BLT callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	LDP 0(R6), (R14, R15)
+	STP (R14, R15), 0(R7)
+
+	// d += length
+	// s += length
+	ADD R4, R7, R7
+	ADD R4, R6, R6
+	B   loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMP R2, R4
+	BGT errCorrupt
+	CMP R3, R4
+	BGT errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVD R7, 8(RSP)
+	MOVD R6, 16(RSP)
+	MOVD R4, 24(RSP)
+	MOVD R7, 32(RSP)
+	MOVD R6, 40(RSP)
+	MOVD R4, 48(RSP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVD 32(RSP), R7
+	MOVD 40(RSP), R6
+	MOVD 48(RSP), R4
+	MOVD dst_base+0(FP), R8
+	MOVD dst_len+8(FP), R9
+	MOVD R8, R10
+	ADD  R9, R10, R10
+	MOVD src_base+24(FP), R11
+	MOVD src_len+32(FP), R12
+	MOVD R11, R13
+	ADD  R12, R13, R13
+
+	// d += length
+	// s += length
+	ADD R4, R7, R7
+	ADD R4, R6, R6
+	B   loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADD  R4, R6, R6
+	SUB  $58, R6, R6
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// case x == 60:
+	MOVW $61, R1
+	CMPW R1, R4
+	BEQ  tagLit61
+	BGT  tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBU -1(R6), R4
+	B     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVHU -2(R6), R4
+	B     doLit
+
+tagLit62Plus:
+	CMPW $62, R4
+	BHI  tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVHU -3(R6), R4
+	MOVBU -1(R6), R3
+	ORR   R3<<16, R4
+	B     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVWU -4(R6), R4
+	B     doLit
+
+	// The code above handles literal tags.
+	// ----------------------------------------
+	// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADD $5, R6, R6
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	MOVD $1, R1
+	ADD  R4>>2, R1, R4
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVWU -4(R6), R5
+	B     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADD $3, R6, R6
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	MOVD $1, R1
+	ADD  R4>>2, R1, R4
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVHU -2(R6), R5
+	B     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- R3 == src[s] & 0x03
+	//	- R4 == src[s]
+	CMP $2, R3
+	BEQ tagCopy2
+	BGT tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADD $2, R6, R6
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVD  R4, R5
+	AND   $0xe0, R5
+	MOVBU -1(R6), R3
+	ORR   R5<<3, R3, R5
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	MOVD $7, R1
+	AND  R4>>2, R1, R4
+	ADD  $4, R4, R4
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- R4 == length && R4 > 0
+	//	- R5 == offset
+
+	// if offset <= 0 { etc }
+	MOVD $0, R1
+	CMP  R1, R5
+	BLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVD R7, R3
+	SUB  R8, R3, R3
+	CMP  R5, R3
+	BLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVD R10, R3
+	SUB  R7, R3, R3
+	CMP  R3, R4
+	BGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVD R10, R14
+	SUB  R7, R14, R14
+	MOVD R7, R15
+	SUB  R5, R15, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMP  $16, R4
+	BGT  slowForwardCopy
+	CMP  $8, R5
+	BLT  slowForwardCopy
+	CMP  $16, R14
+	BLT  slowForwardCopy
+	MOVD 0(R15), R2
+	MOVD R2, 0(R7)
+	MOVD 8(R15), R3
+	MOVD R3, 8(R7)
+	ADD  R4, R7, R7
+	B    loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUB $10, R14, R14
+	CMP R14, R4
+	BGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMP  $8, R5
+	BGE  fixUpSlowForwardCopy
+	MOVD (R15), R3
+	MOVD R3, (R7)
+	SUB  R5, R4, R4
+	ADD  R5, R7, R7
+	ADD  R5, R5, R5
+	B    makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by R7 being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save R7 to R2 so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVD R7, R2
+	ADD  R4, R7, R7
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	MOVD $0, R1
+	CMP  R1, R4
+	BLE  loop
+	MOVD (R15), R3
+	MOVD R3, (R2)
+	ADD  $8, R15, R15
+	ADD  $8, R2, R2
+	SUB  $8, R4, R4
+	B    finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), R3
+	MOVB R3, (R7)
+	ADD  $1, R15, R15
+	ADD  $1, R7, R7
+	SUB  $1, R4, R4
+	CBNZ R4, verySlowForwardCopy
+	B    loop
+
+	// The code above handles copy tags.
+	// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMP R10, R7
+	BNE errCorrupt
+
+	// return 0
+	MOVD $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVD $1, R2
+	MOVD R2, ret+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go
new file mode 100644
index 0000000000000000000000000000000000000000..7082b349199a3fd3009037f2d15e1df7eca67ec2
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_asm.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+// +build amd64 arm64
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
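These build tags are what select between the assembler files above and the pure-Go fallback in decode_other.go below; the noasm tag, the same one the README's second benchmark run uses, forces the fallback:

$ go test -tags=noasm github.com/golang/snappy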
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f672be55743cda746382bb52800fff89b17f7eb
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,115 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!arm64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice.
+		// If no overlap, use the built-in copy:
+		if offset >= length {
+			copy(dst[d:d+length], dst[d-offset:])
+			d += length
+			continue
+		}
+
+		// Unlike the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		//
+		// We align the slices into a and b and show the compiler they are the same size.
+		// This allows the loop to run without bounds checks.
+		a := dst[d : d+length]
+		b := dst[d-offset:]
+		b = b[:len(a)]
+		for i := range a {
+			a[i] = b[i]
+		}
+		d += length
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
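The tag scheme this reference decoder switches on is compact enough to assemble by hand, which makes the literal case concrete. In the sketch below the block bytes are hand-built for illustration: a varint length of 5, then tag byte 0x10, which is (5-1)<<2 with the low two bits zero (tagLiteral):

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// varint(5), then a 5-byte literal tag, then the literal bytes.
	block := []byte{0x05, 0x10, 'h', 'e', 'l', 'l', 'o'}

	n, err := snappy.DecodedLen(block) // reads only the varint header
	if err != nil {
		panic(err)
	}
	out, err := snappy.Decode(make([]byte, n), block)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "hello"
}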
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f23657076c57a0cf9dcdab1aed741db36b97979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,289 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Encode handles the Snappy block format, not the Snappy stream format.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+//
+// Writer handles the Snappy stream format, not the Snappy block format.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
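The MaxEncodedLen bound derived above (32 + n + n/6) is what makes allocation-free encoding possible: size dst once with the worst case and Encode returns a sub-slice instead of allocating. A sketch using only the vendored public API:

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("reuse the destination buffer across calls")

	// Preallocate once with the worst-case bound; Encode then writes
	// into dst and returns dst[:encodedLen].
	dst := make([]byte, snappy.MaxEncodedLen(len(src)))
	compressed := snappy.Encode(dst, src)
	fmt.Println(len(compressed) <= len(dst)) // true
}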
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000000000000000000000000000000000000..adfd979fe277aa548dc545ab9940a9ad0118fe2d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	len(lit)
+//	- BX	n
+//	- DX	return value
+//	- DI	&dst[i]
+//	- R10	&lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ lit_base+24(FP), R10
+	MOVQ lit_len+32(FP), AX
+	MOVQ AX, DX
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  oneByte
+	CMPL BX, $256
+	JLT  twoBytes
+
+threeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	ADDQ $3, DX
+	JMP  memmove
+
+twoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	ADDQ $2, DX
+	JMP  memmove
+
+oneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+	ADDQ $1, DX
+
+memmove:
+	MOVQ DX, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	length
+//	- SI	&dst[0]
+//	- DI	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, SI
+	MOVQ offset+24(FP), R11
+	MOVQ length+32(FP), AX
+
+loop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  loop0
+
+step1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMPL AX, $12
+	JGE  step3
+	CMPL R11, $2048
+	JGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- DX	&src[0]
+//	- SI	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVQ src_base+0(FP), DX
+	MOVQ src_len+8(FP), R14
+	MOVQ i+24(FP), R15
+	MOVQ j+32(FP), SI
+	ADDQ DX, R14
+	ADDQ DX, R15
+	ADDQ DX, SI
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   cmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  bsf
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  extendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  extendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- AX	.	.
+//	- BX	.	.
+//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
+//	- DX	64	&src[0], tableSize
+//	- SI	72	&src[s]
+//	- DI	80	&dst[d]
+//	- R9	88	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	96	prevHash, currHash, nextHash, offset
+//	- R12	104	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	112	candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVQ $24, CX
+	MOVQ $256, DX
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	CMPQ DX, $16384
+	JGE  varTable
+	CMPQ DX, R14
+	JGE  varTable
+	SUBQ $1, CX
+	SHLQ $1, DX
+	JMP  calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
+	// 2048 writes that would zero-initialize all of table's 32768 bytes.
+	SHRQ $3, DX
+	LEAQ table-32768(SP), BX
+	PXOR X0, X0
+
+memclr:
+	MOVOU X0, 0(BX)
+	ADDQ  $16, BX
+	SUBQ  $1, DX
+	JNZ   memclr
+
+	// !!! DX = &src[0]
+	MOVQ SI, DX
+
+	// sLimit := len(src) - inputMargin
+	MOVQ R14, R9
+	SUBQ $15, R9
+
+	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVQ CX, 56(SP)
+	MOVQ DX, 64(SP)
+	MOVQ R9, 88(SP)
+
+	// nextEmit := 0
+	MOVQ DX, R10
+
+	// s := 1
+	ADDQ $1, SI
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVL  0(SI), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVQ $32, R12
+
+	// nextS := s
+	MOVQ SI, R13
+
+	// candidate := 0
+	MOVQ $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVQ R13, SI
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVQ R12, R14
+	SHRQ $5, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADDQ R14, R13
+
+	// skip += bytesBetweenHashLookups
+	ADDQ R14, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVQ R13, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JA   emitRemainder
+
+	// candidate = int(table[nextHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[nextHash] = uint16(s)
+	MOVQ SI, AX
+	SUBQ DX, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVL  0(R13), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVL 0(SI), AX
+	MOVL (DX)(R15*1), BX
+	CMPL AX, BX
+	JNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVQ SI, AX
+	SUBQ R10, AX
+	CMPQ AX, $16
+	JLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  inlineEmitLiteralOneByte
+	CMPL BX, $256
+	JLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVQ SI, 72(SP)
+	MOVQ DI, 80(SP)
+	MOVQ R15, 112(SP)
+	CALL runtime·memmove(SB)
+	MOVQ 56(SP), CX
+	MOVQ 64(SP), DX
+	MOVQ 72(SP), SI
+	MOVQ 80(SP), DI
+	MOVQ 88(SP), R9
+	MOVQ 112(SP), R15
+	JMP  inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB AX, BX
+	SUBB $1, BX
+	SHLB $2, BX
+	MOVB BX, (DI)
+	ADDQ $1, DI
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(R10), X0
+	MOVOU X0, 0(DI)
+	ADDQ  AX, DI
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVQ SI, R12
+
+	// !!! offset := base - candidate
+	MOVQ R12, R11
+	SUBQ R15, R11
+	SUBQ DX, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVQ src_len+32(FP), R14
+	ADDQ DX, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADDQ $4, R15
+	ADDQ DX, R15
+
+	// !!! s += 4
+	ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   inlineExtendMatchCmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  inlineExtendMatchBSF
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+	JMP  inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  inlineExtendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  inlineExtendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVQ SI, AX
+	SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	CMPL AX, $12
+	JGE  inlineEmitCopyStep3
+	CMPL R11, $2048
+	JGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVQ SI, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVQ SI, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JAE  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVQ -1(SI), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVQ SI, AX
+	SUBQ DX, AX
+	SUBQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// currHash := hash(uint32(x>>8), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// candidate = int(table[currHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[currHash] = uint16(s)
+	ADDQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVL (DX)(R15*1), BX
+	CMPL R14, BX
+	JEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// s++
+	ADDQ $1, SI
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	JMP outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVQ src_len+32(FP), AX
+	ADDQ DX, AX
+	CMPQ R10, AX
+	JEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVQ DI, 0(SP)
+	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
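+
+// For reference, the IMULL $0x1e35a7bd / SHRL CX pairs above are the
+// hand-assembled form of the pure Go hash in encode_other.go:
+//
+//	func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift }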
diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s
new file mode 100644
index 0000000000000000000000000000000000000000..f8d54adfc5c1db9628a677ae5d9cd036ea6865ac
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_arm64.s
@@ -0,0 +1,722 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- R3	len(lit)
+//	- R4	n
+//	- R6	return value
+//	- R8	&dst[i]
+//	- R10	&lit[0]
+//
+// The 32 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $32-56
+	MOVD dst_base+0(FP), R8
+	MOVD lit_base+24(FP), R10
+	MOVD lit_len+32(FP), R3
+	MOVD R3, R6
+	MOVW R3, R4
+	SUBW $1, R4, R4
+
+	CMPW $60, R4
+	BLT  oneByte
+	CMPW $256, R4
+	BLT  twoBytes
+
+threeBytes:
+	MOVD $0xf4, R2
+	MOVB R2, 0(R8)
+	MOVW R4, 1(R8)
+	ADD  $3, R8, R8
+	ADD  $3, R6, R6
+	B    memmove
+
+twoBytes:
+	MOVD $0xf0, R2
+	MOVB R2, 0(R8)
+	MOVB R4, 1(R8)
+	ADD  $2, R8, R8
+	ADD  $2, R6, R6
+	B    memmove
+
+oneByte:
+	LSLW $2, R4, R4
+	MOVB R4, 0(R8)
+	ADD  $1, R8, R8
+	ADD  $1, R6, R6
+
+memmove:
+	MOVD R6, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// R8, R10 and R3 as arguments.
+	MOVD R8, 8(RSP)
+	MOVD R10, 16(RSP)
+	MOVD R3, 24(RSP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- R3	length
+//	- R7	&dst[0]
+//	- R8	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVD dst_base+0(FP), R8
+	MOVD R8, R7
+	MOVD offset+24(FP), R11
+	MOVD length+32(FP), R3
+
+loop0:
+	// for length >= 68 { etc }
+	CMPW $68, R3
+	BLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVD $0xfe, R2
+	MOVB R2, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUB  $64, R3, R3
+	B    loop0
+
+step1:
+	// if length > 64 { etc }
+	CMP $64, R3
+	BLE step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVD $0xee, R2
+	MOVB R2, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUB  $60, R3, R3
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMP  $12, R3
+	BGE  step3
+	CMPW $2048, R11
+	BGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(R8)
+	LSRW $3, R11, R11
+	AND  $0xe0, R11, R11
+	SUB  $4, R3, R3
+	LSLW $2, R3
+	AND  $0xff, R3, R3
+	ORRW R3, R11, R11
+	ORRW $1, R11, R11
+	MOVB R11, 0(R8)
+	ADD  $2, R8, R8
+
+	// Return the number of bytes written.
+	SUB  R7, R8, R8
+	MOVD R8, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUB  $1, R3, R3
+	AND  $0xff, R3, R3
+	LSLW $2, R3, R3
+	ORRW $2, R3, R3
+	MOVB R3, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+
+	// Return the number of bytes written.
+	SUB  R7, R8, R8
+	MOVD R8, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- R6	&src[0]
+//	- R7	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVD src_base+0(FP), R6
+	MOVD src_len+8(FP), R14
+	MOVD i+24(FP), R15
+	MOVD j+32(FP), R7
+	ADD  R6, R14, R14
+	ADD  R6, R15, R15
+	ADD  R6, R7, R7
+	MOVD R14, R13
+	SUB  $8, R13, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMP  R13, R7
+	BHI  cmp1
+	MOVD (R15), R3
+	MOVD (R7), R4
+	CMP  R4, R3
+	BNE  bsf
+	ADD  $8, R15, R15
+	ADD  $8, R7, R7
+	B    cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs.
+	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
+	// combination of which finds the least significant bit which is set.
+	// The arm64 architecture is little-endian, and the shift by 3 converts
+	// a bit index to a byte index.
+	EOR  R3, R4, R4
+	RBIT R4, R4
+	CLZ  R4, R4
+	ADD  R4>>3, R7, R7
+
+	// Convert from &src[ret] to ret.
+	SUB  R6, R7, R7
+	MOVD R7, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMP  R7, R14
+	BLS  extendMatchEnd
+	MOVB (R15), R3
+	MOVB (R7), R4
+	CMP  R4, R3
+	BNE  extendMatchEnd
+	ADD  $1, R15, R15
+	ADD  $1, R7, R7
+	B    cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUB  R6, R7, R7
+	MOVD R7, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- R3	.	.
+//	- R4	.	.
+//	- R5	64	shift
+//	- R6	72	&src[0], tableSize
+//	- R7	80	&src[s]
+//	- R8	88	&dst[d]
+//	- R9	96	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	104	prevHash, currHash, nextHash, offset
+//	- R12	112	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	120	candidate
+//	- R16	.	hash constant, 0x1e35a7bd
+//	- R17	.	&table
+//	- .  	128	table
+//
+// The second column (64, 72, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
+TEXT ·encodeBlock(SB), 0, $32896-56
+	MOVD dst_base+0(FP), R8
+	MOVD src_base+24(FP), R7
+	MOVD src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVD  $24, R5
+	MOVD  $256, R6
+	MOVW  $0xa7bd, R16
+	MOVKW $(0x1e35<<16), R16
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	MOVD $16384, R2
+	CMP  R2, R6
+	BGE  varTable
+	CMP  R14, R6
+	BGE  varTable
+	SUB  $1, R5, R5
+	LSL  $1, R6, R6
+	B    calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each
+	// iteration writes 64 bytes, so we can do only tableSize/32 writes
+	// instead of the 2048 writes that would zero-initialize all of table's
+	// 32768 bytes. This clear could overrun the first tableSize elements, but
+	// it won't overrun the allocated stack size.
+	ADD  $128, RSP, R17
+	MOVD R17, R4
+
+	// !!! R6 = &src[tableSize]
+	ADD R6<<1, R17, R6
+
+memclr:
+	STP.P (ZR, ZR), 64(R4)
+	STP   (ZR, ZR), -48(R4)
+	STP   (ZR, ZR), -32(R4)
+	STP   (ZR, ZR), -16(R4)
+	CMP   R4, R6
+	BHI   memclr
+
+	// !!! R6 = &src[0]
+	MOVD R7, R6
+
+	// sLimit := len(src) - inputMargin
+	MOVD R14, R9
+	SUB  $15, R9, R9
+
+	// !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVD R5, 64(RSP)
+	MOVD R6, 72(RSP)
+	MOVD R9, 96(RSP)
+
+	// nextEmit := 0
+	MOVD R6, R10
+
+	// s := 1
+	ADD $1, R7, R7
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVW 0(R7), R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVD $32, R12
+
+	// nextS := s
+	MOVD R7, R13
+
+	// candidate := 0
+	MOVD $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVD R13, R7
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVD R12, R14
+	LSR  $5, R14, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADD R14, R13, R13
+
+	// skip += bytesBetweenHashLookups
+	ADD R14, R12, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVD R13, R3
+	SUB  R6, R3, R3
+	CMP  R9, R3
+	BHI  emitRemainder
+
+	// candidate = int(table[nextHash])
+	MOVHU 0(R17)(R11<<1), R15
+
+	// table[nextHash] = uint16(s)
+	MOVD R7, R3
+	SUB  R6, R3, R3
+
+	MOVH R3, 0(R17)(R11<<1)
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVW 0(R13), R11
+	MULW R16, R11
+	LSRW R5, R11, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVW 0(R7), R3
+	MOVW (R6)(R15), R4
+	CMPW R4, R3
+	BNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVD R7, R3
+	SUB  R10, R3, R3
+	CMP  $16, R3
+	BLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVW R3, R4
+	SUBW $1, R4, R4
+
+	MOVW $60, R2
+	CMPW R2, R4
+	BLT  inlineEmitLiteralOneByte
+	MOVW $256, R2
+	CMPW R2, R4
+	BLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVD $0xf4, R1
+	MOVB R1, 0(R8)
+	MOVW R4, 1(R8)
+	ADD  $3, R8, R8
+	B    inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVD $0xf0, R1
+	MOVB R1, 0(R8)
+	MOVB R4, 1(R8)
+	ADD  $2, R8, R8
+	B    inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	LSLW $2, R4, R4
+	MOVB R4, 0(R8)
+	ADD  $1, R8, R8
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// R8, R10 and R3 as arguments.
+	MOVD R8, 8(RSP)
+	MOVD R10, 16(RSP)
+	MOVD R3, 24(RSP)
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADD   R3, R8, R8
+	MOVD  R7, 80(RSP)
+	MOVD  R8, 88(RSP)
+	MOVD  R15, 120(RSP)
+	CALL  runtime·memmove(SB)
+	MOVD  64(RSP), R5
+	MOVD  72(RSP), R6
+	MOVD  80(RSP), R7
+	MOVD  88(RSP), R8
+	MOVD  96(RSP), R9
+	MOVD  120(RSP), R15
+	ADD   $128, RSP, R17
+	MOVW  $0xa7bd, R16
+	MOVKW $(0x1e35<<16), R16
+	B     inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB R3, R4
+	SUBW $1, R4, R4
+	AND  $0xff, R4, R4
+	LSLW $2, R4, R4
+	MOVB R4, (R8)
+	ADD  $1, R8, R8
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	LDP 0(R10), (R0, R1)
+	STP (R0, R1), 0(R8)
+	ADD R3, R8, R8
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVD R7, R12
+
+	// !!! offset := base - candidate
+	MOVD R12, R11
+	SUB  R15, R11, R11
+	SUB  R6, R11, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVD src_len+32(FP), R14
+	ADD  R6, R14, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVD R14, R13
+	SUB  $8, R13, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADD $4, R15, R15
+	ADD R6, R15, R15
+
+	// !!! s += 4
+	ADD $4, R7, R7
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMP  R13, R7
+	BHI  inlineExtendMatchCmp1
+	MOVD (R15), R3
+	MOVD (R7), R4
+	CMP  R4, R3
+	BNE  inlineExtendMatchBSF
+	ADD  $8, R15, R15
+	ADD  $8, R7, R7
+	B    inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs.
+	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
+	// combination of which finds the least significant bit which is set.
+	// The arm64 architecture is little-endian, and the shift by 3 converts
+	// a bit index to a byte index.
+	EOR  R3, R4, R4
+	RBIT R4, R4
+	CLZ  R4, R4
+	ADD  R4>>3, R7, R7
+	B    inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMP  R7, R14
+	BLS  inlineExtendMatchEnd
+	MOVB (R15), R3
+	MOVB (R7), R4
+	CMP  R4, R3
+	BNE  inlineExtendMatchEnd
+	ADD  $1, R15, R15
+	ADD  $1, R7, R7
+	B    inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVD R7, R3
+	SUB  R12, R3, R3
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	MOVW $68, R2
+	CMPW R2, R3
+	BLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVD $0xfe, R1
+	MOVB R1, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUBW $64, R3, R3
+	B    inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	MOVW $64, R2
+	CMPW R2, R3
+	BLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVD $0xee, R1
+	MOVB R1, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUBW $60, R3, R3
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	MOVW $12, R2
+	CMPW R2, R3
+	BGE  inlineEmitCopyStep3
+	MOVW $2048, R2
+	CMPW R2, R11
+	BGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(R8)
+	LSRW $8, R11, R11
+	LSLW $5, R11, R11
+	SUBW $4, R3, R3
+	AND  $0xff, R3, R3
+	LSLW $2, R3, R3
+	ORRW R3, R11, R11
+	ORRW $1, R11, R11
+	MOVB R11, 0(R8)
+	ADD  $2, R8, R8
+	B    inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBW $1, R3, R3
+	LSLW $2, R3, R3
+	ORRW $2, R3, R3
+	MOVB R3, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVD R7, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVD R7, R3
+	SUB  R6, R3, R3
+	CMP  R3, R9
+	BLS  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVD -1(R7), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVW R14, R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVD R7, R3
+	SUB  R6, R3, R3
+	SUB  $1, R3, R3
+
+	MOVHU R3, 0(R17)(R11<<1)
+
+	// currHash := hash(uint32(x>>8), shift)
+	LSR  $8, R14, R14
+	MOVW R14, R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+	// candidate = int(table[currHash])
+	MOVHU 0(R17)(R11<<1), R15
+
+	// table[currHash] = uint16(s)
+	ADD   $1, R3, R3
+	MOVHU R3, 0(R17)(R11<<1)
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVW (R6)(R15), R4
+	CMPW R4, R14
+	BEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	LSR  $8, R14, R14
+	MOVW R14, R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+	// s++
+	ADD $1, R7, R7
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	B outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVD src_len+32(FP), R3
+	ADD  R6, R3, R3
+	CMP  R3, R10
+	BEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVD R8, 8(RSP)
+	MOVD $0, 16(RSP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVD $0, 24(RSP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVD R10, 32(RSP)
+	SUB  R10, R3, R3
+	MOVD R3, 40(RSP)
+	MOVD R3, 48(RSP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVD R8, 88(RSP)
+	CALL ·emitLiteral(SB)
+	MOVD 88(RSP), R8
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVD 56(RSP), R1
+	ADD  R1, R8, R8
+
+encodeBlockEnd:
+	MOVD dst_base+0(FP), R3
+	SUB  R3, R8, R8
+	MOVD R8, d+48(FP)
+	RET
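+
+// For reference, the EOR/RBIT/CLZ sequences above find the index of the least
+// significant set bit of the XOR-ed (non-zero) values; in Go terms that is
+// bits.TrailingZeros64 from math/bits (an illustration, not code from this
+// package), with the >>3 converting a bit index to a byte index.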
diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go
new file mode 100644
index 0000000000000000000000000000000000000000..107c1e71418f67034c18e8ee95e674a97fd2d047
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_asm.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+// +build amd64 arm64
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..296d7f0beb0fae11e4b2f2741e91f93c27b06500
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!arm64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
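+
+// Worked example: a 10-byte literal has n = 9 < 60, so emitLiteral writes the
+// single tag byte 9<<2|tagLiteral = 0x24 followed by the 10 literal bytes,
+// returning 11.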
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
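+
+// Worked example: emitCopy(dst, 10, 5) takes the final 2-byte branch, writing
+// uint8(10>>8)<<5 | uint8(5-4)<<2 | tagCopy1 = 0x05 and then uint8(10) = 0x0a,
+// returning 2.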
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
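+
+// Worked example: extendMatch([]byte("aaaaa"), 0, 1) returns 5, since each
+// byte at j matches the byte at i = j-1 and the match runs to the end of src.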
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000000000000000000000000000000000000..ece692ea4610ab717f74b1b4a416d1452d3673dc
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
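+
+// For instance, the 3-byte copies emitted by encodeBlock use tag bytes built
+// from these constants: 0xfe = 63<<2 | tagCopy2 encodes a length 64 copy and
+// 0xee = 59<<2 | tagCopy2 a length 60 copy, matching the byte constants that
+// appear in the assembly files above.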
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
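+
+// Sketch of how a framing-format reader would use crc (a hedged illustration,
+// not this package's reader): each data chunk's payload starts with the
+// 4-byte little-endian masked checksum of the uncompressed data.
+//
+//	stored := binary.LittleEndian.Uint32(payload[:checksumSize])
+//	if crc(decoded) != stored { /* corrupt chunk */ }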
diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig
new file mode 100644
index 0000000000000000000000000000000000000000..c6b74c3e0d0c70aa3e311cba355eab11fd7adf8d
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..84039fec68771bc54be8f70617692068532536d2
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/.gitignore
@@ -0,0 +1 @@
+coverage.coverprofile
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..bb9d80bc9b6bc381c1e0edc7b76683cc621b3183
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+	 * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+	 * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+	 * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..98f5ab75f9d7ccbf249cd7271c44303ed719ebd3
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/Makefile
@@ -0,0 +1,34 @@
+GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
+GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
+GO_SEC=$(shell which gosec 2> /dev/null || echo '')
+GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
+
+GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
+GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
+
+.PHONY: golangci-lint
+golangci-lint:
+	$(if $(GO_LINT), ,go install $(GO_LINT_URI))
+	@echo "##### Running golangci-lint"
+	golangci-lint run -v
+
+.PHONY: gosec
+gosec:
+	$(if $(GO_SEC), ,go install $(GO_SEC_URI))
+	@echo "##### Running gosec"
+	gosec ./...
+
+.PHONY: govulncheck
+govulncheck:
+	$(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
+	@echo "##### Running govulncheck"
+	govulncheck ./...
+
+.PHONY: verify
+verify: golangci-lint gosec govulncheck
+
+.PHONY: test
+test:
+	@echo "##### Running tests"
+	go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..382513d57c4c6ffd671b8a173beaef1f094e1ec2
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,812 @@
+# gorilla/mux
+
+![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg)
+[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux)
+[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
+
+
+![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts, paths and query values can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintain references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Static Files](#static-files)
+* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
+* [Registered URLs](#registered-urls)
+* [Walking Routes](#walking-routes)
+* [Graceful Shutdown](#graceful-shutdown)
+* [Middleware](#middleware)
+* [Handling CORS Requests](#handling-cors-requests)
+* [Testing Handlers](#testing-handlers)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", HomeHandler)
+    r.HandleFunc("/products", ProductsHandler)
+    r.HandleFunc("/articles", ArticlesHandler)
+    http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called, passing (`http.ResponseWriter`, `*http.Request`) as parameters.
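+
+Note that this only registers the routes; nothing is served until the router is
+passed to a server. A minimal way to do that (the port is illustrative):
+
+```go
+log.Fatal(http.ListenAndServe(":8000", r))
+```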
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved by calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+    vars := mux.Vars(r)
+    w.WriteHeader(http.StatusOK)
+    fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.example.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+    return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Routes are tested in the order they were added to the router. If two routes match, the first one wins:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/specific", specificHandler)
+r.PathPrefix("/").Handler(catchAllHandler)
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter, as in the sketch below.
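+
+A sketch of that pattern (the `/api` prefix and handler names are illustrative):
+
+```go
+r := mux.NewRouter()
+api := r.PathPrefix("/api").Subrouter()
+
+// Elsewhere in the app, paths are registered relative to the subrouter.
+api.HandleFunc("/users", UsersHandler)
+api.HandleFunc("/orders", OrdersHandler)
+```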
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/\*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+    var dir string
+
+    flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+    flag.Parse()
+    r := mux.NewRouter()
+
+    // This will serve files under http://localhost:8000/static/<filename>
+    r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+    srv := &http.Server{
+        Handler:      r,
+        Addr:         "127.0.0.1:8000",
+        // Good practice: enforce timeouts for servers you create!
+        WriteTimeout: 15 * time.Second,
+        ReadTimeout:  15 * time.Second,
+    }
+
+    log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Serving Single Page Applications
+
+Most of the time it makes sense to serve your SPA on a separate web server from your API,
+but sometimes it's desirable to serve them both from one place. It's possible to write a simple
+handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
+mux's powerful routing for your API endpoints.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/gorilla/mux"
+)
+
+// spaHandler implements the http.Handler interface, so we can use it
+// to respond to HTTP requests. The path to the static directory and
+// path to the index file within that static directory are used to
+// serve the SPA in the given static directory.
+type spaHandler struct {
+	staticPath string
+	indexPath  string
+}
+
+// ServeHTTP inspects the URL path to locate a file within the static dir
+// on the SPA handler. If a file is found, it will be served. If not, the
+// file located at the index path on the SPA handler will be served. This
+// is suitable behavior for serving an SPA (single page application).
+func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Join internally calls path.Clean to prevent directory traversal.
+	path := filepath.Join(h.staticPath, r.URL.Path)
+
+	// check whether a file exists or is a directory at the given path
+	fi, err := os.Stat(path)
+	if err != nil && !os.IsNotExist(err) {
+		// if we got an error (that wasn't that the file doesn't exist) stating the
+		// file, return a 500 internal server error and stop; checking this first
+		// also avoids dereferencing a nil FileInfo below
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	if os.IsNotExist(err) || fi.IsDir() {
+		// file does not exist or path is a directory, serve index.html
+		http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
+		return
+	}
+
+	// otherwise, use http.FileServer to serve the static file
+	http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
+}
+
+func main() {
+	router := mux.NewRouter()
+
+	router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
+		// an example API handler
+		json.NewEncoder(w).Encode(map[string]bool{"ok": true})
+	})
+
+	spa := spaHandler{staticPath: "build", indexPath: "index.html"}
+	router.PathPrefix("/").Handler(spa)
+
+	srv := &http.Server{
+		Handler: router,
+		Addr:    "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host and query value variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.example.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  Queries("filter", "{filter}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42",
+                                 "filter", "gorilla")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
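+
+As a minimal sketch (the host and path templates here are only examples), a build-only route is declared with `BuildOnly()`; it can be reversed with `URL()` but will never match an incoming request:
+
+```go
+r := mux.NewRouter()
+r.Host("static.example.com").
+  Path("/assets/{file}").
+  Name("asset").
+  BuildOnly()
+
+// "http://static.example.com/assets/logo.png"
+u, err := r.Get("asset").URL("file", "logo.png")
+```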
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match requests with a Content-Type of either `application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.example.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.example.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.example.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available:
+
+```go
+r := mux.NewRouter()
+r.Host("{domain}").
+    Path("/{group}/{item_id}").
+    Queries("some_data1", "{some_data1}").
+    Queries("some_data2", "{some_data2}").
+    Name("article")
+
+// Will print [domain group item_id some_data1 some_data2] <nil>
+fmt.Println(r.Get("article").GetVarNames())
+```
+
+### Walking Routes
+
+The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
+the following prints all of the registered routes:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	return
+}
+
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", handler)
+	r.HandleFunc("/products", handler).Methods("POST")
+	r.HandleFunc("/articles", handler).Methods("GET")
+	r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+	r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
+	err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+		pathTemplate, err := route.GetPathTemplate()
+		if err == nil {
+			fmt.Println("ROUTE:", pathTemplate)
+		}
+		pathRegexp, err := route.GetPathRegexp()
+		if err == nil {
+			fmt.Println("Path regexp:", pathRegexp)
+		}
+		queriesTemplates, err := route.GetQueriesTemplates()
+		if err == nil {
+			fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
+		}
+		queriesRegexps, err := route.GetQueriesRegexp()
+		if err == nil {
+			fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
+		}
+		methods, err := route.GetMethods()
+		if err == nil {
+			fmt.Println("Methods:", strings.Join(methods, ","))
+		}
+		fmt.Println()
+		return nil
+	})
+
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	http.Handle("/", r)
+}
+```
+
+### Graceful Shutdown
+
+Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
+
+```go
+package main
+
+import (
+    "context"
+    "flag"
+    "log"
+    "net/http"
+    "os"
+    "os/signal"
+    "time"
+
+    "github.com/gorilla/mux"
+)
+
+func main() {
+    var wait time.Duration
+    flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully waits for existing connections to finish - e.g. 15s or 1m")
+    flag.Parse()
+
+    r := mux.NewRouter()
+    // Add your routes as needed
+
+    srv := &http.Server{
+        Addr:         "0.0.0.0:8080",
+        // Good practice to set timeouts to avoid Slowloris attacks.
+        WriteTimeout: time.Second * 15,
+        ReadTimeout:  time.Second * 15,
+        IdleTimeout:  time.Second * 60,
+        Handler: r, // Pass our instance of gorilla/mux in.
+    }
+
+    // Run our server in a goroutine so that it doesn't block.
+    go func() {
+        if err := srv.ListenAndServe(); err != nil {
+            log.Println(err)
+        }
+    }()
+
+    c := make(chan os.Signal, 1)
+    // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C).
+    // SIGKILL, SIGQUIT (Ctrl+\) or SIGTERM will not be caught.
+    signal.Notify(c, os.Interrupt)
+
+    // Block until we receive our signal.
+    <-c
+
+    // Create a deadline to wait for.
+    ctx, cancel := context.WithTimeout(context.Background(), wait)
+    defer cancel()
+    // Doesn't block if no connections, but will otherwise wait
+    // until the timeout deadline.
+    srv.Shutdown(ctx)
+    // Optionally, you could run srv.Shutdown in a goroutine and block on
+    // <-ctx.Done() if your application should wait for other services
+    // to finalize based on context cancellation.
+    log.Println("shutting down")
+    os.Exit(0)
+}
+```
+
+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), including its subrouters; when a match is found, middlewares are executed in the order they were added.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func loggingMiddleware(next http.Handler) http.Handler {
+    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        // Do stuff here
+        log.Println(r.RequestURI)
+        // Call the next handler, which can be another middleware in the chain, or the final handler.
+        next.ServeHTTP(w, r)
+    })
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.Use(loggingMiddleware)
+```
+
+A more complex authentication middleware, which maps session tokens to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+	tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+	amw.tokenUsers["00000000"] = "user0"
+	amw.tokenUsers["aaaaaaaa"] = "userA"
+	amw.tokenUsers["05f717e5"] = "randomUser"
+	amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		token := r.Header.Get("X-Session-Token")
+
+		if user, found := amw.tokenUsers[token]; found {
+			// We found the token in our map
+			log.Printf("Authenticated user %s\n", user)
+			// Pass down the request to the next middleware (or final handler)
+			next.ServeHTTP(w, r)
+		} else {
+			// Write an error and stop the handler chain
+			http.Error(w, "Forbidden", http.StatusForbidden)
+		}
+	})
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
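+
+For instance, a middleware that terminates over-sized requests might look like the following sketch (the 1 MiB limit is an arbitrary example value):
+
+```go
+func limitBodyMiddleware(next http.Handler) http.Handler {
+    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        if r.ContentLength > 1<<20 {
+            // Terminating the request: write to the ResponseWriter and do not call next.
+            http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
+            return
+        }
+        // Not terminating: pass the request down without writing to the ResponseWriter.
+        next.ServeHTTP(w, r)
+    })
+}
+```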
+
+### Handling CORS Requests
+
+[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
+
+* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
+* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
+* If you do not specify any methods, then the middleware will not set the header
+
+> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
+
+Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
+
+```go
+package main
+
+import (
+	"net/http"
+	"github.com/gorilla/mux"
+)
+
+func main() {
+    r := mux.NewRouter()
+
+    // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
+    r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
+    r.Use(mux.CORSMethodMiddleware(r))
+    
+    http.ListenAndServe(":8080", r)
+}
+
+func fooHandler(w http.ResponseWriter, r *http.Request) {
+    w.Header().Set("Access-Control-Allow-Origin", "*")
+    if r.Method == http.MethodOptions {
+        return
+    }
+
+    w.Write([]byte("foo"))
+}
+```
+
+A request to `/foo` using something like:
+
+```bash
+curl localhost:8080/foo -v
+```
+
+Would look like:
+
+```bash
+*   Trying ::1...
+* TCP_NODELAY set
+* Connected to localhost (::1) port 8080 (#0)
+> GET /foo HTTP/1.1
+> Host: localhost:8080
+> User-Agent: curl/7.59.0
+> Accept: */*
+> 
+< HTTP/1.1 200 OK
+< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
+< Access-Control-Allow-Origin: *
+< Date: Fri, 28 Jun 2019 20:13:30 GMT
+< Content-Length: 3
+< Content-Type: text/plain; charset=utf-8
+< 
+* Connection #0 to host localhost left intact
+foo
+```
+
+### Testing Handlers
+
+Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
+
+First, our simple HTTP handler:
+
+```go
+// endpoints.go
+package main
+
+import (
+    "io"
+    "log"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
+    // A very simple health check.
+    w.Header().Set("Content-Type", "application/json")
+    w.WriteHeader(http.StatusOK)
+
+    // In the future we could report back on the status of our DB, or our cache
+    // (e.g. Redis) by performing a simple PING, and include them in the response.
+    io.WriteString(w, `{"alive": true}`)
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/health", HealthCheckHandler)
+
+    log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test code:
+
+```go
+// endpoints_test.go
+package main
+
+import (
+    "net/http"
+    "net/http/httptest"
+    "testing"
+)
+
+func TestHealthCheckHandler(t *testing.T) {
+    // Create a request to pass to our handler. We don't have any query parameters for now, so we'll
+    // pass 'nil' as the third parameter.
+    req, err := http.NewRequest("GET", "/health", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
+    rr := httptest.NewRecorder()
+    handler := http.HandlerFunc(HealthCheckHandler)
+
+    // Our handlers satisfy http.Handler, so we can call their ServeHTTP method
+    // directly and pass in our Request and ResponseRecorder.
+    handler.ServeHTTP(rr, req)
+
+    // Check the status code is what we expect.
+    if status := rr.Code; status != http.StatusOK {
+        t.Errorf("handler returned wrong status code: got %v want %v",
+            status, http.StatusOK)
+    }
+
+    // Check the response body is what we expect.
+    expected := `{"alive": true}`
+    if rr.Body.String() != expected {
+        t.Errorf("handler returned unexpected body: got %v want %v",
+            rr.Body.String(), expected)
+    }
+}
+```
+
+In the case that our routes have [variables](#examples), we can pass those in the request. We could write
+[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
+possible route variables as needed.
+
+```go
+// endpoints.go
+func main() {
+    r := mux.NewRouter()
+    // A route with a route variable:
+    r.HandleFunc("/metrics/{type}", MetricsHandler)
+
+    log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test file, with a table-driven test of `routeVariables`:
+
+```go
+// endpoints_test.go
+func TestMetricsHandler(t *testing.T) {
+    tt := []struct{
+        routeVariable string
+        shouldPass bool
+    }{
+        {"goroutines", true},
+        {"heap", true},
+        {"counters", true},
+        {"queries", true},
+        {"adhadaeqm3k", false},
+    }
+
+    for _, tc := range tt {
+        path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
+        req, err := http.NewRequest("GET", path, nil)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        rr := httptest.NewRecorder()
+
+        // To add the vars to the context,
+        // we need to create a router through which we can pass the request.
+        router := mux.NewRouter()
+        router.HandleFunc("/metrics/{type}", MetricsHandler)
+        router.ServeHTTP(rr, req)
+
+        // In this case, our MetricsHandler returns a non-200 response
+        // for a route variable it doesn't know about.
+        if rr.Code == http.StatusOK && !tc.shouldPass {
+            t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
+                tc.routeVariable, rr.Code, http.StatusOK)
+        }
+    }
+}
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux`-based server:
+
+```go
+package main
+
+import (
+    "net/http"
+    "log"
+    "github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+    w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+    r := mux.NewRouter()
+    // Routes consist of a path and a handler function.
+    r.HandleFunc("/", YourHandler)
+
+    // Bind to a port and pass our router in
+    log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..80601351fd071c44b7c0a740e7d3a55520adcfc8
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,305 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+  - Requests can be matched based on URL host, path, path prefix, schemes,
+    header and query values, HTTP methods or using custom matchers.
+  - URL hosts, paths and query values can have variables with an optional
+    regular expression.
+  - Registered URLs can be built, or "reversed", which helps maintaining
+    references to resources.
+  - Routes can be used as subrouters: nested routes are only tested if the
+    parent route matches. This is useful to define groups of routes that
+    share common conditions like a host, a path prefix or other repeated
+    attributes. As a bonus, this optimizes request matching.
+  - It implements the http.Handler interface so it is compatible with the
+    standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+	  Host("www.example.com").
+	  Methods("GET").
+	  Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place, and each part of the app can then register
+its paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	  Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host and query value variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+	  Path("/articles/{category}/{id:[0-9]+}").
+	  Queries("filter", "{filter}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42",
+	                                 "filter", "gorilla")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of either `application/json`
+or `application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+
+Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
+
+	type MiddlewareFunc func(http.Handler) http.Handler
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+	func simpleMw(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			// Do stuff here
+			log.Println(r.RequestURI)
+			// Call the next handler, which can be another middleware in the chain, or the final handler.
+			next.ServeHTTP(w, r)
+		})
+	}
+
+Middlewares can be added to a router using `Router.Use()`:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/", handler)
+	r.Use(simpleMw)
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+	// Define our struct
+	type authenticationMiddleware struct {
+		tokenUsers map[string]string
+	}
+
+	// Initialize it somewhere
+	func (amw *authenticationMiddleware) Populate() {
+		amw.tokenUsers["00000000"] = "user0"
+		amw.tokenUsers["aaaaaaaa"] = "userA"
+		amw.tokenUsers["05f717e5"] = "randomUser"
+		amw.tokenUsers["deadbeef"] = "user0"
+	}
+
+	// Middleware function, which will be called for each request
+	func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			token := r.Header.Get("X-Session-Token")
+
+			if user, found := amw.tokenUsers[token]; found {
+				// We found the token in our map
+				log.Printf("Authenticated user %s\n", user)
+				next.ServeHTTP(w, r)
+			} else {
+				http.Error(w, "Forbidden", http.StatusForbidden)
+			}
+		})
+	}
+
+	r := mux.NewRouter()
+	r.HandleFunc("/", handler)
+
+	amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
+	amw.Populate()
+
+	r.Use(amw.Middleware)
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb51c565ebd3ab5eb70ab12f94dae23f725944e4
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/middleware.go
@@ -0,0 +1,74 @@
+package mux
+
+import (
+	"net/http"
+	"strings"
+)
+
+// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
+// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
+// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// middleware interface is anything which implements a Middleware method with the MiddlewareFunc signature.
+type middleware interface {
+	Middleware(handler http.Handler) http.Handler
+}
+
+// Middleware allows MiddlewareFunc to implement the middleware interface.
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+	return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf ...MiddlewareFunc) {
+	for _, fn := range mwf {
+		r.middlewares = append(r.middlewares, fn)
+	}
+}
+
+// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+	r.middlewares = append(r.middlewares, mw)
+}
+
+// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
+// on requests for routes that have an OPTIONS method matcher to all the method matchers on
+// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
+// by the middleware. See examples for usage.
+func CORSMethodMiddleware(r *Router) MiddlewareFunc {
+	return func(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			allMethods, err := getAllMethodsForRoute(r, req)
+			if err == nil {
+				for _, v := range allMethods {
+					if v == http.MethodOptions {
+						w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
+					}
+				}
+			}
+
+			next.ServeHTTP(w, req)
+		})
+	}
+}
+
+// getAllMethodsForRoute returns all the methods from method matchers matching a given
+// request.
+func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
+	var allMethods []string
+
+	for _, route := range r.routes {
+		var match RouteMatch
+		if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
+			methods, err := route.GetMethods()
+			if err != nil {
+				return nil, err
+			}
+
+			allMethods = append(allMethods, methods...)
+		}
+	}
+
+	return allMethods, nil
+}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e089906fad5715477ba0c785562a43f0d987a8a
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,608 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+)
+
+var (
+	// ErrMethodMismatch is returned when the method in the request does not match
+	// the method defined against the route.
+	ErrMethodMismatch = errors.New("method is not allowed")
+	// ErrNotFound is returned when no route match is found.
+	ErrNotFound = errors.New("no matching route was found")
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route)}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//	var router = mux.NewRouter()
+//
+//	func main() {
+//	    http.Handle("/", router)
+//	}
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//	func init() {
+//	    http.Handle("/", router)
+//	}
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	// This can be used to render your own 404 Not Found errors.
+	NotFoundHandler http.Handler
+
+	// Configurable Handler to be used when the request method does not match the route.
+	// This can be used to render your own 405 Method Not Allowed errors.
+	MethodNotAllowedHandler http.Handler
+
+	// Routes to be matched, in order.
+	routes []*Route
+
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+
+	// If true, do not clear the request context after handling the request.
+	//
+	// Deprecated: No effect, since the context is stored on the request itself.
+	KeepContext bool
+
+	// Slice of middlewares to be called after a match is found
+	middlewares []middleware
+
+	// configuration shared with `Route`
+	routeConf
+}
+
+// common route configuration shared between `Router` and `Route`
+type routeConf struct {
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
+
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+
+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not redirect
+	skipClean bool
+
+	// Manager for the variables from host and path.
+	regexp routeRegexpGroup
+
+	// List of matchers.
+	matchers []matcher
+
+	// The scheme used when building URLs.
+	buildScheme string
+
+	buildVarsFunc BuildVarsFunc
+}
+
+// returns an effective deep copy of `routeConf`
+func copyRouteConf(r routeConf) routeConf {
+	c := r
+
+	if r.regexp.path != nil {
+		c.regexp.path = copyRouteRegexp(r.regexp.path)
+	}
+
+	if r.regexp.host != nil {
+		c.regexp.host = copyRouteRegexp(r.regexp.host)
+	}
+
+	c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
+	for _, q := range r.regexp.queries {
+		c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
+	}
+
+	c.matchers = make([]matcher, len(r.matchers))
+	copy(c.matchers, r.matchers)
+
+	return c
+}
+
+func copyRouteRegexp(r *routeRegexp) *routeRegexp {
+	c := *r
+	return &c
+}
+
+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			// Build middleware chain if no error was found
+			if match.MatchErr == nil {
+				for i := len(r.middlewares) - 1; i >= 0; i-- {
+					match.Handler = r.middlewares[i].Middleware(match.Handler)
+				}
+			}
+			return true
+		}
+	}
+
+	if match.MatchErr == ErrMethodMismatch {
+		if r.MethodNotAllowedHandler != nil {
+			match.Handler = r.MethodNotAllowedHandler
+			return true
+		}
+
+		return false
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		match.MatchErr = ErrNotFound
+		return true
+	}
+
+	match.MatchErr = ErrNotFound
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = req.URL.EscapedPath()
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+			// This matches with fix in go 1.2 r.c. 4 for same problem.  Go Issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = requestWithVars(req, match.Vars)
+		req = requestWithRoute(req, match.Route)
+	}
+
+	if handler == nil && match.MatchErr == ErrMethodMismatch {
+		handler = methodNotAllowedHandler()
+	}
+
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.namedRoutes[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.namedRoutes[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	// initialize a route with a copy of the parent router's configuration
+	route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Name registers a new route with a name.
+// See Route.Name().
+func (r *Router) Name(name string) *Route {
+	return r.NewRoute().Name(name)
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, and the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				ancestors = append(ancestors, t)
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+				ancestors = ancestors[:len(ancestors)-1]
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+
+	// MatchErr is set to appropriate matching error
+	// It is set to ErrMethodMismatch if there is a mismatch in
+	// the request method and route method
+	MatchErr error
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+	if rv := r.Context().Value(varsKey); rv != nil {
+		return rv.(map[string]string)
+	}
+	return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns.
+func CurrentRoute(r *http.Request) *Route {
+	if rv := r.Context().Value(routeKey); rv != nil {
+		return rv.(*Route)
+	}
+	return nil
+}
+
+func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
+	ctx := context.WithValue(r.Context(), varsKey, vars)
+	return r.WithContext(ctx)
+}
+
+func requestWithRoute(r *http.Request, route *Route) *http.Request {
+	ctx := context.WithValue(r.Context(), routeKey, route)
+	return r.WithContext(ctx)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root;
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+
+	return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the regexp was defined as nil we only check that the
+			// key exists. Otherwise we also check that some value matches it.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d05cfa0e9ea01a497425a0c41a721c430c80099
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,388 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type routeRegexpOptions struct {
+	strictSlash    bool
+	useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+	regexpTypePath regexpType = iota
+	regexpTypeHost
+	regexpTypePrefix
+	regexpTypeQuery
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if typ == regexpTypeQuery {
+		defaultPattern = ".*"
+	} else if typ == regexpTypeHost {
+		defaultPattern = "[^.]+"
+	}
+	// strictSlash only applies to path templates; disable it for the other types.
+	if typ != regexpTypePath {
+		options.strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if options.strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[i/2] = name
+		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if options.strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if typ == regexpTypeQuery {
+		// Add the default pattern if the query value is empty
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if typ != regexpTypePrefix {
+		pattern.WriteByte('$')
+	}
+
+	var wildcardHostPort bool
+	if typ == regexpTypeHost {
+		if !strings.Contains(pattern.String(), ":") {
+			wildcardHostPort = true
+		}
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+
+	// Check for capturing groups which used to work in older versions
+	if reg.NumSubexp() != len(idxs)/2 {
+		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+	}
+
+	// Done!
+	return &routeRegexp{
+		template:         template,
+		regexpType:       typ,
+		options:          options,
+		regexp:           reg,
+		reverse:          reverse.String(),
+		varsN:            varsN,
+		varsR:            varsR,
+		wildcardHostPort: wildcardHostPort,
+	}, nil
+}
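+
+// exampleNewRouteRegexp is an illustrative sketch added in this review (it is
+// not upstream mux code), showing what newRouteRegexp derives from a path
+// template under default options.
+func exampleNewRouteRegexp() {
+	rr, err := newRouteRegexp("/articles/{category}/{id:[0-9]+}", regexpTypePath, routeRegexpOptions{})
+	if err != nil {
+		panic(err)
+	}
+	// rr.regexp is ^/articles/(?P<v0>[^/]+)/(?P<v1>[0-9]+)$, rr.reverse is
+	// "/articles/%s/%s", and rr.varsN is [category id].
+	fmt.Println(rr.regexp.String(), rr.reverse, rr.varsN)
+}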
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
+	template string
+	// The type of match
+	regexpType regexpType
+	// Options for matching
+	options routeRegexpOptions
+	// Expanded regexp.
+	regexp *regexp.Regexp
+	// Reverse template.
+	reverse string
+	// Variable names.
+	varsN []string
+	// Variable regexps (validators).
+	varsR []*regexp.Regexp
+	// Wildcard host-port (no strict port match in hostname)
+	wildcardHostPort bool
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+	if r.regexpType == regexpTypeHost {
+		host := getHost(req)
+		if r.wildcardHostPort {
+			// Don't be strict on the port match
+			if i := strings.Index(host, ":"); i != -1 {
+				host = host[:i]
+			}
+		}
+		return r.regexp.MatchString(host)
+	}
+
+	if r.regexpType == regexpTypeQuery {
+		return r.matchQueryString(req)
+	}
+	path := req.URL.Path
+	if r.options.useEncodedPath {
+		path = req.URL.EscapedPath()
+	}
+	return r.regexp.MatchString(path)
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN))
+	for k, v := range r.varsN {
+		value, ok := values[v]
+		if !ok {
+			return "", fmt.Errorf("mux: missing route variable %q", v)
+		}
+		if r.regexpType == regexpTypeQuery {
+			value = url.QueryEscape(value)
+		}
+		urlValues[k] = value
+	}
+	rv := fmt.Sprintf(r.reverse, urlValues...)
+	if !r.regexp.MatchString(rv) {
+		// The URL is checked against the full regexp, instead of checking
+		// individual variables. This is faster but to provide a good error
+		// message, we check individual regexps if the URL doesn't match.
+		for k, v := range r.varsN {
+			if !r.varsR[k].MatchString(values[v]) {
+				return "", fmt.Errorf(
+					"mux: variable %q doesn't match, expected %q", values[v],
+					r.varsR[k].String())
+			}
+		}
+	}
+	return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+	if r.regexpType != regexpTypeQuery {
+		return ""
+	}
+	templateKey := strings.SplitN(r.template, "=", 2)[0]
+	val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
+	if ok {
+		return templateKey + "=" + val
+	}
+	return ""
+}
+
+// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
+// If the key is not found, an empty string and false are returned.
+func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
+	query := []byte(rawQuery)
+	for len(query) > 0 {
+		foundKey := query
+		if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
+			foundKey, query = foundKey[:i], foundKey[i+1:]
+		} else {
+			query = query[:0]
+		}
+		if len(foundKey) == 0 {
+			continue
+		}
+		var value []byte
+		if i := bytes.IndexByte(foundKey, '='); i >= 0 {
+			foundKey, value = foundKey[:i], foundKey[i+1:]
+		}
+		if len(foundKey) < len(key) {
+			// Cannot possibly be key.
+			continue
+		}
+		keyString, err := url.QueryUnescape(string(foundKey))
+		if err != nil {
+			continue
+		}
+		if keyString != key {
+			continue
+		}
+		valueString, err := url.QueryUnescape(string(value))
+		if err != nil {
+			continue
+		}
+		return valueString, true
+	}
+	return "", false
+}
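+
+// exampleFindFirstQueryKey is an illustrative sketch added in this review (it
+// is not upstream mux code): findFirstQueryKey scans the raw query directly,
+// avoiding the allocation of a full url.Values map.
+func exampleFindFirstQueryKey() {
+	v, ok := findFirstQueryKey("foo=bar&baz=ding", "baz")
+	fmt.Println(v, ok) // prints: ding true
+}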
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+	return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+	var level, idx int
+	var idxs []int
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '{':
+			if level++; level == 1 {
+				idx = i
+			}
+		case '}':
+			if level--; level == 0 {
+				idxs = append(idxs, idx, i+1)
+			} else if level < 0 {
+				return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+			}
+		}
+	}
+	if level != 0 {
+		return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+	}
+	return idxs, nil
+}
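+
+// For example (an illustrative note added in this review, not upstream docs):
+//
+//	braceIndices("/a/{b}/{c:[0-9]+}") // returns [3 6 7 17], nil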
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+	return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+	host    *routeRegexp
+	path    *routeRegexp
+	queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+	// Store host variables.
+	if v.host != nil {
+		host := getHost(req)
+		if v.host.wildcardHostPort {
+			// Don't be strict on the port match
+			if i := strings.Index(host, ":"); i != -1 {
+				host = host[:i]
+			}
+		}
+		matches := v.host.regexp.FindStringSubmatchIndex(host)
+		if len(matches) > 0 {
+			extractVars(host, matches, v.host.varsN, m.Vars)
+		}
+	}
+	path := req.URL.Path
+	if r.useEncodedPath {
+		path = req.URL.EscapedPath()
+	}
+	// Store path variables.
+	if v.path != nil {
+		matches := v.path.regexp.FindStringSubmatchIndex(path)
+		if len(matches) > 0 {
+			extractVars(path, matches, v.path.varsN, m.Vars)
+			// Check if we should redirect.
+			if v.path.options.strictSlash {
+				p1 := strings.HasSuffix(path, "/")
+				p2 := strings.HasSuffix(v.path.template, "/")
+				if p1 != p2 {
+					u, _ := url.Parse(req.URL.String())
+					if p1 {
+						u.Path = u.Path[:len(u.Path)-1]
+					} else {
+						u.Path += "/"
+					}
+					m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
+				}
+			}
+		}
+	}
+	// Store query string variables.
+	for _, q := range v.queries {
+		queryURL := q.getURLQuery(req)
+		matches := q.regexp.FindStringSubmatchIndex(queryURL)
+		if len(matches) > 0 {
+			extractVars(queryURL, matches, q.varsN, m.Vars)
+		}
+	}
+}
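+
+// Illustrative note added in this review (not upstream code): with
+// Router.StrictSlash(true) and a route template "/products/", a request for
+// "/products" still matches above, but m.Handler is replaced with a 301
+// redirect to "/products/".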
+
+// getHost tries its best to return the request host.
+// According to section 14.23 of RFC 2616 the Host header
+// can include the port number if the default value of 80 is not used.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	return r.Host
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+	for i, name := range names {
+		output[name] = input[matches[2*i+2]:matches[2*i+3]]
+	}
+}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8f11df221f089306926ac23775906a4a50cde62
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,765 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Request handler for the route.
+	handler http.Handler
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error resulting from building a route.
+	err error
+
+	// "global" reference to all named routes
+	namedRoutes map[string]*Route
+
+	// config possibly passed in from `Router`
+	routeConf
+}
+
+// SkipClean reports whether path cleaning is enabled for this route via
+// Router.SkipClean.
+func (r *Route) SkipClean() bool {
+	return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+
+	var matchErr error
+
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			if _, ok := m.(methodMatcher); ok {
+				matchErr = ErrMethodMismatch
+				continue
+			}
+
+			// Ignore ErrNotFound errors. These errors arise from match call
+			// to Subrouters.
+			//
+			// This prevents subsequent matching subrouters from failing to
+			// run middleware. If not ignored, the middleware would see a
+			// non-nil MatchErr and be skipped, even when there was a
+			// matching route.
+			if match.MatchErr == ErrNotFound {
+				match.MatchErr = nil
+			}
+
+			matchErr = nil // nolint:ineffassign
+			return false
+		} else {
+			// Multiple routes may share the same path but use different HTTP methods. For instance:
+			// Route 1: POST "/users/{id}".
+			// Route 2: GET "/users/{id}", parameters: "id": "[0-9]+".
+			//
+			// The router must handle these cases correctly. For a GET request to "/users/abc",
+			// where "abc" does not satisfy the "[0-9]+" pattern for "id", the router should
+			// return a "Not Found" error, as no route fully matches the request.
+			if match.MatchErr == ErrMethodMismatch {
+				match.MatchErr = nil
+			}
+		}
+	}
+
+	if matchErr != nil {
+		match.MatchErr = matchErr
+		return false
+	}
+
+	if match.MatchErr == ErrMethodMismatch && r.handler != nil {
+		// We found a route which matches the request method; clear MatchErr
+		match.MatchErr = nil
+		// Then override the mismatched handler.
+		match.Handler = r.handler
+	}
+
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+
+	// Set variables.
+	r.regexp.setMatch(req, match, r)
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulting from building the route, if any.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// It is an error to call Name more than once on a route.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.namedRoutes[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
+	if r.err != nil {
+		return r.err
+	}
+	if typ == regexpTypePath || typ == regexpTypePrefix {
+		if len(tpl) > 0 && tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+		strictSlash:    r.strictSlash,
+		useEncodedPath: r.useEncodedPath,
+	})
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if typ == regexpTypeHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if typ == regexpTypeQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//	r := mux.NewRouter().NewRoute()
+//	r.Headers("Content-Type", "application/json",
+//	          "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//	r := mux.NewRouter().NewRoute()
+//	r.HeadersRegexp("Content-Type", "application/(text|json)",
+//	          "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request headers match their regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//	r := mux.NewRouter().NewRoute()
+//	r.Host("www.example.com")
+//	r.Host("{subdomain}.domain.com")
+//	r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved by
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//	r := mux.NewRouter().NewRoute()
+//	r.Path("/products/").Handler(ProductsHandler)
+//	r.Path("/products/{key}").Handler(ProductsHandler)
+//	r.Path("/articles/{category}/{id:[0-9]+}").
+//	  Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved by
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePath)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//	r := mux.NewRouter().NewRoute()
+//	r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined queries
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything, as query values default to the pattern ".*".
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	scheme := r.URL.Scheme
+	// https://golang.org/pkg/net/http/#Request
+	// "For [most] server requests, fields other than Path and RawQuery will be
+	// empty."
+	// Since this is an HTTP muxer, the scheme is either http or https, so we
+	// can set it based on the TLS termination state.
+	if scheme == "" {
+		if r.TLS == nil {
+			scheme = "http"
+		} else {
+			scheme = "https"
+		}
+	}
+	return matchInArray(m, scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+// If the request's URL has a scheme set, it will be matched against.
+// Generally, the URL scheme will only be set if a previous handler set it,
+// such as the ProxyHeaders handler from gorilla/handlers.
+// If unset, the scheme will be determined based on the request's TLS
+// termination state.
+// The first argument to Schemes will be used when constructing a route URL.
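+//
+// For example (an illustrative sketch added in this review, not upstream docs):
+//
+//	r := mux.NewRouter().NewRoute()
+//	r.Schemes("https").Host("secure.example.com")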
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	if len(schemes) > 0 {
+		r.buildScheme = schemes[0]
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
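+//
+// For example (an illustrative sketch added in this review, not upstream docs):
+//
+//	r.BuildVarsFunc(func(vars map[string]string) map[string]string {
+//		vars["category"] = strings.ToLower(vars["category"])
+//		return vars
+//	})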
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	if r.buildVarsFunc != nil {
+		// compose the old and new functions
+		old := r.buildVarsFunc
+		r.buildVarsFunc = func(m map[string]string) map[string]string {
+			return f(old(m))
+		}
+	} else {
+		r.buildVarsFunc = f
+	}
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//	r := mux.NewRouter().NewRoute()
+//	s := r.Host("www.example.com").Subrouter()
+//	s.HandleFunc("/products/", ProductsHandler)
+//	s.HandleFunc("/products/{key}", ProductHandler)
+//	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	// initialize a subrouter with a copy of the parent route's configuration
+	router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//	  Name("article")
+//
+// ...a URL for it can be built using:
+//
+//	url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+//	"/articles/technology/42"
+//
+// This also works for host variables:
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//	  Host("{subdomain}.domain.com").
+//	  Name("article")
+//
+//	// url.String() will be "http://news.domain.com/articles/technology/42"
+//	url, err := r.Get("article").URL("subdomain", "news",
+//	                                 "category", "technology",
+//	                                 "id", "42")
+//
+// The scheme of the resulting url will be the first argument that was passed to Schemes:
+//
+//	// url.String() will be "https://example.com"
+//	r := mux.NewRouter().NewRoute()
+//	url, err := r.Host("example.com").
+//	             Schemes("https", "http").URL()
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	queries := make([]string, 0, len(r.regexp.queries))
+	if r.regexp.host != nil {
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+		scheme = "http"
+		if r.buildScheme != "" {
+			scheme = r.buildScheme
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	for _, q := range r.regexp.queries {
+		var query string
+		if query, err = q.url(values); err != nil {
+			return nil, err
+		}
+		queries = append(queries, query)
+	}
+	return &url.URL{
+		Scheme:   scheme,
+		Host:     host,
+		Path:     path,
+		RawQuery: strings.Join(queries, "&"),
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	u := &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}
+	if r.buildScheme != "" {
+		u.Scheme = r.buildScheme
+	}
+	return u, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.path == nil {
+		return nil, errors.New("mux: route doesn't have a path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Path: path,
+	}, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp.path == nil {
+		return "", errors.New("mux: route doesn't have a path")
+	}
+	return r.regexp.path.template, nil
+}
+
+// GetPathRegexp returns the expanded regular expression used to match route path.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathRegexp() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp.path == nil {
+		return "", errors.New("mux: route does not have a path")
+	}
+	return r.regexp.path.regexp.String(), nil
+}
+
+// GetQueriesRegexp returns the expanded regular expressions used to match the
+// route queries.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have queries.
+func (r *Route) GetQueriesRegexp() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	queries := make([]string, 0, len(r.regexp.queries))
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.regexp.String())
+	}
+	return queries, nil
+}
+
+// GetQueriesTemplates returns the templates used to build the
+// query matching.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define queries.
+func (r *Route) GetQueriesTemplates() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	queries := make([]string, 0, len(r.regexp.queries))
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.template)
+	}
+	return queries, nil
+}
+
+// GetMethods returns the methods the route matches against.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if route does not have methods.
+func (r *Route) GetMethods() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	for _, m := range r.matchers {
+		if methods, ok := m.(methodMatcher); ok {
+			return []string(methods), nil
+		}
+	}
+	return nil, errors.New("mux: route doesn't have methods")
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp.host == nil {
+		return "", errors.New("mux: route doesn't have a host")
+	}
+	return r.regexp.host.template, nil
+}
+
+// GetVarNames returns the names of all variables added by regexp matchers.
+// These can be used to know which route variables should be passed into r.URL()
+func (r *Route) GetVarNames() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	var varNames []string
+	if r.regexp.host != nil {
+		varNames = append(varNames, r.regexp.host.varsN...)
+	}
+	if r.regexp.path != nil {
+		varNames = append(varNames, r.regexp.path.varsN...)
+	}
+	for _, regx := range r.regexp.queries {
+		varNames = append(varNames, regx.varsN...)
+	}
+	return varNames, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairsToString(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..5f5c496de0129816966873f31cce8b41c76668fe
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/test_helpers.go
@@ -0,0 +1,19 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour. Arguments are not modified, a shallow
+// copy is returned.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
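+//
+// Illustrative usage in a handler test (a sketch added in this review, not
+// upstream docs):
+//
+//	req := httptest.NewRequest(http.MethodGet, "/articles/tech/42", nil)
+//	req = mux.SetURLVars(req, map[string]string{"category": "tech", "id": "42"})
+//	// mux.Vars(req)["id"] == "42"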
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+	return requestWithVars(r, val)
+}
diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..402433593c09cf9fb07ffd79e7f7beb0f04c6538
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.gitattributes
@@ -0,0 +1,2 @@
+* -text
+*.bin -text -diff
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b35f8449bf280c131afd066ae3e34b7f01bdba43
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+/s2/cmd/_s2sx/sfx-exe
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c9014ce1da23b3cde00db042dbbd595cc283351d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -0,0 +1,137 @@
+# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com
+before:
+  hooks:
+    - ./gen.sh
+
+builds:
+  -
+    id: "s2c"
+    binary: s2c
+    main: ./s2/cmd/s2c/main.go
+    flags:
+      - -trimpath
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - aix
+      - linux
+      - freebsd
+      - netbsd
+      - windows
+      - darwin
+    goarch:
+      - 386
+      - amd64
+      - arm
+      - arm64
+      - ppc64
+      - ppc64le
+      - mips64
+      - mips64le
+    goarm:
+      - 7
+  -
+    id: "s2d"
+    binary: s2d
+    main: ./s2/cmd/s2d/main.go
+    flags:
+      - -trimpath
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - aix
+      - linux
+      - freebsd
+      - netbsd
+      - windows
+      - darwin
+    goarch:
+      - 386
+      - amd64
+      - arm
+      - arm64
+      - ppc64
+      - ppc64le
+      - mips64
+      - mips64le
+    goarm:
+      - 7
+  -
+    id: "s2sx"
+    binary: s2sx
+    main: ./s2/cmd/_s2sx/main.go
+    flags:
+      - -modfile=s2sx.mod
+      - -trimpath
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - aix
+      - linux
+      - freebsd
+      - netbsd
+      - windows
+      - darwin
+    goarch:
+      - 386
+      - amd64
+      - arm
+      - arm64
+      - ppc64
+      - ppc64le
+      - mips64
+      - mips64le
+    goarm:
+      - 7
+
+archives:
+  -
+    id: s2-binaries
+    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
+    replacements:
+      aix: AIX
+      darwin: OSX
+      linux: Linux
+      windows: Windows
+      386: i386
+      amd64: x86_64
+      freebsd: FreeBSD
+      netbsd: NetBSD
+    format_overrides:
+      - goos: windows
+        format: zip
+    files:
+      - unpack/*
+      - s2/LICENSE
+      - s2/README.md
+checksum:
+  name_template: 'checksums.txt'
+snapshot:
+  name_template: "{{ .Tag }}-next"
+changelog:
+  sort: asc
+  filters:
+    exclude:
+    - '^doc:'
+    - '^docs:'
+    - '^test:'
+    - '^tests:'
+    - '^Update\sREADME.md'
+
+nfpms:
+  -
+    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    vendor: Klaus Post
+    homepage: https://github.com/klauspost/compress
+    maintainer: Klaus Post <klauspost@gmail.com>
+    description: S2 Compression Tool
+    license: BSD 3-Clause
+    formats:
+      - deb
+      - rpm
+    replacements:
+      darwin: Darwin
+      linux: Linux
+      freebsd: FreeBSD
+      amd64: x86_64
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..87d55747778c572bda5a21a2e9ad4578b088e832
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,304 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+
+Files: gzhttp/*
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016-2017 The New York Times Company
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+------------------
+
+Files: s2/cmd/internal/readahead/*
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------------------
+Files: snappy/*
+Files: internal/snapref/*
+
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3429879eb69fd4e1358916348d814c1f1ae8efcf
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,438 @@
+# compress
+
+This package provides various compression algorithms.
+
+* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
+* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib) (see the sketch below the badges).
+* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
+* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) provides client and server wrappers for handling gzipped requests efficiently.
+* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
+* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
+
+[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
+[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
+
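+As a quick illustration (a sketch added in this review; see each package's own
+documentation for authoritative usage), the deflate-based packages mirror the
+standard library APIs, so switching is typically just an import change:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/gzip" // drop-in for compress/gzip
+)
+
+func main() {
+	var buf bytes.Buffer
+	w := gzip.NewWriter(&buf) // same API as the standard library
+	if _, err := w.Write([]byte("hello, compression")); err != nil {
+		panic(err)
+	}
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+	fmt.Println(buf.Len(), "compressed bytes")
+}
+```
+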
+# changelog
+
+* Aug 30, 2021 (v1.13.5)
+	* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
+	* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
+	* zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
+	* Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
+
+* Aug 12, 2021 (v1.13.4)
+	* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
+	* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
+
+* Aug 3, 2021 (v1.13.3) 
+	* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
+	* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
+	* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
+	* s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
+	* zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
+	* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
+
+* Jun 14, 2021 (v1.13.1)
+	* s2: Add full Snappy output support  [#396](https://github.com/klauspost/compress/pull/396)
+	* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
+	* gzhttp: Add header to skip compression  [#389](https://github.com/klauspost/compress/pull/389)
+	* s2: Improve speed with bigger output margin  [#395](https://github.com/klauspost/compress/pull/395)
+
+* Jun 3, 2021 (v1.13.0)
+	* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
+	* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
+	* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+
+* May 25, 2021 (v1.12.3)
+	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+	* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) 
+
+* Apr 27, 2021 (v1.12.2)
+	* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+	* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+	* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+	* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+	* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+	* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) 
+
+* Apr 14, 2021 (v1.12.1)
+	* snappy package removed. Upstream added as dependency.
+	* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+	* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+	* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+<details>
+	<summary>See changes prior to v1.12.1</summary>
+	
+* Mar 26, 2021 (v1.11.13)
+	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+	* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+	* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+	* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+	* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+	* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+	* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+	* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+	* s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+	* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+  
+* Jan 14, 2021 (v1.11.7)
+	* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+	* s2: Add 'best' compression option.  [#310](https://github.com/klauspost/compress/pull/310)
+	* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+	* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+	* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+	* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+	* zstd: Free Decoder resources when Reset is called with a nil io.Reader  [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+	* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+	* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+	* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+	* Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
+	* zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
+
+* Nov 15, 2020 (v1.11.3)
+	* inflate: 10-15% faster decompression  [#293](https://github.com/klauspost/compress/pull/293)
+	* zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
+
+* Oct 11, 2020 (v1.11.2)
+	* s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
+
+* Oct 1, 2020 (v1.11.1)
+	* zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
+
+* Sept 8, 2020 (v1.11.0)
+	* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
+	* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
+	* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
+</details>
+
+<details>
+	<summary>See changes prior to v1.11.0</summary>
+ 
+* July 8, 2020 (v1.10.11) 
+	* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
+	* huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
+	
+* June 23, 2020 (v1.10.10) 
+	* zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
+	
+* June 16, 2020 (v1.10.9): 
+	* zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
+	* zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
+	* Fuzzit tests removed. The service has been purchased and is no longer available.
+	
+* June 5, 2020 (v1.10.8): 
+	* 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
+	
+* June 1, 2020 (v1.10.7): 
+	* Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
+	* Increase zstd decompression speed up to 1.19x.  [#259](https://github.com/klauspost/compress/pull/259)
+	* Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
+	
+* May 21, 2020: (v1.10.6) 
+	* zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
+	* zstd: Stricter decompression checks.
+	
+* April 12, 2020: (v1.10.5)
+	* s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
+	
+* Apr 8, 2020: (v1.10.4) 
+	* zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251),  [#250](https://github.com/klauspost/compress/pull/250),  [#249](https://github.com/klauspost/compress/pull/249),  [#247](https://github.com/klauspost/compress/pull/247)
+* Mar 11, 2020: (v1.10.3) 
+	* s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
+	* s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
+	* zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
+	* zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
+	* zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
+	
+* Feb 27, 2020: (v1.10.2) 
+	* Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
+	* Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
+	
+* Feb 18, 2020: (v1.10.1)
+	* Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
+	* deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
+	* Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
+	
+* Feb 4, 2020: (v1.10.0) 
+	* Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
+	* Fix buffer overflow on repeated small block deflate.  [#218](https://github.com/klauspost/compress/pull/218)
+	* Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
+	* Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s.  [#186](https://github.com/klauspost/compress/pull/186)
+
+</details>
+
+<details>
+	<summary>See changes prior to v1.10.0</summary>
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) 
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default  [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Less allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) 
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+</details>
+
+<details>
+	<summary>See changes prior to v1.9.0</summary>
+
+* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
+* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
+* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
+* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
+* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
+* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
+* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
+* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
+* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
+* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
+* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. 
+* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
+* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
+* Aug 14, 2019: zstd: Skip incompressible data 2x faster.  [#147](https://github.com/klauspost/compress/pull/147)
+* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
+* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
+* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
+* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
+* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
+* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
+* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
+* June 17, 2019: zstd decompression bugfix.
+* June 17, 2019: fix 32 bit builds.
+* June 17, 2019: Easier use in modules (less dependencies).
+* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
+* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
+* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
+* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
+* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
+* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
+* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. 
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes. 
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
+* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
+
+</details>
+
+# deflate usage
+
+* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
+* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import         | new import                              | Documentation |
+|--------------------|-----------------------------------------|---------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`    | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`    | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip`      | `github.com/klauspost/compress/zip`     | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate`   | `github.com/klauspost/compress/flate`   | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
+
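+For example, a minimal sketch of the swap (the helper and its input are illustrative, not part of the package); only the import path changes, the API is identical to the standard library:
+
+```
+import (
+	"bytes"
+
+	"github.com/klauspost/compress/gzip" // was: "compress/gzip"
+)
+
+// gzipBytes compresses data with the default level.
+func gzipBytes(data []byte) ([]byte, error) {
+	var buf bytes.Buffer
+	zw := gzip.NewWriter(&buf)
+	if _, err := zw.Write(data); err != nil {
+		return nil, err
+	}
+	if err := zw.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+```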
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so the standard documentation also applies: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly from CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer; the stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It performs compression without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently, 
+but with very little activity. This is *not* intended for regular web servers serving individual requests.  
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```
+import (
+	"bufio"
+	"io"
+
+	"github.com/klauspost/compress/gzip"
+)
+
+// writeStateless gzips 'data' to 'dst' with stateless compression,
+// buffering writes so each Write to the compressor carries up to 4KB.
+func writeStateless(dst io.Writer, data []byte) error {
+	gzw, err := gzip.NewWriterLevel(dst, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	_, err = w.Write(data)
+	return err
+}
+```
+
+This will only use up to 4KB in memory when the writer is idle. 
+
+Compression is almost always worse than the fastest compression level 
+and each write will allocate (a little) memory. 
+
+# Performance Update 2018
+
+It has been a while since we last compared the speed of this package to the standard library, so I re-ran my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib; negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) each use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" gives the best compression, and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code in both packages, the results there are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and lower speed than levels 6 and 7 of this package, respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed.  The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, while rare characters like '¤' take more bits. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). 
+
+The linear-time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in the standard library since Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip there.
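+
+As a rough sketch (the helper name and buffer are illustrative), Huffman-only deflate is selected through the level constant:
+
+```
+import (
+	"bytes"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// huffOnly deflates data without any match searching, so speed stays
+// near-linear in the input size at the cost of compression ratio.
+func huffOnly(data []byte) ([]byte, error) {
+	var buf bytes.Buffer
+	fw, err := flate.NewWriter(&buf, flate.HuffmanOnly)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := fw.Write(data); err != nil {
+		return nil, err
+	}
+	if err := fw.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+```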
+
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea5a692d5130f55b62d741e753d264e2ba3ac598
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(variance) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea7324da671f67ed33d9ca612d5e5fcbbe912d76
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+            
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) 
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface for compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                 |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                           |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                    |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated |
+| `(error)`           | An internal error occurred.                                                 |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input. 
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
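+
+As a minimal sketch (the helper is illustrative; the special error values follow the table above), a block round trip with a re-used `Scratch` could look like:
+
+```
+import "github.com/klauspost/compress/fse"
+
+var s fse.Scratch // re-used across blocks to avoid allocations
+
+// roundTrip compresses a block and decompresses it again.
+func roundTrip(block []byte) ([]byte, error) {
+	comp, err := fse.Compress(block, &s)
+	switch err {
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// Expected under normal operation: store the block another way.
+		return block, nil
+	case nil:
+	default:
+		return nil, err
+	}
+	// comp aliases s.Out, so copy it before re-using the scratch.
+	stored := append([]byte(nil), comp...)
+	return fse.Decompress(stored, &s)
+}
+```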
+
+# Performance
+
+Many factors affect speed. Block sizes and the compressibility of the material are the primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be 
+beneficial to transpose all your input values down by 64.   
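+
+A sketch of that transposition (a hypothetical helper; the same offset must be added back after decompression):
+
+```
+// transposeDown shifts every symbol down by off before compression.
+func transposeDown(in []byte, off byte) {
+	for i := range in {
+		in[i] -= off
+	}
+}
+```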
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. 
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components. 
+
+A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).  
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 0000000000000000000000000000000000000000..f65eb3909cf4a59255e0302a96d7e7d3ab21fd29
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
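+	// Mark the zero padding and the stop bit in the last byte as already read.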
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+	if n == 0 || b.bitsRead >= 64 {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// fillFastStart() assumes the bitReader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.bitsRead >= 64 && b.off == 0
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go
new file mode 100644
index 0000000000000000000000000000000000000000..43e463611b15ccd0f92710da6c6ac8502dbf009a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -0,0 +1,168 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 contains bitmasks for 0-16 bits. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// This is fastest if bits can be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+	if bits == 0 {
+		return
+	}
+	value <<= (16 - bits) & 15
+	value >>= (16 - bits) & 15
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.bitContainer >>= v << 3
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go
new file mode 100644
index 0000000000000000000000000000000000000000..abade2d605279b6a57d475d73d63b5033230676c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bytereader.go
@@ -0,0 +1,47 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream b by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f341914c67f05381ef2c0c037254e1bc799e7ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -0,0 +1,683 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Compress the input bytes. Input must be < 2GB.
+// Provide a Scratch buffer to avoid memory allocations.
+// Note that the output is also kept in the scratch buffer.
+// If input is too hard to compress, ErrIncompressible is returned.
+// If the input is a single byte value repeated, ErrUseRLE is returned.
+func Compress(in []byte, s *Scratch) ([]byte, error) {
+	if len(in) <= 1 {
+		return nil, ErrIncompressible
+	}
+	if len(in) > (2<<30)-1 {
+		return nil, errors.New("input too big, must be < 2GB")
+	}
+	s, err := s.prepare(in)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	if maxCount == 0 {
+		maxCount = s.countSimple(in)
+	}
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount == len(in) {
+		// One symbol, use RLE
+		return nil, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol present maximum once or too well distributed.
+		return nil, ErrIncompressible
+	}
+	s.optimalTableLog()
+	err = s.normalizeCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.writeCount()
+	if err != nil {
+		return nil, err
+	}
+
+	if false {
+		err = s.validateNorm()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = s.buildCTable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.compress(in)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.bw.out
+	// Check if we compressed.
+	if len(s.Out) >= len(in) {
+		return nil, ErrIncompressible
+	}
+	return s.Out, nil
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+
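+	// The first symbol only seeds the state; no bits are written here.
+	// The final state is flushed at the end and read first by the decoder.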
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + first.deltaFindState
+	c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encodeZero(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+	c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+	c.bw.flush()
+}
+
+// compress is the main compression loop that will encode the input from the last byte to the first.
+func (s *Scratch) compress(src []byte) error {
+	if len(src) <= 2 {
+		return errors.New("compress: src too small")
+	}
+	tt := s.ct.symbolTT[:256]
+	s.bw.reset(s.Out)
+
+	// Our two states each encodes every second byte.
+	// Last byte encoded (first byte decoded) will always be encoded by c1.
+	var c1, c2 cState
+
+	// Encode so remaining size is divisible by 4.
+	ip := len(src)
+	if ip&1 == 1 {
+		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+		c1.encodeZero(tt[src[ip-3]])
+		ip -= 3
+	} else {
+		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+		ip -= 2
+	}
+	if ip&2 != 0 {
+		c2.encodeZero(tt[src[ip-1]])
+		c1.encodeZero(tt[src[ip-2]])
+		ip -= 2
+	}
+
+	// Main compression loop.
+	switch {
+	case !s.zeroBits && s.actualTableLog <= 8:
+		// We can encode 4 symbols without requiring a flush.
+		// We do not need to check if any output is 0 bits.
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encode(tt[v0])
+			c1.encode(tt[v1])
+			c2.encode(tt[v2])
+			c1.encode(tt[v3])
+			ip -= 4
+		}
+	case !s.zeroBits:
+		// We do not need to check if any output is 0 bits.
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encode(tt[v0])
+			c1.encode(tt[v1])
+			s.bw.flush32()
+			c2.encode(tt[v2])
+			c1.encode(tt[v3])
+			ip -= 4
+		}
+	case s.actualTableLog <= 8:
+		// We can encode 4 symbols without requiring a flush
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encodeZero(tt[v0])
+			c1.encodeZero(tt[v1])
+			c2.encodeZero(tt[v2])
+			c1.encodeZero(tt[v3])
+			ip -= 4
+		}
+	default:
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encodeZero(tt[v0])
+			c1.encodeZero(tt[v1])
+			s.bw.flush32()
+			c2.encodeZero(tt[v2])
+			c1.encodeZero(tt[v3])
+			ip -= 4
+		}
+	}
+
+	// Flush final state.
+	// Used to initialize state when decoding.
+	c2.flush(s.actualTableLog)
+	c1.flush(s.actualTableLog)
+
+	return s.bw.close()
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if charnum > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [maxSymbolValue + 2]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for ui, v := range s.norm[:s.symbolLen] {
+			symbol := byte(ui)
+			if v > largeLimit {
+				s.zeroBits = true
+			}
+			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+				tableSymbol[position] = symbol
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					position = (position + step) & tableMask
+				} /* Low proba area */
+			}
+		}
+
+		// Check if we have gone through all positions
+		if position != 0 {
+			return errors.New("position!=0")
+		}
+	}
+
+	// Build table
+	table := s.ct.stateTable
+	{
+		tsi := int(tableSize)
+		for u, v := range tableSymbol {
+			// TableU16 : sorted by symbol order; gives next state value
+			table[cumul[v]] = uint16(tsi + u)
+			cumul[v]++
+		}
+	}
+
+	// Build Symbol Transformation Table
+	{
+		total := int16(0)
+		symbolTT := s.ct.symbolTT[:s.symbolLen]
+		tableLog := s.actualTableLog
+		tl := (uint32(tableLog) << 16) - (1 << tableLog)
+		for i, v := range s.norm[:s.symbolLen] {
+			switch v {
+			case 0:
+			case -1, 1:
+				symbolTT[i].deltaNbBits = tl
+				symbolTT[i].deltaFindState = int32(total - 1)
+				total++
+			default:
+				maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
+				minStatePlus := uint32(v) << maxBitsOut
+				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+				symbolTT[i].deltaFindState = int32(total - v)
+				total += v
+			}
+		}
+		if total != int16(tableSize) {
+			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+		}
+	}
+	return nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int) {
+	for _, v := range in {
+		s.count[v]++
+	}
+	m := uint32(0)
+	for i, v := range s.count[:] {
+		if v > m {
+			m = v
+		}
+		if v > 0 {
+			s.symbolLen = uint16(i) + 1
+		}
+	}
+	return int(m)
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+	minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1
+	minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2
+	if minBitsSrc < minBitsSymbols {
+		return uint8(minBitsSrc)
+	}
+	return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+	tableLog := s.TableLog
+	minBits := s.minTableLog()
+	maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minTablelog {
+		tableLog = minTablelog
+	}
+	if tableLog > maxTableLog {
+		tableLog = maxTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+func (s *Scratch) normalizeCount() error {
+	var (
+		tableLog          = s.actualTableLog
+		scale             = 62 - uint64(tableLog)
+		step              = (1 << 62) / uint64(s.br.remain())
+		vStep             = uint64(1) << (scale - 20)
+		stillToDistribute = int16(1 << tableLog)
+		largest           int
+		largestP          int16
+		lowThreshold      = (uint32)(s.br.remain() >> tableLog)
+	)
+
+	for i, cnt := range s.count[:s.symbolLen] {
+		// already handled
+		// if (count[s] == s.length) return 0;   /* rle special case */
+
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			stillToDistribute--
+		} else {
+			proba := (int16)((uint64(cnt) * step) >> scale)
+			if proba < 8 {
+				restToBeat := vStep * uint64(rtbTable[proba])
+				v := uint64(cnt)*step - (uint64(proba) << scale)
+				if v > restToBeat {
+					proba++
+				}
+			}
+			if proba > largestP {
+				largestP = proba
+				largest = i
+			}
+			s.norm[i] = proba
+			stillToDistribute -= proba
+		}
+	}
+
+	if -stillToDistribute >= (s.norm[largest] >> 1) {
+		// corner case, need another normalization method
+		return s.normalizeCount2()
+	}
+	s.norm[largest] += stillToDistribute
+	return nil
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *Scratch) normalizeCount2() error {
+	const notYetAssigned = -2
+	var (
+		distributed  uint32
+		total        = uint32(s.br.remain())
+		tableLog     = s.actualTableLog
+		lowThreshold = total >> tableLog
+		lowOne       = (total * 3) >> (tableLog + 1)
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			distributed++
+			total -= cnt
+			continue
+		}
+		if cnt <= lowOne {
+			s.norm[i] = 1
+			distributed++
+			total -= cnt
+			continue
+		}
+		s.norm[i] = notYetAssigned
+	}
+	toDistribute := (1 << tableLog) - distributed
+
+	if (total / toDistribute) > lowOne {
+		// risk of rounding to zero
+		lowOne = (total * 3) / (toDistribute * 2)
+		for i, cnt := range s.count[:s.symbolLen] {
+			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+				s.norm[i] = 1
+				distributed++
+				total -= cnt
+				continue
+			}
+		}
+		toDistribute = (1 << tableLog) - distributed
+	}
+	if distributed == uint32(s.symbolLen)+1 {
+		// all values are pretty poor;
+		//   probably incompressible data (should have already been detected);
+		//   find max, then give all remaining points to max
+		var maxV int
+		var maxC uint32
+		for i, cnt := range s.count[:s.symbolLen] {
+			if cnt > maxC {
+				maxV = i
+				maxC = cnt
+			}
+		}
+		s.norm[maxV] += int16(toDistribute)
+		return nil
+	}
+
+	if total == 0 {
+		// all of the symbols were low enough for the lowOne or lowThreshold
+		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+			if s.norm[i] > 0 {
+				toDistribute--
+				s.norm[i]++
+			}
+		}
+		return nil
+	}
+
+	var (
+		vStepLog = 62 - uint64(tableLog)
+		mid      = uint64((1 << (vStepLog - 1)) - 1)
+		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+		tmpTotal = mid
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if s.norm[i] == notYetAssigned {
+			var (
+				end    = tmpTotal + uint64(cnt)*rStep
+				sStart = uint32(tmpTotal >> vStepLog)
+				sEnd   = uint32(end >> vStepLog)
+				weight = sEnd - sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.count[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
index 0000000000000000000000000000000000000000..926f5f15356a48222dcc76c18ed3fe8dad5cbe3b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -0,0 +1,374 @@
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided, a temporary one will be allocated.
+// It is possible, but in no way guaranteed, that corrupt data will
+// return an error.
+// It is up to the caller to verify the integrity of the returned data.
+// Use a predefined Scratch to set the maximum acceptable output size.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
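+
+// A minimal caller-side sketch (hypothetical; the `compressed` variable is an
+// assumption, not part of this file):
+//
+//	var s fse.Scratch
+//	s.DecompressLimit = 1 << 20 // refuse outputs larger than ~1 MiB
+//	out, err := fse.Decompress(compressed, &s)
+//	if err != nil {
+//		// Input was corrupt or the output exceeded the limit.
+//	}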
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() {
+	tableSize := 1 << s.actualTableLog
+	if cap(s.decTable) < tableSize {
+		s.decTable = make([]decSymbol, tableSize)
+	}
+	s.decTable = s.decTable[:tableSize]
+
+	if cap(s.ct.tableSymbol) < 256 {
+		s.ct.tableSymbol = make([]byte, 256)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:256]
+
+	if cap(s.ct.stateTable) < 256 {
+		s.ct.stateTable = make([]uint16, 256)
+	}
+	s.ct.stateTable = s.ct.stateTable[:256]
+}
+
+// buildDtable will build the decoding table.
+func (s *Scratch) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	s.allocDtable()
+	symbolNext := s.ct.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	s.zeroBits = false
+	{
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.decTable[highThreshold].symbol = uint8(i)
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				if v >= largeLimit {
+					s.zeroBits = true
+				}
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.decTable[position].symbol = uint8(ss)
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.decTable {
+			symbol := v.symbol
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.decTable[u].nbBits = nBits
+			newState := (nextState << nBits) - tableSize
+			if newState >= tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.decTable[u].newState = newState
+		}
+	}
+	return nil
+}
+
+// decompress will decompress the bitstream.
+// If the buffer is over-read an error is returned.
+func (s *Scratch) decompress() error {
+	br := &s.bits
+	br.init(s.br.unread())
+
+	var s1, s2 decoder
+	// Initialize and decode first state and symbol.
+	s1.init(br, s.decTable, s.actualTableLog)
+	s2.init(br, s.decTable, s.actualTableLog)
+
+	// Use temp table to avoid bound checks/append penalty.
+	var tmp = s.ct.tableSymbol[:256]
+	var off uint8
+
+	// Main part
+	if !s.zeroBits {
+		for br.off >= 8 {
+			br.fillFast()
+			tmp[off+0] = s1.nextFast()
+			tmp[off+1] = s2.nextFast()
+			br.fillFast()
+			tmp[off+2] = s1.nextFast()
+			tmp[off+3] = s2.nextFast()
+			off += 4
+			// When off is 0, we have overflowed and should write.
+			if off == 0 {
+				s.Out = append(s.Out, tmp...)
+				if len(s.Out) >= s.DecompressLimit {
+					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+				}
+			}
+		}
+	} else {
+		for br.off >= 8 {
+			br.fillFast()
+			tmp[off+0] = s1.next()
+			tmp[off+1] = s2.next()
+			br.fillFast()
+			tmp[off+2] = s1.next()
+			tmp[off+3] = s2.next()
+			off += 4
+			// When off is 0, we have overflowed and should write.
+			if off == 0 {
+				s.Out = append(s.Out, tmp...)
+				if len(s.Out) >= s.DecompressLimit {
+					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+				}
+			}
+		}
+	}
+	s.Out = append(s.Out, tmp[:off]...)
+
+	// Final bits, a bit more expensive check
+	for {
+		if s1.finished() {
+			s.Out = append(s.Out, s1.final(), s2.final())
+			break
+		}
+		br.fill()
+		s.Out = append(s.Out, s1.next())
+		if s2.finished() {
+			s.Out = append(s.Out, s2.final(), s1.final())
+			break
+		}
+		s.Out = append(s.Out, s2.next())
+		if len(s.Out) >= s.DecompressLimit {
+			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+		}
+	}
+	return br.close()
+}
+
+// decoder keeps track of the current state and updates it from the bitstream.
+type decoder struct {
+	state uint16
+	br    *bitReader
+	dt    []decSymbol
+}
+
+// init will initialize the decoder and read the first state from the stream.
+func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
+	d.dt = dt
+	d.br = in
+	d.state = in.getBits(tableLog)
+}
+
+// next returns the next symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) next() uint8 {
+	n := &d.dt[d.state]
+	lowBits := d.br.getBits(n.nbBits)
+	d.state = n.newState + lowBits
+	return n.symbol
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (d *decoder) finished() bool {
+	return d.br.finished() && d.dt[d.state].nbBits > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (d *decoder) final() uint8 {
+	return d.dt[d.state].symbol
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) nextFast() uint8 {
+	n := d.dt[d.state]
+	lowBits := d.br.getBitsFast(n.nbBits)
+	d.state = n.newState + lowBits
+	return n.symbol
+}
diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go
new file mode 100644
index 0000000000000000000000000000000000000000..535cbadfdea192827c17417deaa8a476a4f3f715
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/fse.go
@@ -0,0 +1,144 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+// Package fse provides Finite State Entropy encoding and decoding.
+//
+// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding
+// for byte blocks as implemented in zstd.
+//
+// See https://github.com/klauspost/compress/tree/master/fse for more information.
+package fse
+
+import (
+	"errors"
+	"fmt"
+	"math/bits"
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+	 *  Increasing memory usage improves compression ratio
+	 *  Reduced memory usage can improve speed, due to cache effect
+	 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+	maxMemoryUsage     = 14
+	defaultMemoryUsage = 13
+
+	maxTableLog     = maxMemoryUsage - 2
+	maxTablesize    = 1 << maxTableLog
+	defaultTablelog = defaultMemoryUsage - 2
+	minTablelog     = 5
+	maxSymbolValue  = 255
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0, decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // set if any symbol may be encoded with 0 bits (some symbol has prob > 50%).
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram allows the caller to populate the histogram and skip that step in the compression.
+// It also allows inspecting the histogram once compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the count of the
+// most populated symbol. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the count of the most populated symbol.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
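+
+// A hypothetical caller-side sketch of supplying a histogram (the `data`
+// variable is an assumption, not part of this file):
+//
+//	var s fse.Scratch
+//	hist := s.Histogram()
+//	for _, b := range data {
+//		hist[b]++
+//	}
+//	maxSym, maxCnt := 0, uint32(0)
+//	for i, c := range hist {
+//		if c > 0 {
+//			maxSym = i
+//		}
+//		if c > maxCnt {
+//			maxCnt = c
+//		}
+//	}
+//	s.HistogramFinished(uint8(maxSym), int(maxCnt))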
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..aff942205f1c2a37223cb4136d760b28f5635d5b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b3d262958f85658c64b6faa6739c341e71d8826c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8b6e5c66383d7912bfc4eb43075c3c53d43bb8e9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring out-of-order (OoO) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+It can be used to compress input with many similar values into the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding. 
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low-level interface for compressing single, independent blocks. 
+
+Each block is separate, and there are no built-in integrity checks. 
+This means that the caller should keep track of block sizes and also do checksums if needed.  
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and 
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                 |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                           |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                    |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)      |
+| `(error)`           | An internal error occurred.                                                 |
+
+
+As seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object 
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same 
+object can be used for both.   
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.  
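+
+A minimal compression sketch (hypothetical; the `input` slice and the condensed error handling are assumptions, not part of the package):
+
+```go
+var s huff0.Scratch
+out, reused, err := huff0.Compress1X(input, &s)
+switch err {
+case nil:
+	// out holds the table and compressed data; reused reports whether
+	// the table from the previous block was used.
+case huff0.ErrIncompressible, huff0.ErrUseRLE:
+	// Store this block uncompressed or as RLE instead.
+default:
+	// Internal error.
+}
+```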
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. 
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) 
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call. 
+
+If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the 
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
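+
+For example, preferring to re-use the previous table between blocks could look like this (a sketch; the `block` variable is an assumption):
+
+```go
+s.Reuse = huff0.ReusePolicyPrefer
+out, reused, err := huff0.Compress1X(block, &s)
+// When reused is false, the output starts with a new table, and ReadTable
+// must be called on the decoding side before this block is decompressed.
+```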
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block 
+which can be given to the decompressor. 
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) 
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted. 
+
+It is important to note that a successful decoding does *not* mean your output matches your original input. 
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
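+
+A matching decompression sketch (hypothetical; assumes `compressed` is a complete block that includes its table):
+
+```go
+var s huff0.Scratch
+_, data, err := huff0.ReadTable(compressed, &s)
+if err != nil {
+	// The table could not be read.
+}
+out, err := s.Decompress1X(data)
+// A nil error does not guarantee that out matches the original input.
+```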
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking 
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 0000000000000000000000000000000000000000..a4979e8868a5232c30ce74f858fe81e8377e574c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,329 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
+	return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) peekBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+func (b *bitReader) advance(n uint8) {
+	b.bitsRead += n
+}
+
+// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+// bitReaderBytes reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekByteFast returns the next byte from the top of the bit container.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 {
+	got := uint8(b.value >> 56)
+	return got
+}
+
+func (b *bitReaderBytes) advance(n uint8) {
+	b.bitsRead += n
+	b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderBytes) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value |= uint64(low) << (b.bitsRead - 32)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderBytes is empty and there are at least 8 bytes to read.
+func (b *bitReaderBytes) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderBytes) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value |= uint64(low) << (b.bitsRead - 32)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderBytes) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderBytes) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+// bitReaderShifted reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderShifted struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderShifted) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
+	return uint16(b.value >> ((64 - n) & 63))
+}
+
+func (b *bitReaderShifted) advance(n uint8) {
+	b.bitsRead += n
+	b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderShifted) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderShifted is empty and there are at least 8 bytes to read.
+func (b *bitReaderShifted) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderShifted) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63)
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderShifted) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderShifted) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bce4e87d4ff69e780440fd6970b44eb4a970b5d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -0,0 +1,210 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 is a table of bit masks. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// encSymbol will add the code for a single symbol, up to 16 bits.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
+	enc := ct[symbol]
+	b.bitContainer |= uint64(enc.val) << (b.nBits & 63)
+	if false {
+		if enc.nBits == 0 {
+			panic("nbits 0")
+		}
+	}
+	b.nBits += enc.nBits
+}
+
+// encTwoSymbols will add the codes for two symbols, up to 32 bits in total.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+	encA := ct[av]
+	encB := ct[bv]
+	sh := b.nBits & 63
+	combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+	b.bitContainer |= combined << sh
+	if false {
+		if encA.nBits == 0 {
+			panic("nbitsA 0")
+		}
+		if encB.nBits == 0 {
+			panic("nbitsB 0")
+		}
+	}
+	b.nBits += encA.nBits + encB.nBits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// This is fastest if bits can be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+	if bits == 0 {
+		return
+	}
+	value <<= (16 - bits) & 15
+	value >>= (16 - bits) & 15
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+		return
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+		b.bitContainer >>= 1 << 3
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+		b.bitContainer >>= 2 << 3
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+		b.bitContainer >>= 3 << 3
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+		b.bitContainer >>= 4 << 3
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+		b.bitContainer >>= 5 << 3
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+		b.bitContainer >>= 6 << 3
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+		b.bitContainer >>= 7 << 3
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+		b.bitContainer = 0
+		b.nBits = 0
+		return
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
new file mode 100644
index 0000000000000000000000000000000000000000..50bcdf6ea99ce456802f645b08d4489b364349c8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream b by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	v3 := int32(b.b[b.off+3])
+	v2 := int32(b.b[b.off+2])
+	v1 := int32(b.b[b.off+1])
+	v0 := int32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	v3 := uint32(b.b[b.off+3])
+	v2 := uint32(b.b[b.off+2])
+	v1 := uint32(b.b[b.off+1])
+	v0 := uint32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 0000000000000000000000000000000000000000..8323dc053890b8b37b3cc69cd824cb58bbfa0431
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,720 @@
+package huff0
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similarly to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	if false {
+		// TODO: compress4Xp only slightly faster.
+		const parallelThreshold = 8 << 10
+		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
+			return compress(in, s, s.compress4X)
+		}
+		return compress(in, s, s.compress4Xp)
+	}
+	return compress(in, s, s.compress4X)
+}
+
+func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
+	// Nuke previous table if we cannot reuse anyway.
+	if s.Reuse == ReusePolicyNone {
+		s.prevTable = s.prevTable[:0]
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	var canReuse = false
+	if maxCount == 0 {
+		maxCount, canReuse = s.countSimple(in)
+	} else {
+		canReuse = s.canUseTable(s.prevTable)
+	}
+
+	// We want the output size to be less than this:
+	wantSize := len(in)
+	if s.WantLogLess > 0 {
+		wantSize -= wantSize >> s.WantLogLess
+	}
+
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount >= len(in) {
+		if maxCount > len(in) {
+			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+		}
+		if len(in) == 1 {
+			return nil, false, ErrIncompressible
+		}
+		// One symbol, use RLE
+		return nil, false, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol is present at most once, or the input is too well distributed.
+		return nil, false, ErrIncompressible
+	}
+	if s.Reuse == ReusePolicyMust && !canReuse {
+		// We must reuse, but we can't.
+		return nil, false, ErrIncompressible
+	}
+	if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
+		keepTable := s.cTable
+		keepTL := s.actualTableLog
+		s.cTable = s.prevTable
+		s.actualTableLog = s.prevTableLog
+		s.Out, err = compressor(in)
+		s.cTable = keepTable
+		s.actualTableLog = keepTL
+		if err == nil && len(s.Out) < wantSize {
+			s.OutData = s.Out
+			return s.Out, true, nil
+		}
+		if s.Reuse == ReusePolicyMust {
+			return nil, false, ErrIncompressible
+		}
+		// Do not attempt to re-use later.
+		s.prevTable = s.prevTable[:0]
+	}
+
+	// Calculate new table.
+	err = s.buildCTable()
+	if err != nil {
+		return nil, false, err
+	}
+
+	if false && !s.canUseTable(s.cTable) {
+		panic("invalid table generated")
+	}
+
+	if s.Reuse == ReusePolicyAllow && canReuse {
+		hSize := len(s.Out)
+		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
+		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
+		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
+			// Retain cTable even if we re-use.
+			keepTable := s.cTable
+			keepTL := s.actualTableLog
+
+			s.cTable = s.prevTable
+			s.actualTableLog = s.prevTableLog
+			s.Out, err = compressor(in)
+
+			// Restore ctable.
+			s.cTable = keepTable
+			s.actualTableLog = keepTL
+			if err != nil {
+				return nil, false, err
+			}
+			if len(s.Out) >= wantSize {
+				return nil, false, ErrIncompressible
+			}
+			s.OutData = s.Out
+			return s.Out, true, nil
+		}
+	}
+
+	// Use new table
+	err = s.cTable.write(s)
+	if err != nil {
+		s.OutTable = nil
+		return nil, false, err
+	}
+	s.OutTable = s.Out
+
+	// Compress using new table
+	s.Out, err = compressor(in)
+	if err != nil {
+		s.OutTable = nil
+		return nil, false, err
+	}
+	if len(s.Out) >= wantSize {
+		s.OutTable = nil
+		return nil, false, ErrIncompressible
+	}
+	// Move current table into previous.
+	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
+	s.OutData = s.Out[len(s.OutTable):]
+	return s.Out, false, nil
+}
+
+// EstimateSizes will estimate the table size, the compressed data size, and the data size when re-using the previous table.
+func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return 0, 0, 0, err
+	}
+
+	// Create histogram, if none was provided.
+	tableSz, dataSz, reuseSz = -1, -1, -1
+	maxCount := s.maxCount
+	var canReuse = false
+	if maxCount == 0 {
+		maxCount, canReuse = s.countSimple(in)
+	} else {
+		canReuse = s.canUseTable(s.prevTable)
+	}
+
+	// We want the output size to be less than this:
+	wantSize := len(in)
+	if s.WantLogLess > 0 {
+		wantSize -= wantSize >> s.WantLogLess
+	}
+
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount >= len(in) {
+		if maxCount > len(in) {
+			return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+		}
+		if len(in) == 1 {
+			return 0, 0, 0, ErrIncompressible
+		}
+		// One symbol, use RLE
+		return 0, 0, 0, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol is present at most once, or the input is too well distributed.
+		return 0, 0, 0, ErrIncompressible
+	}
+
+	// Calculate new table.
+	err = s.buildCTable()
+	if err != nil {
+		return 0, 0, 0, err
+	}
+
+	if false && !s.canUseTable(s.cTable) {
+		panic("invalid table generated")
+	}
+
+	tableSz, err = s.cTable.estTableSize(s)
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	if canReuse {
+		reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
+	}
+	dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])
+
+	// Restore
+	return tableSz, dataSz, reuseSz, nil
+}
+
+func (s *Scratch) compress1X(src []byte) ([]byte, error) {
+	return s.compress1xDo(s.Out, src)
+}
+
+func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+	var bw = bitWriter{out: dst}
+
+	// N is length divisible by 4.
+	n := len(src)
+	n -= n & 3
+	cTable := s.cTable[:256]
+
+	// Encode last bytes.
+	for i := len(src) & 3; i > 0; i-- {
+		bw.encSymbol(cTable, src[n+i-1])
+	}
+	n -= 4
+	if s.actualTableLog <= 8 {
+		for ; n >= 0; n -= 4 {
+			tmp := src[n : n+4]
+			// tmp should be len 4
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+		}
+	} else {
+		for ; n >= 0; n -= 4 {
+			tmp := src[n : n+4]
+			// tmp should be len 4
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+		}
+	}
+	err := bw.close()
+	return bw.out, err
+}
+
+var sixZeros [6]byte
+
+func (s *Scratch) compress4X(src []byte) ([]byte, error) {
+	if len(src) < 12 {
+		return nil, ErrIncompressible
+	}
+	segmentSize := (len(src) + 3) / 4
+
+	// Add placeholder for output length
+	offsetIdx := len(s.Out)
+	s.Out = append(s.Out, sixZeros[:]...)
+
+	for i := 0; i < 4; i++ {
+		toDo := src
+		if len(toDo) > segmentSize {
+			toDo = toDo[:segmentSize]
+		}
+		src = src[len(toDo):]
+
+		var err error
+		idx := len(s.Out)
+		s.Out, err = s.compress1xDo(s.Out, toDo)
+		if err != nil {
+			return nil, err
+		}
+		// Write compressed length as little endian before block.
+		if i < 3 {
+			// Last length is not written.
+			length := len(s.Out) - idx
+			s.Out[i*2+offsetIdx] = byte(length)
+			s.Out[i*2+offsetIdx+1] = byte(length >> 8)
+		}
+	}
+
+	return s.Out, nil
+}
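+
+// Frame layout sketch (informational, mirroring the parsing in Decompress4X):
+// the 4X frame is a 6-byte header holding the compressed lengths of the first
+// three streams as little-endian uint16, followed by the four streams; the
+// fourth length is implied by the remaining bytes.
+//
+//	len0 := int(b[0]) | int(b[1])<<8 // stream 0 starts at offset 6
+//	len1 := int(b[2]) | int(b[3])<<8 // stream 1 follows stream 0
+//	len2 := int(b[4]) | int(b[5])<<8 // stream 2 follows stream 1
+//	// stream 3 spans 6+len0+len1+len2 to the end of the block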
+
+// compress4Xp will compress 4 streams using separate goroutines.
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
+	if len(src) < 12 {
+		return nil, ErrIncompressible
+	}
+	// Add a 6-byte placeholder for the three stream lengths.
+	s.Out = s.Out[:6]
+
+	segmentSize := (len(src) + 3) / 4
+	var wg sync.WaitGroup
+	var errs [4]error
+	wg.Add(4)
+	for i := 0; i < 4; i++ {
+		toDo := src
+		if len(toDo) > segmentSize {
+			toDo = toDo[:segmentSize]
+		}
+		src = src[len(toDo):]
+
+		// Separate goroutine for each block.
+		go func(i int) {
+			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+	for i := 0; i < 4; i++ {
+		if errs[i] != nil {
+			return nil, errs[i]
+		}
+		o := s.tmpOut[i]
+		// Write compressed length as little endian before block.
+		if i < 3 {
+			// The fourth stream's length is implied and not written.
+			s.Out[i*2] = byte(len(o))
+			s.Out[i*2+1] = byte(len(o) >> 8)
+		}
+
+		// Write output.
+		s.Out = append(s.Out, o...)
+	}
+	return s.Out, nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
+	reuse = true
+	for _, v := range in {
+		s.count[v]++
+	}
+	m := uint32(0)
+	if len(s.prevTable) > 0 {
+		for i, v := range s.count[:] {
+			if v > m {
+				m = v
+			}
+			if v > 0 {
+				s.symbolLen = uint16(i) + 1
+				if i >= len(s.prevTable) {
+					reuse = false
+				} else {
+					if s.prevTable[i].nBits == 0 {
+						reuse = false
+					}
+				}
+			}
+		}
+		return int(m), reuse
+	}
+	for i, v := range s.count[:] {
+		if v > m {
+			m = v
+		}
+		if v > 0 {
+			s.symbolLen = uint16(i) + 1
+		}
+	}
+	return int(m), false
+}
+
+func (s *Scratch) canUseTable(c cTable) bool {
+	if len(c) < int(s.symbolLen) {
+		return false
+	}
+	for i, v := range s.count[:s.symbolLen] {
+		if v != 0 && c[i].nBits == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (s *Scratch) validateTable(c cTable) bool {
+	if len(c) < int(s.symbolLen) {
+		return false
+	}
+	for i, v := range s.count[:s.symbolLen] {
+		if v != 0 {
+			if c[i].nBits == 0 {
+				return false
+			}
+			if c[i].nBits > s.actualTableLog {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
+	if minBitsSrc < minBitsSymbols {
+		return uint8(minBitsSrc)
+	}
+	return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+	tableLog := s.TableLog
+	minBits := s.minTableLog()
+	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minTablelog {
+		tableLog = minTablelog
+	}
+	if tableLog > tableLogMax {
+		tableLog = tableLogMax
+	}
+	s.actualTableLog = tableLog
+}
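+
+// Worked example (illustrative): for a 1024-byte block with 64 distinct
+// symbols, minTableLog() = min(highBit32(1024)+1, highBit32(63)+2) =
+// min(11, 7) = 7, and maxBitsSrc = highBit32(1023)-1 = 8, so the default
+// TableLog of 11 is reduced and s.actualTableLog becomes 8.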
+
+type cTableEntry struct {
+	val   uint16
+	nBits uint8
+	// We have 8 bits extra
+}
+
+const huffNodesMask = huffNodesLen - 1
+
+func (s *Scratch) buildCTable() error {
+	s.optimalTableLog()
+	s.huffSort()
+	if cap(s.cTable) < maxSymbolValue+1 {
+		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
+	} else {
+		s.cTable = s.cTable[:s.symbolLen]
+		for i := range s.cTable {
+			s.cTable[i] = cTableEntry{}
+		}
+	}
+
+	var startNode = int16(s.symbolLen)
+	nonNullRank := s.symbolLen - 1
+
+	nodeNb := startNode
+	huffNode := s.nodes[1 : huffNodesLen+1]
+
+	// This overlays the slice above, but allows "-1" index lookups.
+	// Different from reference implementation.
+	huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+	for huffNode[nonNullRank].count == 0 {
+		nonNullRank--
+	}
+
+	lowS := int16(nonNullRank)
+	nodeRoot := nodeNb + lowS - 1
+	lowN := nodeNb
+	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
+	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	nodeNb++
+	lowS -= 2
+	for n := nodeNb; n <= nodeRoot; n++ {
+		huffNode[n].count = 1 << 30
+	}
+	// fake entry, strong barrier
+	huffNode0[0].count = 1 << 31
+
+	// create parents
+	for nodeNb <= nodeRoot {
+		var n1, n2 int16
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n1 = lowS
+			lowS--
+		} else {
+			n1 = lowN
+			lowN++
+		}
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n2 = lowS
+			lowS--
+		} else {
+			n2 = lowN
+			lowN++
+		}
+
+		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
+		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		nodeNb++
+	}
+
+	// distribute weights (unlimited tree height)
+	huffNode[nodeRoot].nbBits = 0
+	for n := nodeRoot - 1; n >= startNode; n-- {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	for n := uint16(0); n <= nonNullRank; n++ {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+	maxNbBits := s.actualTableLog
+
+	// fill result into tree (val, nbBits)
+	if maxNbBits > tableLogMax {
+		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+	}
+	var nbPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
+	for _, v := range huffNode[:nonNullRank+1] {
+		nbPerRank[v.nbBits]++
+	}
+	// determine starting value per rank
+	{
+		min := uint16(0)
+		for n := maxNbBits; n > 0; n-- {
+			// get starting value within each rank
+			valPerRank[n] = min
+			min += nbPerRank[n]
+			min >>= 1
+		}
+	}
+
+	// push nbBits per symbol, symbol order
+	for _, v := range huffNode[:nonNullRank+1] {
+		s.cTable[v.symbol].nBits = v.nbBits
+	}
+
+	// assign value within rank, symbol order
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
+	}
+
+	return nil
+}
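+
+// Worked example (illustrative): for code lengths {1, 2, 3, 3} the loops
+// above give nbPerRank[1..3] = {1, 1, 2} and, scanning from maxNbBits down,
+// valPerRank[3..1] = {0, 1, 1}, so the symbols receive the canonical codes
+// 1, 01, 000 and 001.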
+
+// huffSort will sort symbols by count in decreasing order.
+func (s *Scratch) huffSort() {
+	type rankPos struct {
+		base    uint32
+		current uint32
+	}
+
+	// Clear nodes
+	nodes := s.nodes[:huffNodesLen+1]
+	s.nodes = nodes
+	nodes = nodes[1 : huffNodesLen+1]
+
+	// Sort into buckets based on length of symbol count.
+	var rank [32]rankPos
+	for _, v := range s.count[:s.symbolLen] {
+		r := highBit32(v+1) & 31
+		rank[r].base++
+	}
+	// maxBitLength is log2(BlockSizeMax) + 1
+	const maxBitLength = 18 + 1
+	for n := maxBitLength; n > 0; n-- {
+		rank[n-1].base += rank[n].base
+	}
+	for n := range rank[:maxBitLength] {
+		rank[n].current = rank[n].base
+	}
+	for n, c := range s.count[:s.symbolLen] {
+		r := (highBit32(c+1) + 1) & 31
+		pos := rank[r].current
+		rank[r].current++
+		prev := nodes[(pos-1)&huffNodesMask]
+		for pos > rank[r].base && c > prev.count {
+			nodes[pos&huffNodesMask] = prev
+			pos--
+			prev = nodes[(pos-1)&huffNodesMask]
+		}
+		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+	}
+}
+
+func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
+	maxNbBits := s.actualTableLog
+	huffNode := s.nodes[1 : huffNodesLen+1]
+
+	largestBits := huffNode[lastNonNull].nbBits
+
+	// early exit : no elt > maxNbBits
+	if largestBits <= maxNbBits {
+		return largestBits
+	}
+	totalCost := int(0)
+	baseCost := int(1) << (largestBits - maxNbBits)
+	n := uint32(lastNonNull)
+
+	for huffNode[n].nbBits > maxNbBits {
+		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
+		huffNode[n].nbBits = maxNbBits
+		n--
+	}
+	// n stops at huffNode[n].nbBits <= maxNbBits
+
+	for huffNode[n].nbBits == maxNbBits {
+		n--
+	}
+	// n ends at the index of the smallest symbol using < maxNbBits
+
+	// renorm totalCost
+	totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */
+
+	// repay normalized cost
+	{
+		const noSymbol = 0xF0F0F0F0
+		var rankLast [tableLogMax + 2]uint32
+
+		for i := range rankLast[:] {
+			rankLast[i] = noSymbol
+		}
+
+		// Get pos of last (smallest) symbol per rank
+		{
+			currentNbBits := maxNbBits
+			for pos := int(n); pos >= 0; pos-- {
+				if huffNode[pos].nbBits >= currentNbBits {
+					continue
+				}
+				currentNbBits = huffNode[pos].nbBits // < maxNbBits
+				rankLast[maxNbBits-currentNbBits] = uint32(pos)
+			}
+		}
+
+		for totalCost > 0 {
+			nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1
+
+			for ; nBitsToDecrease > 1; nBitsToDecrease-- {
+				highPos := rankLast[nBitsToDecrease]
+				lowPos := rankLast[nBitsToDecrease-1]
+				if highPos == noSymbol {
+					continue
+				}
+				if lowPos == noSymbol {
+					break
+				}
+				highTotal := huffNode[highPos].count
+				lowTotal := 2 * huffNode[lowPos].count
+				if highTotal <= lowTotal {
+					break
+				}
+			}
+			// only triggered when no more rank-1 symbols are left => find the closest one (note: there is necessarily at least one!)
+			// HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
+			// FIXME: try to remove
+			for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
+				nBitsToDecrease++
+			}
+			totalCost -= 1 << (nBitsToDecrease - 1)
+			if rankLast[nBitsToDecrease-1] == noSymbol {
+				// this rank is no longer empty
+				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
+			}
+			huffNode[rankLast[nBitsToDecrease]].nbBits++
+			if rankLast[nBitsToDecrease] == 0 {
+				/* special case, reached largest symbol */
+				rankLast[nBitsToDecrease] = noSymbol
+			} else {
+				rankLast[nBitsToDecrease]--
+				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
+				}
+			}
+		}
+
+		for totalCost < 0 { /* Sometimes, cost correction overshoot */
+			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+				for huffNode[n].nbBits == maxNbBits {
+					n--
+				}
+				huffNode[n+1].nbBits--
+				rankLast[1] = n + 1
+				totalCost++
+				continue
+			}
+			huffNode[rankLast[1]+1].nbBits--
+			rankLast[1]++
+			totalCost++
+		}
+	}
+	return maxNbBits
+}
+
+type nodeElt struct {
+	count  uint32
+	parent uint16
+	symbol byte
+	nbBits uint8
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
new file mode 100644
index 0000000000000000000000000000000000000000..9b7cc8e97bb908e1ee019a2bee68399f560a53e0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -0,0 +1,1371 @@
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/fse"
+)
+
+type dTable struct {
+	single []dEntrySingle
+	double []dEntryDouble
+}
+
+// single-symbols decoding
+type dEntrySingle struct {
+	entry uint16
+}
+
+// double-symbols decoding
+type dEntryDouble struct {
+	seq   uint16
+	nBits uint8
+	len   uint8
+}
+
+// Uses special code for all tables that are < 8 bits.
+const use8BitTables = true
+
+// ReadTable will read a table from the input.
+// The size of the input may be larger than the table definition.
+// Any content remaining after the table definition will be returned.
+// If no Scratch is provided a new one is allocated.
+// The returned Scratch can be used for encoding or decoding input using this table.
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return s, nil, err
+	}
+	if len(in) <= 1 {
+		return s, nil, errors.New("input too small for table")
+	}
+	iSize := in[0]
+	in = in[1:]
+	if iSize >= 128 {
+		// Uncompressed
+		oSize := iSize - 127
+		iSize = (oSize + 1) / 2
+		if int(iSize) > len(in) {
+			return s, nil, errors.New("input too small for table")
+		}
+		for n := uint8(0); n < oSize; n += 2 {
+			v := in[n/2]
+			s.huffWeight[n] = v >> 4
+			s.huffWeight[n+1] = v & 15
+		}
+		s.symbolLen = uint16(oSize)
+		in = in[iSize:]
+	} else {
+		if len(in) < int(iSize) {
+			return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in))
+		}
+		// FSE compressed weights
+		s.fse.DecompressLimit = 255
+		hw := s.huffWeight[:]
+		s.fse.Out = hw
+		b, err := fse.Decompress(in[:iSize], s.fse)
+		s.fse.Out = nil
+		if err != nil {
+			return s, nil, err
+		}
+		if len(b) > 255 {
+			return s, nil, errors.New("corrupt input: output table too large")
+		}
+		s.symbolLen = uint16(len(b))
+		in = in[iSize:]
+	}
+
+	// collect weight stats
+	var rankStats [16]uint32
+	weightTotal := uint32(0)
+	for _, v := range s.huffWeight[:s.symbolLen] {
+		if v > tableLogMax {
+			return s, nil, errors.New("corrupt input: weight too large")
+		}
+		v2 := v & 15
+		rankStats[v2]++
+		// (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0.
+		weightTotal += (1 << v2) >> 1
+	}
+	if weightTotal == 0 {
+		return s, nil, errors.New("corrupt input: weights zero")
+	}
+
+	// get last non-null symbol weight (implied, total must be 2^n)
+	{
+		tableLog := highBit32(weightTotal) + 1
+		if tableLog > tableLogMax {
+			return s, nil, errors.New("corrupt input: tableLog too big")
+		}
+		s.actualTableLog = uint8(tableLog)
+		// determine last weight
+		{
+			total := uint32(1) << tableLog
+			rest := total - weightTotal
+			verif := uint32(1) << highBit32(rest)
+			lastWeight := highBit32(rest) + 1
+			if verif != rest {
+				// last value must be a clean power of 2
+				return s, nil, errors.New("corrupt input: last value not power of two")
+			}
+			s.huffWeight[s.symbolLen] = uint8(lastWeight)
+			s.symbolLen++
+			rankStats[lastWeight]++
+		}
+	}
+
+	if (rankStats[1] < 2) || (rankStats[1]&1 != 0) {
+		// by construction : at least 2 elts of rank 1, must be even
+		return s, nil, errors.New("corrupt input: min elt size, even check failed")
+	}
+
+	// TODO: Choose between single/double symbol decoding
+
+	// Calculate starting value for each rank
+	{
+		var nextRankStart uint32
+		for n := uint8(1); n < s.actualTableLog+1; n++ {
+			current := nextRankStart
+			nextRankStart += rankStats[n] << (n - 1)
+			rankStats[n] = current
+		}
+	}
+
+	// fill DTable (always full size)
+	tSize := 1 << tableLogMax
+	if len(s.dt.single) != tSize {
+		s.dt.single = make([]dEntrySingle, tSize)
+	}
+	cTable := s.prevTable
+	if cap(cTable) < maxSymbolValue+1 {
+		cTable = make([]cTableEntry, 0, maxSymbolValue+1)
+	}
+	cTable = cTable[:maxSymbolValue+1]
+	s.prevTable = cTable[:s.symbolLen]
+	s.prevTableLog = s.actualTableLog
+
+	for n, w := range s.huffWeight[:s.symbolLen] {
+		if w == 0 {
+			cTable[n] = cTableEntry{
+				val:   0,
+				nBits: 0,
+			}
+			continue
+		}
+		length := (uint32(1) << w) >> 1
+		d := dEntrySingle{
+			entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
+		}
+
+		rank := &rankStats[w]
+		cTable[n] = cTableEntry{
+			val:   uint16(*rank >> (w - 1)),
+			nBits: uint8(d.entry),
+		}
+
+		single := s.dt.single[*rank : *rank+length]
+		for i := range single {
+			single[i] = d
+		}
+		*rank += length
+	}
+
+	return s, in, nil
+}
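+
+// Caller-side sketch (illustrative; `blob` and `dstSize` are assumed): read
+// a table description and decode the payload that follows it.
+//
+//	s, remain, err := huff0.ReadTable(blob, nil)
+//	if err != nil {
+//		return err
+//	}
+//	out, err := s.Decoder().Decompress1X(make([]byte, 0, dstSize), remain)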
+
+// Decompress1X will decompress a 1X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// Deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
+	if cap(s.Out) < s.MaxDecodedSize {
+		s.Out = make([]byte, s.MaxDecodedSize)
+	}
+	s.Out = s.Out[:0:s.MaxDecodedSize]
+	s.Out, err = s.Decoder().Decompress1X(s.Out, in)
+	return s.Out, err
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// The length of the supplied input must match the end of a block exactly.
+// The destination size of the uncompressed data must be known and provided.
+// Deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
+	if dstSize > s.MaxDecodedSize {
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	if cap(s.Out) < dstSize {
+		s.Out = make([]byte, s.MaxDecodedSize)
+	}
+	s.Out = s.Out[:0:dstSize]
+	s.Out, err = s.Decoder().Decompress4X(s.Out, in)
+	return s.Out, err
+}
+
+// Decoder will return a stateless decoder that can be used by multiple
+// decompressors concurrently.
+// Before this is called, the table must be initialized with ReadTable.
+// The Decoder is still linked to the scratch buffer, so its tables must not
+// be overwritten while the Decoder is in use. It is, however, safe to
+// discard the Scratch itself.
+func (s *Scratch) Decoder() *Decoder {
+	return &Decoder{
+		dt:             s.dt,
+		actualTableLog: s.actualTableLog,
+	}
+}
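+
+// Concurrency sketch (illustrative; block1/block2 and their decoded sizes are
+// assumed): the Decoder carries no mutable state, so it may be shared.
+//
+//	dec := s.Decoder()
+//	go func() { out1, err1 = dec.Decompress1X(make([]byte, 0, size1), block1) }()
+//	go func() { out2, err2 = dec.Decompress1X(make([]byte, 0, size2), block2) }()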
+
+// Decoder provides stateless decoding.
+type Decoder struct {
+	dt             dTable
+	actualTableLog uint8
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if use8BitTables && d.actualTableLog <= 8 {
+		return d.decompress1X8Bit(dst, src)
+	}
+	var br bitReaderShifted
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	dt := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+
+	for br.off >= 8 {
+		br.fillFast()
+		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+0] = uint8(v.entry >> 8)
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+1] = uint8(v.entry >> 8)
+
+		// Refill
+		br.fillFast()
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+2] = uint8(v.entry >> 8)
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+3] = uint8(v.entry >> 8)
+
+		off += 4
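+		// off is a uint8: wrapping to 0 means the 256-byte buf is full.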
+		if off == 0 {
+			if len(dst)+256 > maxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			dst = append(dst, buf[:]...)
+		}
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if false && br.bitsRead >= 32 {
+			if br.off >= 4 {
+				v := br.in[br.off-4:]
+				v = v[:4]
+				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+				br.value = (br.value << 32) | uint64(low)
+				br.bitsRead -= 32
+				br.off -= 4
+			} else {
+				for br.off > 0 {
+					br.value = (br.value << 8) | uint64(br.in[br.off-1])
+					br.bitsRead -= 8
+					br.off--
+				}
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
+
+// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
+	if d.actualTableLog == 8 {
+		return d.decompress1X8BitExactly(dst, src)
+	}
+	var br bitReaderBytes
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	dt := d.dt.single[:256]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+
+	switch d.actualTableLog {
+	case 8:
+		const shift = 8 - 8
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 7:
+		const shift = 8 - 7
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 6:
+		const shift = 8 - 6
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 5:
+		const shift = 8 - 5
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 4:
+		const shift = 8 - 4
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 3:
+		const shift = 8 - 3
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 2:
+		const shift = 8 - 2
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 1:
+		const shift = 8 - 1
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	default:
+		return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 4, so uint8 is fine
+	bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+	shift := (8 - d.actualTableLog) & 7
+
+	for bitsLeft > 0 {
+		if br.bitsRead >= 64-8 {
+			for br.off > 0 {
+				br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+				br.bitsRead -= 8
+				br.off--
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := dt[br.peekByteFast()>>shift]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= int8(nBits)
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
+
+// decompress1X8BitExactly will decompress a 1X encoded stream with tablelog == 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
+	var br bitReaderBytes
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	dt := d.dt.single[:256]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+
+	const shift = 56
+
+	for br.off >= 4 {
+		br.fillFast()
+		v := dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+0] = uint8(v.entry >> 8)
+
+		v = dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+1] = uint8(v.entry >> 8)
+
+		v = dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+2] = uint8(v.entry >> 8)
+
+		v = dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+3] = uint8(v.entry >> 8)
+
+		off += 4
+		if off == 0 {
+			if len(dst)+256 > maxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			dst = append(dst, buf[:]...)
+		}
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 4, so uint8 is fine
+	bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+	for bitsLeft > 0 {
+		if br.bitsRead >= 64-8 {
+			for br.off > 0 {
+				br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+				br.bitsRead -= 8
+				br.off--
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := dt[br.peekByteFast()]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= int8(nBits)
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+	if use8BitTables && d.actualTableLog <= 8 {
+		return d.decompress4X8bit(dst, src)
+	}
+
+	var br [4]bitReaderShifted
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 2 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream] = uint8(v.entry >> 8)
+
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v2 := single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
+
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v2 = single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream] = uint8(v.entry >> 8)
+
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v2 := single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
+
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v2 = single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+		}
+
+		off += 2
+
+		if off == bufoff {
+			if bufoff > dstEvery {
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[:bufoff])
+			copy(out[dstEvery:], buf[bufoff:bufoff*2])
+			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must be at least 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := br.off*8 + uint(64-br.bitsRead)
+		for bitsLeft > 0 {
+			br.fill()
+			if false && br.bitsRead >= 32 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value = (br.value << 32) | uint64(low)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value = (br.value << 8) | uint64(br.in[br.off-1])
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress4X8bit will decompress a 4X encoded stream with tablelog <= 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
+	if d.actualTableLog == 8 {
+		return d.decompress4X8bitExactly(dst, src)
+	}
+
+	var br [4]bitReaderBytes
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	shift := (8 - d.actualTableLog) & 7
+
+	const tlSize = 1 << 8
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 4 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			// Interleave 2 decodes.
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		off += 4
+
+		if off == bufoff {
+			if bufoff > dstEvery {
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[:bufoff])
+			copy(out[dstEvery:], buf[bufoff:bufoff*2])
+			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must be at least 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress4X8bitExactly will decompress a 4X encoded stream with tablelog == 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
+	var br [4]bitReaderBytes
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const shift = 0
+	const tlSize = 1 << 8
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 4 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			// Interleave 2 decodes.
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		off += 4
+
+		if off == bufoff {
+			if bufoff > dstEvery {
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[:bufoff])
+			copy(out[dstEvery:], buf[bufoff:bufoff*2])
+			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must be at least 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if the table is OK.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has a decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ee00ecb470ab3340b18d19ae8906af8a6d8d5a1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,335 @@
+// Package huff0 provides fast huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if the input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds MaxDecodedSize.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy defines how the encoder may reuse a previous table between blocks.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
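+
+// Policy sketch (illustrative): a typical encoder that favors reusing the
+// previous table across blocks of similar data. Compress1X is assumed to be
+// the package's single-stream entry point.
+//
+//	var s Scratch
+//	s.Reuse = ReusePolicyPrefer
+//	out, reused, err := Compress1X(block, &s)
+//	_ = reused // true when the previous table was used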
+
+// Scratch provides reusable state and buffers for compression and decompression.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next
+	// compression/decompression step and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse specifies the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that must at least be
+	// achieved, otherwise the block will be returned as incompressible.
+	// The reduction must then be at least (input size >> WantLogLess).
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
+	prevTable      cTable // Table used for previous compression.
+	cTable         cTable // compression table
+	dt             dTable // decompression table
+	nodes          []nodeElt
+	tmpOut         [4][]byte
+	fse            *fse.Scratch
+	huffWeight     [maxSymbolValue + 1]byte
+}
+
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+	if cap(s.prevTable) < len(src.prevTable) {
+		s.prevTable = make(cTable, 0, maxSymbolValue+1)
+	}
+	s.prevTable = s.prevTable[:len(src.prevTable)]
+	copy(s.prevTable, src.prevTable)
+	s.prevTableLog = src.prevTableLog
+}
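+
+// Transfer sketch (illustrative; firstBlock is assumed): seed a fresh Scratch
+// with the table a previous encoder produced, so similar data can reuse it.
+//
+//	var a, b Scratch
+//	_, _, _ = Compress1X(firstBlock, &a)
+//	b.TransferCTable(&a)
+//	b.Reuse = ReusePolicyAllow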
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if len(in) > BlockSizeMax {
+		return nil, ErrTooBig
+	}
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = maxSymbolValue
+	}
+	if s.TableLog == 0 {
+		s.TableLog = tableLogDefault
+	}
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	s.Out = s.Out[:0]
+
+	s.OutTable = nil
+	s.OutData = nil
+	if cap(s.nodes) < huffNodesLen+1 {
+		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+	}
+	s.nodes = s.nodes[:0]
+	if s.fse == nil {
+		s.fse = &fse.Scratch{}
+	}
+	s.br.init(in)
+
+	return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
+	hist := s.fse.Histogram()
+	hist = hist[:256]
+	for i := range hist[:16] {
+		hist[i] = 0
+	}
+	for n := uint8(0); n < maxSymbolValue; n++ {
+		v := bitsToWeight[c[n].nBits] & 15
+		huffWeight[n] = v
+		hist[v]++
+	}
+
+	// FSE compress if feasible.
+	if maxSymbolValue >= 2 {
+		huffMaxCnt := uint32(0)
+		huffMax := uint8(0)
+		for i, v := range hist[:16] {
+			if v == 0 {
+				continue
+			}
+			huffMax = byte(i)
+			if v > huffMaxCnt {
+				huffMaxCnt = v
+			}
+		}
+		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+		s.fse.TableLog = maxFSETableLog
+		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+		if err == nil && len(b) < int(s.symbolLen>>1) {
+			s.Out = append(s.Out, uint8(len(b)))
+			s.Out = append(s.Out, b...)
+			return nil
+		}
+		// Unable to compress (RLE/incompressible)
+	}
+	// write raw values as 4-bits (max : 15)
+	if maxSymbolValue > (256 - 128) {
+		// should not happen : likely means source cannot be compressed
+		return ErrIncompressible
+	}
+	op := s.Out
+	// special case, pack weights 4 bits/weight.
+	op = append(op, 128|(maxSymbolValue-1))
+	// be sure it doesn't cause msan issue in final combination
+	huffWeight[maxSymbolValue] = 0
+	for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
+		op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
+	}
+	s.Out = op
+	return nil
+}
+
+func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
+	hist := s.fse.Histogram()
+	hist = hist[:256]
+	for i := range hist[:16] {
+		hist[i] = 0
+	}
+	for n := uint8(0); n < maxSymbolValue; n++ {
+		v := bitsToWeight[c[n].nBits] & 15
+		huffWeight[n] = v
+		hist[v]++
+	}
+
+	// FSE compress if feasible.
+	if maxSymbolValue >= 2 {
+		huffMaxCnt := uint32(0)
+		huffMax := uint8(0)
+		for i, v := range hist[:16] {
+			if v == 0 {
+				continue
+			}
+			huffMax = byte(i)
+			if v > huffMaxCnt {
+				huffMaxCnt = v
+			}
+		}
+		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+		s.fse.TableLog = maxFSETableLog
+		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+		if err == nil && len(b) < int(s.symbolLen>>1) {
+			sz += 1 + len(b)
+			return sz, nil
+		}
+		// Unable to compress (RLE/incompressible)
+	}
+	// write raw values as 4-bits (max : 15)
+	if maxSymbolValue > (256 - 128) {
+		// should not happen : likely means source cannot be compressed
+		return 0, ErrIncompressible
+	}
+	// special case, pack weights 4 bits/weight.
+	sz += 1 + int(maxSymbolValue/2)
+	return sz, nil
+}
+
+// estimateSize returns the estimated size in bytes of the input represented in the
+// histogram supplied.
+func (c cTable) estimateSize(hist []uint32) int {
+	nbBits := uint32(7)
+	for i, v := range c[:len(hist)] {
+		nbBits += uint32(v.nBits) * hist[i]
+	}
+	return int(nbBits >> 3)
+}
+
+// minSize returns the minimum possible size considering the Shannon limit.
+func (s *Scratch) minSize(total int) int {
+	nbBits := float64(7)
+	fTotal := float64(total)
+	for _, v := range s.count[:s.symbolLen] {
+		n := float64(v)
+		if n > 0 {
+			nbBits += math.Log2(fTotal/n) * n
+		}
+	}
+	return int(nbBits) >> 3
+}
+
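+// highBit32 returns the index of the highest set bit in val.
+// val must not be zero.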
+func highBit32(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6050c10f4c8b4c22f50c83715f44f12419f763be
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
new file mode 100644
index 0000000000000000000000000000000000000000..40796a49d659147f6cc7e2d1e7a5cd26e1f68f34
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
@@ -0,0 +1,264 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Decode handles the Snappy block format, not the Snappy stream format.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+//
+// Reader handles the Snappy stream format, not the Snappy block format.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+func (r *Reader) fill() error {
+	for r.i >= r.j {
+		if !r.readFull(r.buf[:4], true) {
+			return r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return r.err
+		}
+	}
+
+	return nil
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+
+	if err := r.fill(); err != nil {
+		return 0, err
+	}
+
+	n := copy(p, r.decoded[r.i:r.j])
+	r.i += n
+	return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+
+	if err := r.fill(); err != nil {
+		return 0, err
+	}
+
+	c := r.decoded[r.i]
+	r.i++
+	return c, nil
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..77395a6b8b9e0d81dbbd0fe09e4fbb4270d01b88
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
@@ -0,0 +1,113 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice.
+		// If no overlap, use the built-in copy:
+		if offset >= length {
+			copy(dst[d:d+length], dst[d-offset:])
+			d += length
+			continue
+		}
+
+		// Unlike the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		//
+		// We align the slices into a and b and show the compiler they are the same size.
+		// This allows the loop to run without bounds checks.
+		a := dst[d : d+length]
+		b := dst[d-offset:]
+		b = b[:len(a)]
+		for i := range a {
+			a[i] = b[i]
+		}
+		d += length
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go
new file mode 100644
index 0000000000000000000000000000000000000000..13c6040a5dedba942277f450b42601d37b352f73
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go
@@ -0,0 +1,289 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Encode handles the Snappy block format, not the Snappy stream format.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+//
+// Writer handles the Snappy stream format, not the Snappy block format.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..511bba65db8f6f1161d692d233e9f7f9cbc33794
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -0,0 +1,236 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
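+// load32 returns the uint32 stored at b[i:i+4] in little-endian order.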
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
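+// load64 returns the uint64 stored at b[i:i+8] in little-endian order.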
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
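+// hash returns a multiplicative hash of u, shifted right by shift so that
+// the result indexes the current hash table size.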
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (ie. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go
new file mode 100644
index 0000000000000000000000000000000000000000..34d01f4aa63af7f28300b25741c9376f8052d961
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snapref implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snapref
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
new file mode 100644
index 0000000000000000000000000000000000000000..2263853fcade79bb73917e2ad340fc9b0e7b28a3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -0,0 +1,4 @@
+module github.com/klauspost/compress
+
+go 1.16
+
diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c8f0f16fc1ecd57ce4b6ae22da1bb13ee9c28284
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -0,0 +1,441 @@
+# zstd 
+
+[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder.
+It implements a high-performance compression algorithm, currently focused on speed. 
+
+This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. 
+
+This package is pure Go and without use of "unsafe". 
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
+
+## Compressor
+
+### Status: 
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library 
+is actively used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases, 
+so as always, testing is recommended.  
+
+For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. 
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1. 
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. 
+Its compression ratio is roughly that of stdlib at level 3, but it is usually 3x as fast.
+
+ 
+### Usage
+
+An Encoder can be used either to compress a stream via the
+`io.WriteCloser` interface it supports, or to compress multiple independent
+buffers via the `EncodeAll` function.
+For smaller encodes, the `EncodeAll` function is encouraged.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do like this:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+    enc, err := zstd.NewWriter(out)
+    if err != nil {
+        return err
+    }
+    _, err = io.Copy(enc, in)
+    if err != nil {
+        enc.Close()
+        return err
+    }
+    return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.  
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. 
+This will allow the encoder to reuse all resources and avoid wasteful allocations. 
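+
+As a rough sketch, reuse could look like this (assuming `dst1`, `dst2` are
+`io.Writer`s and `src1`, `src2` are `io.Reader`s; the names are illustrative):
+
+```Go
+enc, err := zstd.NewWriter(dst1)
+if err != nil {
+    return err
+}
+if _, err = io.Copy(enc, src1); err != nil {
+    return err
+}
+if err = enc.Close(); err != nil { // finish the first stream
+    return err
+}
+
+enc.Reset(dst2) // switch output; internal buffers are reused
+if _, err = io.Copy(enc, src2); err != nil {
+    return err
+}
+return enc.Close()
+```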
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part 
+of a stream. This is independent of the `WithEncoderConcurrency(n)` option, but that is likely to change 
+in the future. So if you want concurrency to stay limited in future updates, specify the concurrency 
+you would like.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined 
+compression settings can be specified.
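+
+For example, a minimal sketch selecting a preset level (`out` is an assumed
+`io.Writer`):
+
+```Go
+enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
+```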
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3). 
+However the encoding should never be assumed to remain the same, 
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this, 
+although they will not be enabled without an explicit option.   
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) 
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder. 
+This will effectively make it run without allocations after a warmup period. 
+To make it run completely without allocations, supply a destination buffer with space for all content.   
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Reader.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer. 
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+    return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+} 
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` 
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe. 
+
+### Performance
+
+I have collected some examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+This package:
+file    out     level   insize      outsize     millis  mb/s
+silesia.tar zskp    1   211947520   73101992    643     313.87
+silesia.tar zskp    2   211947520   67504318    969     208.38
+silesia.tar zskp    3   211947520   64595893    2007    100.68
+silesia.tar zskp    4   211947520   60995370    8825    22.90
+
+cgo zstd:
+silesia.tar zstd    1   211947520   73605392    543     371.56
+silesia.tar zstd    3   211947520   66793289    864     233.68
+silesia.tar zstd    6   211947520   62916450    1913    105.66
+silesia.tar zstd    9   211947520   60212393    5063    39.92
+
+gzip, stdlib/this package:
+silesia.tar gzstd   1   211947520   80007735    1654    122.21
+silesia.tar gzkp    1   211947520   80136201    1152    175.45
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file        out     level   insize  outsize     millis  mb/s
+gob-stream  zskp    1   1911399616  235022249   3088    590.30
+gob-stream  zskp    2   1911399616  205669791   3786    481.34
+gob-stream  zskp    3   1911399616  175034659   9636    189.17
+gob-stream  zskp    4   1911399616  165609838   50369   36.19
+
+gob-stream  zstd    1   1911399616  249810424   2637    691.26
+gob-stream  zstd    3   1911399616  208192146   3490    522.31
+gob-stream  zstd    6   1911399616  193632038   6687    272.56
+gob-stream  zstd    9   1911399616  177620386   16175   112.70
+
+gob-stream  gzstd   1   1911399616  357382641   10251   177.82
+gob-stream  gzkp    1   1911399616  359753026   5438    335.20
+
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
+
+file    out level   insize      outsize     millis  mb/s
+enwik9  zskp    1   1000000000  343848582   3609    264.18
+enwik9  zskp    2   1000000000  317276632   5746    165.97
+enwik9  zskp    3   1000000000  292243069   12162   78.41
+enwik9  zskp    4   1000000000  262183768   82837   11.51
+
+enwik9  zstd    1   1000000000  358072021   3110    306.65
+enwik9  zstd    3   1000000000  313734672   4784    199.35
+enwik9  zstd    6   1000000000  295138875   10290   92.68
+enwik9  zstd    9   1000000000  278348700   28549   33.40
+
+enwik9  gzstd   1   1000000000  382578136   9604    99.30
+enwik9  gzkp    1   1000000000  383825945   6544    145.73
+
+Highly compressible JSON file.
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file                        out level   insize      outsize     millis  mb/s
+github-june-2days-2019.json zskp    1   6273951764  699045015   10620   563.40
+github-june-2days-2019.json zskp    2   6273951764  617881763   11687   511.96
+github-june-2days-2019.json zskp    3   6273951764  524340691   34043   175.75
+github-june-2days-2019.json zskp    4   6273951764  470320075   170190  35.16
+
+github-june-2days-2019.json zstd    1   6273951764  766284037   8450    708.00
+github-june-2days-2019.json zstd    3   6273951764  661889476   10927   547.57
+github-june-2days-2019.json zstd    6   6273951764  642756859   22996   260.18
+github-june-2days-2019.json zstd    9   6273951764  601974523   52413   114.16
+
+github-june-2days-2019.json gzstd   1   6273951764  1164400847  29948   199.79
+github-june-2days-2019.json gzkp    1   6273951764  1125417694  21788   274.61
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file                    out level   insize      outsize     millis  mb/s
+rawstudio-mint14.tar    zskp    1   8558382592  3667489370  20210   403.84
+rawstudio-mint14.tar    zskp    2   8558382592  3364592300  31873   256.07
+rawstudio-mint14.tar    zskp    3   8558382592  3158085214  77675   105.08
+rawstudio-mint14.tar    zskp    4   8558382592  2965110639  857750  9.52
+
+rawstudio-mint14.tar    zstd    1   8558382592  3609250104  17136   476.27
+rawstudio-mint14.tar    zstd    3   8558382592  3341679997  29262   278.92
+rawstudio-mint14.tar    zstd    6   8558382592  3235846406  77904   104.77
+rawstudio-mint14.tar    zstd    9   8558382592  3160778861  140946  57.91
+
+rawstudio-mint14.tar    gzstd   1   8558382592  3926257486  57722   141.40
+rawstudio-mint14.tar    gzkp    1   8558382592  3962605659  45113   180.92
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file                    out level   insize      outsize     millis  mb/s
+nyc-taxi-data-10M.csv   zskp    1   3325605752  641339945   8925    355.35
+nyc-taxi-data-10M.csv   zskp    2   3325605752  591748091   11268   281.44
+nyc-taxi-data-10M.csv   zskp    3   3325605752  530289687   25239   125.66
+nyc-taxi-data-10M.csv   zskp    4   3325605752  476268884   135958  23.33
+
+nyc-taxi-data-10M.csv   zstd    1   3325605752  687399637   8233    385.18
+nyc-taxi-data-10M.csv   zstd    3   3325605752  598514411   10065   315.07
+nyc-taxi-data-10M.csv   zstd    6   3325605752  570522953   20038   158.27
+nyc-taxi-data-10M.csv   zstd    9   3325605752  517554797   64565   49.12
+
+nyc-taxi-data-10M.csv   gzstd   1   3325605752  928656485   23876   132.83
+nyc-taxi-data-10M.csv   gzkp    1   3325605752  922257165   16780   189.00
+```
+
+## Decompressor
+
+### Status: 
+
+STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, 
+or run it past its limits with ANY input provided.  
+ 
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers. 
+Both are accessed by creating a `Decoder`.
+
+For streaming use a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+    d, err := zstd.NewReader(in)
+    if err != nil {
+        return err
+    }
+    defer d.Close()
+    
+    // Copy content...
+    _, err = io.Copy(out, d)
+    return err
+}
+```
+
+It is important to call "Close" when you no longer need the Reader, so that its running goroutines are stopped. 
+See "Allocation-less operation" below.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+    return decoder.DecodeAll(src, nil)
+} 
+```
+
+Both of these cases should provide the functionality needed. 
+The decoder can be used for *concurrent* decompression of multiple buffers. 
+It will only allow a certain number of concurrent operations to run. 
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used 
+and it will likely be used even if it doesn't improve compression. 
+
+The used dictionary must be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data. 
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). 
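+
+A minimal sketch of using a dictionary on both sides, assuming `dictData`
+holds a dictionary produced by `zstd --train` and `input` is the payload
+(both names are illustrative):
+
+```Go
+dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictData))
+if err != nil {
+    return nil, err
+}
+defer dec.Close()
+
+enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dictData))
+if err != nil {
+    return nil, err
+}
+defer enc.Close()
+
+compressed := enc.EncodeAll(input, nil) // compressed using the dictionary
+return dec.DecodeAll(compressed, nil)   // the decoder selects it by ID
+```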
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries. 
+This will likely be improved over time. Just be sure to test performance when implementing.  
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup. 
+
+This means that you should *store* the decoder for best performance. 
+To re-use a stream decoder, use the `Reset(r io.Reader) error` function to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made. 
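+
+A sketch of that pattern (assuming `decoder` is a stored `*zstd.Decoder` and
+`expectedSize` is your own size estimate):
+
+```Go
+dst := make([]byte, 0, expectedSize)
+out, err := decoder.DecodeAll(compressed, dst)
+```
+
+If the estimate was large enough, `out` will use the supplied backing array
+and no extra allocation should be needed.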
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
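+
+For instance (a sketch; `blobs` is an assumed `[][]byte` of independently
+compressed payloads):
+
+```Go
+dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(2))
+if err != nil {
+    return err
+}
+defer dec.Close()
+
+var wg sync.WaitGroup
+for _, b := range blobs {
+    wg.Add(1)
+    go func(b []byte) {
+        defer wg.Done()
+        out, err := dec.DecodeAll(b, nil)
+        _, _ = out, err // handle as appropriate
+    }(b)
+}
+wg.Wait()
+```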
+
+The stream decoder operates as follows:
+
+* One goroutine reads input and splits the input to several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
+ 
+ 
+### Benchmarks
+
+These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
+
+The first two are streaming decodes and the rest are smaller inputs. 
+ 
+```
+BenchmarkDecoderSilesia-8                          3     385000067 ns/op     550.51 MB/s        5498 B/op          8 allocs/op
+BenchmarkDecoderSilesiaCgo-8                       6     197666567 ns/op    1072.25 MB/s      270672 B/op          8 allocs/op
+
+BenchmarkDecoderEnwik9-8                           1    2027001600 ns/op     493.34 MB/s       10496 B/op         18 allocs/op
+BenchmarkDecoderEnwik9Cgo-8                        2     979499200 ns/op    1020.93 MB/s      270672 B/op          8 allocs/op
+
+Concurrent performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16                28915         42469 ns/op    4340.07 MB/s         114 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16           116505          9965 ns/op    11900.16 MB/s         16 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16              8952        134272 ns/op    3588.70 MB/s         915 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16               11820        102538 ns/op    4161.90 MB/s         594 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16             34782         34184 ns/op    3661.88 MB/s          60 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16              27712         43447 ns/op    3500.58 MB/s          99 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16                 62826         18750 ns/op    21845.10 MB/s        104 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16          631545          1794 ns/op    57078.74 MB/s          2 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16         1690140           712 ns/op    172938.13 MB/s         1 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16                 10432        113593 ns/op    6180.73 MB/s        1143 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-16                    113206         10671 ns/op    9596.27 MB/s          15 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16          1530615           779 ns/op    5229.49 MB/s           0 B/op          0 allocs/op
+
+BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16             65217         16192 ns/op    11383.34 MB/s         46 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16        292671          4039 ns/op    29363.19 MB/s          6 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16          26314         46021 ns/op    10470.43 MB/s        293 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16            33897         34900 ns/op    12227.96 MB/s        205 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16         104348         11433 ns/op    10949.01 MB/s         20 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16           75949         15510 ns/op    9805.60 MB/s          32 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16             173910          6756 ns/op    60624.29 MB/s         37 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16       923076          1339 ns/op    76474.87 MB/s          1 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16       922920          1351 ns/op    91102.57 MB/s          2 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16              27649         43618 ns/op    16096.19 MB/s        407 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16                 279073          4160 ns/op    24614.18 MB/s          6 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16        749938          1579 ns/op    2581.71 MB/s           0 B/op          0 allocs/op
+```
+
+These numbers reflect performance as of around May 2020 and may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support compression and decompression of these files, you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT
+to use the global registration functions. The main reason is that two registrations from
+different packages will result in a panic.
+
+It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip
+files concurrently, and using a single instance will allow reusing some resources.
+
+See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for 
+how to compress and decompress files inside zip archives.
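+
+A minimal sketch of per-archive registration using this package's zip helpers; `zstd.ZipMethodWinZip` is the WinZip-assigned method ID, and the file name and content are placeholders:
+
+```Go
+package main
+
+import (
+	"archive/zip"
+	"bytes"
+	"log"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// Register the compressor on this writer only, not globally.
+	zw := zip.NewWriter(&buf)
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
+	w, err := zw.CreateHeader(&zip.FileHeader{Name: "file.txt", Method: zstd.ZipMethodWinZip})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if _, err := w.Write([]byte("hello zstd inside zip")); err != nil {
+		log.Fatal(err)
+	}
+	if err := zw.Close(); err != nil {
+		log.Fatal(err)
+	}
+
+	// Register the decompressor on this reader only.
+	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		log.Fatal(err)
+	}
+	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
+	// zr.File[0].Open() now transparently decompresses the zstd data.
+}
+```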
+
+# Contributions
+
+Contributions are always welcome.
+For new features/fixes, remember to add tests, and for performance enhancements include benchmarks.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 0000000000000000000000000000000000000000..85445853715413c391237a729b508784662df216
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,136 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+	"math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint   // next byte to read is at in[off - 1]
+	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+	if n == 0 /*|| b.bitsRead >= 64 */ {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) int {
+	const regMask = 64 - 1
+	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return int(v)
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off >= 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// overread returns true if more bits have been requested than is on the stream.
+func (b *bitReader) overread() bool {
+	return b.bitsRead > 64
+}
+
+// remain returns the number of bits remaining.
+func (b *bitReader) remain() uint {
+	return b.off*8 + 64 - uint(b.bitsRead)
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
new file mode 100644
index 0000000000000000000000000000000000000000..303ae90f944736ec8f9dec70c8cd55e6c351a789
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -0,0 +1,169 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 is a table of bitmasks. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits32NC will add up to 32 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.bitContainer >>= v << 3
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
new file mode 100644
index 0000000000000000000000000000000000000000..8a98c4562e017ea7889781fcdb6cf79c6bd9c19a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -0,0 +1,736 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+	blockTypeRaw blockType = iota
+	blockTypeRLE
+	blockTypeCompressed
+	blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+	literalsBlockRaw literalsBlockType = iota
+	literalsBlockRLE
+	literalsBlockCompressed
+	literalsBlockTreeless
+)
+
+const (
+	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+	maxCompressedBlockSize = 128 << 10
+
+	// Maximum possible block size (all Raw+Uncompressed).
+	maxBlockSize = (1 << 21) - 1
+
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
+	maxCompressedLiteralSize = 1 << 18
+	maxRLELiteralSize        = 1 << 20
+	maxMatchLen              = 131074
+	maxSequences             = 0x7f00 + 0xffff
+
+	// We support slightly less than the reference decoder to be able to
+	// use ints on 32 bit archs.
+	maxOffsetBits = 30
+)
+
+var (
+	huffDecoderPool = sync.Pool{New: func() interface{} {
+		return &huff0.Scratch{}
+	}}
+
+	fseDecoderPool = sync.Pool{New: func() interface{} {
+		return &fseDecoder{}
+	}}
+)
+
+type blockDec struct {
+	// Raw source data of the block.
+	data        []byte
+	dataStorage []byte
+
+	// Destination of the decoded data.
+	dst []byte
+
+	// Buffer for literals data.
+	literalBuf []byte
+
+	// Window size of the block.
+	WindowSize uint64
+
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	err         error
+	decWG       sync.WaitGroup
+
+	// Frame to use for singlethreaded decoding.
+	// Should not be used by the decoder itself since parent may be another frame.
+	localFrame *frameDec
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+	tmp     [4]byte
+
+	Type blockType
+
+	// Is this the last block of a frame?
+	Last bool
+
+	// Use less memory
+	lowMem bool
+}
+
+func (b *blockDec) String() string {
+	if b == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+	b := blockDec{
+		lowMem:  lowMem,
+		result:  make(chan decodeOutput, 1),
+		input:   make(chan struct{}, 1),
+		history: make(chan *history, 1),
+	}
+	b.decWG.Add(1)
+	go b.startDecoder()
+	return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+	b.WindowSize = windowSize
+	tmp, err := br.readSmall(3)
+	if err != nil {
+		println("Reading block header:", err)
+		return err
+	}
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	b.Last = bh&1 != 0
+	b.Type = blockType((bh >> 1) & 3)
+	// find size.
+	cSize := int(bh >> 3)
+	maxSize := maxBlockSize
+	switch b.Type {
+	case blockTypeReserved:
+		return ErrReservedBlockType
+	case blockTypeRLE:
+		b.RLESize = uint32(cSize)
+		if b.lowMem {
+			maxSize = cSize
+		}
+		cSize = 1
+	case blockTypeCompressed:
+		if debugDecoder {
+			println("Data size on stream:", cSize)
+		}
+		b.RLESize = 0
+		maxSize = maxCompressedBlockSize
+		if windowSize < maxCompressedBlockSize && b.lowMem {
+			maxSize = int(windowSize)
+		}
+		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+			if debugDecoder {
+				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrCompressedSizeTooBig
+		}
+	case blockTypeRaw:
+		b.RLESize = 0
+		// We do not need a destination for raw blocks.
+		maxSize = -1
+	default:
+		panic("Invalid block type")
+	}
+
+	// Read block data.
+	if cap(b.dataStorage) < cSize {
+		if b.lowMem || cSize > maxCompressedBlockSize {
+			b.dataStorage = make([]byte, 0, cSize)
+		} else {
+			b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
+		}
+	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
+	b.data, err = br.readBig(cSize, b.dataStorage)
+	if err != nil {
+		if debugDecoder {
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
+		}
+		return err
+	}
+	return nil
+}
+
+// sendErr will make the decoder report the given error on this frame.
+func (b *blockDec) sendErr(err error) {
+	b.Last = true
+	b.Type = blockTypeReserved
+	b.err = err
+	b.input <- struct{}{}
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+	close(b.input)
+	close(b.history)
+	close(b.result)
+	b.decWG.Wait()
+}
+
+// startDecoder runs the decoding loop, decoding a block each time input is received.
+// This will separate output and history.
+func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
+	for range b.input {
+		//println("blockDec: Got block input")
+		switch b.Type {
+		case blockTypeRLE:
+			if cap(b.dst) < int(b.RLESize) {
+				if b.lowMem {
+					b.dst = make([]byte, b.RLESize)
+				} else {
+					b.dst = make([]byte, maxBlockSize)
+				}
+			}
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst[:b.RLESize],
+				err: nil,
+			}
+			v := b.data[0]
+			for i := range o.b {
+				o.b[i] = v
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeRaw:
+			o := decodeOutput{
+				d:   b,
+				b:   b.data,
+				err: nil,
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeCompressed:
+			b.dst = b.dst[:0]
+			err := b.decodeCompressed(nil)
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst,
+				err: err,
+			}
+			if debugDecoder {
+				println("Decompressed to", len(b.dst), "bytes, error:", err)
+			}
+			b.result <- o
+		case blockTypeReserved:
+			// Used for returning errors.
+			<-b.history
+			b.result <- decodeOutput{
+				d:   b,
+				b:   nil,
+				err: b.err,
+			}
+		default:
+			panic("Invalid block type")
+		}
+		if debugDecoder {
+			println("blockDec: Finished block")
+		}
+	}
+}
+
+// decodeBuf will decode the block using the supplied history,
+// so the history is not fetched from the channel.
+func (b *blockDec) decodeBuf(hist *history) error {
+	switch b.Type {
+	case blockTypeRLE:
+		if cap(b.dst) < int(b.RLESize) {
+			if b.lowMem {
+				b.dst = make([]byte, b.RLESize)
+			} else {
+				b.dst = make([]byte, maxBlockSize)
+			}
+		}
+		b.dst = b.dst[:b.RLESize]
+		v := b.data[0]
+		for i := range b.dst {
+			b.dst[i] = v
+		}
+		hist.appendKeep(b.dst)
+		return nil
+	case blockTypeRaw:
+		hist.appendKeep(b.data)
+		return nil
+	case blockTypeCompressed:
+		saved := b.dst
+		b.dst = hist.b
+		hist.b = nil
+		err := b.decodeCompressed(hist)
+		if debugDecoder {
+			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
+		}
+		hist.b = b.dst
+		b.dst = saved
+		return err
+	case blockTypeReserved:
+		// Used for returning errors.
+		return b.err
+	default:
+		panic("Invalid block type")
+	}
+}
+
+// decodeCompressed will start decompressing a block.
+// If no history is supplied, the decoder will decode asynchronously as much as possible
+// before fetching from blockDec.history.
+func (b *blockDec) decodeCompressed(hist *history) error {
+	in := b.data
+	delayedHistory := hist == nil
+
+	if delayedHistory {
+		// We must always grab history.
+		defer func() {
+			if hist == nil {
+				<-b.history
+			}
+		}()
+	}
+	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
+	if len(in) < 2 {
+		return ErrBlockTooSmall
+	}
+	litType := literalsBlockType(in[0] & 3)
+	var litRegenSize int
+	var litCompSize int
+	sizeFormat := (in[0] >> 2) & 3
+	var fourStreams bool
+	switch litType {
+	case literalsBlockRaw, literalsBlockRLE:
+		switch sizeFormat {
+		case 0, 2:
+			// Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
+			litRegenSize = int(in[0] >> 3)
+			in = in[1:]
+		case 1:
+			// Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
+			in = in[2:]
+		case 3:
+			//  Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
+			in = in[3:]
+		}
+	case literalsBlockCompressed, literalsBlockTreeless:
+		switch sizeFormat {
+		case 0, 1:
+			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
+			litRegenSize = int(n & 1023)
+			litCompSize = int(n >> 10)
+			fourStreams = sizeFormat == 1
+			in = in[3:]
+		case 2:
+			fourStreams = true
+			if len(in) < 4 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
+			litRegenSize = int(n & 16383)
+			litCompSize = int(n >> 14)
+			in = in[4:]
+		case 3:
+			fourStreams = true
+			if len(in) < 5 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
+			litRegenSize = int(n & 262143)
+			litCompSize = int(n >> 18)
+			in = in[5:]
+		}
+	}
+	if debugDecoder {
+		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
+	}
+	var literals []byte
+	var huff *huff0.Scratch
+	switch litType {
+	case literalsBlockRaw:
+		if len(in) < litRegenSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
+			return ErrBlockTooSmall
+		}
+		literals = in[:litRegenSize]
+		in = in[litRegenSize:]
+		//printf("Found %d uncompressed literals\n", litRegenSize)
+	case literalsBlockRLE:
+		if len(in) < 1 {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
+			return ErrBlockTooSmall
+		}
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, litRegenSize)
+			} else {
+				if litRegenSize > maxCompressedLiteralSize {
+					// Exceptional
+					b.literalBuf = make([]byte, litRegenSize)
+				} else {
+					b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
+
+				}
+			}
+		}
+		literals = b.literalBuf[:litRegenSize]
+		v := in[0]
+		for i := range literals {
+			literals[i] = v
+		}
+		in = in[1:]
+		if debugDecoder {
+			printf("Found %d RLE compressed literals\n", litRegenSize)
+		}
+	case literalsBlockTreeless:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return ErrBlockTooSmall
+		}
+		// Store compressed literals, so we defer decoding until we get history.
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		if debugDecoder {
+			printf("Found %d compressed literals\n", litCompSize)
+		}
+	case literalsBlockCompressed:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return ErrBlockTooSmall
+		}
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		huff = huffDecoderPool.Get().(*huff0.Scratch)
+		var err error
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+			}
+		}
+		if huff == nil {
+			huff = &huff0.Scratch{}
+		}
+		huff, literals, err = huff0.ReadTable(literals, huff)
+		if err != nil {
+			println("reading huffman table:", err)
+			return err
+		}
+		// Use our out buffer.
+		if fourStreams {
+			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+		} else {
+			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+		}
+		if err != nil {
+			println("decoding compressed literals:", err)
+			return err
+		}
+		// Make sure we don't leak our literals buffer
+		if len(literals) != litRegenSize {
+			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+		if debugDecoder {
+			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
+		}
+	}
+
+	// Decode Sequences
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
+	if len(in) < 1 {
+		return ErrBlockTooSmall
+	}
+	seqHeader := in[0]
+	nSeqs := 0
+	switch {
+	case seqHeader == 0:
+		in = in[1:]
+	case seqHeader < 128:
+		nSeqs = int(seqHeader)
+		in = in[1:]
+	case seqHeader < 255:
+		if len(in) < 2 {
+			return ErrBlockTooSmall
+		}
+		nSeqs = int(seqHeader-128)<<8 | int(in[1])
+		in = in[2:]
+	case seqHeader == 255:
+		if len(in) < 3 {
+			return ErrBlockTooSmall
+		}
+		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
+		in = in[3:]
+	}
+	// Allocate sequences
+	if cap(b.sequenceBuf) < nSeqs {
+		if b.lowMem {
+			b.sequenceBuf = make([]seq, nSeqs)
+		} else {
+			// Allocate max
+			b.sequenceBuf = make([]seq, nSeqs, maxSequences)
+		}
+	} else {
+		// Reuse buffer
+		b.sequenceBuf = b.sequenceBuf[:nSeqs]
+	}
+	var seqs = &sequenceDecs{}
+	if nSeqs > 0 {
+		if len(in) < 1 {
+			return ErrBlockTooSmall
+		}
+		br := byteReader{b: in, off: 0}
+		compMode := br.Uint8()
+		br.advance(1)
+		if debugDecoder {
+			printf("Compression modes: 0b%b", compMode)
+		}
+		for i := uint(0); i < 3; i++ {
+			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
+			if debugDecoder {
+				println("Table", tableIndex(i), "is", mode)
+			}
+			var seq *sequenceDec
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				seq = &seqs.litLengths
+			case tableOffsets:
+				seq = &seqs.offsets
+			case tableMatchLengths:
+				seq = &seqs.matchLengths
+			default:
+				panic("unknown table")
+			}
+			switch mode {
+			case compModePredefined:
+				seq.fse = &fsePredef[i]
+			case compModeRLE:
+				if br.remain() < 1 {
+					return ErrBlockTooSmall
+				}
+				v := br.Uint8()
+				br.advance(1)
+				dec := fseDecoderPool.Get().(*fseDecoder)
+				symb, err := decSymbolValue(v, symbolTableX[i])
+				if err != nil {
+					printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
+					return err
+				}
+				dec.setRLE(symb)
+				seq.fse = dec
+				if debugDecoder {
+					printf("RLE set to %+v, code: %v", symb, v)
+				}
+			case compModeFSE:
+				println("Reading table for", tableIndex(i))
+				dec := fseDecoderPool.Get().(*fseDecoder)
+				err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+				if err != nil {
+					println("Read table error:", err)
+					return err
+				}
+				err = dec.transform(symbolTableX[i])
+				if err != nil {
+					println("Transform table error:", err)
+					return err
+				}
+				if debugDecoder {
+					println("Read table ok", "symbolLen:", dec.symbolLen)
+				}
+				seq.fse = dec
+			case compModeRepeat:
+				seq.repeat = true
+			}
+			if br.overread() {
+				return io.ErrUnexpectedEOF
+			}
+		}
+		in = br.unread()
+	}
+
+	// Wait for history.
+	// All time spent after this is critical since it is strictly sequential.
+	if hist == nil {
+		hist = <-b.history
+		if hist.error {
+			return ErrDecoderClosed
+		}
+	}
+
+	// Decode treeless literal block.
+	if litType == literalsBlockTreeless {
+		// TODO: We could send the history early WITHOUT the stream history.
+		//   This would allow decoding treeless literals before the byte history is available.
+		//   Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
+		//   So not much obvious gain here.
+
+		if hist.huffTree == nil {
+			return errors.New("literal block was treeless, but no history was defined")
+		}
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+			}
+		}
+		var err error
+		// Use our out buffer.
+		huff = hist.huffTree
+		if fourStreams {
+			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+		} else {
+			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+		}
+		// Make sure we don't leak our literals buffer
+		if err != nil {
+			println("decompressing literals:", err)
+			return err
+		}
+		if len(literals) != litRegenSize {
+			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+	} else {
+		if hist.huffTree != nil && huff != nil {
+			if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
+				huffDecoderPool.Put(hist.huffTree)
+			}
+			hist.huffTree = nil
+		}
+	}
+	if huff != nil {
+		hist.huffTree = huff
+	}
+	if debugDecoder {
+		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+	}
+
+	if nSeqs == 0 {
+		// Decompressed content is defined entirely as Literals Section content.
+		b.dst = append(b.dst, literals...)
+		if delayedHistory {
+			hist.append(literals)
+		}
+		return nil
+	}
+
+	seqs, err := seqs.mergeHistory(&hist.decoders)
+	if err != nil {
+		return err
+	}
+	if debugDecoder {
+		println("History merged ok")
+	}
+	br := &bitReader{}
+	if err := br.init(in); err != nil {
+		return err
+	}
+
+	// TODO: Investigate if sending history without decoders are faster.
+	//   This would allow the sequences to be decoded async and only have to construct stream history.
+	//   If only recent offsets were not transferred, this would be an obvious win.
+	// 	 Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+
+	hbytes := hist.b
+	if len(hbytes) > hist.windowSize {
+		hbytes = hbytes[len(hbytes)-hist.windowSize:]
+		// We do not need history any more.
+		if hist.dict != nil {
+			hist.dict.content = nil
+		}
+	}
+
+	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
+		println("initializing sequences:", err)
+		return err
+	}
+
+	err = seqs.decode(nSeqs, br, hbytes)
+	if err != nil {
+		return err
+	}
+	if !br.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
+	}
+
+	err = br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	if len(b.data) > maxCompressedBlockSize {
+		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
+	}
+	// Set output and release references.
+	b.dst = seqs.out
+	seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+
+	if !delayedHistory {
+		// If we don't have delayed history, no need to update.
+		hist.recentOffsets = seqs.prevOffset
+		return nil
+	}
+	if b.Last {
+		// if last block we don't care about history.
+		println("Last block, no history returned")
+		hist.b = hist.b[:0]
+		return nil
+	}
+	hist.append(b.dst)
+	hist.recentOffsets = seqs.prevOffset
+	if debugDecoder {
+		println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
new file mode 100644
index 0000000000000000000000000000000000000000..3df185ee465513f3a3ae09895265b3684801e761
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -0,0 +1,871 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type blockEnc struct {
+	size       int
+	literals   []byte
+	sequences  []seq
+	coders     seqCoders
+	litEnc     *huff0.Scratch
+	dictLitEnc *huff0.Scratch
+	wr         bitWriter
+
+	extraLits         int
+	output            []byte
+	recentOffsets     [3]uint32
+	prevRecentOffsets [3]uint32
+
+	last   bool
+	lowMem bool
+}
+
+// init should be used once the block has been created.
+// If called more than once, the effect is the same as calling reset.
+func (b *blockEnc) init() {
+	if b.lowMem {
+		// 1K literals
+		if cap(b.literals) < 1<<10 {
+			b.literals = make([]byte, 0, 1<<10)
+		}
+		const defSeqs = 20
+		if cap(b.sequences) < defSeqs {
+			b.sequences = make([]seq, 0, defSeqs)
+		}
+		// 1K
+		if cap(b.output) < 1<<10 {
+			b.output = make([]byte, 0, 1<<10)
+		}
+	} else {
+		if cap(b.literals) < maxCompressedBlockSize {
+			b.literals = make([]byte, 0, maxCompressedBlockSize)
+		}
+		const defSeqs = 200
+		if cap(b.sequences) < defSeqs {
+			b.sequences = make([]seq, 0, defSeqs)
+		}
+		if cap(b.output) < maxCompressedBlockSize {
+			b.output = make([]byte, 0, maxCompressedBlockSize)
+		}
+	}
+
+	if b.coders.mlEnc == nil {
+		b.coders.mlEnc = &fseEncoder{}
+		b.coders.mlPrev = &fseEncoder{}
+		b.coders.ofEnc = &fseEncoder{}
+		b.coders.ofPrev = &fseEncoder{}
+		b.coders.llEnc = &fseEncoder{}
+		b.coders.llPrev = &fseEncoder{}
+	}
+	b.litEnc = &huff0.Scratch{WantLogLess: 4}
+	b.reset(nil)
+}
+
+// initNewEncode can be used to reset offsets and encoders to the initial state.
+func (b *blockEnc) initNewEncode() {
+	b.recentOffsets = [3]uint32{1, 4, 8}
+	b.litEnc.Reuse = huff0.ReusePolicyNone
+	b.coders.setPrev(nil, nil, nil)
+}
+
+// reset will reset the block for a new encode, but in the same stream,
+// meaning that state will be carried over, but the block content is reset.
+// If a previous block is provided, the recent offsets are carried over.
+func (b *blockEnc) reset(prev *blockEnc) {
+	b.extraLits = 0
+	b.literals = b.literals[:0]
+	b.size = 0
+	b.sequences = b.sequences[:0]
+	b.output = b.output[:0]
+	b.last = false
+	if prev != nil {
+		b.recentOffsets = prev.prevRecentOffsets
+	}
+	b.dictLitEnc = nil
+}
+
+// swapEncoders will swap the FSE coders and the literals encoder with those of prev,
+// so the table state of the previous block can be reused.
+func (b *blockEnc) swapEncoders(prev *blockEnc) {
+	b.coders.swap(&prev.coders)
+	b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
+}
+
+// blockHeader contains the information for a block header.
+type blockHeader uint32
+
+// setLast sets the 'last' indicator on a block.
+func (h *blockHeader) setLast(b bool) {
+	if b {
+		*h = *h | 1
+	} else {
+		const mask = (1 << 24) - 2
+		*h = *h & mask
+	}
+}
+
+// setSize will store the compressed size of a block.
+func (h *blockHeader) setSize(v uint32) {
+	const mask = 7
+	*h = (*h)&mask | blockHeader(v<<3)
+}
+
+// setType sets the block type.
+func (h *blockHeader) setType(t blockType) {
+	const mask = 1 | (((1 << 24) - 1) ^ 7)
+	*h = (*h & mask) | blockHeader(t<<1)
+}
+
+// appendTo will append the block header to a slice.
+func (h blockHeader) appendTo(b []byte) []byte {
+	return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+}
+
+// String returns a string representation of the block.
+func (h blockHeader) String() string {
+	return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
+}
+
+// literalsHeader contains literals header information.
+type literalsHeader uint64
+
+// setType can be used to set the type of literal block.
+func (h *literalsHeader) setType(t literalsBlockType) {
+	const mask = math.MaxUint64 - 3
+	*h = (*h & mask) | literalsHeader(t)
+}
+
+// setSize can be used to set a single size, for uncompressed and RLE content.
+func (h *literalsHeader) setSize(regenLen int) {
+	inBits := bits.Len32(uint32(regenLen))
+	// Only retain 2 bits
+	const mask = 3
+	lh := uint64(*h & mask)
+	switch {
+	case inBits < 5:
+		lh |= (uint64(regenLen) << 3) | (1 << 60)
+		if debugEncoder {
+			got := int(lh>>3) & 0xff
+			if got != regenLen {
+				panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
+			}
+		}
+	case inBits < 12:
+		lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
+	case inBits < 20:
+		lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
+	default:
+		panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
+	}
+	*h = literalsHeader(lh)
+}
+
+// setSizes will set the size of a compressed literals section and the input length.
+func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
+	compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
+	// Only retain 2 bits
+	const mask = 3
+	lh := uint64(*h & mask)
+	switch {
+	case compBits <= 10 && inBits <= 10:
+		if !single {
+			lh |= 1 << 2
+		}
+		lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
+		if debugEncoder {
+			const mmask = (1 << 24) - 1
+			n := (lh >> 4) & mmask
+			if int(n&1023) != inLen {
+				panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
+			}
+			if int(n>>10) != compLen {
+				panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
+			}
+		}
+	case compBits <= 14 && inBits <= 14:
+		lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
+	case compBits <= 18 && inBits <= 18:
+		lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
+	default:
+		panic("internal error: block too big")
+	}
+	*h = literalsHeader(lh)
+}
+
+// appendTo will append the literals header to a byte slice.
+func (h literalsHeader) appendTo(b []byte) []byte {
+	size := uint8(h >> 60)
+	switch size {
+	case 1:
+		b = append(b, uint8(h))
+	case 2:
+		b = append(b, uint8(h), uint8(h>>8))
+	case 3:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+	case 4:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
+	case 5:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
+	default:
+		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
+	}
+	return b
+}
+
+// size returns the output size with currently set values.
+func (h literalsHeader) size() int {
+	return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+	b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+	b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+	// Check if offset is one of the recent offsets.
+	// Adjusts the output offset accordingly.
+	// Gives a tiny bit of compression, typically around 1%.
+	if true {
+		if lits > 0 {
+			switch offset {
+			case b.recentOffsets[0]:
+				offset = 1
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		} else {
+			switch offset {
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 1
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[0] - 1:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		}
+	} else {
+		offset += 3
+	}
+	return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(a)))
+	bh.setType(blockTypeRaw)
+	b.output = bh.appendTo(b.output[:0])
+	b.output = append(b.output, a...)
+	if debugEncoder {
+		println("Adding RAW block, length", len(a), "last:", b.last)
+	}
+}
+
+// encodeRawTo will append a raw block containing src to dst and return the updated slice.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debugEncoder {
+		println("Adding RAW block, length", len(src), "last:", b.last)
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(lits)))
+
+	// Don't compress extremely small blocks
+	if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
+		if debugEncoder {
+			println("Adding RAW block, length", len(lits), "last:", b.last)
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, lits...)
+		return nil
+	}
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if b.dictLitEnc != nil {
+		b.litEnc.TransferCTable(b.dictLitEnc)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		b.dictLitEnc = nil
+	}
+	if len(lits) >= 1024 {
+		// Use 4 Streams.
+		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
+	} else if len(lits) > 32 {
+		// Use 1 stream
+		single = true
+		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
+	} else {
+		err = huff0.ErrIncompressible
+	}
+
+	switch err {
+	case huff0.ErrIncompressible:
+		if debugEncoder {
+			println("Adding RAW block, length", len(lits), "last:", b.last)
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, lits...)
+		return nil
+	case huff0.ErrUseRLE:
+		if debugEncoder {
+			println("Adding RLE block, length", len(lits))
+		}
+		bh.setType(blockTypeRLE)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, lits[0])
+		return nil
+	case nil:
+	default:
+		return err
+	}
+	// Compressed...
+	// Now, allow reuse
+	b.litEnc.Reuse = huff0.ReusePolicyAllow
+	bh.setType(blockTypeCompressed)
+	var lh literalsHeader
+	if reUsed {
+		if debugEncoder {
+			println("Reused tree, compressed to", len(out))
+		}
+		lh.setType(literalsBlockTreeless)
+	} else {
+		if debugEncoder {
+			println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
+		}
+		lh.setType(literalsBlockCompressed)
+	}
+	// Set sizes
+	lh.setSizes(len(out), len(lits), single)
+	bh.setSize(uint32(len(out) + lh.size() + 1))
+
+	// Write block headers.
+	b.output = bh.appendTo(b.output)
+	b.output = lh.appendTo(b.output)
+	// Add compressed data.
+	b.output = append(b.output, out...)
+	// No sequences.
+	b.output = append(b.output, 0)
+	return nil
+}
+
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+	if len(data) > maxSequences || len(data) < 2 {
+		return 0
+	}
+	enc := fseEncoder{}
+	hist := enc.Histogram()[:256]
+	maxSym := uint8(0)
+	for i, v := range data {
+		v = v & 63
+		data[i] = v
+		hist[v]++
+		if v > maxSym {
+			maxSym = v
+		}
+	}
+	if maxSym == 0 {
+		// All 0
+		return 0
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	cnt := maxCount(hist[:maxSym])
+	if cnt == len(data) {
+		// RLE
+		return 0
+	}
+	enc.HistogramFinished(maxSym, cnt)
+	err := enc.normalizeCount(len(data))
+	if err != nil {
+		return 0
+	}
+	_, err = enc.writeCount(nil)
+	if err != nil {
+		panic(err)
+	}
+	return 1
+}
+
+// encode will encode the block and append the output in b.output.
+// Previous offset codes must be pushed if more blocks are expected.
+func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
+	if len(b.sequences) == 0 {
+		return b.encodeLits(b.literals, rawAllLits)
+	}
+	// We want some difference to at least account for the headers.
+	saved := b.size - len(b.literals) - (b.size >> 5)
+	if saved < 16 {
+		if org == nil {
+			return errIncompressible
+		}
+		b.popOffsets()
+		return b.encodeLits(org, rawAllLits)
+	}
+
+	var bh blockHeader
+	var lh literalsHeader
+	bh.setLast(b.last)
+	bh.setType(blockTypeCompressed)
+	// Store offset of the block header. Needed when we know the size.
+	bhOffset := len(b.output)
+	b.output = bh.appendTo(b.output)
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if b.dictLitEnc != nil {
+		b.litEnc.TransferCTable(b.dictLitEnc)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		b.dictLitEnc = nil
+	}
+	if len(b.literals) >= 1024 && !raw {
+		// Use 4 Streams.
+		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
+	} else if len(b.literals) > 32 && !raw {
+		// Use 1 stream
+		single = true
+		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
+	} else {
+		err = huff0.ErrIncompressible
+	}
+
+	switch err {
+	case huff0.ErrIncompressible:
+		lh.setType(literalsBlockRaw)
+		lh.setSize(len(b.literals))
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, b.literals...)
+		if debugEncoder {
+			println("Adding literals RAW, length", len(b.literals))
+		}
+	case huff0.ErrUseRLE:
+		lh.setType(literalsBlockRLE)
+		lh.setSize(len(b.literals))
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, b.literals[0])
+		if debugEncoder {
+			println("Adding literals RLE")
+		}
+	case nil:
+		// Compressed litLen...
+		if reUsed {
+			if debugEncoder {
+				println("reused tree")
+			}
+			lh.setType(literalsBlockTreeless)
+		} else {
+			if debugEncoder {
+				println("new tree, size:", len(b.litEnc.OutTable))
+			}
+			lh.setType(literalsBlockCompressed)
+			if debugEncoder {
+				_, _, err := huff0.ReadTable(out, nil)
+				if err != nil {
+					panic(err)
+				}
+			}
+		}
+		lh.setSizes(len(out), len(b.literals), single)
+		if debugEncoder {
+			printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
+			println("Adding literal header:", lh)
+		}
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, out...)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		if debugEncoder {
+			println("Adding literals compressed")
+		}
+	default:
+		if debugEncoder {
+			println("Adding literals ERROR:", err)
+		}
+		return err
+	}
+	// Sequence compression
+
+	// Write the number of sequences
+	switch {
+	case len(b.sequences) < 128:
+		b.output = append(b.output, uint8(len(b.sequences)))
+	case len(b.sequences) < 0x7f00: // TODO: this could be wrong
+		n := len(b.sequences)
+		b.output = append(b.output, 128+uint8(n>>8), uint8(n))
+	default:
+		n := len(b.sequences) - 0x7f00
+		b.output = append(b.output, 255, uint8(n), uint8(n>>8))
+	}
+	if debugEncoder {
+		println("Encoding", len(b.sequences), "sequences")
+	}
+	b.genCodes()
+	llEnc := b.coders.llEnc
+	ofEnc := b.coders.ofEnc
+	mlEnc := b.coders.mlEnc
+	err = llEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+	err = ofEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+	err = mlEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+
+	// Choose the best compression mode for each type.
+	// Will evaluate the new vs predefined and previous.
+	chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
+		// See if predefined/previous is better
+		hist := cur.count[:cur.symbolLen]
+		nSize := cur.approxSize(hist) + cur.maxHeaderSize()
+		predefSize := preDef.approxSize(hist)
+		prevSize := prev.approxSize(hist)
+
+		// Add a small penalty for new encoders.
+		// Don't bother with extremely small (<2 byte gains).
+		nSize = nSize + (nSize+2*8*16)>>4
+		switch {
+		case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
+			if debugEncoder {
+				println("Using predefined", predefSize>>3, "<=", nSize>>3)
+			}
+			return preDef, compModePredefined
+		case prevSize <= nSize:
+			if debugEncoder {
+				println("Using previous", prevSize>>3, "<=", nSize>>3)
+			}
+			return prev, compModeRepeat
+		default:
+			if debugEncoder {
+				println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
+				println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
+			}
+			return cur, compModeFSE
+		}
+	}
+
+	// Write compression mode
+	var mode uint8
+	if llEnc.useRLE {
+		mode |= uint8(compModeRLE) << 6
+		llEnc.setRLE(b.sequences[0].llCode)
+		if debugEncoder {
+			println("llEnc.useRLE")
+		}
+	} else {
+		var m seqCompMode
+		llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
+		mode |= uint8(m) << 6
+	}
+	if ofEnc.useRLE {
+		mode |= uint8(compModeRLE) << 4
+		ofEnc.setRLE(b.sequences[0].ofCode)
+		if debugEncoder {
+			println("ofEnc.useRLE")
+		}
+	} else {
+		var m seqCompMode
+		ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
+		mode |= uint8(m) << 4
+	}
+
+	if mlEnc.useRLE {
+		mode |= uint8(compModeRLE) << 2
+		mlEnc.setRLE(b.sequences[0].mlCode)
+		if debugEncoder {
+			println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
+		}
+	} else {
+		var m seqCompMode
+		mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
+		mode |= uint8(m) << 2
+	}
+	b.output = append(b.output, mode)
+	if debugEncoder {
+		printf("Compression modes: 0b%b", mode)
+	}
+	b.output, err = llEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+	start := len(b.output)
+	b.output, err = ofEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+	if false {
+		println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
+		for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
+		}
+	}
+	b.output, err = mlEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+
+	// Maybe in block?
+	wr := &b.wr
+	wr.reset(b.output)
+
+	var ll, of, ml cState
+
+	// Current sequence
+	seq := len(b.sequences) - 1
+	s := b.sequences[seq]
+	llEnc.setBits(llBitsTable[:])
+	mlEnc.setBits(mlBitsTable[:])
+	ofEnc.setBits(nil)
+
+	llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]
+
+	// We have 3 bounds checks here (and in the loop).
+	// Since we are iterating backwards it is kinda hard to avoid.
+	llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+	ll.init(wr, &llEnc.ct, llB)
+	of.init(wr, &ofEnc.ct, ofB)
+	wr.flush32()
+	ml.init(wr, &mlEnc.ct, mlB)
+
+	// Each of these lookups also generates a bounds check.
+	wr.addBits32NC(s.litLen, llB.outBits)
+	wr.addBits32NC(s.matchLen, mlB.outBits)
+	wr.flush32()
+	wr.addBits32NC(s.offset, ofB.outBits)
+	if debugSequences {
+		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
+	}
+	seq--
+	if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
+		// No need to flush (common)
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is 8 for all.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// We checked that all can stay within 32 bits
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	} else {
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is below 8 for each.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// ml+ll = max 32 bits total
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.flush32()
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	err = wr.close()
+	if err != nil {
+		return err
+	}
+	b.output = wr.out
+
+	if len(b.output)-3-bhOffset >= b.size {
+		// Maybe even add a bigger margin.
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return errIncompressible
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debugEncoder {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()[:256]
+	ofH := b.coders.ofEnc.Histogram()[:256]
+	mlH := b.coders.mlEnc.Histogram()[:256]
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i, seq := range b.sequences {
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+		b.sequences[i] = seq
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if debugAsserts && ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 0000000000000000000000000000000000000000..01a01e486e1886a322e8b7c2ac5dba21f732a383
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[blockTypeRaw-0]
+	_ = x[blockTypeRLE-1]
+	_ = x[blockTypeCompressed-2]
+	_ = x[blockTypeReserved-3]
+}
+
+const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved"
+
+var _blockType_index = [...]uint8{0, 12, 24, 43, 60}
+
+func (i blockType) String() string {
+	if i >= blockType(len(_blockType_index)-1) {
+		return "blockType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _blockType_name[_blockType_index[i]:_blockType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[literalsBlockRaw-0]
+	_ = x[literalsBlockRLE-1]
+	_ = x[literalsBlockCompressed-2]
+	_ = x[literalsBlockTreeless-3]
+}
+
+const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless"
+
+var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76}
+
+func (i literalsBlockType) String() string {
+	if i >= literalsBlockType(len(_literalsBlockType_index)-1) {
+		return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[compModePredefined-0]
+	_ = x[compModeRLE-1]
+	_ = x[compModeFSE-2]
+	_ = x[compModeRepeat-3]
+}
+
+const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat"
+
+var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54}
+
+func (i seqCompMode) String() string {
+	if i >= seqCompMode(len(_seqCompMode_index)-1) {
+		return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[tableLiteralLengths-0]
+	_ = x[tableOffsets-1]
+	_ = x[tableMatchLengths-2]
+}
+
+const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"
+
+var _tableIndex_index = [...]uint8{0, 19, 31, 48}
+
+func (i tableIndex) String() string {
+	if i >= tableIndex(len(_tableIndex_index)-1) {
+		return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
+}
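All four generated String methods share the stringer layout: one concatenated string of names plus an array of end offsets, so each call is a pair of slice lookups with a bounds check for unknown values. A self-contained sketch of the same scheme, using an illustrative type that is not from this package:

```go
package main

import (
	"fmt"
	"strconv"
)

type color uint8

const (
	red color = iota
	green
	blue
)

// All names concatenated, plus offsets marking where each name ends.
const _color_name = "redgreenblue"

var _color_index = [...]uint8{0, 3, 8, 12}

func (c color) String() string {
	if c >= color(len(_color_index)-1) {
		// Out-of-range values print as color(N) instead of panicking.
		return "color(" + strconv.FormatInt(int64(c), 10) + ")"
	}
	return _color_name[_color_index[c]:_color_index[c+1]]
}

func main() {
	fmt.Println(green)    // "green": slices _color_name[3:8]
	fmt.Println(color(7)) // "color(7)"
}
```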
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
new file mode 100644
index 0000000000000000000000000000000000000000..aab71c6cf851b922691d5e13f23fcc34b1d06ceb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -0,0 +1,130 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+type byteBuffer interface {
+	// Read up to 8 bytes.
+	// Returns io.ErrUnexpectedEOF if this cannot be satisfied.
+	readSmall(n int) ([]byte, error)
+
+	// Read >8 bytes.
+	// MAY use the destination slice.
+	readBig(n int, dst []byte) ([]byte, error)
+
+	// Read a single byte.
+	readByte() (byte, error)
+
+	// Skip n bytes.
+	skipN(n int) error
+}
+
+// in-memory buffer
+type byteBuf []byte
+
+func (b *byteBuf) readSmall(n int) ([]byte, error) {
+	if debugAsserts && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	bb := *b
+	if len(bb) < n {
+		return nil, io.ErrUnexpectedEOF
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r, nil
+}
+
+func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
+	bb := *b
+	if len(bb) < n {
+		return nil, io.ErrUnexpectedEOF
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r, nil
+}
+
+func (b *byteBuf) remain() []byte {
+	return *b
+}
+
+func (b *byteBuf) readByte() (byte, error) {
+	bb := *b
+	if len(bb) < 1 {
+		return 0, io.ErrUnexpectedEOF
+	}
+	r := bb[0]
+	*b = bb[1:]
+	return r, nil
+}
+
+func (b *byteBuf) skipN(n int) error {
+	bb := *b
+	if len(bb) < n {
+		return io.ErrUnexpectedEOF
+	}
+	*b = bb[n:]
+	return nil
+}
+
+// wrapper around a reader.
+type readerWrapper struct {
+	r   io.Reader
+	tmp [8]byte
+}
+
+func (r *readerWrapper) readSmall(n int) ([]byte, error) {
+	if debugAsserts && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	n2, err := io.ReadFull(r.r, r.tmp[:n])
+	// We only really care about the actual bytes read.
+	if err != nil {
+		if err == io.EOF {
+			return nil, io.ErrUnexpectedEOF
+		}
+		if debugDecoder {
+			println("readSmall: got", n2, "want", n, "err", err)
+		}
+		return nil, err
+	}
+	return r.tmp[:n], nil
+}
+
+func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
+	if cap(dst) < n {
+		dst = make([]byte, n)
+	}
+	n2, err := io.ReadFull(r.r, dst[:n])
+	if err == io.EOF && n > 0 {
+		err = io.ErrUnexpectedEOF
+	}
+	return dst[:n2], err
+}
+
+func (r *readerWrapper) readByte() (byte, error) {
+	n2, err := r.r.Read(r.tmp[:1])
+	if err != nil {
+		return 0, err
+	}
+	if n2 != 1 {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return r.tmp[0], nil
+}
+
+func (r *readerWrapper) skipN(n int) error {
+	n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
+	if n2 != int64(n) {
+		err = io.ErrUnexpectedEOF
+	}
+	return err
+}
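Both implementations above satisfy the same byteBuffer contract, so decoding helpers can stay agnostic about whether the input is an in-memory slice or a wrapped io.Reader. A hypothetical helper (it would have to live inside this package, since byteBuffer is unexported) that reads a little-endian uint32 through the interface:

```go
// readUint32LE is illustrative, not part of the package. readSmall
// consumes the bytes, so consecutive calls walk forward in the input.
func readUint32LE(b byteBuffer) (uint32, error) {
	v, err := b.readSmall(4)
	if err != nil {
		// byteBuf and readerWrapper both report truncated input
		// as io.ErrUnexpectedEOF.
		return 0, err
	}
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24, nil
}
```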
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c4fca17fa1d7ec9328a09cf8b5553c661e586ad
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -0,0 +1,88 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// overread returns whether we have advanced too far.
+func (b *byteReader) overread() bool {
+	return b.off > len(b.b)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := int32(b2[3])
+	v2 := int32(b2[2])
+	v1 := int32(b2[1])
+	v0 := int32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint8 returns the next byte
+func (b *byteReader) Uint8() uint8 {
+	v := b.b[b.off]
+	return v
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	if r := b.remain(); r < 4 {
+		// Very rare
+		v := uint32(0)
+		for i := 1; i <= r; i++ {
+			v = (v << 8) | uint32(b.b[len(b.b)-i])
+		}
+		return v
+	}
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint32NC returns a little endian uint32 starting at current offset.
+// The caller must ensure that at least 4 bytes are left.
+func (b byteReader) Uint32NC() uint32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
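Because byteReader performs no bounds checks, the calling convention is: read, advance manually, then check overread() at a safe point. A sketch of that pattern, assuming it sits inside the package next to the type:

```go
func byteReaderExample() {
	var br byteReader
	br.init([]byte{0x78, 0x56, 0x34, 0x12, 0xff})
	v := br.Uint32() // 0x12345678 (little endian); does not advance
	br.advance(4)    // consume the 4 bytes just read
	if br.overread() {
		panic("advanced past end of input")
	}
	_ = v
	println(br.remain()) // 1 byte left
}
```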
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
new file mode 100644
index 0000000000000000000000000000000000000000..69736e8d4bb8c0ac04027f46feb7ed9fef5f4300
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -0,0 +1,202 @@
+// Copyright 2020+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// HeaderMaxSize is the maximum size of a Frame and Block Header.
+// If less is sent to Header.Decode it *may* still contain enough information.
+const HeaderMaxSize = 14 + 3
+
+// Header contains information about the first frame and block within that.
+type Header struct {
+	// WindowSize is the window of data to keep while decoding.
+	// Will only be set if HasFCS is false.
+	WindowSize uint64
+
+	// Frame content size.
+	// Expected size of the entire frame.
+	FrameContentSize uint64
+
+	// Dictionary ID.
+	// If 0, no dictionary.
+	DictionaryID uint32
+
+	// First block information.
+	FirstBlock struct {
+		// OK will be set if first block could be decoded.
+		OK bool
+
+		// Is this the last block of a frame?
+		Last bool
+
+		// Is the data compressed?
+		// If true CompressedSize will be populated.
+		// Unfortunately DecompressedSize cannot be determined
+		// without decoding the blocks.
+		Compressed bool
+
+		// DecompressedSize is the expected decompressed size of the block.
+		// Will be 0 if it cannot be determined.
+		DecompressedSize int
+
+		// CompressedSize of the data in the block.
+		// Does not include the block header.
+		// Will be equal to DecompressedSize if not Compressed.
+		CompressedSize int
+	}
+
+	// Skippable will be true if the frame is meant to be skipped.
+	// No other information will be populated.
+	Skippable bool
+
+	// If set there is a checksum present for the block content.
+	HasCheckSum bool
+
+	// If this is true FrameContentSize will have a valid value
+	HasFCS bool
+
+	SingleSegment bool
+}
+
+// Decode the header from the beginning of the stream.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) Decode(in []byte) error {
+	if len(in) < 4 {
+		return io.ErrUnexpectedEOF
+	}
+	b, in := in[:4], in[4:]
+	if !bytes.Equal(b, frameMagic) {
+		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+			return ErrMagicMismatch
+		}
+		*h = Header{Skippable: true}
+		return nil
+	}
+	if len(in) < 1 {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Clear output
+	*h = Header{}
+	fhd, in := in[0], in[1:]
+	h.SingleSegment = fhd&(1<<5) != 0
+	h.HasCheckSum = fhd&(1<<2) != 0
+
+	if fhd&(1<<3) != 0 {
+		return errors.New("reserved bit set on frame header")
+	}
+
+	// Read Window_Descriptor
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+	if !h.SingleSegment {
+		if len(in) < 1 {
+			return io.ErrUnexpectedEOF
+		}
+		var wd byte
+		wd, in = in[0], in[1:]
+		windowLog := 10 + (wd >> 3)
+		windowBase := uint64(1) << windowLog
+		windowAdd := (windowBase / 8) * uint64(wd&0x7)
+		h.WindowSize = windowBase + windowAdd
+	}
+
+	// Read Dictionary_ID
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+	if size := fhd & 3; size != 0 {
+		if size == 3 {
+			size = 4
+		}
+		if len(in) < int(size) {
+			return io.ErrUnexpectedEOF
+		}
+		b, in = in[:size], in[size:]
+		if b == nil {
+			return io.ErrUnexpectedEOF
+		}
+		switch size {
+		case 1:
+			h.DictionaryID = uint32(b[0])
+		case 2:
+			h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+		case 4:
+			h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		}
+	}
+
+	// Read Frame_Content_Size
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+	var fcsSize int
+	v := fhd >> 6
+	switch v {
+	case 0:
+		if h.SingleSegment {
+			fcsSize = 1
+		}
+	default:
+		fcsSize = 1 << v
+	}
+
+	if fcsSize > 0 {
+		h.HasFCS = true
+		if len(in) < fcsSize {
+			return io.ErrUnexpectedEOF
+		}
+		b, in = in[:fcsSize], in[fcsSize:]
+		if b == nil {
+			return io.ErrUnexpectedEOF
+		}
+		switch fcsSize {
+		case 1:
+			h.FrameContentSize = uint64(b[0])
+		case 2:
+			// When FCS_Field_Size is 2, the offset of 256 is added.
+			h.FrameContentSize = (uint64(b[0]) | (uint64(b[1]) << 8)) + 256
+		case 4:
+			h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+		case 8:
+			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+			h.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+		}
+	}
+
+	// Frame Header done, we will not fail from now on.
+	if len(in) < 3 {
+		return nil
+	}
+	tmp := in[:3]
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	h.FirstBlock.Last = bh&1 != 0
+	blockType := blockType((bh >> 1) & 3)
+	// find size.
+	cSize := int(bh >> 3)
+	switch blockType {
+	case blockTypeReserved:
+		return nil
+	case blockTypeRLE:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = 1
+	case blockTypeCompressed:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.CompressedSize = cSize
+	case blockTypeRaw:
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = cSize
+	default:
+		panic("Invalid block type")
+	}
+
+	h.FirstBlock.OK = true
+	return nil
+}
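Header and HeaderMaxSize are exported, so frame metadata can be inspected without constructing a Decoder. A small usage sketch (the file path is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	data, err := os.ReadFile("data.zst") // illustrative path
	if err != nil {
		panic(err)
	}
	if len(data) > zstd.HeaderMaxSize {
		data = data[:zstd.HeaderMaxSize]
	}
	var h zstd.Header
	if err := h.Decode(data); err != nil {
		panic(err) // e.g. ErrMagicMismatch or io.ErrUnexpectedEOF
	}
	switch {
	case h.Skippable:
		fmt.Println("skippable frame")
	case h.HasFCS:
		fmt.Println("frame content size:", h.FrameContentSize)
	default:
		fmt.Println("window size:", h.WindowSize)
	}
}
```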
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..f430f58b5726c8877593b0a23e150f223562770e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,555 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+	o decoderOptions
+
+	// Unreferenced decoders, ready for use.
+	decoders chan *blockDec
+
+	// Streams ready to be decoded.
+	stream chan decodeStream
+
+	// Current read position used for Reader functionality.
+	current decoderState
+
+	// Custom dictionaries.
+	// Always uses copies.
+	dicts map[uint32]dict
+
+	// streamWg is the waitgroup for all streams
+	streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+	// current block being written to stream.
+	decodeOutput
+
+	// output in order to be written to stream.
+	output chan decodeOutput
+
+	// cancel remaining output.
+	cancel chan struct{}
+
+	flushed bool
+}
+
+var (
+	// Check the interfaces we want to support.
+	_ = io.WriterTo(&Decoder{})
+	_ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.flushed = true
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+	}
+
+	// Transfer option dicts.
+	d.dicts = make(map[uint32]dict, len(d.o.dicts))
+	for _, dc := range d.o.dicts {
+		d.dicts[dc.id] = dc
+	}
+	d.o.dicts = nil
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		dec := newBlockDec(d.o.lowMem)
+		dec.localFrame = newFrameDec(d.o)
+		d.decoders <- dec
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes written and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, nil
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debugDecoder {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debugDecoder {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream after the current one has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
+// should be used.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+
+	d.drainOutput()
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+		if len(d.current.b) > 0 {
+			d.current.b = d.current.b[:0]
+		}
+		d.current.flushed = true
+		return nil
+	}
+
+	// If bytes buffer and < 5MB, do sync decoding anyway.
+	if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+		bb2 := bb
+		if debugDecoder {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb2.Bytes()
+		var dst []byte
+		if cap(d.current.b) > 0 {
+			dst = d.current.b
+		}
+
+		dst, err := d.DecodeAll(b, dst[:0])
+		if err == nil {
+			err = io.EOF
+		}
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debugDecoder {
+			println("sync decode to", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+
+	if d.stream == nil {
+		d.stream = make(chan decodeStream, 1)
+		d.streamWg.Add(1)
+		go d.startStreamDecoder(d.stream)
+	}
+
+	// Remove current block.
+	d.current.decodeOutput = decodeOutput{}
+	d.current.err = nil
+	d.current.cancel = make(chan struct{})
+	d.current.flushed = false
+	d.current.d = nil
+
+	d.stream <- decodeStream{
+		r:      r,
+		output: d.current.output,
+		cancel: d.current.cancel,
+	}
+	return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
+func (d *Decoder) drainOutput() {
+	if d.current.cancel != nil {
+		if debugDecoder {
+			println("cancelling current")
+		}
+		close(d.current.cancel)
+		d.current.cancel = nil
+	}
+	if d.current.d != nil {
+		if debugDecoder {
+			printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+		d.current.b = nil
+	}
+	if d.current.output == nil || d.current.flushed {
+		if debugDecoder {
+			println("current already flushed")
+		}
+		return
+	}
+	for v := range d.current.output {
+		if v.d != nil {
+			if debugDecoder {
+				printf("re-adding decoder %p", v.d)
+			}
+			d.decoders <- v.d
+		}
+		if v.err == errEndOfStream {
+			if debugDecoder {
+				println("current flushed")
+			}
+			d.current.flushed = true
+			return
+		}
+	}
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+	var n int64
+	for {
+		if len(d.current.b) > 0 {
+			n2, err2 := w.Write(d.current.b)
+			n += int64(n2)
+			if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) {
+				d.current.err = err2
+			} else if n2 != len(d.current.b) {
+				d.current.err = io.ErrShortWrite
+			}
+		}
+		if d.current.err != nil {
+			break
+		}
+		d.nextBlock(true)
+	}
+	err := d.current.err
+	if err != nil {
+		d.drainOutput()
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	if d.current.err == ErrDecoderClosed {
+		return dst, ErrDecoderClosed
+	}
+
+	// Grab a block decoder and frame decoder.
+	block := <-d.decoders
+	frame := block.localFrame
+	defer func() {
+		if debugDecoder {
+			printf("re-adding decoder: %p", block)
+		}
+		frame.rawInput = nil
+		frame.bBuf = nil
+		d.decoders <- block
+	}()
+	frame.bBuf = input
+
+	for {
+		frame.history.reset()
+		err := frame.reset(&frame.bBuf)
+		if err == io.EOF {
+			if debugDecoder {
+				println("frame reset return EOF")
+			}
+			return dst, nil
+		}
+		if frame.DictionaryID != nil {
+			dict, ok := d.dicts[*frame.DictionaryID]
+			if !ok {
+				return nil, ErrUnknownDictionary
+			}
+			frame.history.setDict(&dict)
+		}
+		if err != nil {
+			return dst, err
+		}
+		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			return dst, ErrDecoderSizeExceeded
+		}
+		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+			// Never preallocate more than 1 GB up front.
+			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
+				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+				copy(dst2, dst)
+				dst = dst2
+			}
+		}
+		if cap(dst) == 0 {
+			// Allocate len(input) * 2 by default if nothing is provided
+			// and we didn't get frame content size.
+			size := len(input) * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			if uint64(size) > d.o.maxDecodedSize {
+				size = int(d.o.maxDecodedSize)
+			}
+			dst = make([]byte, 0, size)
+		}
+
+		dst, err = frame.runDecoder(dst, block)
+		if err != nil {
+			return dst, err
+		}
+		if len(frame.bBuf) == 0 {
+			if debugDecoder {
+				println("frame dbuf empty")
+			}
+			break
+		}
+	}
+	return dst, nil
+}
+
+// nextBlock returns the next block.
+// If an error occurs d.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+	if d.current.d != nil {
+		if debugDecoder {
+			printf("re-adding current decoder %p", d.current.d)
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+	}
+	if d.current.err != nil {
+		// Keep error state.
+		return blocking
+	}
+
+	if blocking {
+		d.current.decodeOutput = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput = <-d.current.output:
+		default:
+			return false
+		}
+	}
+	if debugDecoder {
+		println("got", len(d.current.b), "bytes, error:", d.current.err)
+	}
+	return true
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+	if d.current.err == ErrDecoderClosed {
+		return
+	}
+	d.drainOutput()
+	if d.stream != nil {
+		close(d.stream)
+		d.streamWg.Wait()
+		d.stream = nil
+	}
+	if d.decoders != nil {
+		close(d.decoders)
+		for dec := range d.decoders {
+			dec.Close()
+		}
+		d.decoders = nil
+	}
+	if d.current.d != nil {
+		d.current.d.Close()
+		d.current.d = nil
+	}
+	d.current.err = ErrDecoderClosed
+}
+
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+	return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a Decoder and exposes it as an io.ReadCloser.
+type closeWrapper struct {
+	d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+	return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+	return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+	c.d.Close()
+	return nil
+}
+
+type decodeOutput struct {
+	d   *blockDec
+	b   []byte
+	err error
+}
+
+type decodeStream struct {
+	r io.Reader
+
+	// Blocks ready to be written to output.
+	output chan decodeOutput
+
+	// cancel reading from the input
+	cancel chan struct{}
+}
+
+// errEndOfStream indicates that everything from the stream was read.
+var errEndOfStream = errors.New("end-of-stream")
+
+// Create Decoder:
+// Spawn n block decoders. These accept tasks to decode a block.
+// Create goroutine that handles stream processing, this will send history to decoders as they are available.
+// Decoders update the history as they decode.
+// When a block is returned:
+// 		a) history is sent to the next decoder,
+// 		b) content written to CRC.
+// 		c) return data to WRITER.
+// 		d) wait for next block to return data.
+// Once WRITTEN, the decoders are handed back to the stream decoder for re-use.
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+	defer d.streamWg.Done()
+	frame := newFrameDec(d.o)
+	for stream := range inStream {
+		if debugDecoder {
+			println("got new stream")
+		}
+		br := readerWrapper{r: stream.r}
+	decodeStream:
+		for {
+			frame.history.reset()
+			err := frame.reset(&br)
+			if debugDecoder && err != nil {
+				println("Frame decoder returned", err)
+			}
+			if err == nil && frame.DictionaryID != nil {
+				dict, ok := d.dicts[*frame.DictionaryID]
+				if !ok {
+					err = ErrUnknownDictionary
+				} else {
+					frame.history.setDict(&dict)
+				}
+			}
+			if err != nil {
+				stream.output <- decodeOutput{
+					err: err,
+				}
+				break
+			}
+			if debugDecoder {
+				println("starting frame decoder")
+			}
+
+			// This goroutine will forward history between frames.
+			frame.frameDone.Add(1)
+			frame.initAsync()
+
+			go frame.startDecoder(stream.output)
+		decodeFrame:
+			// Go through all blocks of the frame.
+			for {
+				dec := <-d.decoders
+				select {
+				case <-stream.cancel:
+					if !frame.sendErr(dec, io.EOF) {
+						// To not let the decoder dangle, send it back.
+						stream.output <- decodeOutput{d: dec}
+					}
+					break decodeStream
+				default:
+				}
+				err := frame.next(dec)
+				switch err {
+				case io.EOF:
+					// End of current frame, no error
+					if debugDecoder {
+						println("EOF on next block")
+					}
+					break decodeFrame
+				case nil:
+					continue
+				default:
+					if debugDecoder {
+						println("block decoder returned", err)
+					}
+					break decodeStream
+				}
+			}
+			// All blocks have started decoding, check if there are more frames.
+			if debugDecoder {
+				println("waiting for done")
+			}
+			frame.frameDone.Wait()
+			if debugDecoder {
+				println("done waiting...")
+			}
+		}
+		frame.frameDone.Wait()
+		if debugDecoder {
+			println("Sending EOS")
+		}
+		stream.output <- decodeOutput{err: errEndOfStream}
+	}
+}
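The exported surface above (NewReader, DecodeAll, Close) is designed around a single long-lived decoder. A usage sketch of the stateless path:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// One long-lived decoder, per the doc comment above; DecodeAll
	// may then be called concurrently from multiple goroutines.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	var compressed []byte // a zstd frame obtained elsewhere; elided here
	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded", len(out), "bytes")
}
```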
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..95cc9b8b81f2133d8f884d6bb46504eab66956da
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -0,0 +1,102 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"runtime"
+)
+
+// DOption is an option for creating a decoder.
+type DOption func(*decoderOptions) error
+
+// options retains accumulated state of multiple options.
+type decoderOptions struct {
+	lowMem         bool
+	concurrent     int
+	maxDecodedSize uint64
+	maxWindowSize  uint64
+	dicts          []dict
+}
+
+func (o *decoderOptions) setDefault() {
+	*o = decoderOptions{
+		// use less ram: true for now, but may change.
+		lowMem:        true,
+		concurrent:    runtime.GOMAXPROCS(0),
+		maxWindowSize: MaxWindowSize,
+	}
+	o.maxDecodedSize = 1 << 63
+}
+
+// WithDecoderLowmem will set whether to use a lower amount of memory,
+// at the cost of possibly allocating more while running.
+func WithDecoderLowmem(b bool) DOption {
+	return func(o *decoderOptions) error { o.lowMem = b; return nil }
+}
+
+// WithDecoderConcurrency will set the concurrency,
+// meaning the maximum number of decoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithDecoderConcurrency(n int) DOption {
+	return func(o *decoderOptions) error {
+		if n <= 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// Maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return errors.New("WithDecoderMaxmemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+// If several dictionaries with the same ID are provided, the last one will be used.
+func WithDecoderDicts(dicts ...[]byte) DOption {
+	return func(o *decoderOptions) error {
+		for _, b := range dicts {
+			d, err := loadDict(b)
+			if err != nil {
+				return err
+			}
+			o.dicts = append(o.dicts, *d)
+		}
+		return nil
+	}
+}
+
+// WithDecoderMaxWindow allows setting a maximum window size for decodes.
+// This allows rejecting packets that will cause big memory usage.
+// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
+// If WithDecoderMaxMemory is set to a lower value, that will be used.
+// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec.
+func WithDecoderMaxWindow(size uint64) DOption {
+	return func(o *decoderOptions) error {
+		if size < MinWindowSize {
+			return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
+		}
+		if size > (1<<41)+7*(1<<38) {
+			return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
+		}
+		o.maxWindowSize = size
+		return nil
+	}
+}
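The options compose left to right into decoderOptions before any decoder state is built. A sketch of a decoder hardened for untrusted input (the limits chosen are ours, not recommendations from the package):

```go
package main

import "github.com/klauspost/compress/zstd"

// newHardenedDecoder is illustrative: it caps memory and concurrency
// so hostile frames cannot force large allocations.
func newHardenedDecoder() (*zstd.Decoder, error) {
	return zstd.NewReader(nil,
		zstd.WithDecoderConcurrency(2),
		zstd.WithDecoderMaxMemory(64<<20), // reject decodes beyond 64 MB
		zstd.WithDecoderLowmem(true),
	)
}

func main() {
	dec, err := newHardenedDecoder()
	if err != nil {
		panic(err)
	}
	defer dec.Close()
}
```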
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 0000000000000000000000000000000000000000..a36ae83ef579d65b2b603a69007d987ced450667
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,122 @@
+package zstd
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+	id uint32
+
+	litEnc              *huff0.Scratch
+	llDec, ofDec, mlDec sequenceDec
+	//llEnc, ofEnc, mlEnc []*fseEncoder
+	offsets [3]int
+	content []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+	if d == nil {
+		return 0
+	}
+	return d.id
+}
+
+// DictContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) DictContentSize() int {
+	if d == nil {
+		return 0
+	}
+	return len(d.content)
+}
+
+// Load a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+	// Check static field size.
+	if len(b) <= 8+(3*4) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	d := dict{
+		llDec: sequenceDec{fse: &fseDecoder{}},
+		ofDec: sequenceDec{fse: &fseDecoder{}},
+		mlDec: sequenceDec{fse: &fseDecoder{}},
+	}
+	if !bytes.Equal(b[:4], dictMagic[:]) {
+		return nil, ErrMagicMismatch
+	}
+	d.id = binary.LittleEndian.Uint32(b[4:8])
+	if d.id == 0 {
+		return nil, errors.New("dictionaries cannot have ID 0")
+	}
+
+	// Read literal table
+	var err error
+	d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
+	if err != nil {
+		return nil, err
+	}
+	d.litEnc.Reuse = huff0.ReusePolicyMust
+
+	br := byteReader{
+		b:   b,
+		off: 0,
+	}
+	readDec := func(i tableIndex, dec *fseDecoder) error {
+		if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+			return err
+		}
+		if br.overread() {
+			return io.ErrUnexpectedEOF
+		}
+		err = dec.transform(symbolTableX[i])
+		if err != nil {
+			println("Transform table error:", err)
+			return err
+		}
+		if debugDecoder || debugEncoder {
+			println("Read table ok", "symbolLen:", dec.symbolLen)
+		}
+		// Set decoders as predefined so they aren't reused.
+		dec.preDefined = true
+		return nil
+	}
+
+	if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+		return nil, err
+	}
+	if br.remain() < 12 {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	d.offsets[0] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[1] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[2] = int(br.Uint32())
+	br.advance(4)
+	if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+		return nil, errors.New("invalid offset in dictionary")
+	}
+	d.content = make([]byte, br.remain())
+	copy(d.content, br.unread())
+	if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+		return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+	}
+
+	return &d, nil
+}
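loadDict validates the magic, the ID, and the three entropy tables up front, so a malformed dictionary fails at registration rather than mid-decode. A usage sketch via the public option (the dictionary path is illustrative):

```go
package main

import (
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A dictionary built externally, e.g. with `zstd --train`.
	dictBytes, err := os.ReadFile("my.dict") // illustrative path
	if err != nil {
		panic(err)
	}
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictBytes))
	if err != nil {
		panic(err) // bad magic, ID 0, or corrupt tables surface here
	}
	defer dec.Close()
	// Frames referencing the dictionary ID now decode normally.
}
```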
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 0000000000000000000000000000000000000000..295cd602a424979f80713b90f54ae0b394991a82
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,178 @@
+package zstd
+
+import (
+	"fmt"
+	"math/bits"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+	dictShardBits = 6
+)
+
+type fastBase struct {
+	// cur is the offset at the start of hist
+	cur int32
+	// maximum offset. Should be at least 2x block size.
+	maxMatchOff int32
+	hist        []byte
+	crc         *xxhash.Digest
+	tmp         [8]byte
+	blk         *blockEnc
+	lastDictID  uint32
+	lowMem      bool
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+	return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+	crc := e.crc.Sum(e.tmp[:0])
+	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+	return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or, if the input size is known (> 0) and smaller, a reduced
+// window that is still large enough to contain the input.
+func (e *fastBase) WindowSize(size int64) int32 {
+	if size > 0 && size < int64(e.maxMatchOff) {
+		b := int32(1) << uint(bits.Len(uint(size)))
+		// Keep minimum window.
+		if b < 1024 {
+			b = 1024
+		}
+		return b
+	}
+	return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+	return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > bufferReset {
+		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	}
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.ensureHist(len(src))
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+	if cap(e.hist) >= n {
+		return
+	}
+	l := e.maxMatchOff
+	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+		l += maxCompressedBlockSize
+	} else {
+		l += e.maxMatchOff
+	}
+	// Make it at least 1MB.
+	if l < 1<<20 && !e.lowMem {
+		l = 1 << 20
+	}
+	// Make it at least the requested size.
+	if l < int32(n) {
+		l = int32(n)
+	}
+	e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+	if debugAsserts {
+		if s < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if t < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if s-t > e.maxMatchOff {
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
+		}
+		if len(src)-int(s) > maxCompressedBlockSize {
+			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+		}
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+	if e.blk == nil {
+		e.blk = &blockEnc{lowMem: e.lowMem}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	if d != nil {
+		low := e.lowMem
+		if singleBlock {
+			e.lowMem = true
+		}
+		e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+		e.lowMem = low
+	}
+
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
+	if e.cur < bufferReset {
+		e.cur += e.maxMatchOff + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+	if d != nil {
+		// Set offsets (currently not used)
+		for i, off := range d.offsets {
+			e.blk.recentOffsets[i] = uint32(off)
+			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+		}
+		// Transfer litenc.
+		e.blk.dictLitEnc = d.litEnc
+		e.hist = append(e.hist, d.content...)
+	}
+}
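WindowSize rounds a known input size up to a power of two with a 1 KB floor, rather than always paying for the full match window. A standalone restatement of that arithmetic (the function name is ours):

```go
package main

import (
	"fmt"
	"math/bits"
)

// windowFor mirrors fastBase.WindowSize above for a known input size.
func windowFor(size, maxMatchOff int64) int64 {
	if size <= 0 || size >= maxMatchOff {
		return maxMatchOff
	}
	w := int64(1) << uint(bits.Len(uint(size))) // power of two > size
	if w < 1024 {
		w = 1024 // keep the minimum window
	}
	return w
}

func main() {
	fmt.Println(windowFor(100, 1<<21))  // 1024: floored at 1 KB
	fmt.Println(windowFor(5000, 1<<21)) // 8192: next power of two
}
```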
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 0000000000000000000000000000000000000000..96028ecd8366ca7fcd32abc5c756b0ad3c94cd48
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,558 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress"
+)
+
+const (
+	bestLongTableBits = 22                     // Bits used in the long match table
+	bestLongTableSize = 1 << bestLongTableBits // Size of the table
+	bestLongLen       = 8                      // Bytes used for table hash
+
+	// Note: Increasing the short table bits or making the hash shorter
+	// can actually lead to compression degradation since it will 'steal' more from the
+	// long match table and match offsets are quite big.
+	// This greatly depends on the type of input.
+	bestShortTableBits = 18                      // Bits used in the short match table
+	bestShortTableSize = 1 << bestShortTableBits // Size of the table
+	bestShortLen       = 4                       // Bytes used for table hash
+
+)
+
+type match struct {
+	offset int32
+	s      int32
+	length int32
+	rep    int32
+	est    int32
+}
+
+const highScore = 25000
+
+// estBits will estimate output bits from predefined tables.
+func (m *match) estBits(bitsPerByte int32) {
+	mlc := mlCode(uint32(m.length - zstdMinMatch))
+	var ofc uint8
+	if m.rep < 0 {
+		ofc = ofCode(uint32(m.s-m.offset) + 3)
+	} else {
+		ofc = ofCode(uint32(m.rep))
+	}
+	// Cost of the match encoding, excluding literals.
+	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
+
+	// Add cost of match encoding...
+	m.est = int32(ofTT.outBits + mlTT.outBits)
+	m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
+	// Subtract savings compared to literal encoding...
+	m.est -= (m.length * bitsPerByte) >> 10
+	if m.est > 0 {
+		// Unlikely gain..
+		m.length = 0
+		m.est = highScore
+	}
+}
+
+// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+	fastBase
+	table         [bestShortTableSize]prevEntry
+	longTable     [bestLongTableSize]prevEntry
+	dictTable     []prevEntry
+	dictLongTable []prevEntry
+}
+
+// Encode encodes the block, trading encoding speed for the best compression ratio.
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 4
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = prevEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			v2 := e.table[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.table[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			v2 := e.longTable[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.longTable[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Use this to estimate literal cost.
+	// Scaled by 10 bits.
+	bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
+	// Huffman can never go < 1 bit/byte
+	if bitsPerByte < 1024 {
+		bitsPerByte = 1024
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	const kSearchStrength = 10
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+	offset3 := int32(blk.recentOffsets[2])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	_ = addLiterals
+
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		if debugAsserts && canRepeat && offset1 == 0 {
+			panic("offset0 was 0")
+		}
+
+		bestOf := func(a, b match) match {
+			if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
+				return a
+			}
+			return b
+		}
+		const goodEnough = 100
+
+		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
+		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
+		candidateL := e.longTable[nextHashL]
+		candidateS := e.table[nextHashS]
+
+		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+				return match{s: s, est: highScore}
+			}
+			if debugAsserts {
+				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
+					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+				}
+			}
+			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+			m.estBits(bitsPerByte)
+			return m
+		}
+
+		best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
+		best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+		best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+
+		if canRepeat && best.length < goodEnough {
+			cv32 := uint32(cv >> 8)
+			spp := s + 1
+			best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
+			best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
+			best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
+			if best.length > 0 {
+				cv32 = uint32(cv >> 24)
+				spp += 2
+				best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
+				best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
+				best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
+			}
+		}
+		// Load next and check...
+		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
+		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+
+		// Look far ahead, unless we have a really long match already...
+		if best.length < goodEnough {
+			// No match found, move forward in the input, no need to check forward...
+			if best.length < 4 {
+				s += 1 + (s-nextEmit)>>(kSearchStrength-1)
+				if s >= sLimit {
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+
+			s++
+			candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
+			cv = load6432(src, s)
+			cv2 := load6432(src, s+1)
+			candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
+			candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
+
+			// Short at s+1
+			best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+			// Long at s+1, s+2
+			best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
+			best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
+			best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
+			best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+			if false {
+				// Short at s+3.
+				// Too often worse...
+				best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
+			}
+			// See if we can find a better match by checking where the current best ends.
+			// Use that offset to see if we can find a better full match.
+			if sAt := best.s + best.length; sAt < sLimit {
+				nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
+				candidateEnd := e.longTable[nextHashL]
+				if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
+					bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
+					if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
+						bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
+					}
+					best = bestEnd
+				}
+			}
+		}
+
+		if debugAsserts {
+			if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
+				panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
+			}
+		}
+
+		// We have a match, we can store the forward value
+		if best.rep > 0 {
+			s = best.s
+			var seq seq
+			seq.matchLen = uint32(best.length - zstdMinMatch)
+
+			// We might be able to match backwards.
+			// Extend as long as we can.
+			start := best.s
+			// We end the search early, so we don't risk 0 literals
+			// and have to do special offset treatment.
+			startLimit := nextEmit + 1
+
+			tMin := s - e.maxMatchOff
+			if tMin < 0 {
+				tMin = 0
+			}
+			repIndex := best.offset
+			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+				repIndex--
+				start--
+				seq.matchLen++
+			}
+			addLiterals(&seq, start)
+
+			// rep 0
+			seq.offset = uint32(best.rep)
+			if debugSequences {
+				println("repeat sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Index match start+1 (long) -> s - 1
+			index0 := s
+			s = best.s + best.length
+
+			nextEmit = s
+			if s >= sLimit {
+				if debugEncoder {
+					println("repeat ended", s, best.length)
+				}
+				break encodeLoop
+			}
+			// Index skipped...
+			off := index0 + e.cur
+			for index0 < s-1 {
+				cv0 := load6432(src, index0)
+				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
+				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+				e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+				off++
+				index0++
+			}
+			switch best.rep {
+			case 2:
+				offset1, offset2 = offset2, offset1
+			case 3:
+				offset1, offset2, offset3 = offset3, offset1, offset2
+			}
+			cv = load6432(src, s)
+			continue
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		s = best.s
+		t := best.offset
+		offset1, offset2, offset3 = s-t, offset1, offset2
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the n-byte match as long as possible.
+		l := best.length
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) -> s - 1
+		index0 := s - l + 1
+		// Index every entry up to s-1.
+		for index0 < s-1 {
+			cv0 := load6432(src, index0)
+			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
+			off := index0 + e.cur
+			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+			index0++
+		}
+
+		cv = load6432(src, s)
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
+			nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
+
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+			e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	blk.recentOffsets[2] = uint32(offset3)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	e.ensureHist(len(src))
+	e.Encode(blk, src)
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]prevEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = bestShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hashLen(cv, hashLog, bestShortLen)      // 0 -> 4
+			nextHash1 := hashLen(cv>>8, hashLog, bestShortLen)  // 1 -> 5
+			nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
+			nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
+			e.dictTable[nextHash] = prevEntry{
+				prev:   e.dictTable[nextHash].offset,
+				offset: i,
+			}
+			e.dictTable[nextHash1] = prevEntry{
+				prev:   e.dictTable[nextHash1].offset,
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = prevEntry{
+				prev:   e.dictTable[nextHash2].offset,
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = prevEntry{
+				prev:   e.dictTable[nextHash3].offset,
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+	}
+
+	// Init or copy dict long table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hashLen(cv, bestLongTableBits, bestLongLen)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hashLen(cv, bestLongTableBits, bestLongLen)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+	}
+	// Reset tables to initial state
+	copy(e.longTable[:], e.dictLongTable)
+	copy(e.table[:], e.dictTable)
+	e.cur = e.maxMatchOff
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
new file mode 100644
index 0000000000000000000000000000000000000000..602c05ee0c4cec83c3c782523d899bf3a33b0bd5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -0,0 +1,1237 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+	betterLongTableBits = 19                       // Bits used in the long match table
+	betterLongTableSize = 1 << betterLongTableBits // Size of the table
+	betterLongLen       = 8                        // Bytes used for table hash
+
+	// Note: Increasing the short table bits or making the hash shorter
+	// can actually lead to compression degradation since it will 'steal' more from the
+	// long match table and match offsets are quite big.
+	// This greatly depends on the type of input.
+	betterShortTableBits = 13                        // Bits used in the short match table
+	betterShortTableSize = 1 << betterShortTableBits // Size of the table
+	betterShortLen       = 5                         // Bytes used for table hash
+
+	betterLongTableShardCnt  = 1 << (betterLongTableBits - dictShardBits)    // Number of shards in the table
+	betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
+
+	betterShortTableShardCnt  = 1 << (betterShortTableBits - dictShardBits)     // Number of shards in the table
+	betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard
+)
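+
+// For scale (assuming dictShardBits = 6, as used by the dictionary code):
+// betterShortTableShardCnt = 1<<(13-6) = 128 and betterLongTableShardCnt =
+// 1<<(19-6) = 8192, so both tables are split into shards of 64 entries.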
+
+type prevEntry struct {
+	offset int32
+	prev   int32
+}
+
+// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
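+// As a sketch of the lookup (using the names below, not a public API):
+//
+//	candidateL := e.longTable[hashLen(cv, betterLongTableBits, betterLongLen)]
+//	candidateS := e.table[hashLen(cv, betterShortTableBits, betterShortLen)]
+//
+// Both candidateL.offset and candidateL.prev are tried, and a short match is
+// only kept if no long match at s or s+1 beats it.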
+type betterFastEncoder struct {
+	fastBase
+	table     [betterShortTableSize]tableEntry
+	longTable [betterLongTableSize]prevEntry
+}
+
+type betterFastEncoderDict struct {
+	betterFastEncoder
+	dictTable            []tableEntry
+	dictLongTable        []prevEntry
+	shortTableShardDirty [betterShortTableShardCnt]bool
+	longTableShardDirty  [betterLongTableShardCnt]bool
+	allDirty             bool
+}
+
+// Encode improves compression over the double-fast encoder at the cost of speed.
+func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
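+	// (This loop runs at most once: every path below ends in a break.)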
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			v2 := e.longTable[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.longTable[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 9
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+		var matched int32
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			off := s + e.cur
+			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+						}
+						break encodeLoop
+					}
+					// Index the positions we skipped over, in both tables.
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+						}
+						break encodeLoop
+					}
+
+					// Index the positions we skipped over, in both tables.
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := candidateL.offset - e.cur
+			coffsetLP := candidateL.prev - e.cur
+
+			// Check if we have a long match.
+			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetL+8, src) + 8
+				t = coffsetL
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+
+				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+					// Found a long match, at least 8 bytes.
+					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+					if prevMatch > matched {
+						matched = prevMatch
+						t = coffsetLP
+					}
+					if debugAsserts && s <= t {
+						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+					}
+					if debugAsserts && s-t > e.maxMatchOff {
+						panic("s - t > e.maxMatchOff")
+					}
+					if debugMatches {
+						println("long match")
+					}
+				}
+				break
+			}
+
+			// Check if we have a long match on prev.
+			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+				t = coffsetLP
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			coffsetS := candidateS.offset - e.cur
+
+			// Check if we have a short match.
+			if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = candidateL.offset - e.cur
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("long match (after short)")
+						}
+						break
+					}
+				}
+
+				// Check prev long...
+				coffsetL = candidateL.prev - e.cur
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match (after short)")
+						}
+						break
+					}
+				}
+				t = coffsetS
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
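+			// The step grows with the distance since the last emit: one extra
+			// byte of step per 2^(kSearchStrength-1) = 256 unmatched bytes, so
+			// incompressible regions are skipped through quickly.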
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// Try to find a better match by searching for a long match at the end of the current best match
+		if s+matched < sLimit {
+			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
+			cv := load3232(src, s)
+			candidateL := e.longTable[nextHashL]
+			coffsetL := candidateL.offset - e.cur - matched
+			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				// Found a long match, at least 4 bytes.
+				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				if matchedNext > matched {
+					t = coffsetL
+					matched = matchedNext
+					if debugMatches {
+						println("long match at end-of-match")
+					}
+				}
+			}
+
+			// Check prev long...
+			if true {
+				coffsetL = candidateL.prev - e.cur - matched
+				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+					// Found a long match, at least 4 bytes.
+					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					if matchedNext > matched {
+						t = coffsetL
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match at end-of-match")
+						}
+					}
+				}
+			}
+		}
+		// A match has been found. Update recent offsets.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the n-byte match as long as possible.
+		l := matched
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
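+		// Offsets 1-3 are reserved for the repeat codes, so a real match
+		// offset is stored as (s-t)+3.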
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) -> s - 1
+		index0 := s - l + 1
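+		// Each iteration indexes two positions: index0 in the long table and
+		// index0+1 in the short table.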
+		for index0 < s-1 {
+			cv0 := load6432(src, index0)
+			cv1 := cv0 >> 8
+			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+			off := index0 + e.cur
+			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+			e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+			index0 += 2
+		}
+
+		cv = load6432(src, s)
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+			e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	e.ensureHist(len(src))
+	e.Encode(blk, src)
+}
+
+// Encode improves compression over the double-fast encoder at the cost of
+// speed, and tracks dirty table shards so dictionary state can be restored.
+func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			e.allDirty = true
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			v2 := e.longTable[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.longTable[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		e.allDirty = true
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 9
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+		var matched int32
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			off := s + e.cur
+			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			e.markShortShardDirty(nextHashS)
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+						}
+						break encodeLoop
+					}
+					// Index the positions we skipped over, in both tables.
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+						}
+						break encodeLoop
+					}
+
+					// Index the positions we skipped over, in both tables.
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := candidateL.offset - e.cur
+			coffsetLP := candidateL.prev - e.cur
+
+			// Check if we have a long match.
+			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetL+8, src) + 8
+				t = coffsetL
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+
+				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+					// Found a long match, at least 8 bytes.
+					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+					if prevMatch > matched {
+						matched = prevMatch
+						t = coffsetLP
+					}
+					if debugAsserts && s <= t {
+						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+					}
+					if debugAsserts && s-t > e.maxMatchOff {
+						panic("s - t > e.maxMatchOff")
+					}
+					if debugMatches {
+						println("long match")
+					}
+				}
+				break
+			}
+
+			// Check if we have a long match on prev.
+			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+				t = coffsetLP
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			coffsetS := candidateS.offset - e.cur
+
+			// Check if we have a short match.
+			if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = candidateL.offset - e.cur
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+				e.markLongShardDirty(nextHashL)
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("long match (after short)")
+						}
+						break
+					}
+				}
+
+				// Check prev long...
+				coffsetL = candidateL.prev - e.cur
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match (after short)")
+						}
+						break
+					}
+				}
+				t = coffsetS
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// Try to find a better match by searching for a long match at the end of the current best match
+		if s+matched < sLimit {
+			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
+			cv := load3232(src, s)
+			candidateL := e.longTable[nextHashL]
+			coffsetL := candidateL.offset - e.cur - matched
+			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				// Found a long match, at least 4 bytes.
+				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				if matchedNext > matched {
+					t = coffsetL
+					matched = matchedNext
+					if debugMatches {
+						println("long match at end-of-match")
+					}
+				}
+			}
+
+			// Check prev long...
+			if true {
+				coffsetL = candidateL.prev - e.cur - matched
+				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+					// Found a long match, at least 4 bytes.
+					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					if matchedNext > matched {
+						t = coffsetL
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match at end-of-match")
+						}
+					}
+				}
+			}
+		}
+		// A match has been found. Update recent offsets.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the n-byte match as long as possible.
+		l := matched
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) -> s - 1
+		index0 := s - l + 1
+		for index0 < s-1 {
+			cv0 := load6432(src, index0)
+			cv1 := cv0 >> 8
+			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+			off := index0 + e.cur
+			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+			e.markLongShardDirty(h0)
+			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+			e.markShortShardDirty(h1)
+			index0 += 2
+		}
+
+		cv = load6432(src, s)
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.markShortShardDirty(nextHashS)
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// Reset will reset the encoder state. The plain betterFastEncoder does not
+// support dictionaries; use betterFastEncoderDict for that.
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("betterFastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = betterShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hashLen(cv, hashLog, betterShortLen)      // 0 -> 4
+			nextHash1 := hashLen(cv>>8, hashLog, betterShortLen)  // 1 -> 5
+			nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+			nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = tableEntry{
+				val:    uint32(cv >> 24),
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Init or copy dict long table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hashLen(cv, betterLongTableBits, betterLongLen)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hashLen(cv, betterLongTableBits, betterLongLen)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Reset table to initial state
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.shortTableShardDirty {
+				if e.shortTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterShortTableShardCnt
+		const shardSize = betterShortTableShardSize
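+		// If more than 2/3 of the shards are dirty, one bulk copy is cheaper
+		// than copying the dirty shards individually.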
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.table[:], e.dictTable)
+			for i := range e.shortTableShardDirty {
+				e.shortTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.shortTableShardDirty {
+				if !e.shortTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+				e.shortTableShardDirty[i] = false
+			}
+		}
+	}
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.longTableShardDirty {
+				if e.longTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterLongTableShardCnt
+		const shardSize = betterLongTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.longTable[:], e.dictLongTable)
+			for i := range e.longTableShardDirty {
+				e.longTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.longTableShardDirty {
+				if !e.longTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+				e.longTableShardDirty[i] = false
+			}
+		}
+	}
+	e.cur = e.maxMatchOff
+	e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6b3104240b0a78a9723f542f37bacbc3e738953
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1124 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+	dFastLongTableBits = 17                      // Bits used in the long match table
+	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastLongLen       = 8                       // Bytes used for table hash
+
+	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+	dLongTableShardSize = dFastLongTableSize / tableShardCnt        // Size of an individual shard
+
+	dFastShortTableBits = tableBits                // Bits used in the short match table
+	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastShortLen       = 5                        // Bytes used for table hash
+
+)
+
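+// doubleFastEncoder mirrors zstd's double-fast strategy: a short table hashing
+// 5 bytes and a long table hashing 8 bytes, each slot holding a single
+// candidate (no prev chain, unlike betterFastEncoder).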
+type doubleFastEncoder struct {
+	fastEncoder
+	longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+	fastEncoderDict
+	longTable           [dFastLongTableSize]tableEntry
+	dictLongTable       []tableEntry
+	longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics the functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes; we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes; we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlen(s+4, t+4, src) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
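+		// Indexing both ends of the match improves the odds of future matches
+		// without hashing every position in between.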
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+		e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+		e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
+
+		cv = load6432(src, s)
+
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	if e.cur >= bufferReset {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		for i := range e.longTable[:] {
+			e.longTable[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// src is used as-is; no history is prepended.
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		for {
+
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if len(blk.sequences) > 2 {
+				if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					//length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes; we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes; we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t > e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+		e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+		e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
+
+		cv = load6432(src, s)
+
+		if len(blk.sequences) <= 2 {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+
+	// We do not store history, so we must offset e.cur to avoid false matches for the next user.
+	if e.cur < bufferReset {
+		e.cur += int32(len(src))
+	}
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
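+// It matches doubleFastEncoder.Encode, but marks the table shards it touches
+// as dirty so Reset can restore only those shards from the dictionary tables.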
+func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = tableEntry{}
+			}
+			e.markAllShardsDirty()
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.markAllShardsDirty()
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = entry
+			e.markShardDirty(nextHashS)
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, lenght)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// The reference encoder checks all 8 bytes; we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				e.markLongShardDirty(nextHashL)
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// The reference encoder checks all 8 bytes; we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlen(s+4, t+4, src) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+		longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
+		e.longTable[longHash1] = te0
+		e.longTable[longHash2] = te1
+		e.markLongShardDirty(longHash1)
+		e.markLongShardDirty(longHash2)
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
+		hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
+		e.table[hashVal1] = te0
+		e.markShardDirty(hashVal1)
+		e.table[hashVal2] = te1
+		e.markShardDirty(hashVal2)
+
+		cv = load6432(src, s)
+
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+
+			// We have at least a 4-byte match.
+			// No need to check backwards. We come straight from a match.
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = entry
+			e.markShardDirty(nextHashS)
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+	// If we encoded more than 64K mark all dirty.
+	if len(src) > 64<<10 {
+		e.markAllShardsDirty()
+	}
+}
+
+// Reset will reset the encoder. Dictionaries are not supported and a non-nil d will panic.
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.fastEncoder.Reset(d, singleBlock)
+	if d != nil {
+		panic("doubleFastEncoder: Reset with dict not supported")
+	}
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	allDirty := e.allDirty
+	e.fastEncoderDict.Reset(d, singleBlock)
+	if d == nil {
+		return
+	}
+
+	// Init or copy dict table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]tableEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+				val:    uint32(cv),
+				offset: e.maxMatchOff,
+			}
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+				e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+					val:    uint32(cv),
+					offset: i,
+				}
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+	// Reset table to initial state
+	e.cur = e.maxMatchOff
+
+	dirtyShardCnt := 0
+	if !allDirty {
+		for i := range e.longTableShardDirty {
+			if e.longTableShardDirty[i] {
+				dirtyShardCnt++
+			}
+		}
+	}
+
+	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+		copy(e.longTable[:], e.dictLongTable)
+		for i := range e.longTableShardDirty {
+			e.longTableShardDirty[i] = false
+		}
+		return
+	}
+	for i := range e.longTableShardDirty {
+		if !e.longTableShardDirty[i] {
+			continue
+		}
+
+		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		e.longTableShardDirty[i] = false
+	}
+}
+
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/dLongTableShardSize] = true
+}
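+
+// A sketch of the shard bookkeeping above (illustrative, mirroring Reset):
+// writes mark only the shard containing the touched entry, so Reset can
+// restore just the dirty shards from dictLongTable instead of copying the
+// whole table:
+//
+//	shard := entryNum / dLongTableShardSize
+//	copy(e.longTable[shard*dLongTableShardSize:(shard+1)*dLongTableShardSize],
+//		e.dictLongTable[shard*dLongTableShardSize:(shard+1)*dLongTableShardSize])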
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 0000000000000000000000000000000000000000..f2502629bc551bf0593433fdd7c167dafabc2e8e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,1019 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"math/bits"
+)
+
+const (
+	tableBits        = 15                               // Bits used in the table
+	tableSize        = 1 << tableBits                   // Size of the table
+	tableShardCnt    = 1 << (tableBits - dictShardBits) // Number of shards in the table
+	tableShardSize   = tableSize / tableShardCnt        // Size of an individual shard
+	tableFastHashLen = 6
+	tableMask        = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+	maxMatchLength   = 131074
+)
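+
+// For scale: with tableBits = 15 the table holds 1<<15 = 32768 entries of
+// 8 bytes each (tableEntry below), about 256 KiB per fast encoder, and
+// dictionary dirty-tracking works in tableShardCnt shards of tableShardSize
+// entries each.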
+
+type tableEntry struct {
+	val    uint32
+	offset int32
+}
+
+type fastEncoder struct {
+	fastBase
+	table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+	fastEncoder
+	dictTable       []tableEntry
+	tableShardDirty [tableShardCnt]bool
+	allDirty        bool
+}
+
+// Encode mimics the functionality in zstd_fast.c.
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 7
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks,
+		// which we enforce by not using them for the first 3 matches.
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)
+			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				var length int32
+				// length = 4 + e.matchlen(s+6, repIndex+4, src)
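+				// The block below is matchlen inlined by hand: it compares
+				// 8 bytes at a time with XOR; the trailing zero count of the
+				// first non-zero difference, divided by 8, gives the index of
+				// the mismatching byte.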
+				{
+					a := src[s+6:]
+					b := src[repIndex+4:]
+					endI := len(a) & (math.MaxInt32 - 7)
+					length = int32(endI) + 4
+					for i := 0; i < endI; i += 8 {
+						if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+							length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+							break
+						}
+					}
+				}
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debugEncoder {
+						println("repeat ended", s, length)
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		var l int32
+		{
+			a := src[s+4:]
+			b := src[t+4:]
+			endI := len(a) & (math.MaxInt32 - 7)
+			l = int32(endI) + 4
+			for i := 0; i < endI; i += 8 {
+				if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+					l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+					break
+				}
+			}
+		}
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4-byte match.
+			// No need to check backwards. We come straight from a match.
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			var l int32
+			{
+				a := src[s+4:]
+				b := src[o2+4:]
+				endI := len(a) & (math.MaxInt32 - 7)
+				l = int32(endI) + 4
+				for i := 0; i < endI; i += 8 {
+					if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+						l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+						break
+					}
+				}
+			}
+
+			// Store this, since we have it.
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+	if debugEncoder {
+		if len(src) > maxBlockSize {
+			panic("src too big")
+		}
+	}
+
+	// Protect against e.cur wraparound.
+	if e.cur >= bufferReset {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks,
+		// which we enforce by not using them for the first 3 matches.
+
+		for {
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)
+			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+			if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				// length := 4 + e.matchlen(s+6, repIndex+4, src)
+				// length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+				var length int32
+				{
+					a := src[s+6:]
+					b := src[repIndex+4:]
+					endI := len(a) & (math.MaxInt32 - 7)
+					length = int32(endI) + 4
+					for i := 0; i < endI; i += 8 {
+						if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+							length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+							break
+						}
+					}
+				}
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debugEncoder {
+						println("repeat ended", s, length)
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff))
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && t < 0 {
+			panic(fmt.Sprintf("t (%d) < 0 ", t))
+		}
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlenNoHist(s+4, t+4, src) + 4
+		// l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+		var l int32
+		{
+			a := src[s+4:]
+			b := src[t+4:]
+			endI := len(a) & (math.MaxInt32 - 7)
+			l = int32(endI) + 4
+			for i := 0; i < endI; i += 8 {
+				if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+					l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+					break
+				}
+			}
+		}
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4-byte match.
+			// No need to check backwards. We come straight from a match.
+			//l := 4 + e.matchlenNoHist(s+4, o2+4, src)
+			// l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+			var l int32
+			{
+				a := src[s+4:]
+				b := src[o2+4:]
+				endI := len(a) & (math.MaxInt32 - 7)
+				l = int32(endI) + 4
+				for i := 0; i < endI; i += 8 {
+					if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+						l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+						break
+					}
+				}
+			}
+
+			// Store this, since we have it.
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+	// We do not store history, so we must offset e.cur to avoid false matches for the next user.
+	if e.cur < bufferReset {
+		e.cur += int32(len(src))
+	}
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+	if e.allDirty || len(src) > 32<<10 {
+		e.fastEncoder.Encode(blk, src)
+		e.allDirty = true
+		return
+	}
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 7
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks,
+		// which we enforce by not using them for the first 3 matches.
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)
+			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.markShardDirty(nextHash)
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+			e.markShardDirty(nextHash2)
+
+			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				var length int32
+				// length = 4 + e.matchlen(s+6, repIndex+4, src)
+				{
+					a := src[s+6:]
+					b := src[repIndex+4:]
+					endI := len(a) & (math.MaxInt32 - 7)
+					length = int32(endI) + 4
+					for i := 0; i < endI; i += 8 {
+						if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+							length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+							break
+						}
+					}
+				}
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debugEncoder {
+						println("repeat ended", s, length)
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		var l int32
+		{
+			a := src[s+4:]
+			b := src[t+4:]
+			endI := len(a) & (math.MaxInt32 - 7)
+			l = int32(endI) + 4
+			for i := 0; i < endI; i += 8 {
+				if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+					l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+					break
+				}
+			}
+		}
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4-byte match.
+			// No need to check backwards. We come straight from a match.
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			var l int32
+			{
+				a := src[s+4:]
+				b := src[o2+4:]
+				endI := len(a) & (math.MaxInt32 - 7)
+				l = int32(endI) + 4
+				for i := 0; i < endI; i += 8 {
+					if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+						l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+						break
+					}
+				}
+			}
+
+			// Store this, since we have it.
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.markShardDirty(nextHash)
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// Reset will reset the encoder. Dictionaries are not supported and a non-nil d will panic.
+func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("fastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		end := e.maxMatchOff + int32(len(d.content)) - 8
+		for i := e.maxMatchOff; i < end; i += 3 {
+			const hashLog = tableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
+			nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
+			nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	e.cur = e.maxMatchOff
+	dirtyShardCnt := 0
+	if !e.allDirty {
+		for i := range e.tableShardDirty {
+			if e.tableShardDirty[i] {
+				dirtyShardCnt++
+			}
+		}
+	}
+
+	const shardCnt = tableShardCnt
+	const shardSize = tableShardSize
+	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+		copy(e.table[:], e.dictTable)
+		for i := range e.tableShardDirty {
+			e.tableShardDirty[i] = false
+		}
+		e.allDirty = false
+		return
+	}
+	for i := range e.tableShardDirty {
+		if !e.tableShardDirty[i] {
+			continue
+		}
+
+		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		e.tableShardDirty[i] = false
+	}
+	e.allDirty = false
+}
+
+func (e *fastEncoderDict) markAllShardsDirty() {
+	e.allDirty = true
+}
+
+func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
+	e.tableShardDirty[entryNum/tableShardSize] = true
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6e315969b00b89c8536b5d9f4753a896d3aba92
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -0,0 +1,599 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	rdebug "runtime/debug"
+	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Encoder provides encoding to Zstandard.
+// An Encoder can be used either for compressing a stream via the
+// io.WriteCloser interface it implements, or for many independent
+// encodes via the EncodeAll function.
+// Smaller encodes are encouraged to use the EncodeAll function.
+// Use NewWriter to create a new instance.
+type Encoder struct {
+	o        encoderOptions
+	encoders chan encoder
+	state    encoderState
+	init     sync.Once
+}
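+
+// A minimal streaming sketch (illustrative; dst and src stand for the
+// caller's io.Writer and io.Reader):
+//
+//	enc, err := NewWriter(dst)
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err = io.Copy(enc, src); err != nil {
+//		enc.Close()
+//		// handle error
+//	}
+//	err = enc.Close() // flushes buffered data and writes the CRC if enabled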
+
+type encoder interface {
+	Encode(blk *blockEnc, src []byte)
+	EncodeNoHist(blk *blockEnc, src []byte)
+	Block() *blockEnc
+	CRC() *xxhash.Digest
+	AppendCRC([]byte) []byte
+	WindowSize(size int64) int32
+	UseBlock(*blockEnc)
+	Reset(d *dict, singleBlock bool)
+}
+
+type encoderState struct {
+	w                io.Writer
+	filling          []byte
+	current          []byte
+	previous         []byte
+	encoder          encoder
+	writing          *blockEnc
+	err              error
+	writeErr         error
+	nWritten         int64
+	nInput           int64
+	frameContentSize int64
+	headerWritten    bool
+	eofWritten       bool
+	fullFrameWritten bool
+
+	// This waitgroup indicates an encode is running.
+	wg sync.WaitGroup
+	// This waitgroup indicates we have a block encoding/writing.
+	wWg sync.WaitGroup
+}
+
+// NewWriter will create a new Zstandard encoder.
+// If the encoder will be used for encoding blocks, a nil writer can be used.
+func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+	initPredefined()
+	var e Encoder
+	e.o.setDefault()
+	for _, o := range opts {
+		err := o(&e.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if w != nil {
+		e.Reset(w)
+	}
+	return &e, nil
+}
+
+func (e *Encoder) initialize() {
+	if e.o.concurrent == 0 {
+		e.o.setDefault()
+	}
+	e.encoders = make(chan encoder, e.o.concurrent)
+	for i := 0; i < e.o.concurrent; i++ {
+		enc := e.o.encoder()
+		e.encoders <- enc
+	}
+}
+
+// Reset will re-initialize the writer and new writes will encode to the supplied writer
+// as a new, independent stream.
+func (e *Encoder) Reset(w io.Writer) {
+	s := &e.state
+	s.wg.Wait()
+	s.wWg.Wait()
+	if cap(s.filling) == 0 {
+		s.filling = make([]byte, 0, e.o.blockSize)
+	}
+	if cap(s.current) == 0 {
+		s.current = make([]byte, 0, e.o.blockSize)
+	}
+	if cap(s.previous) == 0 {
+		s.previous = make([]byte, 0, e.o.blockSize)
+	}
+	if s.encoder == nil {
+		s.encoder = e.o.encoder()
+	}
+	if s.writing == nil {
+		s.writing = &blockEnc{lowMem: e.o.lowMem}
+		s.writing.init()
+	}
+	s.writing.initNewEncode()
+	s.filling = s.filling[:0]
+	s.current = s.current[:0]
+	s.previous = s.previous[:0]
+	s.encoder.Reset(e.o.dict, false)
+	s.headerWritten = false
+	s.eofWritten = false
+	s.fullFrameWritten = false
+	s.w = w
+	s.err = nil
+	s.nWritten = 0
+	s.nInput = 0
+	s.writeErr = nil
+	s.frameContentSize = 0
+}
+
+// ResetContentSize will reset and set a content size for the next stream.
+// If the number of bytes written does not match the size given, an error
+// will be returned when calling Close().
+// The content size is cleared when Reset is called.
+// A size <= 0 results in no content size being set.
+func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
+	e.Reset(w)
+	if size >= 0 {
+		e.state.frameContentSize = size
+	}
+}
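+
+// For example (sketch): after ResetContentSize(w, 1024), Close will return an
+// error unless exactly 1024 bytes were written, since Close compares the
+// input count against the frame content size.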
+
+// Write data to the encoder.
+// Input data will be buffered and as the buffer fills up
+// content will be compressed and written to the output.
+// When done writing, use Close to flush the remaining output
+// and write CRC if requested.
+func (e *Encoder) Write(p []byte) (n int, err error) {
+	s := &e.state
+	for len(p) > 0 {
+		if len(p)+len(s.filling) < e.o.blockSize {
+			if e.o.crc {
+				_, _ = s.encoder.CRC().Write(p)
+			}
+			s.filling = append(s.filling, p...)
+			return n + len(p), nil
+		}
+		add := p
+		if len(p)+len(s.filling) > e.o.blockSize {
+			add = add[:e.o.blockSize-len(s.filling)]
+		}
+		if e.o.crc {
+			_, _ = s.encoder.CRC().Write(add)
+		}
+		s.filling = append(s.filling, add...)
+		p = p[len(add):]
+		n += len(add)
+		if len(s.filling) < e.o.blockSize {
+			return n, nil
+		}
+		err := e.nextBlock(false)
+		if err != nil {
+			return n, err
+		}
+		if debugAsserts && len(s.filling) > 0 {
+			panic(len(s.filling))
+		}
+	}
+	return n, nil
+}
+
+// nextBlock will synchronize and start compressing input in e.state.filling.
+// If an error has occurred during encoding it will be returned.
+func (e *Encoder) nextBlock(final bool) error {
+	s := &e.state
+	// Wait for current block.
+	s.wg.Wait()
+	if s.err != nil {
+		return s.err
+	}
+	if len(s.filling) > e.o.blockSize {
+		return fmt.Errorf("block > maxStoreBlockSize")
+	}
+	if !s.headerWritten {
+		// If we have a single block encode, do a sync compression.
+		if final && len(s.filling) == 0 && !e.o.fullZero {
+			s.headerWritten = true
+			s.fullFrameWritten = true
+			s.eofWritten = true
+			return nil
+		}
+		if final && len(s.filling) > 0 {
+			s.current = e.EncodeAll(s.filling, s.current[:0])
+			var n2 int
+			n2, s.err = s.w.Write(s.current)
+			if s.err != nil {
+				return s.err
+			}
+			s.nWritten += int64(n2)
+			s.nInput += int64(len(s.filling))
+			s.current = s.current[:0]
+			s.filling = s.filling[:0]
+			s.headerWritten = true
+			s.fullFrameWritten = true
+			s.eofWritten = true
+			return nil
+		}
+
+		var tmp [maxHeaderSize]byte
+		fh := frameHeader{
+			ContentSize:   uint64(s.frameContentSize),
+			WindowSize:    uint32(s.encoder.WindowSize(s.frameContentSize)),
+			SingleSegment: false,
+			Checksum:      e.o.crc,
+			DictID:        e.o.dict.ID(),
+		}
+
+		dst, err := fh.appendTo(tmp[:0])
+		if err != nil {
+			return err
+		}
+		s.headerWritten = true
+		s.wWg.Wait()
+		var n2 int
+		n2, s.err = s.w.Write(dst)
+		if s.err != nil {
+			return s.err
+		}
+		s.nWritten += int64(n2)
+	}
+	if s.eofWritten {
+		// Ensure we only write it once.
+		final = false
+	}
+
+	if len(s.filling) == 0 {
+		// Final block, but no data.
+		if final {
+			enc := s.encoder
+			blk := enc.Block()
+			blk.reset(nil)
+			blk.last = true
+			blk.encodeRaw(nil)
+			s.wWg.Wait()
+			_, s.err = s.w.Write(blk.output)
+			s.nWritten += int64(len(blk.output))
+			s.eofWritten = true
+		}
+		return s.err
+	}
+
+	// Move blocks forward.
+	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+	s.nInput += int64(len(s.current))
+	s.wg.Add(1)
+	go func(src []byte) {
+		if debugEncoder {
+			println("Adding block,", len(src), "bytes, final:", final)
+		}
+		defer func() {
+			if r := recover(); r != nil {
+				s.err = fmt.Errorf("panic while encoding: %v", r)
+				rdebug.PrintStack()
+			}
+			s.wg.Done()
+		}()
+		enc := s.encoder
+		blk := enc.Block()
+		enc.Encode(blk, src)
+		blk.last = final
+		if final {
+			s.eofWritten = true
+		}
+		// Wait for pending writes.
+		s.wWg.Wait()
+		if s.writeErr != nil {
+			s.err = s.writeErr
+			return
+		}
+		// Transfer encoders from previous write block.
+		blk.swapEncoders(s.writing)
+		// Transfer recent offsets to next.
+		enc.UseBlock(s.writing)
+		s.writing = blk
+		s.wWg.Add(1)
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r)
+					rdebug.PrintStack()
+				}
+				s.wWg.Done()
+			}()
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+				err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+			}
+			switch err {
+			case errIncompressible:
+				if debugEncoder {
+					println("Storing incompressible block as raw")
+				}
+				blk.encodeRaw(src)
+				// In fast mode, we do not transfer offsets, so we don't have to deal with changing the recent offsets.
+			case nil:
+			default:
+				s.writeErr = err
+				return
+			}
+			_, s.writeErr = s.w.Write(blk.output)
+			s.nWritten += int64(len(blk.output))
+		}()
+	}(s.current)
+	return nil
+}
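+
+// Pipeline note: nextBlock overlaps work using the two wait groups above.
+// s.wg guards the goroutine running enc.Encode on the current block, while
+// s.wWg guards the goroutine that entropy-codes and writes it, so sequence
+// generation for one block can overlap with writing of the previous one.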
+
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
+//
+// io.Copy will use ReadFrom if available.
+func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
+	if debugEncoder {
+		println("Using ReadFrom")
+	}
+
+	// Flush any current writes.
+	if len(e.state.filling) > 0 {
+		if err := e.nextBlock(false); err != nil {
+			return 0, err
+		}
+	}
+	e.state.filling = e.state.filling[:e.o.blockSize]
+	src := e.state.filling
+	for {
+		n2, err := r.Read(src)
+		if e.o.crc {
+			_, _ = e.state.encoder.CRC().Write(src[:n2])
+		}
+		// src is now the unfilled part...
+		src = src[n2:]
+		n += int64(n2)
+		switch err {
+		case io.EOF:
+			e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
+			if debugEncoder {
+				println("ReadFrom: got EOF final block:", len(e.state.filling))
+			}
+			return n, nil
+		case nil:
+		default:
+			if debugEncoder {
+				println("ReadFrom: got error:", err)
+			}
+			e.state.err = err
+			return n, err
+		}
+		if len(src) > 0 {
+			if debugEncoder {
+				println("ReadFrom: got space left in source:", len(src))
+			}
+			continue
+		}
+		err = e.nextBlock(false)
+		if err != nil {
+			return n, err
+		}
+		e.state.filling = e.state.filling[:e.o.blockSize]
+		src = e.state.filling
+	}
+}
+
+// Flush will send the currently written data to output
+// and block until everything has been written.
+// This should only be used on rare occasions where pushing the currently queued data is critical.
+func (e *Encoder) Flush() error {
+	s := &e.state
+	if len(s.filling) > 0 {
+		err := e.nextBlock(false)
+		if err != nil {
+			return err
+		}
+	}
+	s.wg.Wait()
+	s.wWg.Wait()
+	if s.err != nil {
+		return s.err
+	}
+	return s.writeErr
+}
+
+// Close will flush the final output and close the stream.
+// The function will block until everything has been written.
+// The Encoder can still be re-used after calling this.
+func (e *Encoder) Close() error {
+	s := &e.state
+	if s.encoder == nil {
+		return nil
+	}
+	err := e.nextBlock(true)
+	if err != nil {
+		return err
+	}
+	if s.frameContentSize > 0 {
+		if s.nInput != s.frameContentSize {
+			return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput)
+		}
+	}
+	if e.state.fullFrameWritten {
+		return s.err
+	}
+	s.wg.Wait()
+	s.wWg.Wait()
+
+	if s.err != nil {
+		return s.err
+	}
+	if s.writeErr != nil {
+		return s.writeErr
+	}
+
+	// Write CRC
+	if e.o.crc && s.err == nil {
+		// heap alloc.
+		var tmp [4]byte
+		_, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0]))
+		s.nWritten += 4
+	}
+
+	// Add padding with content from crypto/rand.Reader
+	if s.err == nil && e.o.pad > 0 {
+		add := calcSkippableFrame(s.nWritten, int64(e.o.pad))
+		frame, err := skippableFrame(s.filling[:0], add, rand.Reader)
+		if err != nil {
+			return err
+		}
+		_, s.err = s.w.Write(frame)
+	}
+	return s.err
+}
+
+// EncodeAll will encode all input in src and append it to dst.
+// This function can be called concurrently, but each call will only run on a single goroutine.
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
+// Encoded blocks can be concatenated and the result will be the combined input stream.
+// Data compressed with EncodeAll can be decoded with the Decoder,
+// using either a stream or DecodeAll.
+func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+	if len(src) == 0 {
+		if e.o.fullZero {
+			// Add frame header.
+			fh := frameHeader{
+				ContentSize:   0,
+				WindowSize:    MinWindowSize,
+				SingleSegment: true,
+				// Adding a checksum would be a waste of space.
+				Checksum: false,
+				DictID:   0,
+			}
+			dst, _ = fh.appendTo(dst)
+
+			// Write raw block as last one only.
+			var blk blockHeader
+			blk.setSize(0)
+			blk.setType(blockTypeRaw)
+			blk.setLast(true)
+			dst = blk.appendTo(dst)
+		}
+		return dst
+	}
+	e.init.Do(e.initialize)
+	enc := <-e.encoders
+	defer func() {
+		// Release encoder reference to last block.
+		// If a non-single block is needed the encoder will reset again.
+		e.encoders <- enc
+	}()
+	// Use single segments when above minimum window and below 1MB.
+	single := len(src) < 1<<20 && len(src) > MinWindowSize
+	if e.o.single != nil {
+		single = *e.o.single
+	}
+	fh := frameHeader{
+		ContentSize:   uint64(len(src)),
+		WindowSize:    uint32(enc.WindowSize(int64(len(src)))),
+		SingleSegment: single,
+		Checksum:      e.o.crc,
+		DictID:        e.o.dict.ID(),
+	}
+
+	// If less than 1MB, allocate a buffer up front.
+	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
+		dst = make([]byte, 0, len(src))
+	}
+	dst, err := fh.appendTo(dst)
+	if err != nil {
+		panic(err)
+	}
+
+	// If we can do everything in one block, prefer that.
+	if len(src) <= maxCompressedBlockSize {
+		enc.Reset(e.o.dict, true)
+		// Slightly faster with no history and everything in one block.
+		if e.o.crc {
+			_, _ = enc.CRC().Write(src)
+		}
+		blk := enc.Block()
+		blk.last = true
+		if e.o.dict == nil {
+			enc.EncodeNoHist(blk, src)
+		} else {
+			enc.Encode(blk, src)
+		}
+
+		// If we got the exact same number of literals as input,
+		// assume the literals cannot be compressed.
+		err := errIncompressible
+		oldout := blk.output
+		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+			// Output directly to dst
+			blk.output = dst
+			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		}
+
+		switch err {
+		case errIncompressible:
+			if debugEncoder {
+				println("Storing incompressible block as raw")
+			}
+			dst = blk.encodeRawTo(dst, src)
+		case nil:
+			dst = blk.output
+		default:
+			panic(err)
+		}
+		blk.output = oldout
+	} else {
+		enc.Reset(e.o.dict, false)
+		blk := enc.Block()
+		for len(src) > 0 {
+			todo := src
+			if len(todo) > e.o.blockSize {
+				todo = todo[:e.o.blockSize]
+			}
+			src = src[len(todo):]
+			if e.o.crc {
+				_, _ = enc.CRC().Write(todo)
+			}
+			blk.pushOffsets()
+			enc.Encode(blk, todo)
+			if len(src) == 0 {
+				blk.last = true
+			}
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+				err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+			}
+
+			switch err {
+			case errIncompressible:
+				if debugEncoder {
+					println("Storing incompressible block as raw")
+				}
+				dst = blk.encodeRawTo(dst, todo)
+				blk.popOffsets()
+			case nil:
+				dst = append(dst, blk.output...)
+			default:
+				panic(err)
+			}
+			blk.reset(nil)
+		}
+	}
+	if e.o.crc {
+		dst = enc.AppendCRC(dst)
+	}
+	// Add padding with content from crypto/rand.Reader
+	if e.o.pad > 0 {
+		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		dst, err = skippableFrame(dst, add, rand.Reader)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return dst
+}
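+
+// A minimal EncodeAll sketch (illustrative; messages is an assumed [][]byte).
+// A nil writer suffices when only EncodeAll is used, and reusing the
+// destination buffer avoids allocations:
+//
+//	enc, _ := NewWriter(nil)
+//	var buf []byte
+//	for _, msg := range messages {
+//		buf = enc.EncodeAll(msg, buf[:0])
+//		// consume buf before the next iteration overwrites it
+//	}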
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d29e1d689eefbc249ebd41d032c317a63da2979
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,312 @@
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains the accumulated state of multiple options.
+type encoderOptions struct {
+	concurrent      int
+	level           EncoderLevel
+	single          *bool
+	pad             int
+	blockSize       int
+	windowSize      int
+	crc             bool
+	fullZero        bool
+	noEntropy       bool
+	allLitEntropy   bool
+	customWindow    bool
+	customALEntropy bool
+	lowMem          bool
+	dict            *dict
+}
+
+func (o *encoderOptions) setDefault() {
+	*o = encoderOptions{
+		concurrent:    runtime.GOMAXPROCS(0),
+		crc:           true,
+		single:        nil,
+		blockSize:     1 << 16,
+		windowSize:    8 << 20,
+		level:         SpeedDefault,
+		allLitEntropy: true,
+		lowMem:        false,
+	}
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+	switch o.level {
+	case SpeedFastest:
+		if o.dict != nil {
+			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+
+	case SpeedDefault:
+		if o.dict != nil {
+			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+		}
+		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+	case SpeedBetterCompression:
+		if o.dict != nil {
+			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+	case SpeedBestCompression:
+		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+	}
+	panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+	return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithEncoderConcurrency(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case n < MinWindowSize:
+			return fmt.Errorf("window size must be at least %d", MinWindowSize)
+		case n > MaxWindowSize:
+			return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+		case (n & (n - 1)) != 0:
+			return errors.New("window size must be a power of 2")
+		}
+
+		o.windowSize = n
+		o.customWindow = true
+		if o.blockSize > o.windowSize {
+			o.blockSize = o.windowSize
+		}
+		return nil
+	}
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			n = 0
+		}
+		if n > 1<<30 {
+			return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+		}
+		o.pad = n
+		return nil
+	}
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change, and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+	speedNotSet EncoderLevel = iota
+
+	// SpeedFastest will choose the fastest reasonable compression.
+	// This is roughly equivalent to the fastest Zstandard mode.
+	SpeedFastest
+
+	// SpeedDefault is the default "pretty fast" compression option.
+	// This is roughly equivalent to the default Zstandard mode (level 3).
+	SpeedDefault
+
+	// SpeedBetterCompression will yield better compression than the default.
+	// Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+	// By using this, notice that CPU usage may go up in the future.
+	SpeedBetterCompression
+
+	// SpeedBestCompression will choose the best available compression option.
+	// This will offer the best compression no matter the CPU cost.
+	SpeedBestCompression
+
+	// speedLast should be kept as the last actual compression option.
+// This is not for external usage, but is used to keep track of the valid options.
+	speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+	for l := speedNotSet + 1; l < speedLast; l++ {
+		if strings.EqualFold(s, l.String()) {
+			return true, l
+		}
+	}
+	return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return an encoder level that closest matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+	switch {
+	case level < 3:
+		return SpeedFastest
+	case level >= 3 && level < 6:
+		return SpeedDefault
+	case level >= 6 && level < 10:
+		return SpeedBetterCompression
+	case level >= 10:
+		return SpeedBestCompression
+	}
+	return SpeedDefault
+}
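Both helpers above are exported, so callers outside the package can map user input onto a level. A minimal usage sketch (the flag value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Map a user-supplied string to a level; SpeedDefault is returned when unrecognized.
	if ok, level := zstd.EncoderLevelFromString("better"); ok {
		fmt.Println("selected:", level) // selected: better
	}
	// Map a numeric zstd level (e.g. from a CLI flag) to the nearest predefined level.
	fmt.Println(zstd.EncoderLevelFromZstd(7)) // better
}
```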
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+	switch e {
+	case SpeedFastest:
+		return "fastest"
+	case SpeedDefault:
+		return "default"
+	case SpeedBetterCompression:
+		return "better"
+	case SpeedBestCompression:
+		return "best"
+	default:
+		return "invalid"
+	}
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case l <= speedNotSet || l >= speedLast:
+			return fmt.Errorf("unknown encoder level")
+		}
+		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 16 << 20
+			case SpeedBestCompression:
+				o.windowSize = 32 << 20
+			}
+		}
+		if !o.customALEntropy {
+			o.allLitEntropy = l > SpeedFastest
+		}
+
+		return nil
+	}
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.fullZero = b
+		return nil
+	}
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but compression is lost
+// for input with no matches yet a skewed character distribution.
+// Default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.customALEntropy = true
+		o.allLitEntropy = b
+		return nil
+	}
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content.
+// In order to preserve the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.single = &b
+		return nil
+	}
+}
+
+// WithLowerEncoderMem will, in some cases, trade less memory usage for
+// slower encoding speed.
+// This will not change the window size which is the primary function for reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.lowMem = b
+		return nil
+	}
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+func WithEncoderDict(dict []byte) EOption {
+	return func(o *encoderOptions) error {
+		d, err := loadDict(dict)
+		if err != nil {
+			return err
+		}
+		o.dict = d
+		return nil
+	}
+}
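These options are consumed by the package's NewWriter constructor (defined in encoder.go of the same vendored library). A minimal sketch combining a few of them; the window size and padding values are illustrative, not recommendations:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithWindowSize(1<<20),    // 1 MB window; must be a power of two
		zstd.WithEncoderPadding(4096), // pad output to a 4 KiB multiple via a skippable frame
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	// Stateless one-shot compression; dst is allocated by EncodeAll when nil.
	compressed := enc.EncodeAll([]byte("hello, hello, hello"), nil)
	fmt.Println("padded to 4096:", len(compressed)%4096 == 0) // true
}
```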
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 0000000000000000000000000000000000000000..989c79f8c3150e9afb63322fb481a7dfa05faf10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,521 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"hash"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+	o      decoderOptions
+	crc    hash.Hash64
+	offset int64
+
+	WindowSize uint64
+
+	// In order queue of blocks being decoded.
+	decoding chan *blockDec
+
+	// Frame history passed between blocks
+	history history
+
+	rawInput byteBuffer
+
+	// Byte buffer that can be reused for small input blocks.
+	bBuf byteBuf
+
+	FrameContentSize uint64
+	frameDone        sync.WaitGroup
+
+	DictionaryID  *uint32
+	HasCheckSum   bool
+	SingleSegment bool
+
+	// asyncRunning indicates whether the async routine processes input on 'decoding'.
+	asyncRunningMu sync.Mutex
+	asyncRunning   bool
+}
+
+const (
+	// MinWindowSize is the minimum Window Size, which is 1 KB.
+	MinWindowSize = 1 << 10
+
+	// MaxWindowSize is the maximum encoder window size
+	// and the default decoder maximum window size.
+	MaxWindowSize = 1 << 29
+)
+
+var (
+	frameMagic          = []byte{0x28, 0xb5, 0x2f, 0xfd}
+	skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+	if o.maxWindowSize > o.maxDecodedSize {
+		o.maxWindowSize = o.maxDecodedSize
+	}
+	d := frameDec{
+		o: o,
+	}
+	return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error {
+	d.HasCheckSum = false
+	d.WindowSize = 0
+	var signature [4]byte
+	for {
+		var err error
+		// Check if we can read more...
+		b, err := br.readSmall(1)
+		switch err {
+		case io.EOF, io.ErrUnexpectedEOF:
+			return io.EOF
+		default:
+			return err
+		case nil:
+			signature[0] = b[0]
+		}
+		// Read the rest, don't allow io.ErrUnexpectedEOF
+		b, err = br.readSmall(3)
+		switch err {
+		case io.EOF:
+			return io.EOF
+		default:
+			return err
+		case nil:
+			copy(signature[1:], b)
+		}
+
+		if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
+			if debugDecoder {
+				println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
+			}
+			// Break if not skippable frame.
+			break
+		}
+		// Read size to skip
+		b, err = br.readSmall(4)
+		if err != nil {
+			if debugDecoder {
+				println("Reading Frame Size", err)
+			}
+			return err
+		}
+		n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		if debugDecoder {
+			println("Skipping frame with", n, "bytes.")
+		}
+		err = br.skipN(int(n))
+		if err != nil {
+			if debugDecoder {
+				println("Reading discarded frame", err)
+			}
+			return err
+		}
+	}
+	if !bytes.Equal(signature[:], frameMagic) {
+		if debugDecoder {
+			println("Got magic numbers: ", signature, "want:", frameMagic)
+		}
+		return ErrMagicMismatch
+	}
+
+	// Read Frame_Header_Descriptor
+	fhd, err := br.readByte()
+	if err != nil {
+		if debugDecoder {
+			println("Reading Frame_Header_Descriptor", err)
+		}
+		return err
+	}
+	d.SingleSegment = fhd&(1<<5) != 0
+
+	if fhd&(1<<3) != 0 {
+		return errors.New("reserved bit set on frame header")
+	}
+
+	// Read Window_Descriptor
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+	d.WindowSize = 0
+	if !d.SingleSegment {
+		wd, err := br.readByte()
+		if err != nil {
+			if debugDecoder {
+				println("Reading Window_Descriptor", err)
+			}
+			return err
+		}
+		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+		windowLog := 10 + (wd >> 3)
+		windowBase := uint64(1) << windowLog
+		windowAdd := (windowBase / 8) * uint64(wd&0x7)
+		d.WindowSize = windowBase + windowAdd
+	}
+
+	// Read Dictionary_ID
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+	d.DictionaryID = nil
+	if size := fhd & 3; size != 0 {
+		if size == 3 {
+			size = 4
+		}
+
+		b, err := br.readSmall(int(size))
+		if err != nil {
+			println("Reading Dictionary_ID", err)
+			return err
+		}
+		var id uint32
+		switch size {
+		case 1:
+			id = uint32(b[0])
+		case 2:
+			id = uint32(b[0]) | (uint32(b[1]) << 8)
+		case 4:
+			id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		}
+		if debugDecoder {
+			println("Dict size", size, "ID:", id)
+		}
+		if id > 0 {
+			// ID 0 means "sorry, no dictionary anyway".
+			// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+			d.DictionaryID = &id
+		}
+	}
+
+	// Read Frame_Content_Size
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+	var fcsSize int
+	v := fhd >> 6
+	switch v {
+	case 0:
+		if d.SingleSegment {
+			fcsSize = 1
+		}
+	default:
+		fcsSize = 1 << v
+	}
+	d.FrameContentSize = 0
+	if fcsSize > 0 {
+		b, err := br.readSmall(fcsSize)
+		if err != nil {
+			println("Reading Frame content", err)
+			return err
+		}
+		switch fcsSize {
+		case 1:
+			d.FrameContentSize = uint64(b[0])
+		case 2:
+			// When FCS_Field_Size is 2, the offset of 256 is added.
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
+		case 4:
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+		case 8:
+			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+		}
+		if debugDecoder {
+			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+		}
+	}
+	// Move this to shared.
+	d.HasCheckSum = fhd&(1<<2) != 0
+	if d.HasCheckSum {
+		if d.crc == nil {
+			d.crc = xxhash.New()
+		}
+		d.crc.Reset()
+	}
+
+	if d.WindowSize == 0 && d.SingleSegment {
+		// We may not need window in this case.
+		d.WindowSize = d.FrameContentSize
+		if d.WindowSize < MinWindowSize {
+			d.WindowSize = MinWindowSize
+		}
+	}
+
+	if d.WindowSize > uint64(d.o.maxWindowSize) {
+		if debugDecoder {
+			printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+		}
+		return ErrWindowSizeExceeded
+	}
+	// The minimum Window_Size is 1 KB.
+	if d.WindowSize < MinWindowSize {
+		if debugDecoder {
+			println("got window size: ", d.WindowSize)
+		}
+		return ErrWindowSizeTooSmall
+	}
+	d.history.windowSize = int(d.WindowSize)
+	if d.o.lowMem && d.history.windowSize < maxBlockSize {
+		d.history.maxSize = d.history.windowSize * 2
+	} else {
+		d.history.maxSize = d.history.windowSize + maxBlockSize
+	}
+	// history contains input - maybe we do something
+	d.rawInput = br
+	return nil
+}
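The Window_Descriptor byte above packs a 5-bit exponent (biased by 10) and a 3-bit mantissa of eighths. A standalone sketch of just that arithmetic, mirroring the lines in reset, makes the format easy to check:

```go
// decodeWindowDescriptor mirrors the Window_Descriptor math in (*frameDec).reset.
func decodeWindowDescriptor(wd byte) uint64 {
	windowLog := 10 + (wd >> 3)                    // exponent, biased by 10
	windowBase := uint64(1) << windowLog           // 2^windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7) // mantissa adds eighths of the base
	return windowBase + windowAdd
}

// decodeWindowDescriptor(0x00) == 1024 (the 1 KB minimum)
// decodeWindowDescriptor(0x09) == 2048 + 256 (2 KB base plus one eighth)
```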
+
+// next will start decoding the next block from stream.
+func (d *frameDec) next(block *blockDec) error {
+	if debugDecoder {
+		printf("decoding new block %p:%p", block, block.data)
+	}
+	err := block.reset(d.rawInput, d.WindowSize)
+	if err != nil {
+		println("block error:", err)
+		// Signal the frame decoder we have a problem.
+		d.sendErr(block, err)
+		return err
+	}
+	block.input <- struct{}{}
+	if debugDecoder {
+		println("next block:", block)
+	}
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return nil
+	}
+	if block.Last {
+		// We indicate the frame is done by sending io.EOF
+		d.decoding <- block
+		return io.EOF
+	}
+	d.decoding <- block
+	return nil
+}
+
+// sendErr will queue an error block on the frame.
+// This will cause the frame decoder to return when it encounters the block.
+// Returns true if the decoder was added.
+func (d *frameDec) sendErr(block *blockDec, err error) bool {
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return false
+	}
+
+	println("sending error", err.Error())
+	block.sendErr(err)
+	d.decoding <- block
+	return true
+}
+
+// checkCRC will check the checksum if the frame has one.
+// Will return ErrCRCMismatch if crc check failed, otherwise nil.
+func (d *frameDec) checkCRC() error {
+	if !d.HasCheckSum {
+		return nil
+	}
+	var tmp [4]byte
+	got := d.crc.Sum64()
+	// Flip to match file order.
+	tmp[0] = byte(got >> 0)
+	tmp[1] = byte(got >> 8)
+	tmp[2] = byte(got >> 16)
+	tmp[3] = byte(got >> 24)
+
+	// We can overwrite upper tmp now
+	want, err := d.rawInput.readSmall(4)
+	if err != nil {
+		println("CRC missing?", err)
+		return err
+	}
+
+	if !bytes.Equal(tmp[:], want) {
+		if debugDecoder {
+			println("CRC Check Failed:", tmp[:], "!=", want)
+		}
+		return ErrCRCMismatch
+	}
+	if debugDecoder {
+		println("CRC ok", tmp[:])
+	}
+	return nil
+}
+
+func (d *frameDec) initAsync() {
+	if !d.o.lowMem && !d.SingleSegment {
+		// set max extra size history to 2MB.
+		d.history.maxSize = d.history.windowSize + maxBlockSize
+	}
+	// re-alloc if more than one extra block size.
+	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.history.b) < d.history.maxSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.decoding) < d.o.concurrent {
+		d.decoding = make(chan *blockDec, d.o.concurrent)
+	}
+	if debugDecoder {
+		h := d.history
+		printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
+	}
+	d.asyncRunningMu.Lock()
+	d.asyncRunning = true
+	d.asyncRunningMu.Unlock()
+}
+
+// startDecoder will start decoding blocks and write them to the writer.
+// The decoder will stop as soon as an error occurs or at end of frame.
+// When the frame has finished decoding, frameDec.frameDone is signalled.
+func (d *frameDec) startDecoder(output chan decodeOutput) {
+	written := int64(0)
+
+	defer func() {
+		d.asyncRunningMu.Lock()
+		d.asyncRunning = false
+		d.asyncRunningMu.Unlock()
+
+		// Drain the currently decoding.
+		d.history.error = true
+	flushdone:
+		for {
+			select {
+			case b := <-d.decoding:
+				b.history <- &d.history
+				output <- <-b.result
+			default:
+				break flushdone
+			}
+		}
+		println("frame decoder done, signalling done")
+		d.frameDone.Done()
+	}()
+	// Get decoder for first block.
+	block := <-d.decoding
+	block.history <- &d.history
+	for {
+		var next *blockDec
+		// Get result
+		r := <-block.result
+		if r.err != nil {
+			println("Result contained error", r.err)
+			output <- r
+			return
+		}
+		if debugDecoder {
+			println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
+			d.offset += int64(len(r.b))
+		}
+		if !block.Last {
+			// Send history to next block
+			select {
+			case next = <-d.decoding:
+				if debugDecoder {
+					println("Sending ", len(d.history.b), "bytes as history")
+				}
+				next.history <- &d.history
+			default:
+				// Wait until we have sent the block, so
+				// other decoders can potentially get the decoder.
+				next = nil
+			}
+		}
+
+		// Add checksum, async to decoding.
+		if d.HasCheckSum {
+			n, err := d.crc.Write(r.b)
+			if err != nil {
+				r.err = err
+				if n != len(r.b) {
+					r.err = io.ErrShortWrite
+				}
+				output <- r
+				return
+			}
+		}
+		written += int64(len(r.b))
+		if d.SingleSegment && uint64(written) > d.FrameContentSize {
+			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
+			r.err = ErrFrameSizeExceeded
+			output <- r
+			return
+		}
+		if block.Last {
+			r.err = d.checkCRC()
+			output <- r
+			return
+		}
+		output <- r
+		if next == nil {
+			// There was no decoder available, we wait for one now that we have sent to the writer.
+			if debugDecoder {
+				println("Sending ", len(d.history.b), " bytes as history")
+			}
+			next = <-d.decoding
+			next.history <- &d.history
+		}
+		block = next
+	}
+}
+
+// runDecoder will create a sync decoder that will decode a block of data.
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+	saved := d.history.b
+
+	// We use the history for output to avoid copying it.
+	d.history.b = dst
+	// Store input length, so we only check new data.
+	crcStart := len(dst)
+	var err error
+	for {
+		err = dec.reset(d.rawInput, d.WindowSize)
+		if err != nil {
+			break
+		}
+		if debugDecoder {
+			println("next block:", dec)
+		}
+		err = dec.decodeBuf(&d.history)
+		if err != nil || dec.Last {
+			break
+		}
+		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+			err = ErrFrameSizeExceeded
+			break
+		}
+	}
+	dst = d.history.b
+	if err == nil {
+		if d.HasCheckSum {
+			var n int
+			n, err = d.crc.Write(dst[crcStart:])
+			if err == nil {
+				if n != len(dst)-crcStart {
+					err = io.ErrShortWrite
+				} else {
+					err = d.checkCRC()
+				}
+			}
+		}
+	}
+	d.history.b = saved
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ef7f5a3e3d53a9c6d909a63ef97e8d499f80c66
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,137 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"math/bits"
+)
+
+type frameHeader struct {
+	ContentSize   uint64
+	WindowSize    uint32
+	SingleSegment bool
+	Checksum      bool
+	DictID        uint32
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+	dst = append(dst, frameMagic...)
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+
+	var dictIDContent []byte
+	if f.DictID > 0 {
+		var tmp [4]byte
+		if f.DictID < 256 {
+			fhd |= 1
+			tmp[0] = uint8(f.DictID)
+			dictIDContent = tmp[:1]
+		} else if f.DictID < 1<<16 {
+			fhd |= 2
+			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+			dictIDContent = tmp[:2]
+		} else {
+			fhd |= 3
+			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+			dictIDContent = tmp[:4]
+		}
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+	if f.DictID > 0 {
+		dst = append(dst, dictIDContent...)
+	}
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
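The fcs code above selects the Frame_Content_Size field width by thresholds: code 0 stores one byte (only with SingleSegment), code 1 stores two bytes minus a 256 offset, codes 2 and 3 store four and eight bytes. A hedged standalone sketch of the selection:

```go
// fcsCode mirrors the threshold cascade in frameHeader.appendTo.
func fcsCode(contentSize uint64) uint8 {
	var fcs uint8
	if contentSize >= 256 {
		fcs++
	}
	if contentSize >= 65536+256 {
		fcs++
	}
	if contentSize >= 0xffffffff {
		fcs++
	}
	return fcs
}

// fcsCode(100) == 0, fcsCode(1000) == 1, fcsCode(1<<20) == 2, fcsCode(1<<40) == 3
```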
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that written
+// is divisible by wantMultiple.
+// The value will be 0, or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
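A worked check of the padding arithmetic, reimplemented standalone for illustration: the skippable frame always needs 8 header bytes, so when the gap to the next multiple is smaller than that, a whole extra multiple is added.

```go
// calcPad mirrors calcSkippableFrame above.
func calcPad(written, wantMultiple int64) int {
	leftOver := written % wantMultiple
	if leftOver == 0 {
		return 0
	}
	toAdd := wantMultiple - leftOver
	for toAdd < 8 { // skippableFrameHeader: 4 magic bytes + 4 size bytes
		toAdd += wantMultiple
	}
	return int(toAdd)
}

// calcPad(100, 64) == 28 -> 128, a multiple of 64
// calcPad(62, 64) == 66  -> the 2-byte gap cannot hold the 8-byte header
```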
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6d3d49b39c0e96d0c9b54a258f3c908e882b201
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,385 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 9
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+	 *  Increasing memory usage improves compression ratio
+	 *  Reduced memory usage can improve speed, due to cache effect
+	 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+	maxMemoryUsage = tablelogAbsoluteMax + 2
+
+	maxTableLog    = maxMemoryUsage - 2
+	maxTablesize   = 1 << maxTableLog
+	maxTableMask   = (1 << maxTableLog) - 1
+	minTablelog    = 5
+	maxSymbolValue = 255
+)
+
+// fseDecoder provides temporary storage for compression and decompression.
+type fseDecoder struct {
+	dt             [maxTablesize]decSymbol // Decompression table.
+	symbolLen      uint16                  // Length of active part of the symbol table.
+	actualTableLog uint8                   // Selected tablelog.
+	maxBits        uint8                   // Maximum number of additional bits
+
+	// used for table creation to avoid allocations.
+	stateTable [256]uint16
+	norm       [maxSymbolValue + 1]int16
+	preDefined bool
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
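For a 64-entry table the step is (64>>1)+(64>>3)+3 = 43; for the table sizes used here the step is odd, hence coprime to the power-of-two table size, so repeated addition modulo the size visits every cell exactly once. A quick illustrative check:

```go
package main

import "fmt"

func main() {
	const tableSize, step = 64, 43 // tableStep(64) == 43
	seen := map[uint32]bool{}
	pos := uint32(0)
	for i := 0; i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & (tableSize - 1)
	}
	fmt.Println(len(seen) == tableSize, pos == 0) // true true
}
```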
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+	var (
+		charnum   uint16
+		previous0 bool
+	)
+	if b.remain() < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32NC()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		println("Invalid tablelog:", nbBits)
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 && charnum <= maxSymbol {
+		if previous0 {
+			//println("prev0")
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				//println("24 x 0")
+				n0 += 24
+				if r := b.remain(); r > 5 {
+					b.advance(2)
+					// The check above should make sure we can read 32 bits
+					bitStream = b.Uint32NC() >> bitCount
+				} else {
+					// end of bit stream
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+			for charnum < n0 {
+				s.norm[uint8(charnum)] = 0
+				charnum++
+			}
+
+			if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				// The check above should make sure we can read 32 bits
+				bitStream = b.Uint32NC() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*threshold - 1) - remaining
+		var count int32
+
+		if int32(bitStream)&(threshold-1) < max {
+			count = int32(bitStream) & (threshold - 1)
+			if debugAsserts && nbBits < 1 {
+				panic("nbBits underflow")
+			}
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		// extra accuracy
+		count--
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	// println(s.norm[:s.symbolLen], s.symbolLen)
+	return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baseline() uint32 {
+	return uint32(d >> 32)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
+	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setBaseline(baseline uint32) {
+	const mask = 0xffffffff
+	*d = (*d & mask) | decSymbol(baseline)<<32
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
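The packing above is: bits 0-7 nbBits, 8-15 addBits, 16-31 newState, 32-63 baseline. A small round-trip sketch of that layout:

```go
package main

import "fmt"

// packed mirrors decSymbol: one uint64 instead of a four-field struct.
type packed uint64

func pack(nbits, addBits uint8, newState uint16, baseline uint32) packed {
	return packed(nbits) | packed(addBits)<<8 | packed(newState)<<16 | packed(baseline)<<32
}

func main() {
	d := pack(5, 3, 1024, 99)
	// Unpack each field the same way the accessors above do.
	fmt.Println(uint8(d), uint8(d>>8), uint16(d>>16), uint32(d>>32)) // 5 3 1024 99
}
```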
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+	s.actualTableLog = 0
+	s.maxBits = symbol.addBits()
+	s.dt[0] = symbol
+}
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	symbolNext := s.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	{
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.dt[highThreshold].setAddBits(uint8(i))
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.dt[position].setAddBits(uint8(ss))
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.dt[:tableSize] {
+			symbol := v.addBits()
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.dt[u&maxTableMask].setNBits(nBits)
+			newState := (nextState << nBits) - tableSize
+			if newState > tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.dt[u&maxTableMask].setNewState(newState)
+		}
+	}
+	return nil
+}
+
+// transform will transform the decoder table into a table usable for
+// decoding without having to apply the transformation while decoding.
+// The state will contain the base value and the number of bits to read.
+func (s *fseDecoder) transform(t []baseOffset) error {
+	tableSize := uint16(1 << s.actualTableLog)
+	s.maxBits = 0
+	for i, v := range s.dt[:tableSize] {
+		add := v.addBits()
+		if int(add) >= len(t) {
+			return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
+		}
+		lu := t[add]
+		if lu.addBits > s.maxBits {
+			s.maxBits = lu.addBits
+		}
+		v.setExt(lu.addBits, lu.baseLine)
+		s.dt[i] = v
+	}
+	return nil
+}
+
+type fseState struct {
+	dt    []decSymbol
+	state decSymbol
+}
+
+// Initialize and decodeAsync first state and symbol.
+func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
+	s.dt = dt
+	br.fill()
+	s.state = dt[br.getBits(tableLog)]
+}
+
+// next returns the current symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) next(br *bitReader) {
+	lowBits := uint16(br.getBits(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (s *fseState) finished(br *bitReader) bool {
+	return br.finished() && s.state.nbBits() > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (s *fseState) final() (int, uint8) {
+	return s.state.baselineInt(), s.state.addBits()
+}
+
+// final returns the current state symbol without decoding the next.
+func (s decSymbol) final() (int, uint8) {
+	return s.baselineInt(), s.addBits()
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
+	lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+	return s.state.baseline(), s.state.addBits()
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..b4757ee3f03bd5ef517e332724017c8803aabe31
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,725 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// For encoding we only support up to
+	maxEncTableLog    = 8
+	maxEncTablesize   = 1 << maxTableLog
+	maxEncTableMask   = (1 << maxTableLog) - 1
+	minEncTablelog    = 5
+	maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type fseEncoder struct {
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	ct             cTable // Compression tables.
+	maxCount       int    // count of the most probable symbol
+	zeroBits       bool   // no bits have prob > 50%.
+	clearCount     bool   // clear count
+	useRLE         bool   // This encoder is for RLE
+	preDefined     bool   // This encoder is predefined.
+	reUsed         bool   // Set to know when the encoder has been reused.
+	rleVal         uint8  // RLE Symbol
+	maxBits        uint8  // Maximum output bits after transform.
+
+	// TODO: Technically zstd should be fine with 64 bytes.
+	count [256]uint32
+	norm  [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaNbBits    uint32
+	deltaFindState int16
+	outBits        uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram allows populating the histogram and skipping that step in the compression.
+// It otherwise allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *fseEncoder) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *fseEncoder) prepare() (*fseEncoder, error) {
+	if s == nil {
+		s = &fseEncoder{}
+	}
+	s.useRLE = false
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	return s, nil
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for ui, v := range s.norm[:s.symbolLen] {
+			symbol := byte(ui)
+			if v > largeLimit {
+				s.zeroBits = true
+			}
+			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+				tableSymbol[position] = symbol
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					position = (position + step) & tableMask
+				} /* Low proba area */
+			}
+		}
+
+		// Check if we have gone through all positions
+		if position != 0 {
+			return errors.New("position!=0")
+		}
+	}
+
+	// Build table
+	table := s.ct.stateTable
+	{
+		tsi := int(tableSize)
+		for u, v := range tableSymbol {
+			// TableU16 : sorted by symbol order; gives next state value
+			table[cumul[v]] = uint16(tsi + u)
+			cumul[v]++
+		}
+	}
+
+	// Build Symbol Transformation Table
+	{
+		total := int16(0)
+		symbolTT := s.ct.symbolTT[:s.symbolLen]
+		tableLog := s.actualTableLog
+		tl := (uint32(tableLog) << 16) - (1 << tableLog)
+		for i, v := range s.norm[:s.symbolLen] {
+			switch v {
+			case 0:
+			case -1, 1:
+				symbolTT[i].deltaNbBits = tl
+				symbolTT[i].deltaFindState = total - 1
+				total++
+			default:
+				maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
+				minStatePlus := uint32(v) << maxBitsOut
+				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+				symbolTT[i].deltaFindState = total - v
+				total += v
+			}
+		}
+		if total != int16(tableSize) {
+			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+		}
+	}
+	return nil
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+func (s *fseEncoder) setRLE(val byte) {
+	s.allocCtable()
+	s.actualTableLog = 0
+	s.ct.stateTable = s.ct.stateTable[:1]
+	s.ct.symbolTT[val] = symbolTransform{
+		deltaFindState: 0,
+		deltaNbBits:    0,
+	}
+	if debugEncoder {
+		println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
+	}
+	s.rleVal = val
+	s.useRLE = true
+}
+
+// setBits will set output bits for the transform.
+// If nil is provided, the number of bits is equal to the index.
+func (s *fseEncoder) setBits(transform []byte) {
+	if s.reUsed || s.preDefined {
+		return
+	}
+	if s.useRLE {
+		if transform == nil {
+			s.ct.symbolTT[s.rleVal].outBits = s.rleVal
+			s.maxBits = s.rleVal
+			return
+		}
+		s.maxBits = transform[s.rleVal]
+		s.ct.symbolTT[s.rleVal].outBits = s.maxBits
+		return
+	}
+	if transform == nil {
+		for i := range s.ct.symbolTT[:s.symbolLen] {
+			s.ct.symbolTT[i].outBits = uint8(i)
+		}
+		s.maxBits = uint8(s.symbolLen - 1)
+		return
+	}
+	s.maxBits = 0
+	for i, v := range transform[:s.symbolLen] {
+		s.ct.symbolTT[i].outBits = v
+		if v > s.maxBits {
+			// We could assume bits always go up, but we play it safe.
+			s.maxBits = v
+		}
+	}
+}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+// If successful, compression tables will also be made ready.
+func (s *fseEncoder) normalizeCount(length int) error {
+	if s.reUsed {
+		return nil
+	}
+	s.optimalTableLog(length)
+	var (
+		tableLog          = s.actualTableLog
+		scale             = 62 - uint64(tableLog)
+		step              = (1 << 62) / uint64(length)
+		vStep             = uint64(1) << (scale - 20)
+		stillToDistribute = int16(1 << tableLog)
+		largest           int
+		largestP          int16
+		lowThreshold      = (uint32)(length >> tableLog)
+	)
+	if s.maxCount == length {
+		s.useRLE = true
+		return nil
+	}
+	s.useRLE = false
+	for i, cnt := range s.count[:s.symbolLen] {
+		// already handled
+		// if (count[s] == s.length) return 0;   /* rle special case */
+
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			stillToDistribute--
+		} else {
+			proba := (int16)((uint64(cnt) * step) >> scale)
+			if proba < 8 {
+				restToBeat := vStep * uint64(rtbTable[proba])
+				v := uint64(cnt)*step - (uint64(proba) << scale)
+				if v > restToBeat {
+					proba++
+				}
+			}
+			if proba > largestP {
+				largestP = proba
+				largest = i
+			}
+			s.norm[i] = proba
+			stillToDistribute -= proba
+		}
+	}
+
+	if -stillToDistribute >= (s.norm[largest] >> 1) {
+		// corner case, need another normalization method
+		err := s.normalizeCount2(length)
+		if err != nil {
+			return err
+		}
+		if debugAsserts {
+			err = s.validateNorm()
+			if err != nil {
+				return err
+			}
+		}
+		return s.buildCTable()
+	}
+	s.norm[largest] += stillToDistribute
+	if debugAsserts {
+		err := s.validateNorm()
+		if err != nil {
+			return err
+		}
+	}
+	return s.buildCTable()
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *fseEncoder) normalizeCount2(length int) error {
+	const notYetAssigned = -2
+	var (
+		distributed  uint32
+		total        = uint32(length)
+		tableLog     = s.actualTableLog
+		lowThreshold = total >> tableLog
+		lowOne       = (total * 3) >> (tableLog + 1)
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			distributed++
+			total -= cnt
+			continue
+		}
+		if cnt <= lowOne {
+			s.norm[i] = 1
+			distributed++
+			total -= cnt
+			continue
+		}
+		s.norm[i] = notYetAssigned
+	}
+	toDistribute := (1 << tableLog) - distributed
+
+	if (total / toDistribute) > lowOne {
+		// risk of rounding to zero
+		lowOne = (total * 3) / (toDistribute * 2)
+		for i, cnt := range s.count[:s.symbolLen] {
+			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+				s.norm[i] = 1
+				distributed++
+				total -= cnt
+				continue
+			}
+		}
+		toDistribute = (1 << tableLog) - distributed
+	}
+	if distributed == uint32(s.symbolLen)+1 {
+		// all values are pretty poor;
+		//   probably incompressible data (should have already been detected);
+		//   find max, then give all remaining points to max
+		var maxV int
+		var maxC uint32
+		for i, cnt := range s.count[:s.symbolLen] {
+			if cnt > maxC {
+				maxV = i
+				maxC = cnt
+			}
+		}
+		s.norm[maxV] += int16(toDistribute)
+		return nil
+	}
+
+	if total == 0 {
+		// all of the symbols were low enough for the lowOne or lowThreshold
+		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+			if s.norm[i] > 0 {
+				toDistribute--
+				s.norm[i]++
+			}
+		}
+		return nil
+	}
+
+	var (
+		vStepLog = 62 - uint64(tableLog)
+		mid      = uint64((1 << (vStepLog - 1)) - 1)
+		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+		tmpTotal = mid
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if s.norm[i] == notYetAssigned {
+			var (
+				end    = tmpTotal + uint64(cnt)*rStep
+				sStart = uint32(tmpTotal >> vStepLog)
+				sEnd   = uint32(end >> vStepLog)
+				weight = sEnd - sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.count[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return nil, errors.New("internal error: remaining < 1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+		return nil, errors.New("internal error: charnum > s.symbolLen")
+	}
+	return out[:outP], nil
+}
+
+// bitCost returns the approximate symbol cost, as a fractional value, using fixed-point format (accuracyLog fractional bits).
+// note 1: assumes symbolValue is valid (<= maxSymbolValue)
+// note 2: if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+	threshold := (minNbBits + 1) << 16
+	if debugAsserts {
+		if !(s.actualTableLog < 16) {
+			panic("!s.actualTableLog < 16")
+		}
+		// ensure enough room for renormalization double shift
+		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+		}
+	}
+	tableSize := uint32(1) << s.actualTableLog
+	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+	// linear interpolation (very approximate)
+	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+	bitMultiplier := uint32(1) << accuracyLog
+	if debugAsserts {
+		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+		}
+		if normalizedDeltaFromThreshold > bitMultiplier {
+			panic("normalizedDeltaFromThreshold > bitMultiplier")
+		}
+	}
+	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// Returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if the ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+	if int(s.symbolLen) < len(hist) {
+		// More symbols than we have.
+		return math.MaxUint32
+	}
+	if s.useRLE {
+		// We will never reuse RLE encoders.
+		return math.MaxUint32
+	}
+	const kAccuracyLog = 8
+	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+	var cost uint32
+	for i, v := range hist {
+		if v == 0 {
+			continue
+		}
+		if s.norm[i] == 0 {
+			return math.MaxUint32
+		}
+		bitCost := s.bitCost(uint8(i), kAccuracyLog)
+		if bitCost > badCost {
+			return math.MaxUint32
+		}
+		cost += v * bitCost
+	}
+	return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+	if s.preDefined {
+		return 0
+	}
+	if s.useRLE {
+		return 8
+	}
+	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+	if len(c.stateTable) == 1 {
+		// RLE
+		c.stateTable[0] = uint16(0)
+		c.state = 0
+		return
+	}
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + int32(first.deltaFindState)
+	c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the current state to the output using tableLog bits and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
new file mode 100644
index 0000000000000000000000000000000000000000..474cb77d2b999172be4f82e424b4158b9efbae1c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -0,0 +1,158 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"sync"
+)
+
+var (
+	// fsePredef are the predefined fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredef [3]fseDecoder
+
+	// fsePredefEnc are the predefined encoders based on the fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredefEnc [3]fseEncoder
+
+	// symbolTableX contains the transformations needed for each type as defined in
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	symbolTableX [3][]baseOffset
+
+	// maxTableSymbol is the biggest supported symbol for each table type
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}
+
+	// bitTables is the bits table for each table.
+	bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
+)
+
+type tableIndex uint8
+
+const (
+	// indexes for fsePredef and symbolTableX
+	tableLiteralLengths tableIndex = 0
+	tableOffsets        tableIndex = 1
+	tableMatchLengths   tableIndex = 2
+
+	maxLiteralLengthSymbol = 35
+	maxOffsetLengthSymbol  = 30
+	maxMatchLengthSymbol   = 52
+)
+
+// baseOffset is used for calculating transformations.
+type baseOffset struct {
+	baseLine uint32
+	addBits  uint8
+}
+
+// fillBase will precalculate base offsets with the given bit distributions.
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
+	if len(bits) != len(dst) {
+		panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits)))
+	}
+	for i, bit := range bits {
+		if base > math.MaxInt32 {
+			panic("invalid decoding table, base overflows int32")
+		}
+
+		dst[i] = baseOffset{
+			baseLine: base,
+			addBits:  bit,
+		}
+		base += 1 << bit
+	}
+}
+
+var predef sync.Once
+
+func initPredefined() {
+	predef.Do(func() {
+		// Literals length codes
+		tmp := make([]baseOffset, 36)
+		for i := range tmp[:16] {
+			tmp[i] = baseOffset{
+				baseLine: uint32(i),
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableLiteralLengths] = tmp
+
+		// Match length codes
+		tmp = make([]baseOffset, 53)
+		for i := range tmp[:32] {
+			tmp[i] = baseOffset{
+				// The transformation adds the base match length of 3.
+				baseLine: uint32(i) + 3,
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableMatchLengths] = tmp
+
+		// Offset codes
+		tmp = make([]baseOffset, maxOffsetBits+1)
+		tmp[1] = baseOffset{
+			baseLine: 1,
+			addBits:  1,
+		}
+		fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+		symbolTableX[tableOffsets] = tmp
+
+		// Fill predefined tables and transform them.
+		// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+		for i := range fsePredef[:] {
+			f := &fsePredef[i]
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+					-1, -1, -1, -1})
+				f.symbolLen = 36
+			case tableOffsets:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+				f.actualTableLog = 5
+				copy(f.norm[:], []int16{
+					1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+				f.symbolLen = 29
+			case tableMatchLengths:
+				//https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{
+					1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+					-1, -1, -1, -1, -1})
+				f.symbolLen = 53
+			}
+			if err := f.buildDtable(); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			if err := f.transform(symbolTableX[i]); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			f.preDefined = true
+
+			// Create encoder as well
+			enc := &fsePredefEnc[i]
+			copy(enc.norm[:], f.norm[:])
+			enc.symbolLen = f.symbolLen
+			enc.actualTableLog = f.actualTableLog
+			if err := enc.buildCTable(); err != nil {
+				panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+			}
+			enc.setBits(bitTables[i])
+			enc.preDefined = true
+		}
+	})
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf33f29a1b488eac4cbee823e22b996162306198
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -0,0 +1,41 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return a hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be constants for inlining.
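+// For example, mls=5 shifts the low 5 bytes to the top of the word, multiplies by a 40-bit prime, and keeps the top 'length' bits of the product.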
+func hashLen(u uint64, length, mls uint8) uint32 {
+	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
+	case 5:
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+	case 6:
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+	case 7:
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+	case 8:
+		return uint32((u * prime8bytes) >> (64 - length))
+	default:
+		return (uint32(u) * prime4bytes) >> (32 - length)
+	}
+}
+
+// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash3(u uint32, h uint8) uint32 {
+	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 0000000000000000000000000000000000000000..f783e32d251b45d73a92572104e8a6a9522b32c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,89 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+	b             []byte
+	huffTree      *huff0.Scratch
+	recentOffsets [3]int
+	decoders      sequenceDecs
+	windowSize    int
+	maxSize       int
+	error         bool
+	dict          *dict
+}
+
+// reset will reset the history to the initial state of a frame.
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+	h.b = h.b[:0]
+	h.error = false
+	h.recentOffsets = [3]int{1, 4, 8}
+	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	h.decoders = sequenceDecs{}
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+		}
+	}
+	h.huffTree = nil
+	h.dict = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) setDict(dict *dict) {
+	if dict == nil {
+		return
+	}
+	h.dict = dict
+	h.decoders.litLengths = dict.llDec
+	h.decoders.offsets = dict.ofDec
+	h.decoders.matchLengths = dict.mlDec
+	h.recentOffsets = dict.offsets
+	h.huffTree = dict.litEnc
+}
+
+// append bytes to history.
+// This function will make sure there is space for them,
+// assuming the buffer has been allocated with enough extra space.
+func (h *history) append(b []byte) {
+	if len(b) >= h.windowSize {
+		// Discard all history by simply overwriting
+		h.b = h.b[:h.windowSize]
+		copy(h.b, b[len(b)-h.windowSize:])
+		return
+	}
+
+	// If there is space, append it.
+	if len(b) < cap(h.b)-len(h.b) {
+		h.b = append(h.b, b...)
+		return
+	}
+
+	// Move data down so we only have window size left.
+	// We know we have less than window size in b at this point.
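+	// discard is how many of the oldest history bytes must be dropped to fit within windowSize.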
+	discard := len(b) + len(h.b) - h.windowSize
+	copy(h.b, h.b[discard:])
+	h.b = h.b[:h.windowSize]
+	copy(h.b[h.windowSize-len(b):], b)
+}
+
+// append bytes to history without ever discarding anything.
+func (h *history) appendKeep(b []byte) {
+	h.b = append(h.b, b...)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..24b53065f40b5d7d277a64375956ec19cb2123c5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..69aa3bb587c8d9d316c1658de4d262a7ddfb1532
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -0,0 +1,58 @@
+# xxhash
+
+VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
+
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+    func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+This implementation provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
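+
+A minimal usage sketch (the `fmt` import is assumed):
+
+```
+d := xxhash.New()
+d.WriteString("hello ")
+d.Write([]byte("world"))
+fmt.Printf("%016x\n", d.Sum64()) // matches Sum64([]byte("hello world"))
+```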
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
+| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c112a0ab1c1bb6c0a047e44062e67a0becf8953
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -0,0 +1,237 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package.
+
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+	var d Digest
+	d.Reset()
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+	d.v1 = prime1v + prime2
+	d.v2 = prime2
+	d.v3 = 0
+	d.v4 = -prime1v
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(d.mem[d.n:], b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		copy(d.mem[d.n:], b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[32-d.n:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
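+// The 8 hash bytes are appended most-significant byte first (big endian).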
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	i, end := 0, d.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(d.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(d.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ae847f75b05fb644aaa414ee5cc3c33486fafd9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
@@ -0,0 +1,12 @@
+//go:build !appengine && gc && !purego
+// +build !appengine,gc,!purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
new file mode 100644
index 0000000000000000000000000000000000000000..be8db5bf796015120afa0748cf1c39f2acf4f576
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX	h
+// SI	pointer to advance through b
+// DX	n
+// BX	loop end
+// R8	v1, k1
+// R9	v2
+// R10	v3
+// R11	v4
+// R12	tmp
+// R13	prime1v
+// R14	prime2v
+// DI	prime4v
+
+// round reads from and advances the buffer pointer in SI.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+	MOVQ  (SI), R12 \
+	ADDQ  $8, SI    \
+	IMULQ R14, R12  \
+	ADDQ  R12, r    \
+	ROLQ  $31, r    \
+	IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
+#define mergeRound(acc, val) \
+	IMULQ R14, val \
+	ROLQ  $31, val \
+	IMULQ R13, val \
+	XORQ  val, acc \
+	IMULQ R13, acc \
+	ADDQ  DI, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+	// Load fixed primes.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+	MOVQ ·prime4v(SB), DI
+
+	// Load slice.
+	MOVQ b_base+0(FP), SI
+	MOVQ b_len+8(FP), DX
+	LEAQ (SI)(DX*1), BX
+
+	// The first loop limit will be len(b)-32.
+	SUBQ $32, BX
+
+	// Check whether we have at least one block.
+	CMPQ DX, $32
+	JLT  noBlocks
+
+	// Set up initial state (v1, v2, v3, v4).
+	MOVQ R13, R8
+	ADDQ R14, R8
+	MOVQ R14, R9
+	XORQ R10, R10
+	XORQ R11, R11
+	SUBQ R13, R11
+
+	// Loop until SI > BX.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ SI, BX
+	JLE  blockLoop
+
+	MOVQ R8, AX
+	ROLQ $1, AX
+	MOVQ R9, R12
+	ROLQ $7, R12
+	ADDQ R12, AX
+	MOVQ R10, R12
+	ROLQ $12, R12
+	ADDQ R12, AX
+	MOVQ R11, R12
+	ROLQ $18, R12
+	ADDQ R12, AX
+
+	mergeRound(AX, R8)
+	mergeRound(AX, R9)
+	mergeRound(AX, R10)
+	mergeRound(AX, R11)
+
+	JMP afterBlocks
+
+noBlocks:
+	MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+	ADDQ DX, AX
+
+	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
+	ADDQ $24, BX
+
+	CMPQ SI, BX
+	JG   fourByte
+
+wordLoop:
+	// Calculate k1.
+	MOVQ  (SI), R8
+	ADDQ  $8, SI
+	IMULQ R14, R8
+	ROLQ  $31, R8
+	IMULQ R13, R8
+
+	XORQ  R8, AX
+	ROLQ  $27, AX
+	IMULQ R13, AX
+	ADDQ  DI, AX
+
+	CMPQ SI, BX
+	JLE  wordLoop
+
+fourByte:
+	ADDQ $4, BX
+	CMPQ SI, BX
+	JG   singles
+
+	MOVL  (SI), R8
+	ADDQ  $4, SI
+	IMULQ R13, R8
+	XORQ  R8, AX
+
+	ROLQ  $23, AX
+	IMULQ R14, AX
+	ADDQ  ·prime3v(SB), AX
+
+singles:
+	ADDQ $4, BX
+	CMPQ SI, BX
+	JGE  finalize
+
+singlesLoop:
+	MOVBQZX (SI), R12
+	ADDQ    $1, SI
+	IMULQ   ·prime5v(SB), R12
+	XORQ    R12, AX
+
+	ROLQ  $11, AX
+	IMULQ R13, AX
+
+	CMPQ SI, BX
+	JL   singlesLoop
+
+finalize:
+	MOVQ  AX, R12
+	SHRQ  $33, R12
+	XORQ  R12, AX
+	IMULQ R14, AX
+	MOVQ  AX, R12
+	SHRQ  $29, R12
+	XORQ  R12, AX
+	IMULQ ·prime3v(SB), AX
+	MOVQ  AX, R12
+	SHRQ  $32, R12
+	XORQ  R12, AX
+
+	MOVQ AX, ret+24(FP)
+	RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+	// Load fixed primes needed for round.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+
+	// Load slice.
+	MOVQ b_base+8(FP), SI
+	MOVQ b_len+16(FP), DX
+	LEAQ (SI)(DX*1), BX
+	SUBQ $32, BX
+
+	// Load vN from d.
+	MOVQ d+0(FP), AX
+	MOVQ 0(AX), R8   // v1
+	MOVQ 8(AX), R9   // v2
+	MOVQ 16(AX), R10 // v3
+	MOVQ 24(AX), R11 // v4
+
+	// We don't need to check the loop condition here; this function is
+	// always called with at least one block of data to process.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ SI, BX
+	JLE  blockLoop
+
+	// Copy vN back to d.
+	MOVQ R8, 0(AX)
+	MOVQ R9, 8(AX)
+	MOVQ R10, 16(AX)
+	MOVQ R11, 24(AX)
+
+	// The number of bytes written is SI minus the old base pointer.
+	SUBQ b_base+8(FP), SI
+	MOVQ SI, ret+32(FP)
+
+	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f52f296e71fb8b40d4501d0587d837e072d07ec
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -0,0 +1,77 @@
+//go:build !amd64 || appengine || !gc || purego
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+	// A simpler version would be
+	//   d := New()
+	//   d.Write(b)
+	//   return d.Sum64()
+	// but this is faster, particularly for small inputs.
+
+	n := len(b)
+	var h uint64
+
+	if n >= 32 {
+		v1 := prime1v + prime2
+		v2 := prime2
+		v3 := uint64(0)
+		v4 := -prime1v
+		for len(b) >= 32 {
+			v1 = round(v1, u64(b[0:8:len(b)]))
+			v2 = round(v2, u64(b[8:16:len(b)]))
+			v3 = round(v3, u64(b[16:24:len(b)]))
+			v4 = round(v4, u64(b[24:32:len(b)]))
+			b = b[32:len(b):len(b)]
+		}
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = prime5
+	}
+
+	h += uint64(n)
+
+	i, end := 0, len(b)
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(b[i:i+8:len(b)]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for ; i < end; i++ {
+		h ^= uint64(b[i]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+	n := len(b)
+	for len(b) >= 32 {
+		v1 = round(v1, u64(b[0:8:len(b)]))
+		v2 = round(v2, u64(b[8:16:len(b)]))
+		v3 = round(v3, u64(b[16:24:len(b)]))
+		v4 = round(v4, u64(b[24:32:len(b)]))
+		b = b[32:len(b):len(b)]
+	}
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+	return n - len(b)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f3b0cb10264c553f9bbf0bcab8ca0fc0158d8ca
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
@@ -0,0 +1,11 @@
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+	return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
new file mode 100644
index 0000000000000000000000000000000000000000..1dd39e63b7e83f8e894aaeb90e8e63cfc94936cc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -0,0 +1,492 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+type seq struct {
+	litLen   uint32
+	matchLen uint32
+	offset   uint32
+
+	// Codes are stored here for the encoder
+	// so they only have to be looked up once.
+	llCode, mlCode, ofCode uint8
+}
+
+func (s seq) String() string {
+	if s.offset <= 3 {
+		if s.offset == 0 {
+			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+		}
+		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+	}
+	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
+type seqCompMode uint8
+
+const (
+	compModePredefined seqCompMode = iota
+	compModeRLE
+	compModeFSE
+	compModeRepeat
+)
+
+type sequenceDec struct {
+	// decoder keeps track of the current state and updates it from the bitstream.
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 sequence decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	hist         []byte
+	dict         []byte
+	literals     []byte
+	out          []byte
+	windowSize   int
+	maxBits      uint8
+}
+
+// initialize all 3 decoders from the stream input.
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+	if err := s.litLengths.init(br); err != nil {
+		return errors.New("litLengths:" + err.Error())
+	}
+	if err := s.offsets.init(br); err != nil {
+		return errors.New("offsets:" + err.Error())
+	}
+	if err := s.matchLengths.init(br); err != nil {
+		return errors.New("matchLengths:" + err.Error())
+	}
+	s.literals = literals
+	s.hist = hist.b
+	s.prevOffset = hist.recentOffsets
+	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+	s.windowSize = hist.windowSize
+	s.out = out
+	s.dict = nil
+	if hist.dict != nil {
+		s.dict = hist.dict.content
+	}
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			return io.ErrUnexpectedEOF
+		}
+		var ll, mo, ml int
+		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+
+		if ll > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+		}
+		size := ll + ml + len(s.out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size", size)
+		}
+		if size > cap(s.out) {
+			// Not enough capacity, which can happen under high-volume block streaming conditions,
+			// but could also occur if the destination slice is too small for sync operations.
+			// Over-allocating here can create a large amount of GC pressure,
+			// so we try to keep it as contained as possible.
+			used := len(s.out) - startSize
+			addBytes := 256 + ll + ml + used>>2
+			// Clamp to max block size.
+			if used+addBytes > maxBlockSize {
+				addBytes = maxBlockSize - used
+			}
+			s.out = append(s.out, make([]byte, addBytes)...)
+			s.out = s.out[:len(s.out)-addBytes]
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+
+		// Add literals
+		s.out = append(s.out, s.literals[:ll]...)
+		s.literals = s.literals[ll:]
+		out := s.out
+
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+
+		if mo > len(s.out)+len(hist) || mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+
+			// We may be in the dictionary.
+			dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+			end := dictO + ml
+			if end > len(s.dict) {
+				out = append(out, s.dict[dictO:]...)
+				mo -= len(s.dict) - dictO
+				ml -= len(s.dict) - dictO
+			} else {
+				out = append(out, s.dict[dictO:end]...)
+				mo = 0
+				ml = 0
+			}
+		}
+
+		// Copy from history.
+		// TODO: Blocks without history could be made to ignore this completely.
+		if v := mo - len(s.out); v > 0 {
+			// v is the start position in history from end.
+			start := len(s.hist) - v
+			if ml > v {
+				// Some goes into current block.
+				// Copy remainder of history
+				out = append(out, s.hist[start:]...)
+				mo -= v
+				ml -= v
+			} else {
+				out = append(out, s.hist[start:start+ml]...)
+				ml = 0
+			}
+		}
+		// We must be in current buffer now
+		if ml > 0 {
+			start := len(s.out) - mo
+			if ml <= len(s.out)-start {
+				// No overlap
+				out = append(out, s.out[start:start+ml]...)
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				out = out[:len(out)+ml]
+				src := out[start : start+ml]
+				// Destination is the space we just added.
+				dst := out[len(out)-ml:]
+				dst = dst[:len(src)]
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+		s.out = out
+		if i == 0 {
+			// This is the last sequence, so we shouldn't update state.
+			break
+		}
+
+		// Manually inlined, ~ 5-20% faster
+		// Update all 3 states at once. Approx 20% faster.
+		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+		if nBits == 0 {
+			llState = llTable[llState.newState()&maxTableMask]
+			mlState = mlTable[mlState.newState()&maxTableMask]
+			ofState = ofTable[ofState.newState()&maxTableMask]
+		} else {
+			bits := br.getBitsFast(nBits)
+			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits >> (ofState.nbBits() & 31))
+			lowBits &= bitMask[mlState.nbBits()&15]
+			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+		}
+	}
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	return nil
+}
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) update(br *bitReader) {
+	// Max 8 bits
+	s.litLengths.state.next(br)
+	// Max 9 bits
+	s.matchLengths.state.next(br)
+	// Max 8 bits
+	s.offsets.state.next(br)
+}
+
+var bitMask [16]uint16
+
+func init() {
+	for i := range bitMask[:] {
+		bitMask[i] = uint16((1 << uint(i)) - 1)
+	}
+}
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) updateAlt(br *bitReader) {
+	// Update all 3 states at once. Approx 20% faster.
+	a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	nBits := a.nbBits() + b.nbBits() + c.nbBits()
+	if nBits == 0 {
+		s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
+		s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
+		s.offsets.state.state = s.offsets.state.dt[c.newState()]
+		return
+	}
+	bits := br.getBitsFast(nBits)
+	lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
+	s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
+
+	lowBits = uint16(bits >> (c.nbBits() & 31))
+	lowBits &= bitMask[b.nbBits()&15]
+	s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
+
+	lowBits = uint16(bits) & bitMask[c.nbBits()&15]
+	s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
+}
+
+// nextFast will return the next (litLen, matchOff, matchLen) values. It may only be used when at least 4 unused bytes are left on the stream when done.
+func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
+	br.fillFast()
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
+		br.fillFast()
+	}
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
+
+	if moB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = mo
+		return
+	}
+	// mo = s.adjustOffset(mo, ll, moB)
+	// Inlined for rather big speedup
+	if ll == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		mo++
+	}
+
+	if mo == 0 {
+		mo = s.prevOffset[0]
+		return
+	}
+	var temp int
+	if mo == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[mo]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if mo != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	mo = temp
+	return
+}
+
+func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
+	br.fill()
+	if s.maxBits <= 32 {
+		mo += br.getBits(moB)
+		ml += br.getBits(mlB)
+		ll += br.getBits(llB)
+	} else {
+		mo += br.getBits(moB)
+		br.fill()
+		// matchlength+literal length, max 32 bits
+		ml += br.getBits(mlB)
+		ll += br.getBits(llB)
+
+	}
+	mo = s.adjustOffset(mo, ll, moB)
+	return
+}
+
+func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
+	if offsetB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = offset
+		return offset
+	}
+
+	if litLen == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		offset++
+	}
+
+	if offset == 0 {
+		return s.prevOffset[0]
+	}
+	var temp int
+	if offset == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[offset]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if offset != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	return temp
+}
+
+// mergeHistory will merge the newly decoded table decoders into the history, keeping the history table wherever a repeat is requested.
+func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
+	for i := uint(0); i < 3; i++ {
+		var sNew, sHist *sequenceDec
+		switch i {
+		default:
+			// same as "case 0":
+			sNew = &s.litLengths
+			sHist = &hist.litLengths
+		case 1:
+			sNew = &s.offsets
+			sHist = &hist.offsets
+		case 2:
+			sNew = &s.matchLengths
+			sHist = &hist.matchLengths
+		}
+		if sNew.repeat {
+			if sHist.fse == nil {
+				return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
+			}
+			continue
+		}
+		if sNew.fse == nil {
+			return nil, fmt.Errorf("sequence stream %d, no fse found", i)
+		}
+		if sHist.fse != nil && !sHist.fse.preDefined {
+			fseDecoderPool.Put(sHist.fse)
+		}
+		sHist.fse = sNew.fse
+	}
+	return hist, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
new file mode 100644
index 0000000000000000000000000000000000000000..8014174a7713e94fc17640baa70335f5f14d0fc5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -0,0 +1,114 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "math/bits"
+
+type seqCoders struct {
+	llEnc, ofEnc, mlEnc    *fseEncoder
+	llPrev, ofPrev, mlPrev *fseEncoder
+}
+
+// swap coders with another (block).
+func (s *seqCoders) swap(other *seqCoders) {
+	*s, *other = *other, *s
+}
+
+// setPrev will update the previous encoders to the actually used ones
+// and make sure a fresh one is in the main slot.
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
+	compareSwap := func(used *fseEncoder, current, prev **fseEncoder) {
+		// We used the new one; move current to history and reuse the previous history.
+		if *current == used {
+			*prev, *current = *current, *prev
+			c := *current
+			p := *prev
+			c.reUsed = false
+			p.reUsed = true
+			return
+		}
+		if used == *prev {
+			return
+		}
+		// Ensure we cannot reuse by accident
+		prevEnc := *prev
+		prevEnc.symbolLen = 0
+	}
+	compareSwap(ll, &s.llEnc, &s.llPrev)
+	compareSwap(ml, &s.mlEnc, &s.mlPrev)
+	compareSwap(of, &s.ofEnc, &s.ofPrev)
+}
+
+func highBit(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
+
+var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
+	8, 9, 10, 11, 12, 13, 14, 15,
+	16, 16, 17, 17, 18, 18, 19, 19,
+	20, 20, 20, 20, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22,
+	23, 23, 23, 23, 23, 23, 23, 23,
+	24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24}
+
+// Up to 6 bits
+const maxLLCode = 35
+
+// llBitsTable translates from ll code to number of bits.
+var llBitsTable = [maxLLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 6, 7, 8, 9, 10, 11, 12,
+	13, 14, 15, 16}
+
+// llCode returns the code that represents the literal length requested.
+func llCode(litLength uint32) uint8 {
+	const llDeltaCode = 19
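+	// For litLength > 63, the code is highBit(litLength)+llDeltaCode, e.g. llCode(130) == 7+19 == 26.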
+	if litLength <= 63 {
+		// Compiler insists on bounds check (Go 1.12)
+		return llCodeTable[litLength&63]
+	}
+	return uint8(highBit(litLength)) + llDeltaCode
+}
+
+var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+	32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+	38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+	40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+	41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}
+
+// Up to 6 bits
+const maxMLCode = 52
+
+// mlBitsTable translates from ml code to number of bits.
+var mlBitsTable = [maxMLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 4, 5, 7, 8, 9, 10, 11,
+	12, 13, 14, 15, 16}
+
+// note : mlBase = matchLength - MINMATCH,
+// because that is the format in which sequences are stored.
+func mlCode(mlBase uint32) uint8 {
+	const mlDeltaCode = 36
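+	// For mlBase > 127, the code is highBit(mlBase)+mlDeltaCode, e.g. mlCode(130) == 7+36 == 43.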
+	if mlBase <= 127 {
+		// Compiler insists on bounds check (Go 1.12)
+		return mlCodeTable[mlBase&127]
+	}
+	return uint8(highBit(mlBase)) + mlDeltaCode
+}
+
+func ofCode(offset uint32) uint8 {
+	// A valid offset will always be > 0.
+	return uint8(bits.Len32(offset) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e1baad73be8d9e0194abbaa223bf308b368184f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -0,0 +1,435 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"hash/crc32"
+	"io"
+
+	"github.com/klauspost/compress/huff0"
+	snappy "github.com/klauspost/compress/internal/snapref"
+)
+
+const (
+	snappyTagLiteral = 0x00
+	snappyTagCopy1   = 0x01
+	snappyTagCopy2   = 0x02
+	snappyTagCopy4   = 0x03
+)
+
+const (
+	snappyChecksumSize = 4
+	snappyMagicBody    = "sNaPpY"
+
+	// snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	snappyMaxBlockSize = 65536
+
+	// snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	snappyMaxEncodedLenOfMaxBlockSize = 76490
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var (
+	// ErrSnappyCorrupt reports that the input is invalid.
+	ErrSnappyCorrupt = errors.New("snappy: corrupt input")
+	// ErrSnappyTooLarge reports that the uncompressed length is too large.
+	ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrSnappyUnsupported reports that the input isn't supported.
+	ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by converting the stream directly from Snappy without intermediate
+// full decoding.
+// Therefore the compression ratio is much less than what a full decompression
+// and recompression could achieve, and a faulty Snappy stream may lead to a faulty
+// Zstandard stream without any errors being generated.
+// No CRC value is generated, and not all CRC values of the Snappy stream are checked.
+// However, it provides really fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
+type SnappyConverter struct {
+	r     io.Reader
+	err   error
+	buf   []byte
+	block *blockEnc
+}
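+
+// A minimal usage sketch (snappyIn and zstdOut are hypothetical reader/writer values):
+//
+//	var conv SnappyConverter
+//	written, err := conv.Convert(snappyIn, zstdOut)
+//	// written is the number of Zstandard bytes written to zstdOut.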
+
+// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+	initPredefined()
+	r.err = nil
+	r.r = in
+	if r.block == nil {
+		r.block = &blockEnc{}
+		r.block.init()
+	}
+	r.block.initNewEncode()
+	if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+		r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+	}
+	r.block.litEnc.Reuse = huff0.ReusePolicyNone
+	var written int64
+	var readHeader bool
+	{
+		var header []byte
+		var n int
+		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+
+		n, r.err = w.Write(header)
+		if r.err != nil {
+			return written, r.err
+		}
+		written += int64(n)
+	}
+
+	for {
+		if !r.readFull(r.buf[:4], true) {
+			// Add empty last block
+			r.block.reset(nil)
+			r.block.last = true
+			err := r.block.encodeLits(r.block.literals, false)
+			if err != nil {
+				return written, err
+			}
+			n, err := w.Write(r.block.output)
+			if err != nil {
+				return written, err
+			}
+			written += int64(n)
+
+			return written, r.err
+		}
+		chunkType := r.buf[0]
+		if !readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				println("chunkType != chunkTypeStreamIdentifier", chunkType)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			println("chunkLen > len(r.buf)", chunkType)
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < snappyChecksumSize {
+				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return written, r.err
+			}
+			//checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[snappyChecksumSize:]
+
+			n, hdr, err := snappyDecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return written, r.err
+			}
+			buf = buf[hdr:]
+			if n > snappyMaxBlockSize {
+				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.reset(nil)
+			r.block.pushOffsets()
+			if err := decodeSnappy(r.block, buf); err != nil {
+				r.err = err
+				return written, r.err
+			}
+			if r.block.size+r.block.extraLits != n {
+				printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			err = r.block.encode(nil, false, false)
+			switch err {
+			case errIncompressible:
+				r.block.popOffsets()
+				r.block.reset(nil)
+				r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen])
+				if err != nil {
+					return written, err
+				}
+				err = r.block.encodeLits(r.block.literals, false)
+				if err != nil {
+					return written, err
+				}
+			case nil:
+			default:
+				return written, err
+			}
+
+			n, r.err = w.Write(r.block.output)
+			if r.err != nil {
+				return written, err
+			}
+			written += int64(n)
+			continue
+		case chunkTypeUncompressedData:
+			if debugEncoder {
+				println("Uncompressed, chunklen", chunkLen)
+			}
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < snappyChecksumSize {
+				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.reset(nil)
+			buf := r.buf[:snappyChecksumSize]
+			if !r.readFull(buf, false) {
+				return written, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.block.literals instead of via r.buf.
+			n := chunkLen - snappyChecksumSize
+			if n > snappyMaxBlockSize {
+				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.literals = r.block.literals[:n]
+			if !r.readFull(r.block.literals, false) {
+				return written, r.err
+			}
+			if snappyCRC(r.block.literals) != checksum {
+				println("literals crc mismatch")
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			err := r.block.encodeLits(r.block.literals, false)
+			if err != nil {
+				return written, err
+			}
+			n, r.err = w.Write(r.block.output)
+			if r.err != nil {
+				return written, err
+			}
+			written += int64(n)
+			continue
+
+		case chunkTypeStreamIdentifier:
+			if debugEncoder {
+				println("stream id", chunkLen, len(snappyMagicBody))
+			}
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(snappyMagicBody) {
+				println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody))
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
+				return written, r.err
+			}
+			for i := 0; i < len(snappyMagicBody); i++ {
+				if r.buf[i] != snappyMagicBody[i] {
+					println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
+					r.err = ErrSnappyCorrupt
+					return written, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			println("chunkType <= 0x7f")
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return written, r.err
+		}
+	}
+}
+
+// decodeSnappy decodes src into blk as literals and sequences. It assumes that the
+// varint-encoded length of the decompressed bytes has already been read.
+func decodeSnappy(blk *blockEnc, src []byte) error {
+	//decodeRef(make([]byte, snappyMaxBlockSize), src)
+	var s, length int
+	lits := blk.extraLits
+	var offset uint32
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case snappyTagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src)", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src)", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src))", s, len(src))
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src))", s, len(src))
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			if x > snappyMaxBlockSize {
+				println("x > snappyMaxBlockSize", x, snappyMaxBlockSize)
+				return ErrSnappyCorrupt
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				println("length <= 0 ", length)
+
+				return errUnsupportedLiteralLength
+			}
+			// Guard against literal runs that extend past the end of src;
+			// without this check, corrupt input could panic the slice below.
+			if length > len(src)-s {
+				println("length > len(src)-s", length, len(src)-s)
+				return ErrSnappyCorrupt
+			}
+
+			blk.literals = append(blk.literals, src[s:s+length]...)
+			//println(length, "litLen")
+			lits += length
+			s += length
+			continue
+
+		case snappyTagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src))", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])
+
+		case snappyTagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src))", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = uint32(src[s-2]) | uint32(src[s-1])<<8
+
+		case snappyTagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src))", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+		}
+
+		if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ {
+			println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits)
+
+			return ErrSnappyCorrupt
+		}
+
+		// Check if offset is one of the recent offsets.
+		// Adjusts the output offset accordingly.
+		// Gives a tiny bit of compression, typically around 1%.
+		if false {
+			offset = blk.matchOffset(offset, uint32(lits))
+		} else {
+			offset += 3
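+			// zstd reserves sequence offset values 1-3 for repeat-offset
+			// codes, so literal match offsets are stored shifted up by 3.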
+		}
+
+		blk.sequences = append(blk.sequences, seq{
+			litLen:   uint32(lits),
+			offset:   offset,
+			matchLen: uint32(length) - zstdMinMatch,
+		})
+		blk.size += length + lits
+		lits = 0
+	}
+	blk.extraLits = lits
+	return nil
+}
+
+func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrSnappyCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func snappyCRC(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
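+	// | and + have equal precedence in Go and associate left to right,
+	// so this evaluates as ((c>>15)|(c<<17)) + 0xa282ead8.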
+	return c>>15 | c<<17 + 0xa282ead8
+}
+
+// snappyDecodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrSnappyCorrupt
+	}
+
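+	// wordSize is the size of uint in bits: ^uint(0)>>32&1 is 0 on
+	// 32-bit platforms and 1 on 64-bit platforms, giving 32 or 64.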
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrSnappyTooLarge
+	}
+	return int(v), n, nil
+}
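
For context, the chunk loop and helpers above all serve the exported `SnappyConverter.Convert(in io.Reader, w io.Writer) (int64, error)` method that this hunk is part of: it transcodes a Snappy framing-format stream into a zstd stream without a full decompress/recompress round trip. A minimal usage sketch (the file names are illustrative):

```go
package main

import (
	"bytes"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	in, err := os.Open("payload.snappy") // Snappy framing-format input (assumed to exist)
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// The zero value of SnappyConverter is ready to use.
	var conv zstd.SnappyConverter
	var out bytes.Buffer
	if _, err := conv.Convert(in, &out); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("payload.zst", out.Bytes(), 0o644); err != nil {
		log.Fatal(err)
	}
}
```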
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
new file mode 100644
index 0000000000000000000000000000000000000000..967f29b3120e923620715900249ee31281ad0856
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -0,0 +1,122 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip.
+// See https://www.winzip.com/win/en/comp_info.html
+const ZipMethodWinZip = 93
+
+// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression.
+// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression.
+// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
+const ZipMethodPKWare = 20
+
+var zipReaderPool sync.Pool
+
+// newZipReader returns a pooled decoder wrapped in a ReadCloser. Decoders
+// are pooled and reused so that their goroutines are not leaked.
+func newZipReader(r io.Reader) io.ReadCloser {
+	dec, ok := zipReaderPool.Get().(*Decoder)
+	if ok {
+		dec.Reset(r)
+	} else {
+		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
+		if err != nil {
+			panic(err)
+		}
+		dec = d
+	}
+	return &pooledZipReader{dec: dec}
+}
+
+type pooledZipReader struct {
+	mu  sync.Mutex // guards Close and Read
+	dec *Decoder
+}
+
+func (r *pooledZipReader) Read(p []byte) (n int, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.dec == nil {
+		return 0, errors.New("Read after Close")
+	}
+	return r.dec.Read(p)
+}
+
+func (r *pooledZipReader) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	var err error
+	if r.dec != nil {
+		err = r.dec.Reset(nil)
+		zipReaderPool.Put(r.dec)
+		r.dec = nil
+	}
+	return err
+}
+
+type pooledZipWriter struct {
+	mu   sync.Mutex // guards Close and Write
+	enc  *Encoder
+	pool *sync.Pool
+}
+
+func (w *pooledZipWriter) Write(p []byte) (n int, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.enc == nil {
+		return 0, errors.New("Write after Close")
+	}
+	return w.enc.Write(p)
+}
+
+func (w *pooledZipWriter) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	var err error
+	if w.enc != nil {
+		err = w.enc.Close()
+		w.pool.Put(w.enc)
+		w.enc = nil
+	}
+	return err
+}
+
+// ZipCompressor returns a compressor that can be registered with zip libraries.
+// The provided encoder options will be used on all encodes.
+func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
+	var pool sync.Pool
+	return func(w io.Writer) (io.WriteCloser, error) {
+		enc, ok := pool.Get().(*Encoder)
+		if ok {
+			enc.Reset(w)
+		} else {
+			var err error
+			enc, err = NewWriter(w, opts...)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return &pooledZipWriter{enc: enc, pool: &pool}, nil
+	}
+}
+
+// ZipDecompressor returns a decompressor that can be registered with zip libraries.
+// See ZipCompressor for an example.
+func ZipDecompressor() func(r io.Reader) io.ReadCloser {
+	return func(r io.Reader) io.ReadCloser {
+		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
+		if err != nil {
+			panic(err)
+		}
+		return d.IOReadCloser()
+	}
+}
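
These two hooks line up with the standard library's `archive/zip` registration points: `zip.Compressor` is `func(io.Writer) (io.WriteCloser, error)` and `zip.Decompressor` is `func(io.Reader) io.ReadCloser`. A minimal round-trip sketch (entry name and payload are illustrative):

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Write an archive with one Zstandard-compressed entry.
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "hello from zstd")
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back with the matching decompressor registered.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	data, _ := io.ReadAll(rc)
	rc.Close()
	fmt.Println(string(data)) // hello from zstd
}
```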
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef1d49a009cc75e73a9519495b1dab05e6bdcfb9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -0,0 +1,152 @@
+// Package zstd provides compression and decompression of Zstandard files.
+//
+// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd
+package zstd
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"log"
+	"math"
+	"math/bits"
+)
+
+// enable debug printing
+const debug = false
+
+// enable encoding debug printing
+const debugEncoder = debug
+
+// enable decoding debug printing
+const debugDecoder = debug
+
+// Enable extra assertions.
+const debugAsserts = debug || false
+
+// print sequence details
+const debugSequences = false
+
+// print detailed matching information
+const debugMatches = false
+
+// force encoder to use predefined tables.
+const forcePreDef = false
+
+// zstdMinMatch is the minimum zstd match length.
+const zstdMinMatch = 3
+
+// Reset the buffer offset when reaching this.
+const bufferReset = math.MaxInt32 - MaxWindowSize
+
+var (
+	// ErrReservedBlockType is returned when a reserved block type is found.
+	// Typically this indicates wrong or corrupted input.
+	ErrReservedBlockType = errors.New("invalid input: reserved block type encountered")
+
+	// ErrCompressedSizeTooBig is returned when a block is bigger than allowed.
+	// Typically this indicates wrong or corrupted input.
+	ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big")
+
+	// ErrBlockTooSmall is returned when a block is too small to be decoded.
+	// Typically returned on invalid input.
+	ErrBlockTooSmall = errors.New("block too small")
+
+	// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
+	// Typically this indicates wrong or corrupted input.
+	ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
+
+	// ErrWindowSizeExceeded is returned when a reference exceeds the valid window size.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeExceeded = errors.New("window size exceeded")
+
+	// ErrWindowSizeTooSmall is returned when no window size is specified.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small")
+
+	// ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit.
+	ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
+
+	// ErrUnknownDictionary is returned if the dictionary ID is unknown.
+	// For the time being dictionaries are not supported.
+	ErrUnknownDictionary = errors.New("unknown dictionary")
+
+	// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
+	// This is only returned if SingleSegment is specified on the frame.
+	ErrFrameSizeExceeded = errors.New("frame size exceeded")
+
+	// ErrCRCMismatch is returned if CRC mismatches.
+	ErrCRCMismatch = errors.New("CRC check failed")
+
+	// ErrDecoderClosed will be returned if the Decoder was used after
+	// Close has been called.
+	ErrDecoderClosed = errors.New("decoder used after Close")
+
+	// ErrDecoderNilInput is returned when a nil Reader was provided
+	// and an operation other than Reset/DecodeAll/Close was attempted.
+	ErrDecoderNilInput = errors.New("nil input provided as reader")
+)
+
+func println(a ...interface{}) {
+	if debug || debugDecoder || debugEncoder {
+		log.Println(a...)
+	}
+}
+
+func printf(format string, a ...interface{}) {
+	if debug || debugDecoder || debugEncoder {
+		log.Printf(format, a...)
+	}
+}
+
+// matchLenFast does matching, but will not match the last up to 7 bytes.
+func matchLenFast(a, b []byte) int {
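+	// endI is len(a) rounded down to a multiple of 8; masking with
+	// math.MaxInt32-7 (0x7ffffff8) clears the low three bits, so up to
+	// 7 trailing bytes are never compared.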
+	endI := len(a) & (math.MaxInt32 - 7)
+	for i := 0; i < endI; i += 8 {
+		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+			return i + bits.TrailingZeros64(diff)>>3
+		}
+	}
+	return endI
+}
+
+// matchLen returns the number of matching bytes between a and b.
+// a must be the shorter of the two.
+func matchLen(a, b []byte) int {
+	b = b[:len(a)]
+	for i := 0; i < len(a)-7; i += 8 {
+		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+			return i + (bits.TrailingZeros64(diff) >> 3)
+		}
+	}
+
+	checked := (len(a) >> 3) << 3
+	a = a[checked:]
+	b = b[checked:]
+	for i := range a {
+		if a[i] != b[i] {
+			return i + checked
+		}
+	}
+	return len(a) + checked
+}
+
+func load3232(b []byte, i int32) uint32 {
+	return binary.LittleEndian.Uint32(b[i:])
+}
+
+func load6432(b []byte, i int32) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+func load64(b []byte, i int) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+type byter interface {
+	Bytes() []byte
+	Len() int
+}
+
+var _ byter = &bytes.Buffer{}
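
As a sketch of how the exported error values above surface in practice (assuming the package's NewReader/DecodeAll API, which lives in other files of this vendored package):

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil reader is fine when only DecodeAll is used.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	// Not a zstd frame: decoding fails with ErrMagicMismatch.
	if _, err := dec.DecodeAll([]byte("not zstd"), nil); errors.Is(err, zstd.ErrMagicMismatch) {
		fmt.Println("input is not a zstd stream")
	}
}
```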
diff --git a/vendor/github.com/montanaflynn/stats/.gitignore b/vendor/github.com/montanaflynn/stats/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..75a2a3a3bde7b62afd0a23dcfc7a882d8eb24e05
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/.gitignore
@@ -0,0 +1,7 @@
+coverage.out
+coverage.txt
+release-notes.txt
+.directory
+.chglog
+.vscode
+.DS_Store
\ No newline at end of file
diff --git a/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/vendor/github.com/montanaflynn/stats/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..73c3b782b6cf0efbaccf0f8c4c1fb3b9a70a9940
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/CHANGELOG.md
@@ -0,0 +1,534 @@
+<a name="unreleased"></a>
+## [Unreleased]
+
+
+<a name="v0.7.1"></a>
+## [v0.7.1] - 2023-05-11
+### Add
+- Add describe functions ([#77](https://github.com/montanaflynn/stats/issues/77))
+
+### Update
+- Update .gitignore
+- Update README.md, LICENSE and DOCUMENTATION.md files
+- Update github action go workflow to run on push
+
+
+<a name="v0.7.0"></a>
+## [v0.7.0] - 2023-01-08
+### Add
+- Add geometric distribution functions ([#75](https://github.com/montanaflynn/stats/issues/75))
+- Add GitHub action go workflow
+
+### Remove
+- Remove travis CI config
+
+### Update
+- Update changelog with v0.7.0 changes
+- Update changelog with v0.7.0 changes
+- Update github action go workflow
+- Update geometric distribution tests
+
+
+<a name="v0.6.6"></a>
+## [v0.6.6] - 2021-04-26
+### Add
+- Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68))
+- Add latest versions of Go to test against
+
+### Update
+- Update changelog with v0.6.6 changes
+
+### Use
+- Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64))
+
+
+<a name="v0.6.5"></a>
+## [v0.6.5] - 2021-02-21
+### Add
+- Add Float64Data.Quartiles documentation
+- Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60))
+
+### Fix
+- Fix make release changelog command and add changelog history
+
+### Update
+- Update changelog with v0.6.5 changes
+- Update changelog with v0.6.4 changes
+- Update README.md links to CHANGELOG.md and DOCUMENTATION.md
+- Update README.md and Makefile with new release commands
+
+
+<a name="v0.6.4"></a>
+## [v0.6.4] - 2021-01-13
+### Fix
+- Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58))
+
+### Update
+- Update changelog with v0.6.4 changes
+- Update examples directory to include a README.md used for synopsis
+- Update go.mod to include go version where modules are enabled by default
+- Update changelog with v0.6.3 changes
+
+
+<a name="v0.6.3"></a>
+## [v0.6.3] - 2020-02-18
+### Add
+- Add creating and committing changelog to Makefile release directive
+- Add release-notes.txt and .chglog directory to .gitignore
+
+### Update
+- Update exported tests to use import for better example documentation
+- Update documentation using godoc2md
+- Update changelog with v0.6.2 release
+
+
+<a name="v0.6.2"></a>
+## [v0.6.2] - 2020-02-18
+### Fix
+- Fix linting errcheck warnings in go benchmarks
+
+### Update
+- Update Makefile release directive to use correct release name
+
+
+<a name="v0.6.1"></a>
+## [v0.6.1] - 2020-02-18
+### Add
+- Add StableSample function signature to readme
+
+### Fix
+- Fix linting warnings for normal distribution functions formatting and tests
+
+### Update
+- Update documentation links and rename DOC.md to DOCUMENTATION.md
+- Update README with link to pkg.go.dev reference and release section
+- Update Makefile with new changelog, docs, and release directives
+- Update DOC.md links to GitHub source code
+- Update doc.go comment and add DOC.md package reference file
+- Update changelog using git-chglog
+
+
+<a name="v0.6.0"></a>
+## [v0.6.0] - 2020-02-17
+### Add
+- Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56))
+- Add previous versions of Go to travis CI config
+- Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51))
+- Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48))
+- Add doc.go file to show description and usage on godoc.org
+- Add comments to new error and legacy error variables
+- Add ExampleRound function to tests
+- Add go.mod file for module support
+- Add Sigmoid, SoftMax and Entropy methods and tests
+- Add Entropy documentation, example and benchmarks
+- Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44))
+
+### Fix
+- Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47))
+- Fix AutoCorrelation name in comments and remove unneeded Sprintf
+
+### Improve
+- Improve documentation section with command comments
+
+### Remove
+- Remove very old versions of Go in travis CI config
+- Remove boolean comparison to get rid of gometalinter warning
+
+### Update
+- Update license dates
+- Update Distance functions signatures to use Float64Data
+- Update Sigmoid examples
+- Update error names with backward compatibility
+
+### Use
+- Use relative link to examples/main.go
+- Use a single var block for exported errors
+
+
+<a name="v0.5.0"></a>
+## [v0.5.0] - 2019-01-16
+### Add
+- Add Sigmoid and Softmax functions
+
+### Fix
+- Fix syntax highlighting and add CumulativeSum func
+
+
+<a name="v0.4.0"></a>
+## [v0.4.0] - 2019-01-14
+### Add
+- Add goreport badge and documentation section to README.md
+- Add Examples to test files
+- Add AutoCorrelation and nist tests
+- Add String method to statsErr type
+- Add Y coordinate error for ExponentialRegression
+- Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43))
+- Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40))
+- Add more tests and rename distance files
+- Add coverage and benchmarks to azure pipeline
+- Add go tests to azure pipeline
+
+### Change
+- Change travis tip alias to master
+- Change codecov to coveralls for code coverage
+
+### Fix
+- Fix a few lint warnings
+- Fix example error
+
+### Improve
+- Improve test coverage of distance functions
+
+### Only
+- Only run travis on stable and tip versions
+- Only check code coverage on tip
+
+### Remove
+- Remove azure CI pipeline
+- Remove unnecessary type conversions
+
+### Return
+- Return EmptyInputErr instead of EmptyInput
+
+### Set
+- Set up CI with Azure Pipelines
+
+
+<a name="0.3.0"></a>
+## [0.3.0] - 2017-12-02
+### Add
+- Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35))
+- Add function for computing chebyshev distance. ([#34](https://github.com/montanaflynn/stats/issues/34))
+- Add support for time.Duration
+- Add LoadRawData to docs and examples
+- Add unit test for edge case that wasn't covered
+- Add unit tests for edge cases that weren't covered
+- Add pearson alias delegating to correlation
+- Add CovariancePopulation to Float64Data
+- Add pearson product-moment correlation coefficient
+- Add population covariance
+- Add random slice benchmarks
+- Add all applicable functions as methods to Float64Data type
+- Add MIT license badge
+- Add link to examples/methods.go
+- Add Protips for usage and documentation sections
+- Add tests for rounding up
+- Add webdoc target and remove linting from test target
+- Add example usage and consolidate contributing information
+
+### Added
+- Added MedianAbsoluteDeviation
+
+### Annotation
+- Annotation spelling error
+
+### Auto
+- auto commit
+- auto commit
+
+### Calculate
+- Calculate correlation with sdev and covp
+
+### Clean
+- Clean up README.md and add info for offline docs
+
+### Consolidated
+- Consolidated all error values.
+
+### Fix
+- Fix Percentile logic
+- Fix InterQuartileRange method test
+- Fix zero percent bug and add test
+- Fix usage example output typos
+
+### Improve
+- Improve bounds checking in Percentile
+- Improve error log messaging
+
+### Imput
+- Imput -> Input
+
+### Include
+- Include alternative way to set Float64Data in example
+
+### Make
+- Make various changes to README.md
+
+### Merge
+- Merge branch 'master' of github.com:montanaflynn/stats
+- Merge master
+
+### Mode
+- Mode calculation fix and tests
+
+### Realized
+- Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice.  Benchmark joy ensued.
+
+### Refactor
+- Refactor testing of Round()
+- Refactor setting Coordinate y field using Exp in place of Pow
+- Refactor Makefile and add docs target
+
+### Remove
+- Remove deep links to types and functions
+
+### Rename
+- Rename file from types to data
+
+### Retrieve
+- Retrieve InterQuartileRange for the Float64Data.
+
+### Split
+- Split up stats.go into separate files
+
+### Support
+- Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36))
+
+### Switch
+- Switch default and check targets
+
+### Update
+- Update Readme
+- Update example methods and some text
+- Update README and include Float64Data type method examples
+
+### Pull Requests
+- Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile
+- Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test
+- Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master
+- Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds
+- Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1
+- Merge pull request [#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration
+- Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master
+- Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master
+- Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug
+- Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master
+- Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master
+- Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master
+- Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson
+- Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD
+- Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master
+- Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce
+- Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite
+
+
+<a name="0.2.0"></a>
+## [0.2.0] - 2015-10-14
+### Add
+- Add Makefile with gometalinter, testing, benchmarking and coverage report targets
+- Add comments describing functions and structs
+- Add Correlation func
+- Add Covariance func
+- Add tests for new function shortcuts
+- Add StandardDeviation function as a shortcut to StandardDeviationPopulation
+- Add Float64Data and Series types
+
+### Change
+- Change Sample to return a standard []float64 type
+
+### Fix
+- Fix broken link to Makefile
+- Fix broken link and simplify code coverage reporting command
+- Fix go vet warning about printf type placeholder
+- Fix failing codecov test coverage reporting
+- Fix link to CHANGELOG.md
+
+### Fixed
+- Fixed typographical error, changed accomdate to accommodate in README.
+
+### Include
+- Include Variance and StandardDeviation shortcuts
+
+### Pass
+- Pass gometalinter
+
+### Refactor
+- Refactor Variance function to be the same as population variance
+
+### Release
+- Release version 0.2.0
+
+### Remove
+- Remove unneeded do packages and update cover URL
+- Remove sudo from pip install
+
+### Reorder
+- Reorder functions and sections
+
+### Revert
+- Revert to legacy containers to preserve go1.1 testing
+
+### Switch
+- Switch from legacy to container-based CI infrastructure
+
+### Update
+- Update contributing instructions and mention Makefile
+
+### Pull Requests
+- Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate
+
+
+<a name="0.1.0"></a>
+## [0.1.0] - 2015-08-19
+### Add
+- Add CONTRIBUTING.md
+
+### Rename
+- Rename functions while preserving backwards compatibility
+
+
+<a name="0.0.9"></a>
+## 0.0.9 - 2015-08-18
+### Add
+- Add HarmonicMean func
+- Add GeometricMean func
+- Add .gitignore to avoid committing test coverage report
+- Add Outliers struct and QuantileOutliers func
+- Add Interquartile Range, Midhinge and Trimean examples
+- Add Trimean
+- Add Midhinge
+- Add Inter Quartile Range
+- Add a unit test to check for an empty slice error
+- Add Quantiles struct and Quantile func
+- Add more tests and fix a typo
+- Add Golang 1.5 to build tests
+- Add a standard MIT license file
+- Add basic benchmarking
+- Add regression models
+- Add codecov token
+- Add codecov
+- Add check for slices with a single item
+- Add coverage tests
+- Add back previous Go versions to Travis CI
+- Add Travis CI
+- Add GoDoc badge
+- Add Percentile and Float64ToInt functions
+- Add another rounding test for whole numbers
+- Add build status badge
+- Add code coverage badge
+- Add test for NaN, achieving 100% code coverage
+- Add round function
+- Add standard deviation function
+- Add sum function
+
+### Add
+- add tests for sample
+- add sample
+
+### Added
+- Added sample and population variance and deviation functions
+- Added README
+
+### Adjust
+- Adjust API ordering
+
+### Avoid
+- Avoid unintended consequence of using sort
+
+### Better
+- Better performing min/max
+- Better description
+
+### Change
+- Change package path to potentially fix a bug in earlier versions of Go
+
+### Clean
+- Clean up README and add some more information
+- Clean up test error
+
+### Consistent
+- Consistent empty slice error messages
+- Consistent var naming
+- Consistent func declaration
+
+### Convert
+- Convert ints to floats
+
+### Duplicate
+- Duplicate packages for all versions
+
+### Export
+- Export Coordinate struct fields
+
+### First
+- First commit
+
+### Fix
+- Fix copy pasta mistake testing the wrong function
+- Fix error message
+- Fix usage output and edit API doc section
+- Fix testing edgecase where map was in wrong order
+- Fix usage example
+- Fix usage examples
+
+### Include
+- Include the Nearest Rank method of calculating percentiles
+
+### More
+- More commenting
+
+### Move
+- Move GoDoc link to top
+
+### Redirect
+- Redirect kills newer versions of Go
+
+### Refactor
+- Refactor code and error checking
+
+### Remove
+- Remove unnecessary typecasting in sum func
+- Remove cover since it doesn't work for later versions of go
+- Remove golint and gocoveralls
+
+### Rename
+- Rename StandardDev to StdDev
+- Rename StandardDev to StdDev
+
+### Return
+- Return errors for all functions
+
+### Run
+- Run go fmt to clean up formatting
+
+### Simplify
+- Simplify min/max function
+
+### Start
+- Start with minimal tests
+
+### Switch
+- Switch wercker to travis and update todos
+
+### Table
+- table testing style
+
+### Update
+- Update README and move the example main.go into its own file
+- Update TODO list
+- Update README
+- Update usage examples and todos
+
+### Use
+- Use codecov the recommended way
+- Use correct string formatting types
+
+### Pull Requests
+- Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample
+
+
+[Unreleased]: https://github.com/montanaflynn/stats/compare/v0.7.1...HEAD
+[v0.7.1]: https://github.com/montanaflynn/stats/compare/v0.7.0...v0.7.1
+[v0.7.0]: https://github.com/montanaflynn/stats/compare/v0.6.6...v0.7.0
+[v0.6.6]: https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6
+[v0.6.5]: https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5
+[v0.6.4]: https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4
+[v0.6.3]: https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3
+[v0.6.2]: https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2
+[v0.6.1]: https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1
+[v0.6.0]: https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0
+[v0.5.0]: https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0
+[v0.4.0]: https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0
+[0.3.0]: https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0
+[0.2.0]: https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0
+[0.1.0]: https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0
diff --git a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md
new file mode 100644
index 0000000000000000000000000000000000000000..978df2ffc072db54f42be65d904cdc99bf6a14a8
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md
@@ -0,0 +1,1271 @@
+
+
+# stats
+`import "github.com/montanaflynn/stats"`
+
+* [Overview](#pkg-overview)
+* [Index](#pkg-index)
+* [Examples](#pkg-examples)
+* [Subdirectories](#pkg-subdirectories)
+
+## <a name="pkg-overview">Overview</a>
+Package stats is a well tested and comprehensive
+statistics library package with no dependencies.
+
+Example Usage:
+
+
+	// start with some source data to use
+	data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
+	
+	// you could also use different types like this
+	// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
+	// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
+	// etc...
+	
+	median, _ := stats.Median(data)
+	fmt.Println(median) // 3.65
+	
+	roundedMedian, _ := stats.Round(median, 0)
+	fmt.Println(roundedMedian) // 4
+
+MIT License Copyright (c) 2014-2020 Montana Flynn (<a href="https://montanaflynn.com">https://montanaflynn.com</a>)
+
+
+
+
+## <a name="pkg-index">Index</a>
+* [Variables](#pkg-variables)
+* [func AutoCorrelation(data Float64Data, lags int) (float64, error)](#AutoCorrelation)
+* [func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ChebyshevDistance)
+* [func Correlation(data1, data2 Float64Data) (float64, error)](#Correlation)
+* [func Covariance(data1, data2 Float64Data) (float64, error)](#Covariance)
+* [func CovariancePopulation(data1, data2 Float64Data) (float64, error)](#CovariancePopulation)
+* [func CumulativeSum(input Float64Data) ([]float64, error)](#CumulativeSum)
+* [func Entropy(input Float64Data) (float64, error)](#Entropy)
+* [func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#EuclideanDistance)
+* [func ExpGeom(p float64) (exp float64, err error)](#ExpGeom)
+* [func GeometricMean(input Float64Data) (float64, error)](#GeometricMean)
+* [func HarmonicMean(input Float64Data) (float64, error)](#HarmonicMean)
+* [func InterQuartileRange(input Float64Data) (float64, error)](#InterQuartileRange)
+* [func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ManhattanDistance)
+* [func Max(input Float64Data) (max float64, err error)](#Max)
+* [func Mean(input Float64Data) (float64, error)](#Mean)
+* [func Median(input Float64Data) (median float64, err error)](#Median)
+* [func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviation)
+* [func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviationPopulation)
+* [func Midhinge(input Float64Data) (float64, error)](#Midhinge)
+* [func Min(input Float64Data) (min float64, err error)](#Min)
+* [func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)](#MinkowskiDistance)
+* [func Mode(input Float64Data) (mode []float64, err error)](#Mode)
+* [func Ncr(n, r int) int](#Ncr)
+* [func NormBoxMullerRvs(loc float64, scale float64, size int) []float64](#NormBoxMullerRvs)
+* [func NormCdf(x float64, loc float64, scale float64) float64](#NormCdf)
+* [func NormEntropy(loc float64, scale float64) float64](#NormEntropy)
+* [func NormFit(data []float64) [2]float64](#NormFit)
+* [func NormInterval(alpha float64, loc float64, scale float64) [2]float64](#NormInterval)
+* [func NormIsf(p float64, loc float64, scale float64) (x float64)](#NormIsf)
+* [func NormLogCdf(x float64, loc float64, scale float64) float64](#NormLogCdf)
+* [func NormLogPdf(x float64, loc float64, scale float64) float64](#NormLogPdf)
+* [func NormLogSf(x float64, loc float64, scale float64) float64](#NormLogSf)
+* [func NormMean(loc float64, scale float64) float64](#NormMean)
+* [func NormMedian(loc float64, scale float64) float64](#NormMedian)
+* [func NormMoment(n int, loc float64, scale float64) float64](#NormMoment)
+* [func NormPdf(x float64, loc float64, scale float64) float64](#NormPdf)
+* [func NormPpf(p float64, loc float64, scale float64) (x float64)](#NormPpf)
+* [func NormPpfRvs(loc float64, scale float64, size int) []float64](#NormPpfRvs)
+* [func NormSf(x float64, loc float64, scale float64) float64](#NormSf)
+* [func NormStats(loc float64, scale float64, moments string) []float64](#NormStats)
+* [func NormStd(loc float64, scale float64) float64](#NormStd)
+* [func NormVar(loc float64, scale float64) float64](#NormVar)
+* [func Pearson(data1, data2 Float64Data) (float64, error)](#Pearson)
+* [func Percentile(input Float64Data, percent float64) (percentile float64, err error)](#Percentile)
+* [func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)](#PercentileNearestRank)
+* [func PopulationVariance(input Float64Data) (pvar float64, err error)](#PopulationVariance)
+* [func ProbGeom(a int, b int, p float64) (prob float64, err error)](#ProbGeom)
+* [func Round(input float64, places int) (rounded float64, err error)](#Round)
+* [func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)](#Sample)
+* [func SampleVariance(input Float64Data) (svar float64, err error)](#SampleVariance)
+* [func Sigmoid(input Float64Data) ([]float64, error)](#Sigmoid)
+* [func SoftMax(input Float64Data) ([]float64, error)](#SoftMax)
+* [func StableSample(input Float64Data, takenum int) ([]float64, error)](#StableSample)
+* [func StandardDeviation(input Float64Data) (sdev float64, err error)](#StandardDeviation)
+* [func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)](#StandardDeviationPopulation)
+* [func StandardDeviationSample(input Float64Data) (sdev float64, err error)](#StandardDeviationSample)
+* [func StdDevP(input Float64Data) (sdev float64, err error)](#StdDevP)
+* [func StdDevS(input Float64Data) (sdev float64, err error)](#StdDevS)
+* [func Sum(input Float64Data) (sum float64, err error)](#Sum)
+* [func Trimean(input Float64Data) (float64, error)](#Trimean)
+* [func VarGeom(p float64) (exp float64, err error)](#VarGeom)
+* [func VarP(input Float64Data) (sdev float64, err error)](#VarP)
+* [func VarS(input Float64Data) (sdev float64, err error)](#VarS)
+* [func Variance(input Float64Data) (sdev float64, err error)](#Variance)
+* [type Coordinate](#Coordinate)
+  * [func ExpReg(s []Coordinate) (regressions []Coordinate, err error)](#ExpReg)
+  * [func LinReg(s []Coordinate) (regressions []Coordinate, err error)](#LinReg)
+  * [func LogReg(s []Coordinate) (regressions []Coordinate, err error)](#LogReg)
+* [type Float64Data](#Float64Data)
+  * [func LoadRawData(raw interface{}) (f Float64Data)](#LoadRawData)
+  * [func (f Float64Data) AutoCorrelation(lags int) (float64, error)](#Float64Data.AutoCorrelation)
+  * [func (f Float64Data) Correlation(d Float64Data) (float64, error)](#Float64Data.Correlation)
+  * [func (f Float64Data) Covariance(d Float64Data) (float64, error)](#Float64Data.Covariance)
+  * [func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)](#Float64Data.CovariancePopulation)
+  * [func (f Float64Data) CumulativeSum() ([]float64, error)](#Float64Data.CumulativeSum)
+  * [func (f Float64Data) Entropy() (float64, error)](#Float64Data.Entropy)
+  * [func (f Float64Data) GeometricMean() (float64, error)](#Float64Data.GeometricMean)
+  * [func (f Float64Data) Get(i int) float64](#Float64Data.Get)
+  * [func (f Float64Data) HarmonicMean() (float64, error)](#Float64Data.HarmonicMean)
+  * [func (f Float64Data) InterQuartileRange() (float64, error)](#Float64Data.InterQuartileRange)
+  * [func (f Float64Data) Len() int](#Float64Data.Len)
+  * [func (f Float64Data) Less(i, j int) bool](#Float64Data.Less)
+  * [func (f Float64Data) Max() (float64, error)](#Float64Data.Max)
+  * [func (f Float64Data) Mean() (float64, error)](#Float64Data.Mean)
+  * [func (f Float64Data) Median() (float64, error)](#Float64Data.Median)
+  * [func (f Float64Data) MedianAbsoluteDeviation() (float64, error)](#Float64Data.MedianAbsoluteDeviation)
+  * [func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)](#Float64Data.MedianAbsoluteDeviationPopulation)
+  * [func (f Float64Data) Midhinge(d Float64Data) (float64, error)](#Float64Data.Midhinge)
+  * [func (f Float64Data) Min() (float64, error)](#Float64Data.Min)
+  * [func (f Float64Data) Mode() ([]float64, error)](#Float64Data.Mode)
+  * [func (f Float64Data) Pearson(d Float64Data) (float64, error)](#Float64Data.Pearson)
+  * [func (f Float64Data) Percentile(p float64) (float64, error)](#Float64Data.Percentile)
+  * [func (f Float64Data) PercentileNearestRank(p float64) (float64, error)](#Float64Data.PercentileNearestRank)
+  * [func (f Float64Data) PopulationVariance() (float64, error)](#Float64Data.PopulationVariance)
+  * [func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)](#Float64Data.Quartile)
+  * [func (f Float64Data) QuartileOutliers() (Outliers, error)](#Float64Data.QuartileOutliers)
+  * [func (f Float64Data) Quartiles() (Quartiles, error)](#Float64Data.Quartiles)
+  * [func (f Float64Data) Sample(n int, r bool) ([]float64, error)](#Float64Data.Sample)
+  * [func (f Float64Data) SampleVariance() (float64, error)](#Float64Data.SampleVariance)
+  * [func (f Float64Data) Sigmoid() ([]float64, error)](#Float64Data.Sigmoid)
+  * [func (f Float64Data) SoftMax() ([]float64, error)](#Float64Data.SoftMax)
+  * [func (f Float64Data) StandardDeviation() (float64, error)](#Float64Data.StandardDeviation)
+  * [func (f Float64Data) StandardDeviationPopulation() (float64, error)](#Float64Data.StandardDeviationPopulation)
+  * [func (f Float64Data) StandardDeviationSample() (float64, error)](#Float64Data.StandardDeviationSample)
+  * [func (f Float64Data) Sum() (float64, error)](#Float64Data.Sum)
+  * [func (f Float64Data) Swap(i, j int)](#Float64Data.Swap)
+  * [func (f Float64Data) Trimean(d Float64Data) (float64, error)](#Float64Data.Trimean)
+  * [func (f Float64Data) Variance() (float64, error)](#Float64Data.Variance)
+* [type Outliers](#Outliers)
+  * [func QuartileOutliers(input Float64Data) (Outliers, error)](#QuartileOutliers)
+* [type Quartiles](#Quartiles)
+  * [func Quartile(input Float64Data) (Quartiles, error)](#Quartile)
+* [type Series](#Series)
+  * [func ExponentialRegression(s Series) (regressions Series, err error)](#ExponentialRegression)
+  * [func LinearRegression(s Series) (regressions Series, err error)](#LinearRegression)
+  * [func LogarithmicRegression(s Series) (regressions Series, err error)](#LogarithmicRegression)
+
+#### <a name="pkg-examples">Examples</a>
+* [AutoCorrelation](#example_AutoCorrelation)
+* [ChebyshevDistance](#example_ChebyshevDistance)
+* [Correlation](#example_Correlation)
+* [CumulativeSum](#example_CumulativeSum)
+* [Entropy](#example_Entropy)
+* [ExpGeom](#example_ExpGeom)
+* [LinearRegression](#example_LinearRegression)
+* [LoadRawData](#example_LoadRawData)
+* [Max](#example_Max)
+* [Median](#example_Median)
+* [Min](#example_Min)
+* [ProbGeom](#example_ProbGeom)
+* [Round](#example_Round)
+* [Sigmoid](#example_Sigmoid)
+* [SoftMax](#example_SoftMax)
+* [Sum](#example_Sum)
+* [VarGeom](#example_VarGeom)
+
+#### <a name="pkg-files">Package files</a>
+[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) [data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [geometric_distribution.go](/src/github.com/montanaflynn/stats/geometric_distribution.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) [mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go) 
+
+
+
+## <a name="pkg-variables">Variables</a>
+``` go
+var (
+    // ErrEmptyInput Input must not be empty
+    ErrEmptyInput = statsError{"Input must not be empty."}
+    // ErrNaN Not a number
+    ErrNaN = statsError{"Not a number."}
+    // ErrNegative Must not contain negative values
+    ErrNegative = statsError{"Must not contain negative values."}
+    // ErrZero Must not contain zero values
+    ErrZero = statsError{"Must not contain zero values."}
+    // ErrBounds Input is outside of range
+    ErrBounds = statsError{"Input is outside of range."}
+    // ErrSize Must be the same length
+    ErrSize = statsError{"Must be the same length."}
+    // ErrInfValue Value is infinite
+    ErrInfValue = statsError{"Value is infinite."}
+    // ErrYCoord Y Value must be greater than zero
+    ErrYCoord = statsError{"Y Value must be greater than zero."}
+)
+```
+These are the package-wide error values.
+All error identification should use these values.
+<a href="https://github.com/golang/go/wiki/Errors#naming">https://github.com/golang/go/wiki/Errors#naming</a>
+
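+For example, a caller can compare a returned error directly against these
+values (a minimal sketch, assuming the package is imported as stats;
+Median returns ErrEmptyInput when given an empty dataset):
+
+``` go
+if _, err := stats.Median(stats.Float64Data{}); err == stats.ErrEmptyInput {
+    fmt.Println("no data provided")
+}
+```
+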
+``` go
+var (
+    EmptyInputErr = ErrEmptyInput
+    NaNErr        = ErrNaN
+    NegativeErr   = ErrNegative
+    ZeroErr       = ErrZero
+    BoundsErr     = ErrBounds
+    SizeErr       = ErrSize
+    InfValue      = ErrInfValue
+    YCoordErr     = ErrYCoord
+    EmptyInput    = ErrEmptyInput
+)
+```
+Legacy error names that didn't start with Err
+
+
+
+## <a name="AutoCorrelation">func</a> [AutoCorrelation](/correlation.go?s=853:918#L38)
+``` go
+func AutoCorrelation(data Float64Data, lags int) (float64, error)
+```
+AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
+
+
+
+## <a name="ChebyshevDistance">func</a> [ChebyshevDistance](/distances.go?s=368:456#L20)
+``` go
+func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
+```
+ChebyshevDistance computes the Chebyshev distance between two data sets
+
+
+
+## <a name="Correlation">func</a> [Correlation](/correlation.go?s=112:171#L8)
+``` go
+func Correlation(data1, data2 Float64Data) (float64, error)
+```
+Correlation describes the degree of relationship between two sets of data
+
+
+
+## <a name="Covariance">func</a> [Covariance](/variance.go?s=1284:1342#L53)
+``` go
+func Covariance(data1, data2 Float64Data) (float64, error)
+```
+Covariance is a measure of how much two sets of data change
+
+
+
+## <a name="CovariancePopulation">func</a> [CovariancePopulation](/variance.go?s=1864:1932#L81)
+``` go
+func CovariancePopulation(data1, data2 Float64Data) (float64, error)
+```
+CovariancePopulation computes covariance for entire population between two variables.
+
+
+
+## <a name="CumulativeSum">func</a> [CumulativeSum](/cumulative_sum.go?s=81:137#L4)
+``` go
+func CumulativeSum(input Float64Data) ([]float64, error)
+```
+CumulativeSum calculates the cumulative sum of the input slice
+
+
+
+## <a name="Entropy">func</a> [Entropy](/entropy.go?s=77:125#L6)
+``` go
+func Entropy(input Float64Data) (float64, error)
+```
+Entropy provides calculation of the entropy
+
+
+
+## <a name="EuclideanDistance">func</a> [EuclideanDistance](/distances.go?s=836:924#L36)
+``` go
+func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
+```
+EuclideanDistance computes the Euclidean distance between two data sets
+
+
+
+## <a name="ExpGeom">func</a> [ExpGeom](/geometric_distribution.go?s=652:700#L27)
+``` go
+func ExpGeom(p float64) (exp float64, err error)
+```
+ExpGeom generates the expectation or average number of trials
+for a geometric random variable with parameter p
+
+
+
+## <a name="GeometricMean">func</a> [GeometricMean](/mean.go?s=319:373#L18)
+``` go
+func GeometricMean(input Float64Data) (float64, error)
+```
+GeometricMean gets the geometric mean for a slice of numbers
+
+
+
+## <a name="HarmonicMean">func</a> [HarmonicMean](/mean.go?s=717:770#L40)
+``` go
+func HarmonicMean(input Float64Data) (float64, error)
+```
+HarmonicMean gets the harmonic mean for a slice of numbers
+
+
+
+## <a name="InterQuartileRange">func</a> [InterQuartileRange](/quartile.go?s=821:880#L45)
+``` go
+func InterQuartileRange(input Float64Data) (float64, error)
+```
+InterQuartileRange finds the range between Q1 and Q3
+
+
+
+## <a name="ManhattanDistance">func</a> [ManhattanDistance](/distances.go?s=1277:1365#L50)
+``` go
+func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
+```
+ManhattanDistance computes the Manhattan distance between two data sets
+
+
+
+## <a name="Max">func</a> [Max](/max.go?s=78:130#L8)
+``` go
+func Max(input Float64Data) (max float64, err error)
+```
+Max finds the highest number in a slice
+
+
+
+## <a name="Mean">func</a> [Mean](/mean.go?s=77:122#L6)
+``` go
+func Mean(input Float64Data) (float64, error)
+```
+Mean gets the average of a slice of numbers
+
+
+
+## <a name="Median">func</a> [Median](/median.go?s=85:143#L6)
+``` go
+func Median(input Float64Data) (median float64, err error)
+```
+Median gets the median number in a slice of numbers
+
+
+
+## <a name="MedianAbsoluteDeviation">func</a> [MedianAbsoluteDeviation](/deviation.go?s=125:197#L6)
+``` go
+func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)
+```
+MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
+
+
+
+## <a name="MedianAbsoluteDeviationPopulation">func</a> [MedianAbsoluteDeviationPopulation](/deviation.go?s=360:442#L11)
+``` go
+func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)
+```
+MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
+
+
+
+## <a name="Midhinge">func</a> [Midhinge](/quartile.go?s=1075:1124#L55)
+``` go
+func Midhinge(input Float64Data) (float64, error)
+```
+Midhinge finds the average of the first and third quartiles
+
+
+
+## <a name="Min">func</a> [Min](/min.go?s=78:130#L6)
+``` go
+func Min(input Float64Data) (min float64, err error)
+```
+Min finds the lowest number in a set of data
+
+
+
+## <a name="MinkowskiDistance">func</a> [MinkowskiDistance](/distances.go?s=2152:2256#L75)
+``` go
+func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)
+```
+MinkowskiDistance computes the Minkowski distance between two data sets
+
+Arguments:
+
+
+	dataPointX: First set of data points
+	dataPointY: Second set of data points. Length of both data
+	            sets must be equal.
+	lambda:     aka p or city blocks; with lambda = 1 the
+	            returned distance is the Manhattan distance,
+	            with lambda = 2 it is the Euclidean distance,
+	            and as lambda approaches infinity it approaches
+	            the Chebyshev distance.
+
+Return:
+
+
+	Distance or error
+
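+A small illustrative call (with lambda = 2 this reduces to the Euclidean
+distance; here sqrt(1 + 4 + 4) = 3):
+
+``` go
+d, _ := stats.MinkowskiDistance(
+    stats.Float64Data{2, 3, 4},
+    stats.Float64Data{1, 5, 6},
+    2,
+)
+fmt.Println(d) // 3
+```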
+
+
+## <a name="Mode">func</a> [Mode](/mode.go?s=85:141#L4)
+``` go
+func Mode(input Float64Data) (mode []float64, err error)
+```
+Mode gets the mode [most frequent value(s)] of a slice of float64s
+
+
+
+## <a name="Ncr">func</a> [Ncr](/norm.go?s=7384:7406#L239)
+``` go
+func Ncr(n, r int) int
+```
+Ncr is an N choose R algorithm, based on Aaron Cannon's algorithm.
+
+
+
+## <a name="NormBoxMullerRvs">func</a> [NormBoxMullerRvs](/norm.go?s=667:736#L23)
+``` go
+func NormBoxMullerRvs(loc float64, scale float64, size int) []float64
+```
+NormBoxMullerRvs generates random variates using the Box–Muller transform.
+For more information please visit: <a href="http://mathworld.wolfram.com/Box-MullerTransformation.html">http://mathworld.wolfram.com/Box-MullerTransformation.html</a>
+
+
+
+## <a name="NormCdf">func</a> [NormCdf](/norm.go?s=1826:1885#L52)
+``` go
+func NormCdf(x float64, loc float64, scale float64) float64
+```
+NormCdf is the cumulative distribution function.
+
+
+
+## <a name="NormEntropy">func</a> [NormEntropy](/norm.go?s=5773:5825#L180)
+``` go
+func NormEntropy(loc float64, scale float64) float64
+```
+NormEntropy is the differential entropy of the RV.
+
+
+
+## <a name="NormFit">func</a> [NormFit](/norm.go?s=6058:6097#L187)
+``` go
+func NormFit(data []float64) [2]float64
+```
+NormFit returns the maximum likelihood estimators for the Normal Distribution.
+Takes array of float64 values.
+Returns array of Mean followed by Standard Deviation.
+
+
+
+## <a name="NormInterval">func</a> [NormInterval](/norm.go?s=6976:7047#L221)
+``` go
+func NormInterval(alpha float64, loc float64, scale float64) [2]float64
+```
+NormInterval finds endpoints of the range that contains alpha percent of the distribution.
+
+
+
+## <a name="NormIsf">func</a> [NormIsf](/norm.go?s=4330:4393#L137)
+``` go
+func NormIsf(p float64, loc float64, scale float64) (x float64)
+```
+NormIsf is the inverse survival function (inverse of sf).
+
+
+
+## <a name="NormLogCdf">func</a> [NormLogCdf](/norm.go?s=2016:2078#L57)
+``` go
+func NormLogCdf(x float64, loc float64, scale float64) float64
+```
+NormLogCdf is the log of the cumulative distribution function.
+
+
+
+## <a name="NormLogPdf">func</a> [NormLogPdf](/norm.go?s=1590:1652#L47)
+``` go
+func NormLogPdf(x float64, loc float64, scale float64) float64
+```
+NormLogPdf is the log of the probability density function.
+
+
+
+## <a name="NormLogSf">func</a> [NormLogSf](/norm.go?s=2423:2484#L67)
+``` go
+func NormLogSf(x float64, loc float64, scale float64) float64
+```
+NormLogSf is the log of the survival function.
+
+
+
+## <a name="NormMean">func</a> [NormMean](/norm.go?s=6560:6609#L206)
+``` go
+func NormMean(loc float64, scale float64) float64
+```
+NormMean is the mean/expected value of the distribution.
+
+
+
+## <a name="NormMedian">func</a> [NormMedian](/norm.go?s=6431:6482#L201)
+``` go
+func NormMedian(loc float64, scale float64) float64
+```
+NormMedian is the median of the distribution.
+
+
+
+## <a name="NormMoment">func</a> [NormMoment](/norm.go?s=4694:4752#L146)
+``` go
+func NormMoment(n int, loc float64, scale float64) float64
+```
+NormMoment approximates the non-central (raw) moment of order n.
+For more information please visit: <a href="https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution">https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution</a>
+
+
+
+## <a name="NormPdf">func</a> [NormPdf](/norm.go?s=1357:1416#L42)
+``` go
+func NormPdf(x float64, loc float64, scale float64) float64
+```
+NormPdf is the probability density function.
+
+
+
+## <a name="NormPpf">func</a> [NormPpf](/norm.go?s=2854:2917#L75)
+``` go
+func NormPpf(p float64, loc float64, scale float64) (x float64)
+```
+NormPpf is the point percentile function.
+This is based on Peter John Acklam's inverse normal CDF.
+algorithm: <a href="http://home.online.no/~pjacklam/notes/invnorm/">http://home.online.no/~pjacklam/notes/invnorm/</a> (no longer visible).
+For more information please visit: <a href="https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/">https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/</a>
+
+
+
+## <a name="NormPpfRvs">func</a> [NormPpfRvs](/norm.go?s=247:310#L12)
+``` go
+func NormPpfRvs(loc float64, scale float64, size int) []float64
+```
+NormPpfRvs generates random variates using the Point Percentile Function.
+For more information please visit: <a href="https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/">https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/</a>
+
+
+
+## <a name="NormSf">func</a> [NormSf](/norm.go?s=2250:2308#L62)
+``` go
+func NormSf(x float64, loc float64, scale float64) float64
+```
+NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate).
+
+
+
+## <a name="NormStats">func</a> [NormStats](/norm.go?s=5277:5345#L162)
+``` go
+func NormStats(loc float64, scale float64, moments string) []float64
+```
+NormStats returns the mean, variance, skew, and/or kurtosis.
+Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’).
+Takes string containing any of 'mvsk'.
+Returns array of m v s k in that order.
+
+
+
+## <a name="NormStd">func</a> [NormStd](/norm.go?s=6814:6862#L216)
+``` go
+func NormStd(loc float64, scale float64) float64
+```
+NormStd is the standard deviation of the distribution.
+
+
+
+## <a name="NormVar">func</a> [NormVar](/norm.go?s=6675:6723#L211)
+``` go
+func NormVar(loc float64, scale float64) float64
+```
+NormVar is the variance of the distribution.
+
+
+
+## <a name="Pearson">func</a> [Pearson](/correlation.go?s=655:710#L33)
+``` go
+func Pearson(data1, data2 Float64Data) (float64, error)
+```
+Pearson calculates the Pearson product-moment correlation coefficient between two variables
+
+
+
+## <a name="Percentile">func</a> [Percentile](/percentile.go?s=98:181#L8)
+``` go
+func Percentile(input Float64Data, percent float64) (percentile float64, err error)
+```
+Percentile finds the relative standing in a slice of floats
+
+
+
+## <a name="PercentileNearestRank">func</a> [PercentileNearestRank](/percentile.go?s=1079:1173#L54)
+``` go
+func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)
+```
+PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method
+
+
+
+## <a name="PopulationVariance">func</a> [PopulationVariance](/variance.go?s=828:896#L31)
+``` go
+func PopulationVariance(input Float64Data) (pvar float64, err error)
+```
+PopulationVariance finds the amount of variance within a population
+
+
+
+## <a name="ProbGeom">func</a> [ProbGeom](/geometric_distribution.go?s=258:322#L10)
+``` go
+func ProbGeom(a int, b int, p float64) (prob float64, err error)
+```
+ProbGeom generates the probability for a geometric random variable
+with parameter p to achieve success in the interval of [a, b] trials
+See <a href="https://en.wikipedia.org/wiki/Geometric_distribution">https://en.wikipedia.org/wiki/Geometric_distribution</a> for more information
+
+
+
+## <a name="Round">func</a> [Round](/round.go?s=88:154#L6)
+``` go
+func Round(input float64, places int) (rounded float64, err error)
+```
+Round a float to a specific decimal place or precision
+
+
+
+## <a name="Sample">func</a> [Sample](/sample.go?s=112:192#L9)
+``` go
+func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)
+```
+Sample returns sample from input with replacement or without
+
+
+
+## <a name="SampleVariance">func</a> [SampleVariance](/variance.go?s=1058:1122#L42)
+``` go
+func SampleVariance(input Float64Data) (svar float64, err error)
+```
+SampleVariance finds the amount of variance within a sample
+
+
+
+## <a name="Sigmoid">func</a> [Sigmoid](/sigmoid.go?s=228:278#L9)
+``` go
+func Sigmoid(input Float64Data) ([]float64, error)
+```
+Sigmoid returns the input values in the range of 0 to 1
+along the sigmoid or s-shaped curve, commonly used in
+machine learning while training neural networks as an
+activation function.
+
+
+
+## <a name="SoftMax">func</a> [SoftMax](/softmax.go?s=206:256#L8)
+``` go
+func SoftMax(input Float64Data) ([]float64, error)
+```
+SoftMax returns the input values in the range of 0 to 1
+with the sum of all the probabilities equal to one. It
+is commonly used in machine learning neural networks.
+
+
+
+## <a name="StableSample">func</a> [StableSample](/sample.go?s=974:1042#L50)
+``` go
+func StableSample(input Float64Data, takenum int) ([]float64, error)
+```
+StableSample, like stable sort, returns samples from the input while keeping the order of the original data.
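+
+For example (the picks are random, so the values shown are only illustrative):
+``` go
+sub, _ := stats.StableSample(stats.Float64Data{10, 20, 30, 40, 50}, 3)
+// e.g. [10 30 40]: the chosen values keep their original relative order
+```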
+
+
+
+## <a name="StandardDeviation">func</a> [StandardDeviation](/deviation.go?s=695:762#L27)
+``` go
+func StandardDeviation(input Float64Data) (sdev float64, err error)
+```
+StandardDeviation finds the amount of variation in the dataset
+
+
+
+## <a name="StandardDeviationPopulation">func</a> [StandardDeviationPopulation](/deviation.go?s=892:969#L32)
+``` go
+func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)
+```
+StandardDeviationPopulation finds the amount of variation from the population
+
+
+
+## <a name="StandardDeviationSample">func</a> [StandardDeviationSample](/deviation.go?s=1250:1323#L46)
+``` go
+func StandardDeviationSample(input Float64Data) (sdev float64, err error)
+```
+StandardDeviationSample finds the amount of variation from a sample
+
+
+
+## <a name="StdDevP">func</a> [StdDevP](/legacy.go?s=339:396#L14)
+``` go
+func StdDevP(input Float64Data) (sdev float64, err error)
+```
+StdDevP is a shortcut to StandardDeviationPopulation
+
+
+
+## <a name="StdDevS">func</a> [StdDevS](/legacy.go?s=497:554#L19)
+``` go
+func StdDevS(input Float64Data) (sdev float64, err error)
+```
+StdDevS is a shortcut to StandardDeviationSample
+
+
+
+## <a name="Sum">func</a> [Sum](/sum.go?s=78:130#L6)
+``` go
+func Sum(input Float64Data) (sum float64, err error)
+```
+Sum adds all the numbers of a slice together
+
+
+
+## <a name="Trimean">func</a> [Trimean](/quartile.go?s=1320:1368#L65)
+``` go
+func Trimean(input Float64Data) (float64, error)
+```
+Trimean finds the average of the median and the midhinge
+
+
+
+## <a name="VarGeom">func</a> [VarGeom](/geometric_distribution.go?s=885:933#L37)
+``` go
+func VarGeom(p float64) (exp float64, err error)
+```
+VarGeom generates the variance for a
+geometric random variable with parameter p
+
+
+
+## <a name="VarP">func</a> [VarP](/legacy.go?s=59:113#L4)
+``` go
+func VarP(input Float64Data) (sdev float64, err error)
+```
+VarP is a shortcut to PopulationVariance
+
+
+
+## <a name="VarS">func</a> [VarS](/legacy.go?s=193:247#L9)
+``` go
+func VarS(input Float64Data) (sdev float64, err error)
+```
+VarS is a shortcut to SampleVariance
+
+
+
+## <a name="Variance">func</a> [Variance](/variance.go?s=659:717#L26)
+``` go
+func Variance(input Float64Data) (sdev float64, err error)
+```
+Variance finds the amount of variation in the dataset
+
+
+
+
+## <a name="Coordinate">type</a> [Coordinate](/regression.go?s=143:183#L9)
+``` go
+type Coordinate struct {
+    X, Y float64
+}
+
+```
+Coordinate holds the data in a series
+
+
+
+
+
+
+
+### <a name="ExpReg">func</a> [ExpReg](/legacy.go?s=791:856#L29)
+``` go
+func ExpReg(s []Coordinate) (regressions []Coordinate, err error)
+```
+ExpReg is a shortcut to ExponentialRegression
+
+
+### <a name="LinReg">func</a> [LinReg](/legacy.go?s=643:708#L24)
+``` go
+func LinReg(s []Coordinate) (regressions []Coordinate, err error)
+```
+LinReg is a shortcut to LinearRegression
+
+
+### <a name="LogReg">func</a> [LogReg](/legacy.go?s=944:1009#L34)
+``` go
+func LogReg(s []Coordinate) (regressions []Coordinate, err error)
+```
+LogReg is a shortcut to LogarithmicRegression
+
+
+
+
+
+## <a name="Float64Data">type</a> [Float64Data](/data.go?s=80:106#L4)
+``` go
+type Float64Data []float64
+```
+Float64Data is a named type for []float64 with helper methods
+
+
+
+
+
+
+
+### <a name="LoadRawData">func</a> [LoadRawData](/load.go?s=145:194#L12)
+``` go
+func LoadRawData(raw interface{}) (f Float64Data)
+```
+LoadRawData parses and converts a slice of mixed data types to floats
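+
+A small sketch mirroring the README examples; numeric strings are parsed and unparseable values are dropped (see load.go below):
+``` go
+f := stats.LoadRawData([]interface{}{1.1, "2", 3}) // Float64Data{1.1, 2, 3}
+g := stats.LoadRawData("1 2 3 4 5")                // whitespace-separated strings also work
+```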
+
+
+
+
+
+### <a name="Float64Data.AutoCorrelation">func</a> (Float64Data) [AutoCorrelation](/data.go?s=3257:3320#L91)
+``` go
+func (f Float64Data) AutoCorrelation(lags int) (float64, error)
+```
+AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
+
+
+
+
+### <a name="Float64Data.Correlation">func</a> (Float64Data) [Correlation](/data.go?s=3058:3122#L86)
+``` go
+func (f Float64Data) Correlation(d Float64Data) (float64, error)
+```
+Correlation describes the degree of relationship between two sets of data
+
+
+
+
+### <a name="Float64Data.Covariance">func</a> (Float64Data) [Covariance](/data.go?s=4801:4864#L141)
+``` go
+func (f Float64Data) Covariance(d Float64Data) (float64, error)
+```
+Covariance is a measure of how much two sets of data change
+
+
+
+
+### <a name="Float64Data.CovariancePopulation">func</a> (Float64Data) [CovariancePopulation](/data.go?s=4983:5056#L146)
+``` go
+func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)
+```
+CovariancePopulation computes covariance for entire population between two variables
+
+
+
+
+### <a name="Float64Data.CumulativeSum">func</a> (Float64Data) [CumulativeSum](/data.go?s=883:938#L28)
+``` go
+func (f Float64Data) CumulativeSum() ([]float64, error)
+```
+CumulativeSum returns the cumulative sum of the data
+
+
+
+
+### <a name="Float64Data.Entropy">func</a> (Float64Data) [Entropy](/data.go?s=5480:5527#L162)
+``` go
+func (f Float64Data) Entropy() (float64, error)
+```
+Entropy provides calculation of the Shannon entropy of the normalized data
+
+
+
+
+### <a name="Float64Data.GeometricMean">func</a> (Float64Data) [GeometricMean](/data.go?s=1332:1385#L40)
+``` go
+func (f Float64Data) GeometricMean() (float64, error)
+```
+GeometricMean returns the geometric mean of the data
+
+
+
+
+### <a name="Float64Data.Get">func</a> (Float64Data) [Get](/data.go?s=129:168#L7)
+``` go
+func (f Float64Data) Get(i int) float64
+```
+Get item in slice
+
+
+
+
+### <a name="Float64Data.HarmonicMean">func</a> (Float64Data) [HarmonicMean](/data.go?s=1460:1512#L43)
+``` go
+func (f Float64Data) HarmonicMean() (float64, error)
+```
+HarmonicMean returns the harmonic mean of the data
+
+
+
+
+### <a name="Float64Data.InterQuartileRange">func</a> (Float64Data) [InterQuartileRange](/data.go?s=3755:3813#L106)
+``` go
+func (f Float64Data) InterQuartileRange() (float64, error)
+```
+InterQuartileRange finds the range between Q1 and Q3
+
+
+
+
+### <a name="Float64Data.Len">func</a> (Float64Data) [Len](/data.go?s=217:247#L10)
+``` go
+func (f Float64Data) Len() int
+```
+Len returns length of slice
+
+
+
+
+### <a name="Float64Data.Less">func</a> (Float64Data) [Less](/data.go?s=318:358#L13)
+``` go
+func (f Float64Data) Less(i, j int) bool
+```
+Less reports whether one number is less than another
+
+
+
+
+### <a name="Float64Data.Max">func</a> (Float64Data) [Max](/data.go?s=645:688#L22)
+``` go
+func (f Float64Data) Max() (float64, error)
+```
+Max returns the maximum number in the data
+
+
+
+
+### <a name="Float64Data.Mean">func</a> (Float64Data) [Mean](/data.go?s=1005:1049#L31)
+``` go
+func (f Float64Data) Mean() (float64, error)
+```
+Mean returns the mean of the data
+
+
+
+
+### <a name="Float64Data.Median">func</a> (Float64Data) [Median](/data.go?s=1111:1157#L34)
+``` go
+func (f Float64Data) Median() (float64, error)
+```
+Median returns the median of the data
+
+
+
+
+### <a name="Float64Data.MedianAbsoluteDeviation">func</a> (Float64Data) [MedianAbsoluteDeviation](/data.go?s=1630:1693#L46)
+``` go
+func (f Float64Data) MedianAbsoluteDeviation() (float64, error)
+```
+MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
+
+
+
+
+### <a name="Float64Data.MedianAbsoluteDeviationPopulation">func</a> (Float64Data) [MedianAbsoluteDeviationPopulation](/data.go?s=1842:1915#L51)
+``` go
+func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)
+```
+MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
+
+
+
+
+### <a name="Float64Data.Midhinge">func</a> (Float64Data) [Midhinge](/data.go?s=3912:3973#L111)
+``` go
+func (f Float64Data) Midhinge(d Float64Data) (float64, error)
+```
+Midhinge finds the average of the first and third quartiles
+
+
+
+
+### <a name="Float64Data.Min">func</a> (Float64Data) [Min](/data.go?s=536:579#L19)
+``` go
+func (f Float64Data) Min() (float64, error)
+```
+Min returns the minimum number in the data
+
+
+
+
+### <a name="Float64Data.Mode">func</a> (Float64Data) [Mode](/data.go?s=1217:1263#L37)
+``` go
+func (f Float64Data) Mode() ([]float64, error)
+```
+Mode returns the mode of the data
+
+
+
+
+### <a name="Float64Data.Pearson">func</a> (Float64Data) [Pearson](/data.go?s=3455:3515#L96)
+``` go
+func (f Float64Data) Pearson(d Float64Data) (float64, error)
+```
+Pearson calculates the Pearson product-moment correlation coefficient between two variables.
+
+
+
+
+### <a name="Float64Data.Percentile">func</a> (Float64Data) [Percentile](/data.go?s=2696:2755#L76)
+``` go
+func (f Float64Data) Percentile(p float64) (float64, error)
+```
+Percentile finds the relative standing in a slice of floats
+
+
+
+
+### <a name="Float64Data.PercentileNearestRank">func</a> (Float64Data) [PercentileNearestRank](/data.go?s=2869:2939#L81)
+``` go
+func (f Float64Data) PercentileNearestRank(p float64) (float64, error)
+```
+PercentileNearestRank finds the relative standing using the Nearest Rank method
+
+
+
+
+### <a name="Float64Data.PopulationVariance">func</a> (Float64Data) [PopulationVariance](/data.go?s=4495:4553#L131)
+``` go
+func (f Float64Data) PopulationVariance() (float64, error)
+```
+PopulationVariance finds the amount of variance within a population
+
+
+
+
+### <a name="Float64Data.Quartile">func</a> (Float64Data) [Quartile](/data.go?s=3610:3673#L101)
+``` go
+func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)
+```
+Quartile returns the three quartile points from a slice of data
+
+
+
+
+### <a name="Float64Data.QuartileOutliers">func</a> (Float64Data) [QuartileOutliers](/data.go?s=2542:2599#L71)
+``` go
+func (f Float64Data) QuartileOutliers() (Outliers, error)
+```
+QuartileOutliers finds the mild and extreme outliers
+
+
+
+
+### <a name="Float64Data.Quartiles">func</a> (Float64Data) [Quartiles](/data.go?s=5628:5679#L167)
+``` go
+func (f Float64Data) Quartiles() (Quartiles, error)
+```
+Quartiles returns the three quartile points from an instance of Float64Data
+
+
+
+
+### <a name="Float64Data.Sample">func</a> (Float64Data) [Sample](/data.go?s=4208:4269#L121)
+``` go
+func (f Float64Data) Sample(n int, r bool) ([]float64, error)
+```
+Sample returns a sample from the input, with or without replacement
+
+
+
+
+### <a name="Float64Data.SampleVariance">func</a> (Float64Data) [SampleVariance](/data.go?s=4652:4706#L136)
+``` go
+func (f Float64Data) SampleVariance() (float64, error)
+```
+SampleVariance finds the amount of variance within a sample
+
+
+
+
+### <a name="Float64Data.Sigmoid">func</a> (Float64Data) [Sigmoid](/data.go?s=5169:5218#L151)
+``` go
+func (f Float64Data) Sigmoid() ([]float64, error)
+```
+Sigmoid returns the input values along the sigmoid or s-shaped curve
+
+
+
+
+### <a name="Float64Data.SoftMax">func</a> (Float64Data) [SoftMax](/data.go?s=5359:5408#L157)
+``` go
+func (f Float64Data) SoftMax() ([]float64, error)
+```
+SoftMax returns the input values in the range of 0 to 1
+with the sum of all the probabilities equal to one.
+
+
+
+
+### <a name="Float64Data.StandardDeviation">func</a> (Float64Data) [StandardDeviation](/data.go?s=2026:2083#L56)
+``` go
+func (f Float64Data) StandardDeviation() (float64, error)
+```
+StandardDeviation finds the amount of variation in the dataset
+
+
+
+
+### <a name="Float64Data.StandardDeviationPopulation">func</a> (Float64Data) [StandardDeviationPopulation](/data.go?s=2199:2266#L61)
+``` go
+func (f Float64Data) StandardDeviationPopulation() (float64, error)
+```
+StandardDeviationPopulation finds the amount of variation from the population
+
+
+
+
+### <a name="Float64Data.StandardDeviationSample">func</a> (Float64Data) [StandardDeviationSample](/data.go?s=2382:2445#L66)
+``` go
+func (f Float64Data) StandardDeviationSample() (float64, error)
+```
+StandardDeviationSample finds the amount of variation from a sample
+
+
+
+
+### <a name="Float64Data.Sum">func</a> (Float64Data) [Sum](/data.go?s=764:807#L25)
+``` go
+func (f Float64Data) Sum() (float64, error)
+```
+Sum returns the total of all the numbers in the data
+
+
+
+
+### <a name="Float64Data.Swap">func</a> (Float64Data) [Swap](/data.go?s=425:460#L16)
+``` go
+func (f Float64Data) Swap(i, j int)
+```
+Swap switches out two numbers in slice
+
+
+
+
+### <a name="Float64Data.Trimean">func</a> (Float64Data) [Trimean](/data.go?s=4059:4119#L116)
+``` go
+func (f Float64Data) Trimean(d Float64Data) (float64, error)
+```
+Trimean finds the average of the median and the midhinge
+
+
+
+
+### <a name="Float64Data.Variance">func</a> (Float64Data) [Variance](/data.go?s=4350:4398#L126)
+``` go
+func (f Float64Data) Variance() (float64, error)
+```
+Variance finds the amount of variation in the dataset
+
+
+
+
+## <a name="Outliers">type</a> [Outliers](/outlier.go?s=73:139#L4)
+``` go
+type Outliers struct {
+    Mild    Float64Data
+    Extreme Float64Data
+}
+
+```
+Outliers holds mild and extreme outliers found in data
+
+
+
+
+
+
+
+### <a name="QuartileOutliers">func</a> [QuartileOutliers](/outlier.go?s=197:255#L10)
+``` go
+func QuartileOutliers(input Float64Data) (Outliers, error)
+```
+QuartileOutliers finds the mild and extreme outliers
+
+
+
+
+
+## <a name="Quartiles">type</a> [Quartiles](/quartile.go?s=75:136#L6)
+``` go
+type Quartiles struct {
+    Q1 float64
+    Q2 float64
+    Q3 float64
+}
+
+```
+Quartiles holds the three quartile points
+
+
+
+
+
+
+
+### <a name="Quartile">func</a> [Quartile](/quartile.go?s=205:256#L13)
+``` go
+func Quartile(input Float64Data) (Quartiles, error)
+```
+Quartile returns the three quartile points from a slice of data
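+
+A quick sketch:
+``` go
+q, _ := stats.Quartile(stats.Float64Data{1, 2, 3, 4, 5, 6, 7, 8})
+// q.Q1, q.Q2, q.Q3 hold the lower quartile, the median, and the upper quartile
+```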
+
+
+
+
+
+## <a name="Series">type</a> [Series](/regression.go?s=76:100#L6)
+``` go
+type Series []Coordinate
+```
+Series is a container for a series of data
+
+
+
+
+
+
+
+### <a name="ExponentialRegression">func</a> [ExponentialRegression](/regression.go?s=1089:1157#L50)
+``` go
+func ExponentialRegression(s Series) (regressions Series, err error)
+```
+ExponentialRegression returns an exponential regression on data series
+
+
+### <a name="LinearRegression">func</a> [LinearRegression](/regression.go?s=262:325#L14)
+``` go
+func LinearRegression(s Series) (regressions Series, err error)
+```
+LinearRegression finds the least squares linear regression on data series
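+
+A minimal sketch; for perfectly collinear points a least squares fit reproduces the inputs:
+``` go
+s := stats.Series{{1, 2}, {2, 4}, {3, 6}}
+reg, err := stats.LinearRegression(s) // fitted Y for each X, returned as a Series
+```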
+
+
+### <a name="LogarithmicRegression">func</a> [LogarithmicRegression](/regression.go?s=1903:1971#L85)
+``` go
+func LogarithmicRegression(s Series) (regressions Series, err error)
+```
+LogarithmicRegression returns a logarithmic regression on data series
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
diff --git a/vendor/github.com/montanaflynn/stats/LICENSE b/vendor/github.com/montanaflynn/stats/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..3162cb1a585e6ba58952e9e140740a4856af0000
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/montanaflynn/stats/Makefile b/vendor/github.com/montanaflynn/stats/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..969df128085189707c61821979430098df399540
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/Makefile
@@ -0,0 +1,34 @@
+.PHONY: all
+
+default: test lint
+
+format: 
+	go fmt .
+
+test:
+	go test -race 
+	
+check: format test
+
+benchmark:
+	go test -bench=. -benchmem
+
+coverage:
+	go test -coverprofile=coverage.out
+	go tool cover -html="coverage.out"
+
+lint: format
+	golangci-lint run .
+
+docs:
+	godoc2md github.com/montanaflynn/stats | sed -e s#src/target/##g > DOCUMENTATION.md
+
+release:
+	git-chglog --output CHANGELOG.md --next-tag ${TAG}
+	git add CHANGELOG.md
+	git commit -m "Update changelog with ${TAG} changes"
+	git tag ${TAG}
+	git-chglog $(TAG) | tail -n +4 | gsed '1s/^/$(TAG)\n/gm' > release-notes.txt
+	git push origin master ${TAG}
+	hub release create --copy -F release-notes.txt ${TAG}
+
diff --git a/vendor/github.com/montanaflynn/stats/README.md b/vendor/github.com/montanaflynn/stats/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9c1889073743c66da85008907a3df2b30483785f
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/README.md
@@ -0,0 +1,237 @@
+# Stats - Golang Statistics Package
+
+[![][action-svg]][action-url] [![][codecov-svg]][codecov-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url]
+
+A well tested and comprehensive Golang statistics library / package / module with no dependencies.
+
+If you have any suggestions, problems, or bug reports, please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition, simply starring the repo would show your support for the project and be very much appreciated!
+
+## Installation
+
+```
+go get github.com/montanaflynn/stats
+```
+
+## Example Usage
+
+All the functions can be seen in [examples/main.go](examples/main.go), but here's a little taste:
+
+```go
+// start with some source data to use
+data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
+
+// you could also use different types like this
+// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
+// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
+// etc...
+
+median, _ := stats.Median(data)
+fmt.Println(median) // 3.65
+
+roundedMedian, _ := stats.Round(median, 0)
+fmt.Println(roundedMedian) // 4
+```
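+
+Most functions also return an error that can be checked against the package's error values (listed in the API section below), for example:
+
+```go
+_, err := stats.Mean(stats.Float64Data{})
+if err == stats.ErrEmptyInput {
+    fmt.Println("no data provided")
+}
+```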
+
+## Documentation
+
+The entire API documentation is available on [GoDoc.org](http://godoc.org/github.com/montanaflynn/stats) or [pkg.go.dev](https://pkg.go.dev/github.com/montanaflynn/stats).
+
+You can also view docs offline with the following commands:
+
+```
+# Command line
+godoc .              # show all exported apis
+godoc . Median       # show a single function
+godoc -ex . Round    # show function with example
+godoc . Float64Data  # show the type and methods
+
+# Local website
+godoc -http=:4444    # start the godoc server on port 4444
+open http://localhost:4444/pkg/github.com/montanaflynn/stats/
+```
+
+The exported API is as follows:
+
+```go
+var (
+    ErrEmptyInput = statsError{"Input must not be empty."}
+    ErrNaN        = statsError{"Not a number."}
+    ErrNegative   = statsError{"Must not contain negative values."}
+    ErrZero       = statsError{"Must not contain zero values."}
+    ErrBounds     = statsError{"Input is outside of range."}
+    ErrSize       = statsError{"Must be the same length."}
+    ErrInfValue   = statsError{"Value is infinite."}
+    ErrYCoord     = statsError{"Y Value must be greater than zero."}
+)
+
+func Round(input float64, places int) (rounded float64, err error) {}
+
+type Float64Data []float64
+
+func LoadRawData(raw interface{}) (f Float64Data) {}
+
+func AutoCorrelation(data Float64Data, lags int) (float64, error) {}
+func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
+func Correlation(data1, data2 Float64Data) (float64, error) {}
+func Covariance(data1, data2 Float64Data) (float64, error) {}
+func CovariancePopulation(data1, data2 Float64Data) (float64, error) {}
+func CumulativeSum(input Float64Data) ([]float64, error) {}
+func Describe(input Float64Data, allowNaN bool, percentiles *[]float64) (*Description, error) {}
+func DescribePercentileFunc(input Float64Data, allowNaN bool, percentiles *[]float64, percentileFunc func(Float64Data, float64) (float64, error)) (*Description, error) {}
+func Entropy(input Float64Data) (float64, error) {}
+func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
+func GeometricMean(input Float64Data) (float64, error) {}
+func HarmonicMean(input Float64Data) (float64, error) {}
+func InterQuartileRange(input Float64Data) (float64, error) {}
+func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
+func Max(input Float64Data) (max float64, err error) {}
+func Mean(input Float64Data) (float64, error) {}
+func Median(input Float64Data) (median float64, err error) {}
+func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {}
+func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {}
+func Midhinge(input Float64Data) (float64, error) {}
+func Min(input Float64Data) (min float64, err error) {}
+func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {}
+func Mode(input Float64Data) (mode []float64, err error) {}
+func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {}
+func NormCdf(x float64, loc float64, scale float64) float64 {}
+func NormEntropy(loc float64, scale float64) float64 {}
+func NormFit(data []float64) [2]float64{}
+func NormInterval(alpha float64, loc float64,  scale float64 ) [2]float64 {}
+func NormIsf(p float64, loc float64, scale float64) (x float64) {}
+func NormLogCdf(x float64, loc float64, scale float64) float64 {}
+func NormLogPdf(x float64, loc float64, scale float64) float64 {}
+func NormLogSf(x float64, loc float64, scale float64) float64 {}
+func NormMean(loc float64, scale float64) float64 {}
+func NormMedian(loc float64, scale float64) float64 {}
+func NormMoment(n int, loc float64, scale float64) float64 {}
+func NormPdf(x float64, loc float64, scale float64) float64 {}
+func NormPpf(p float64, loc float64, scale float64) (x float64) {}
+func NormPpfRvs(loc float64, scale float64, size int) []float64 {}
+func NormSf(x float64, loc float64, scale float64) float64 {}
+func NormStats(loc float64, scale float64, moments string) []float64 {}
+func NormStd(loc float64, scale float64) float64 {}
+func NormVar(loc float64, scale float64) float64 {}
+func Pearson(data1, data2 Float64Data) (float64, error) {}
+func Percentile(input Float64Data, percent float64) (percentile float64, err error) {}
+func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {}
+func PopulationVariance(input Float64Data) (pvar float64, err error) {}
+func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {}
+func SampleVariance(input Float64Data) (svar float64, err error) {}
+func Sigmoid(input Float64Data) ([]float64, error) {}
+func SoftMax(input Float64Data) ([]float64, error) {}
+func StableSample(input Float64Data, takenum int) ([]float64, error) {}
+func StandardDeviation(input Float64Data) (sdev float64, err error) {}
+func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {}
+func StandardDeviationSample(input Float64Data) (sdev float64, err error) {}
+func StdDevP(input Float64Data) (sdev float64, err error) {}
+func StdDevS(input Float64Data) (sdev float64, err error) {}
+func Sum(input Float64Data) (sum float64, err error) {}
+func Trimean(input Float64Data) (float64, error) {}
+func VarP(input Float64Data) (sdev float64, err error) {}
+func VarS(input Float64Data) (sdev float64, err error) {}
+func Variance(input Float64Data) (sdev float64, err error) {}
+func ProbGeom(a int, b int, p float64) (prob float64, err error) {}
+func ExpGeom(p float64) (exp float64, err error) {}
+func VarGeom(p float64) (exp float64, err error) {}
+
+type Coordinate struct {
+    X, Y float64
+}
+
+type Series []Coordinate
+
+func ExponentialRegression(s Series) (regressions Series, err error) {}
+func LinearRegression(s Series) (regressions Series, err error) {}
+func LogarithmicRegression(s Series) (regressions Series, err error) {}
+
+type Outliers struct {
+    Mild    Float64Data
+    Extreme Float64Data
+}
+
+type Quartiles struct {
+    Q1 float64
+    Q2 float64
+    Q3 float64
+}
+
+func Quartile(input Float64Data) (Quartiles, error) {}
+func QuartileOutliers(input Float64Data) (Outliers, error) {}
+```
+
+## Contributing
+
+Pull requests are always welcome, no matter how big or small. I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting, and more.
+
+1. Fork the repo and clone your fork
+2. Create new branch (`git checkout -b some-thing`)
+3. Make the desired changes
+4. Ensure tests pass (`go test -cover` or `make test`)
+5. Run lint and fix problems (`go vet .` or `make lint`)
+6. Commit changes (`git commit -am 'Did something'`)
+7. Push branch (`git push origin some-thing`)
+8. Submit pull request
+
+To make things as seamless as possible, please also consider the following steps:
+
+- Update `examples/main.go` with a simple example of the new feature
+- Update `README.md` documentation section with any new exported API
+- Keep 100% code coverage (you can check with `make coverage`)
+- Squash commits into single units of work with `git rebase -i new-feature`
+
+## Releasing
+
+This is not required by contributors and mostly here as a reminder to myself as the maintainer of this repo. To release a new version we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md).
+
+First install the tools used to generate the markdown files and release:
+
+```
+go install github.com/davecheney/godoc2md@latest
+go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+brew tap git-chglog/git-chglog
+brew install gnu-sed hub git-chglog
+```
+
+Then you can run these `make` directives:
+
+```
+# Generate DOCUMENTATION.md
+make docs
+```
+
+Then we can create a [CHANGELOG.md](/CHANGELOG.md), a new git tag, and a GitHub release:
+
+```
+make release TAG=v0.x.x
+```
+
+To authenticate `hub` for the release you will need to create a personal access token and use it as the password when it's requested.
+
+## MIT License
+
+Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+[action-url]: https://github.com/montanaflynn/stats/actions
+[action-svg]: https://img.shields.io/github/actions/workflow/status/montanaflynn/stats/go.yml
+
+[codecov-url]: https://app.codecov.io/gh/montanaflynn/stats
+[codecov-svg]: https://img.shields.io/codecov/c/github/montanaflynn/stats?token=wnw8dActnH
+
+[goreport-url]: https://goreportcard.com/report/github.com/montanaflynn/stats
+[goreport-svg]: https://goreportcard.com/badge/github.com/montanaflynn/stats
+
+[godoc-url]: https://godoc.org/github.com/montanaflynn/stats
+[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg
+
+[pkggodev-url]: https://pkg.go.dev/github.com/montanaflynn/stats
+[pkggodev-svg]: https://gistcdn.githack.com/montanaflynn/b02f1d78d8c0de8435895d7e7cd0d473/raw/17f2a5a69f1323ecd42c00e0683655da96d9ecc8/badge.svg
+
+[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE
+[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg
diff --git a/vendor/github.com/montanaflynn/stats/correlation.go b/vendor/github.com/montanaflynn/stats/correlation.go
new file mode 100644
index 0000000000000000000000000000000000000000..4acab94dc91a3e61ad5f3d72b35b5b280874d074
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/correlation.go
@@ -0,0 +1,60 @@
+package stats
+
+import (
+	"math"
+)
+
+// Correlation describes the degree of relationship between two sets of data
+func Correlation(data1, data2 Float64Data) (float64, error) {
+
+	l1 := data1.Len()
+	l2 := data2.Len()
+
+	if l1 == 0 || l2 == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	if l1 != l2 {
+		return math.NaN(), SizeErr
+	}
+
+	sdev1, _ := StandardDeviationPopulation(data1)
+	sdev2, _ := StandardDeviationPopulation(data2)
+
+	if sdev1 == 0 || sdev2 == 0 {
+		return 0, nil
+	}
+
+	covp, _ := CovariancePopulation(data1, data2)
+	return covp / (sdev1 * sdev2), nil
+}
+
+// Pearson calculates the Pearson product-moment correlation coefficient between two variables
+func Pearson(data1, data2 Float64Data) (float64, error) {
+	return Correlation(data1, data2)
+}
+
+// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
+func AutoCorrelation(data Float64Data, lags int) (float64, error) {
+	if len(data) < 1 {
+		return 0, EmptyInputErr
+	}
+
+	mean, _ := Mean(data)
+
+	var result, q float64
+
+	for i := 0; i < lags; i++ {
+		v := (data[0] - mean) * (data[0] - mean)
+		// j indexes the data so it does not shadow the lag counter i
+		for j := 1; j < len(data); j++ {
+			delta0 := data[j-1] - mean
+			delta1 := data[j] - mean
+			q += (delta0*delta1 - q) / float64(j+1)
+			v += (delta1*delta1 - v) / float64(j+1)
+		}
+
+		result = q / v
+	}
+
+	return result, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/cumulative_sum.go b/vendor/github.com/montanaflynn/stats/cumulative_sum.go
new file mode 100644
index 0000000000000000000000000000000000000000..e5305daf393d9834561596c51ebaa7e4f6d6d346
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/cumulative_sum.go
@@ -0,0 +1,21 @@
+package stats
+
+// CumulativeSum calculates the cumulative sum of the input slice
+func CumulativeSum(input Float64Data) ([]float64, error) {
+
+	if input.Len() == 0 {
+		return Float64Data{}, EmptyInput
+	}
+
+	cumSum := make([]float64, input.Len())
+
+	for i, val := range input {
+		if i == 0 {
+			cumSum[i] = val
+		} else {
+			cumSum[i] = cumSum[i-1] + val
+		}
+	}
+
+	return cumSum, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/data.go b/vendor/github.com/montanaflynn/stats/data.go
new file mode 100644
index 0000000000000000000000000000000000000000..b86f0d84ddda379ffd43e6d119ff5f04ff996414
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/data.go
@@ -0,0 +1,169 @@
+package stats
+
+// Float64Data is a named type for []float64 with helper methods
+type Float64Data []float64
+
+// Get item in slice
+func (f Float64Data) Get(i int) float64 { return f[i] }
+
+// Len returns length of slice
+func (f Float64Data) Len() int { return len(f) }
+
+// Less reports whether one number is less than another
+func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] }
+
+// Swap switches out two numbers in slice
+func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+
+// Min returns the minimum number in the data
+func (f Float64Data) Min() (float64, error) { return Min(f) }
+
+// Max returns the maximum number in the data
+func (f Float64Data) Max() (float64, error) { return Max(f) }
+
+// Sum returns the total of all the numbers in the data
+func (f Float64Data) Sum() (float64, error) { return Sum(f) }
+
+// CumulativeSum returns the cumulative sum of the data
+func (f Float64Data) CumulativeSum() ([]float64, error) { return CumulativeSum(f) }
+
+// Mean returns the mean of the data
+func (f Float64Data) Mean() (float64, error) { return Mean(f) }
+
+// Median returns the median of the data
+func (f Float64Data) Median() (float64, error) { return Median(f) }
+
+// Mode returns the mode of the data
+func (f Float64Data) Mode() ([]float64, error) { return Mode(f) }
+
+// GeometricMean returns the geometric mean of the data
+func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) }
+
+// HarmonicMean returns the harmonic mean of the data
+func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) }
+
+// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
+func (f Float64Data) MedianAbsoluteDeviation() (float64, error) {
+	return MedianAbsoluteDeviation(f)
+}
+
+// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
+func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) {
+	return MedianAbsoluteDeviationPopulation(f)
+}
+
+// StandardDeviation finds the amount of variation in the dataset
+func (f Float64Data) StandardDeviation() (float64, error) {
+	return StandardDeviation(f)
+}
+
+// StandardDeviationPopulation finds the amount of variation from the population
+func (f Float64Data) StandardDeviationPopulation() (float64, error) {
+	return StandardDeviationPopulation(f)
+}
+
+// StandardDeviationSample finds the amount of variation from a sample
+func (f Float64Data) StandardDeviationSample() (float64, error) {
+	return StandardDeviationSample(f)
+}
+
+// QuartileOutliers finds the mild and extreme outliers
+func (f Float64Data) QuartileOutliers() (Outliers, error) {
+	return QuartileOutliers(f)
+}
+
+// Percentile finds the relative standing in a slice of floats
+func (f Float64Data) Percentile(p float64) (float64, error) {
+	return Percentile(f, p)
+}
+
+// PercentileNearestRank finds the relative standing using the Nearest Rank method
+func (f Float64Data) PercentileNearestRank(p float64) (float64, error) {
+	return PercentileNearestRank(f, p)
+}
+
+// Correlation describes the degree of relationship between two sets of data
+func (f Float64Data) Correlation(d Float64Data) (float64, error) {
+	return Correlation(f, d)
+}
+
+// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
+func (f Float64Data) AutoCorrelation(lags int) (float64, error) {
+	return AutoCorrelation(f, lags)
+}
+
+// Pearson calculates the Pearson product-moment correlation coefficient between two variables.
+func (f Float64Data) Pearson(d Float64Data) (float64, error) {
+	return Pearson(f, d)
+}
+
+// Quartile returns the three quartile points from a slice of data
+func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) {
+	return Quartile(d)
+}
+
+// InterQuartileRange finds the range between Q1 and Q3
+func (f Float64Data) InterQuartileRange() (float64, error) {
+	return InterQuartileRange(f)
+}
+
+// Midhinge finds the average of the first and third quartiles
+func (f Float64Data) Midhinge(d Float64Data) (float64, error) {
+	return Midhinge(d)
+}
+
+// Trimean finds the average of the median and the midhinge
+func (f Float64Data) Trimean(d Float64Data) (float64, error) {
+	return Trimean(d)
+}
+
+// Sample returns a sample from the input, with or without replacement
+func (f Float64Data) Sample(n int, r bool) ([]float64, error) {
+	return Sample(f, n, r)
+}
+
+// Variance finds the amount of variation in the dataset
+func (f Float64Data) Variance() (float64, error) {
+	return Variance(f)
+}
+
+// PopulationVariance finds the amount of variance within a population
+func (f Float64Data) PopulationVariance() (float64, error) {
+	return PopulationVariance(f)
+}
+
+// SampleVariance finds the amount of variance within a sample
+func (f Float64Data) SampleVariance() (float64, error) {
+	return SampleVariance(f)
+}
+
+// Covariance is a measure of how much two sets of data change
+func (f Float64Data) Covariance(d Float64Data) (float64, error) {
+	return Covariance(f, d)
+}
+
+// CovariancePopulation computes covariance for entire population between two variables
+func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) {
+	return CovariancePopulation(f, d)
+}
+
+// Sigmoid returns the input values along the sigmoid or s-shaped curve
+func (f Float64Data) Sigmoid() ([]float64, error) {
+	return Sigmoid(f)
+}
+
+// SoftMax returns the input values in the range of 0 to 1
+// with the sum of all the probabilities equal to one.
+func (f Float64Data) SoftMax() ([]float64, error) {
+	return SoftMax(f)
+}
+
+// Entropy provides calculation of the Shannon entropy of the normalized data
+func (f Float64Data) Entropy() (float64, error) {
+	return Entropy(f)
+}
+
+// Quartiles returns the three quartile points from instance of Float64Data
+func (f Float64Data) Quartiles() (Quartiles, error) {
+	return Quartile(f)
+}
diff --git a/vendor/github.com/montanaflynn/stats/describe.go b/vendor/github.com/montanaflynn/stats/describe.go
new file mode 100644
index 0000000000000000000000000000000000000000..86b72425c6e2e66615a757970d409c65ace99c83
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/describe.go
@@ -0,0 +1,81 @@
+package stats
+
+import "fmt"
+
+// Description holds information about the dataset provided to Describe
+type Description struct {
+	Count                  int
+	Mean                   float64
+	Std                    float64
+	Max                    float64
+	Min                    float64
+	DescriptionPercentiles []descriptionPercentile
+	AllowedNaN             bool
+}
+
+// descriptionPercentile pairs a requested percentile with its computed value
+type descriptionPercentile struct {
+	Percentile float64
+	Value      float64
+}
+
+// Describe generates descriptive statistics about a provided dataset, similar to python's pandas.describe()
+func Describe(input Float64Data, allowNaN bool, percentiles *[]float64) (*Description, error) {
+	return DescribePercentileFunc(input, allowNaN, percentiles, Percentile)
+}
+
+// DescribePercentileFunc generates descriptive statistics about a provided dataset, similar to python's pandas.describe().
+// It takes in a function to use for percentile calculation.
+func DescribePercentileFunc(input Float64Data, allowNaN bool, percentiles *[]float64, percentileFunc func(Float64Data, float64) (float64, error)) (*Description, error) {
+	var description Description
+	description.AllowedNaN = allowNaN
+	description.Count = input.Len()
+
+	if description.Count == 0 && !allowNaN {
+		return &description, ErrEmptyInput
+	}
+
+	// Disregard error, since it cannot be thrown if Count is > 0 and allowNaN is false, else NaN is accepted
+	description.Std, _ = StandardDeviation(input)
+	description.Max, _ = Max(input)
+	description.Min, _ = Min(input)
+	description.Mean, _ = Mean(input)
+
+	if percentiles != nil {
+		for _, percentile := range *percentiles {
+			if value, err := percentileFunc(input, percentile); err == nil || allowNaN {
+				description.DescriptionPercentiles = append(description.DescriptionPercentiles, descriptionPercentile{Percentile: percentile, Value: value})
+			}
+		}
+	}
+
+	return &description, nil
+}
+
+/*
+Represents the Description instance in a string format with specified number of decimals
+
+	count   3
+	mean    2.00
+	std     0.82
+	max     3.00
+	min     1.00
+	25.00%  NaN
+	50.00%  1.50
+	75.00%  2.50
+	NaN OK  true
+*/
+func (d *Description) String(decimals int) string {
+	var str string
+
+	str += fmt.Sprintf("count\t%d\n", d.Count)
+	str += fmt.Sprintf("mean\t%.*f\n", decimals, d.Mean)
+	str += fmt.Sprintf("std\t%.*f\n", decimals, d.Std)
+	str += fmt.Sprintf("max\t%.*f\n", decimals, d.Max)
+	str += fmt.Sprintf("min\t%.*f\n", decimals, d.Min)
+	for _, percentile := range d.DescriptionPercentiles {
+		str += fmt.Sprintf("%.2f%%\t%.*f\n", percentile.Percentile, decimals, percentile.Value)
+	}
+	str += fmt.Sprintf("NaN OK\t%t", d.AllowedNaN)
+	return str
+}
diff --git a/vendor/github.com/montanaflynn/stats/deviation.go b/vendor/github.com/montanaflynn/stats/deviation.go
new file mode 100644
index 0000000000000000000000000000000000000000..e69a19f60d57e36c0f05031717fd127e406bafb3
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/deviation.go
@@ -0,0 +1,57 @@
+package stats
+
+import "math"
+
+// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
+func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {
+	return MedianAbsoluteDeviationPopulation(input)
+}
+
+// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
+func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	i := copyslice(input)
+	m, _ := Median(i)
+
+	for key, value := range i {
+		i[key] = math.Abs(value - m)
+	}
+
+	return Median(i)
+}
+
+// StandardDeviation finds the amount of variation in the dataset
+func StandardDeviation(input Float64Data) (sdev float64, err error) {
+	return StandardDeviationPopulation(input)
+}
+
+// StandardDeviationPopulation finds the amount of variation from the population
+func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Get the population variance
+	vp, _ := PopulationVariance(input)
+
+	// Return the population standard deviation
+	return math.Sqrt(vp), nil
+}
+
+// StandardDeviationSample finds the amount of variation from a sample
+func StandardDeviationSample(input Float64Data) (sdev float64, err error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Get the sample variance
+	vs, _ := SampleVariance(input)
+
+	// Return the sample standard deviation
+	return math.Sqrt(vs), nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/distances.go b/vendor/github.com/montanaflynn/stats/distances.go
new file mode 100644
index 0000000000000000000000000000000000000000..8a6330e3887889ca09680ec56e5d064482008c58
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/distances.go
@@ -0,0 +1,91 @@
+package stats
+
+import (
+	"math"
+)
+
+// Validate data for distance calculation
+func validateData(dataPointX, dataPointY Float64Data) error {
+	if len(dataPointX) == 0 || len(dataPointY) == 0 {
+		return EmptyInputErr
+	}
+
+	if len(dataPointX) != len(dataPointY) {
+		return SizeErr
+	}
+	return nil
+}
+
+// ChebyshevDistance computes the Chebyshev distance between two data sets
+func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {
+	err = validateData(dataPointX, dataPointY)
+	if err != nil {
+		return math.NaN(), err
+	}
+	var tempDistance float64
+	for i := 0; i < len(dataPointY); i++ {
+		tempDistance = math.Abs(dataPointX[i] - dataPointY[i])
+		if distance < tempDistance {
+			distance = tempDistance
+		}
+	}
+	return distance, nil
+}
+
+// EuclideanDistance computes the Euclidean distance between two data sets
+func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {
+
+	err = validateData(dataPointX, dataPointY)
+	if err != nil {
+		return math.NaN(), err
+	}
+	distance = 0
+	for i := 0; i < len(dataPointX); i++ {
+		distance = distance + ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i]))
+	}
+	return math.Sqrt(distance), nil
+}
+
+// ManhattanDistance computes the Manhattan distance between two data sets
+func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {
+	err = validateData(dataPointX, dataPointY)
+	if err != nil {
+		return math.NaN(), err
+	}
+	distance = 0
+	for i := 0; i < len(dataPointX); i++ {
+		distance = distance + math.Abs(dataPointX[i]-dataPointY[i])
+	}
+	return distance, nil
+}
+
+// MinkowskiDistance computes the Minkowski distance between two data sets
+//
+// Arguments:
+//
+//	dataPointX: First set of data points
+//	dataPointY: Second set of data points. Length of both data
+//	            sets must be equal.
+//	lambda:     aka p or city blocks; with lambda = 1 the
+//	            returned distance is the Manhattan distance,
+//	            with lambda = 2 it is the Euclidean distance,
+//	            and as lambda approaches infinity it tends to
+//	            the Chebyshev distance.
+//
+// Return:
+//
+//	Distance or error
+func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {
+	err = validateData(dataPointX, dataPointY)
+	if err != nil {
+		return math.NaN(), err
+	}
+	for i := 0; i < len(dataPointY); i++ {
+		distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda)
+	}
+	distance = math.Pow(distance, 1/lambda)
+	if math.IsInf(distance, 1) {
+		return math.NaN(), InfValue
+	}
+	return distance, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/doc.go b/vendor/github.com/montanaflynn/stats/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..facb8d57bfe1eb29bac8887c9f37762d019a2298
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/doc.go
@@ -0,0 +1,23 @@
+/*
+Package stats is a well tested and comprehensive
+statistics library package with no dependencies.
+
+Example Usage:
+
+	// start with some source data to use
+	data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
+
+	// you could also use different types like this
+	// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
+	// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
+	// etc...
+
+	median, _ := stats.Median(data)
+	fmt.Println(median) // 3.65
+
+	roundedMedian, _ := stats.Round(median, 0)
+	fmt.Println(roundedMedian) // 4
+
+MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com)
+*/
+package stats
diff --git a/vendor/github.com/montanaflynn/stats/entropy.go b/vendor/github.com/montanaflynn/stats/entropy.go
new file mode 100644
index 0000000000000000000000000000000000000000..95263b0fced8cce386870587f27a8b412a016fb5
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/entropy.go
@@ -0,0 +1,31 @@
+package stats
+
+import "math"
+
+// Entropy provides calculation of the Shannon entropy (natural log) of the normalized input
+func Entropy(input Float64Data) (float64, error) {
+	input, err := normalize(input)
+	if err != nil {
+		return math.NaN(), err
+	}
+	var result float64
+	for i := 0; i < input.Len(); i++ {
+		v := input.Get(i)
+		if v == 0 {
+			continue
+		}
+		result += (v * math.Log(v))
+	}
+	return -result, nil
+}
+
+func normalize(input Float64Data) (Float64Data, error) {
+	sum, err := input.Sum()
+	if err != nil {
+		return Float64Data{}, err
+	}
+	for i := 0; i < input.Len(); i++ {
+		input[i] = input[i] / sum
+	}
+	return input, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/errors.go b/vendor/github.com/montanaflynn/stats/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..95f82ff7b736c49cd4c28aa51b2ab31f1037407e
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/errors.go
@@ -0,0 +1,35 @@
+package stats
+
+type statsError struct {
+	err string
+}
+
+func (s statsError) Error() string {
+	return s.err
+}
+
+func (s statsError) String() string {
+	return s.err
+}
+
+// These are the package-wide error values.
+// All error identification should use these values.
+// https://github.com/golang/go/wiki/Errors#naming
+var (
+	// ErrEmptyInput Input must not be empty
+	ErrEmptyInput = statsError{"Input must not be empty."}
+	// ErrNaN Not a number
+	ErrNaN = statsError{"Not a number."}
+	// ErrNegative Must not contain negative values
+	ErrNegative = statsError{"Must not contain negative values."}
+	// ErrZero Must not contain zero values
+	ErrZero = statsError{"Must not contain zero values."}
+	// ErrBounds Input is outside of range
+	ErrBounds = statsError{"Input is outside of range."}
+	// ErrSize Must be the same length
+	ErrSize = statsError{"Must be the same length."}
+	// ErrInfValue Value is infinite
+	ErrInfValue = statsError{"Value is infinite."}
+	// ErrYCoord Y Value must be greater than zero
+	ErrYCoord = statsError{"Y Value must be greater than zero."}
+)
diff --git a/vendor/github.com/montanaflynn/stats/geometric_distribution.go b/vendor/github.com/montanaflynn/stats/geometric_distribution.go
new file mode 100644
index 0000000000000000000000000000000000000000..db785dda2462a8fa1dca283ecef6f75f6ef9eb82
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/geometric_distribution.go
@@ -0,0 +1,42 @@
+package stats
+
+import (
+	"math"
+)
+
+// ProbGeom generates the probability for a geometric random variable
+// with parameter p to achieve success in the interval of [a, b] trials
+// See https://en.wikipedia.org/wiki/Geometric_distribution for more information
+func ProbGeom(a int, b int, p float64) (prob float64, err error) {
+	if (a > b) || (a < 1) {
+		return math.NaN(), ErrBounds
+	}
+
+	prob = 0
+	q := 1 - p // probability of failure
+
+	for k := a + 1; k <= b; k++ {
+		prob = prob + p*math.Pow(q, float64(k-1))
+	}
+
+	return prob, nil
+}
+
+// ExpGeom generates the expectation or average number of trials
+// for a geometric random variable with parameter p
+func ExpGeom(p float64) (exp float64, err error) {
+	if (p > 1) || (p < 0) {
+		return math.NaN(), ErrNegative
+	}
+
+	return 1 / p, nil
+}
+
+// VarGeom generates the variance for a
+// geometric random variable with parameter p
+func VarGeom(p float64) (exp float64, err error) {
+	if (p > 1) || (p < 0) {
+		return math.NaN(), ErrNegative
+	}
+	return (1 - p) / math.Pow(p, 2), nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/legacy.go b/vendor/github.com/montanaflynn/stats/legacy.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f3d1e8bb22cc594f65ad5dfc4998ad43d3a90e0
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/legacy.go
@@ -0,0 +1,49 @@
+package stats
+
+// VarP is a shortcut to PopulationVariance
+func VarP(input Float64Data) (sdev float64, err error) {
+	return PopulationVariance(input)
+}
+
+// VarS is a shortcut to SampleVariance
+func VarS(input Float64Data) (sdev float64, err error) {
+	return SampleVariance(input)
+}
+
+// StdDevP is a shortcut to StandardDeviationPopulation
+func StdDevP(input Float64Data) (sdev float64, err error) {
+	return StandardDeviationPopulation(input)
+}
+
+// StdDevS is a shortcut to StandardDeviationSample
+func StdDevS(input Float64Data) (sdev float64, err error) {
+	return StandardDeviationSample(input)
+}
+
+// LinReg is a shortcut to LinearRegression
+func LinReg(s []Coordinate) (regressions []Coordinate, err error) {
+	return LinearRegression(s)
+}
+
+// ExpReg is a shortcut to ExponentialRegression
+func ExpReg(s []Coordinate) (regressions []Coordinate, err error) {
+	return ExponentialRegression(s)
+}
+
+// LogReg is a shortcut to LogarithmicRegression
+func LogReg(s []Coordinate) (regressions []Coordinate, err error) {
+	return LogarithmicRegression(s)
+}
+
+// Legacy error names that didn't start with Err
+var (
+	EmptyInputErr = ErrEmptyInput
+	NaNErr        = ErrNaN
+	NegativeErr   = ErrNegative
+	ZeroErr       = ErrZero
+	BoundsErr     = ErrBounds
+	SizeErr       = ErrSize
+	InfValue      = ErrInfValue
+	YCoordErr     = ErrYCoord
+	EmptyInput    = ErrEmptyInput
+)
diff --git a/vendor/github.com/montanaflynn/stats/load.go b/vendor/github.com/montanaflynn/stats/load.go
new file mode 100644
index 0000000000000000000000000000000000000000..0eb0e27290c9e4a53edd16c83b6dc3eb50ea1bde
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/load.go
@@ -0,0 +1,199 @@
+package stats
+
+import (
+	"bufio"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// LoadRawData parses and converts a slice of mixed data types to floats
+func LoadRawData(raw interface{}) (f Float64Data) {
+	var r []interface{}
+	var s Float64Data
+
+	switch t := raw.(type) {
+	case []interface{}:
+		r = t
+	case []uint:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint8:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint16:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint32:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint64:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []bool:
+		for _, v := range t {
+			if v {
+				s = append(s, 1.0)
+			} else {
+				s = append(s, 0.0)
+			}
+		}
+		return s
+	case []float64:
+		return Float64Data(t)
+	case []int:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int8:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int16:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int32:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int64:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []string:
+		for _, v := range t {
+			r = append(r, v)
+		}
+	case []time.Duration:
+		for _, v := range t {
+			r = append(r, v)
+		}
+	case map[int]int:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]int8:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]int16:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]int32:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]int64:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]string:
+		for i := 0; i < len(t); i++ {
+			r = append(r, t[i])
+		}
+	case map[int]uint:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]uint8:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]uint16:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]uint32:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]uint64:
+		for i := 0; i < len(t); i++ {
+			s = append(s, float64(t[i]))
+		}
+		return s
+	case map[int]bool:
+		for i := 0; i < len(t); i++ {
+			if t[i] {
+				s = append(s, 1.0)
+			} else {
+				s = append(s, 0.0)
+			}
+		}
+		return s
+	case map[int]float64:
+		for i := 0; i < len(t); i++ {
+			s = append(s, t[i])
+		}
+		return s
+	case map[int]time.Duration:
+		for i := 0; i < len(t); i++ {
+			r = append(r, t[i])
+		}
+	case string:
+		for _, v := range strings.Fields(t) {
+			r = append(r, v)
+		}
+	case io.Reader:
+		scanner := bufio.NewScanner(t)
+		for scanner.Scan() {
+			l := scanner.Text()
+			for _, v := range strings.Fields(l) {
+				r = append(r, v)
+			}
+		}
+	}
+
+	for _, v := range r {
+		switch t := v.(type) {
+		case int:
+			a := float64(t)
+			f = append(f, a)
+		case uint:
+			f = append(f, float64(t))
+		case float64:
+			f = append(f, t)
+		case string:
+			fl, err := strconv.ParseFloat(t, 64)
+			if err == nil {
+				f = append(f, fl)
+			}
+		case bool:
+			if t {
+				f = append(f, 1.0)
+			} else {
+				f = append(f, 0.0)
+			}
+		case time.Duration:
+			f = append(f, float64(t))
+		}
+	}
+	return f
+}
diff --git a/vendor/github.com/montanaflynn/stats/max.go b/vendor/github.com/montanaflynn/stats/max.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb8c83c325a5e9a10f8fc1862a2e3901bd99de4d
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/max.go
@@ -0,0 +1,26 @@
+package stats
+
+import (
+	"math"
+)
+
+// Max finds the highest number in a slice
+func Max(input Float64Data) (max float64, err error) {
+
+	// Return an error if there are no numbers
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Get the first value as the starting point
+	max = input.Get(0)
+
+	// Loop and replace higher values
+	for i := 1; i < input.Len(); i++ {
+		if input.Get(i) > max {
+			max = input.Get(i)
+		}
+	}
+
+	return max, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/mean.go b/vendor/github.com/montanaflynn/stats/mean.go
new file mode 100644
index 0000000000000000000000000000000000000000..a78d299aec1478a0621012d11085aca89f183991
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/mean.go
@@ -0,0 +1,60 @@
+package stats
+
+import "math"
+
+// Mean gets the average of a slice of numbers
+func Mean(input Float64Data) (float64, error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	sum, _ := input.Sum()
+
+	return sum / float64(input.Len()), nil
+}
+
+// GeometricMean gets the geometric mean for a slice of numbers
+func GeometricMean(input Float64Data) (float64, error) {
+
+	l := input.Len()
+	if l == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Get the product of all the numbers. Starting the product at 1
+	// (rather than seeding it with the first element) keeps a zero
+	// anywhere in the input from silently restarting the product.
+	p := 1.0
+	for _, n := range input {
+		p *= n
+	}
+
+	// Calculate the geometric mean
+	return math.Pow(p, 1/float64(l)), nil
+}
+
+// HarmonicMean gets the harmonic mean for a slice of numbers
+func HarmonicMean(input Float64Data) (float64, error) {
+
+	l := input.Len()
+	if l == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Sum the reciprocals of all the numbers, returning an
+	// error for values that cannot be included in a harmonic mean
+	var p float64
+	for _, n := range input {
+		if n < 0 {
+			return math.NaN(), NegativeErr
+		} else if n == 0 {
+			return math.NaN(), ZeroErr
+		}
+		p += (1 / n)
+	}
+
+	return float64(l) / p, nil
+}
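+
+// A worked comparison of the three means (a minimal sketch):
+//
+//	d := stats.Float64Data{1, 2, 4}
+//	m, _ := stats.Mean(d)          // 2.333... (7 / 3)
+//	g, _ := stats.GeometricMean(d) // 2 (cube root of 1*2*4)
+//	h, _ := stats.HarmonicMean(d)  // 1.714... (3 / (1/1 + 1/2 + 1/4))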
diff --git a/vendor/github.com/montanaflynn/stats/median.go b/vendor/github.com/montanaflynn/stats/median.go
new file mode 100644
index 0000000000000000000000000000000000000000..a678c3653286907d06a3431cb14e6121bc8cf00a
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/median.go
@@ -0,0 +1,25 @@
+package stats
+
+import "math"
+
+// Median gets the median number in a slice of numbers
+func Median(input Float64Data) (median float64, err error) {
+
+	// Start by sorting a copy of the slice
+	c := sortedCopy(input)
+
+	// No math is needed if there are no numbers.
+	// For an even count we average the two middle
+	// numbers with the Mean function; for an odd
+	// count we take the middle number directly.
+	l := len(c)
+	if l == 0 {
+		return math.NaN(), EmptyInputErr
+	} else if l%2 == 0 {
+		median, _ = Mean(c[l/2-1 : l/2+1])
+	} else {
+		median = c[l/2]
+	}
+
+	return median, nil
+}
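+
+// Illustrative values (a minimal sketch):
+//
+//	stats.Median(stats.Float64Data{1, 3, 5})    // 3, nil
+//	stats.Median(stats.Float64Data{1, 2, 3, 4}) // 2.5, nil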
diff --git a/vendor/github.com/montanaflynn/stats/min.go b/vendor/github.com/montanaflynn/stats/min.go
new file mode 100644
index 0000000000000000000000000000000000000000..bf7e70acffe375bf94a507bf5096661ec896a799
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/min.go
@@ -0,0 +1,26 @@
+package stats
+
+import "math"
+
+// Min finds the lowest number in a set of data
+func Min(input Float64Data) (min float64, err error) {
+
+	// Get the count of numbers in the slice
+	l := input.Len()
+
+	// Return an error if there are no numbers
+	if l == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Get the first value as the starting point
+	min = input.Get(0)
+
+	// Iterate until done checking for a lower value
+	for i := 1; i < l; i++ {
+		if input.Get(i) < min {
+			min = input.Get(i)
+		}
+	}
+	return min, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/mode.go b/vendor/github.com/montanaflynn/stats/mode.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7cf9f7a4bc208f172913cc01d8aa9567821efdc
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/mode.go
@@ -0,0 +1,47 @@
+package stats
+
+// Mode gets the mode [most frequent value(s)] of a slice of float64s
+func Mode(input Float64Data) (mode []float64, err error) {
+	// Return the input if there's only one number
+	l := input.Len()
+	if l == 1 {
+		return input, nil
+	} else if l == 0 {
+		return nil, EmptyInputErr
+	}
+
+	c := sortedCopyDif(input)
+	// Traverse sorted array,
+	// tracking the longest repeating sequence
+	// Preallocate capacity for the result without seeding it with zero values
+	mode = make([]float64, 0, 5)
+	cnt, maxCnt := 1, 1
+	for i := 1; i < l; i++ {
+		switch {
+		case c[i] == c[i-1]:
+			cnt++
+		case cnt == maxCnt && maxCnt != 1:
+			mode = append(mode, c[i-1])
+			cnt = 1
+		case cnt > maxCnt:
+			mode = append(mode[:0], c[i-1])
+			maxCnt, cnt = cnt, 1
+		default:
+			cnt = 1
+		}
+	}
+	switch {
+	case cnt == maxCnt:
+		mode = append(mode, c[l-1])
+	case cnt > maxCnt:
+		mode = append(mode[:0], c[l-1])
+		maxCnt = cnt
+	}
+
+	// Since length must be greater than 1,
+	// check for slices of distinct values
+	if maxCnt == 1 || len(mode)*maxCnt == l && maxCnt != l {
+		return Float64Data{}, nil
+	}
+
+	return mode, nil
+}
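+
+// Illustrative values (a minimal sketch):
+//
+//	stats.Mode(stats.Float64Data{1, 2, 2, 3}) // [2], nil
+//	stats.Mode(stats.Float64Data{1, 2, 3})    // [] (all values distinct), nil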
diff --git a/vendor/github.com/montanaflynn/stats/norm.go b/vendor/github.com/montanaflynn/stats/norm.go
new file mode 100644
index 0000000000000000000000000000000000000000..4eb8eb8b993446e57f53e7095f4cf4e0dc687451
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/norm.go
@@ -0,0 +1,254 @@
+package stats
+
+import (
+	"math"
+	"math/rand"
+	"strings"
+	"time"
+)
+
+// NormPpfRvs generates random variates using the percent point function
+// (inverse transform sampling).
+// For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/
+func NormPpfRvs(loc float64, scale float64, size int) []float64 {
+	rand.Seed(time.Now().UnixNano())
+	var toReturn []float64
+	for i := 0; i < size; i++ {
+		toReturn = append(toReturn, NormPpf(rand.Float64(), loc, scale))
+	}
+	return toReturn
+}
+
+// NormBoxMullerRvs generates random variates using the Box–Muller transform.
+// For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html
+func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {
+	rand.Seed(time.Now().UnixNano())
+	var toReturn []float64
+	for i := 0; i < size/2+size%2; i++ {
+		// u1 and u2 are uniformly distributed random numbers between 0 and 1.
+		u1 := rand.Float64()
+		u2 := rand.Float64()
+		// x1 and x2 are normally distributed random numbers.
+		x1 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2)))
+		toReturn = append(toReturn, x1)
+		if (i+1)*2 <= size {
+			x2 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Sin(2*math.Pi*u2)))
+			toReturn = append(toReturn, x2)
+		}
+	}
+	return toReturn
+}
+
+// NormPdf is the probability density function.
+func NormPdf(x float64, loc float64, scale float64) float64 {
+	return (math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi))
+}
+
+// NormLogPdf is the log of the probability density function.
+func NormLogPdf(x float64, loc float64, scale float64) float64 {
+	return math.Log((math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi)))
+}
+
+// NormCdf is the cumulative distribution function.
+func NormCdf(x float64, loc float64, scale float64) float64 {
+	return 0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2))))
+}
+
+// NormLogCdf is the log of the cumulative distribution function.
+func NormLogCdf(x float64, loc float64, scale float64) float64 {
+	return math.Log(0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2)))))
+}
+
+// NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate).
+func NormSf(x float64, loc float64, scale float64) float64 {
+	return 1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2))))
+}
+
+// NormLogSf is the log of the survival function.
+func NormLogSf(x float64, loc float64, scale float64) float64 {
+	return math.Log(1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2)))))
+}
+
+// NormPpf is the percent point function (the inverse of the CDF).
+// This is based on Peter John Acklam's inverse normal CDF.
+// algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible).
+// For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/
+func NormPpf(p float64, loc float64, scale float64) (x float64) {
+	const (
+		a1 = -3.969683028665376e+01
+		a2 = 2.209460984245205e+02
+		a3 = -2.759285104469687e+02
+		a4 = 1.383577518672690e+02
+		a5 = -3.066479806614716e+01
+		a6 = 2.506628277459239e+00
+
+		b1 = -5.447609879822406e+01
+		b2 = 1.615858368580409e+02
+		b3 = -1.556989798598866e+02
+		b4 = 6.680131188771972e+01
+		b5 = -1.328068155288572e+01
+
+		c1 = -7.784894002430293e-03
+		c2 = -3.223964580411365e-01
+		c3 = -2.400758277161838e+00
+		c4 = -2.549732539343734e+00
+		c5 = 4.374664141464968e+00
+		c6 = 2.938163982698783e+00
+
+		d1 = 7.784695709041462e-03
+		d2 = 3.224671290700398e-01
+		d3 = 2.445134137142996e+00
+		d4 = 3.754408661907416e+00
+
+		plow  = 0.02425
+		phigh = 1 - plow
+	)
+
+	if p < 0 || p > 1 {
+		return math.NaN()
+	} else if p == 0 {
+		return -math.Inf(0)
+	} else if p == 1 {
+		return math.Inf(0)
+	}
+
+	if p < plow {
+		q := math.Sqrt(-2 * math.Log(p))
+		x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
+			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
+	} else if phigh < p {
+		q := math.Sqrt(-2 * math.Log(1-p))
+		x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
+			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
+	} else {
+		q := p - 0.5
+		r := q * q
+		x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q /
+			(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1)
+	}
+
+	e := 0.5*math.Erfc(-x/math.Sqrt2) - p
+	u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2)
+	x = x - u/(1+x*u/2)
+
+	return x*scale + loc
+}
+
+// NormIsf is the inverse survival function (inverse of sf).
+func NormIsf(p float64, loc float64, scale float64) (x float64) {
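+	// The explicit zero check below normalizes a negative zero from the negation to 0.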
+	if -NormPpf(p, loc, scale) == 0 {
+		return 0
+	}
+	return -NormPpf(p, loc, scale)
+}
+
+// NormMoment approximates the non-central (raw) moment of order n.
+// For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution
+func NormMoment(n int, loc float64, scale float64) float64 {
+	toReturn := 0.0
+	for i := 0; i < n+1; i++ {
+		if (n-i)%2 == 0 {
+			toReturn += float64(Ncr(n, i)) * (math.Pow(loc, float64(i))) * (math.Pow(scale, float64(n-i))) *
+				(float64(factorial(n-i)) / ((math.Pow(2.0, float64((n-i)/2))) *
+					float64(factorial((n-i)/2))))
+		}
+	}
+	return toReturn
+}
+
+// NormStats returns the mean, variance, skew, and/or kurtosis.
+// Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’).
+// Takes string containing any of 'mvsk'.
+// Returns array of m v s k in that order.
+func NormStats(loc float64, scale float64, moments string) []float64 {
+	var toReturn []float64
+	if strings.ContainsAny(moments, "m") {
+		toReturn = append(toReturn, loc)
+	}
+	if strings.ContainsAny(moments, "v") {
+		toReturn = append(toReturn, math.Pow(scale, 2))
+	}
+	if strings.ContainsAny(moments, "s") {
+		toReturn = append(toReturn, 0.0)
+	}
+	if strings.ContainsAny(moments, "k") {
+		toReturn = append(toReturn, 0.0)
+	}
+	return toReturn
+}
+
+// NormEntropy is the differential entropy of the RV.
+func NormEntropy(loc float64, scale float64) float64 {
+	return math.Log(scale * math.Sqrt(2*math.Pi*math.E))
+}
+
+// NormFit returns the maximum likelihood estimators for the Normal Distribution.
+// Takes array of float64 values.
+// Returns array of Mean followed by Standard Deviation.
+func NormFit(data []float64) [2]float64 {
+	sum := 0.00
+	for i := 0; i < len(data); i++ {
+		sum += data[i]
+	}
+	mean := sum / float64(len(data))
+	stdNumerator := 0.00
+	for i := 0; i < len(data); i++ {
+		stdNumerator += math.Pow(data[i]-mean, 2)
+	}
+	return [2]float64{mean, math.Sqrt((stdNumerator) / (float64(len(data))))}
+}
+
+// NormMedian is the median of the distribution.
+func NormMedian(loc float64, scale float64) float64 {
+	return loc
+}
+
+// NormMean is the mean/expected value of the distribution.
+func NormMean(loc float64, scale float64) float64 {
+	return loc
+}
+
+// NormVar is the variance of the distribution.
+func NormVar(loc float64, scale float64) float64 {
+	return math.Pow(scale, 2)
+}
+
+// NormStd is the standard deviation of the distribution.
+func NormStd(loc float64, scale float64) float64 {
+	return scale
+}
+
+// NormInterval finds the endpoints of the range that contains the central alpha fraction of the distribution.
+func NormInterval(alpha float64, loc float64, scale float64) [2]float64 {
+	q1 := (1.0 - alpha) / 2
+	q2 := (1.0 + alpha) / 2
+	a := NormPpf(q1, loc, scale)
+	b := NormPpf(q2, loc, scale)
+	return [2]float64{a, b}
+}
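+
+// Illustrative value (a minimal sketch): the central 95% interval of the
+// standard normal distribution.
+//
+//	stats.NormInterval(0.95, 0, 1) // [2]float64{-1.959..., 1.959...}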
+
+// factorial is the naive factorial algorithm.
+func factorial(x int) int {
+	if x == 0 {
+		return 1
+	}
+	return x * factorial(x-1)
+}
+
+// Ncr is an N choose R algorithm.
+// Aaron Cannon's algorithm.
+func Ncr(n, r int) int {
+	if n <= 1 || r == 0 || n == r {
+		return 1
+	}
+	if newR := n - r; newR < r {
+		r = newR
+	}
+	if r == 1 {
+		return n
+	}
+	ret := n - r + 1
+	for i, j := ret+1, 2; j <= r; i, j = i+1, j+1 {
+		ret = ret * i / j
+	}
+	return ret
+}
diff --git a/vendor/github.com/montanaflynn/stats/outlier.go b/vendor/github.com/montanaflynn/stats/outlier.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c9795bd35ba46044b811ef68154b5da81730cb3
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/outlier.go
@@ -0,0 +1,44 @@
+package stats
+
+// Outliers holds mild and extreme outliers found in data
+type Outliers struct {
+	Mild    Float64Data
+	Extreme Float64Data
+}
+
+// QuartileOutliers finds the mild and extreme outliers
+func QuartileOutliers(input Float64Data) (Outliers, error) {
+	if input.Len() == 0 {
+		return Outliers{}, EmptyInputErr
+	}
+
+	// Start by sorting a copy of the slice
+	copy := sortedCopy(input)
+
+	// Calculate the quartiles and interquartile range
+	qs, _ := Quartile(copy)
+	iqr, _ := InterQuartileRange(copy)
+
+	// Calculate the lower and upper inner and outer fences
+	lif := qs.Q1 - (1.5 * iqr)
+	uif := qs.Q3 + (1.5 * iqr)
+	lof := qs.Q1 - (3 * iqr)
+	uof := qs.Q3 + (3 * iqr)
+
+	// Find the data points that fall outside the
+	// inner and outer fences and add them to the
+	// mild and extreme outlier slices
+	var mild Float64Data
+	var extreme Float64Data
+	for _, v := range copy {
+
+		if v < lof || v > uof {
+			extreme = append(extreme, v)
+		} else if v < lif || v > uif {
+			mild = append(mild, v)
+		}
+	}
+
+	// Wrap them into our struct
+	return Outliers{mild, extreme}, nil
+}
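+
+// Illustrative value (a minimal sketch): 100 falls beyond the outer fence.
+//
+//	d := stats.Float64Data{1, 2, 3, 4, 5, 6, 7, 8, 9, 100}
+//	o, _ := stats.QuartileOutliers(d) // o.Mild == [], o.Extreme == [100]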
diff --git a/vendor/github.com/montanaflynn/stats/percentile.go b/vendor/github.com/montanaflynn/stats/percentile.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5641783ed026d7f27b0266f36014afa9144a326
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/percentile.go
@@ -0,0 +1,86 @@
+package stats
+
+import (
+	"math"
+)
+
+// Percentile finds the relative standing in a slice of floats
+func Percentile(input Float64Data, percent float64) (percentile float64, err error) {
+	length := input.Len()
+	if length == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	if length == 1 {
+		return input[0], nil
+	}
+
+	if percent <= 0 || percent > 100 {
+		return math.NaN(), BoundsErr
+	}
+
+	// Start by sorting a copy of the slice
+	c := sortedCopy(input)
+
+	// Multiply percent by length of input
+	index := (percent / 100) * float64(len(c))
+
+	// Check if the index is a whole number
+	if index == float64(int64(index)) {
+
+		// Convert float to int
+		i := int(index)
+
+		// Find the value at the index
+		percentile = c[i-1]
+
+	} else if index > 1 {
+
+		// Convert float to int via truncation
+		i := int(index)
+
+		// Find the average of the index and following values
+		percentile, _ = Mean(Float64Data{c[i-1], c[i]})
+
+	} else {
+		return math.NaN(), BoundsErr
+	}
+
+	return percentile, nil
+
+}
+
+// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method
+func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {
+
+	// Find the length of items in the slice
+	il := input.Len()
+
+	// Return an error for empty slices
+	if il == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Return error for less than 0 or greater than 100 percentages
+	if percent < 0 || percent > 100 {
+		return math.NaN(), BoundsErr
+	}
+
+	// Start by sorting a copy of the slice
+	c := sortedCopy(input)
+
+	// Return the last item
+	if percent == 100.0 {
+		return c[il-1], nil
+	}
+
+	// Find ordinal ranking
+	or := int(math.Ceil(float64(il) * percent / 100))
+
+	// Return the item that is in the place of the ordinal rank
+	if or == 0 {
+		return c[0], nil
+	}
+	return c[or-1], nil
+
+}
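+
+// Illustrative values (a minimal sketch; note Percentile returns c[i-1]
+// when the computed index lands on a whole number):
+//
+//	d := stats.Float64Data{1, 2, 3, 4}
+//	stats.Percentile(d, 75)            // 3, nil (index 3 is whole, so c[2])
+//	stats.PercentileNearestRank(d, 50) // 2, nil (ordinal rank ceil(4*0.5) = 2)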
diff --git a/vendor/github.com/montanaflynn/stats/quartile.go b/vendor/github.com/montanaflynn/stats/quartile.go
new file mode 100644
index 0000000000000000000000000000000000000000..40bbf6e57b8487ceec3538f664a5ac9abe801f7d
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/quartile.go
@@ -0,0 +1,74 @@
+package stats
+
+import "math"
+
+// Quartiles holds the three quartile points
+type Quartiles struct {
+	Q1 float64
+	Q2 float64
+	Q3 float64
+}
+
+// Quartile returns the three quartile points from a slice of data
+func Quartile(input Float64Data) (Quartiles, error) {
+
+	il := input.Len()
+	if il == 0 {
+		return Quartiles{}, EmptyInputErr
+	}
+
+	// Start by sorting a copy of the slice
+	copy := sortedCopy(input)
+
+	// Find the cutoff places depending on whether
+	// the input slice length is even or odd
+	var c1 int
+	var c2 int
+	if il%2 == 0 {
+		c1 = il / 2
+		c2 = il / 2
+	} else {
+		c1 = (il - 1) / 2
+		c2 = c1 + 1
+	}
+
+	// Find the Medians with the cutoff points
+	Q1, _ := Median(copy[:c1])
+	Q2, _ := Median(copy)
+	Q3, _ := Median(copy[c2:])
+
+	return Quartiles{Q1, Q2, Q3}, nil
+
+}
+
+// InterQuartileRange finds the range between Q1 and Q3
+func InterQuartileRange(input Float64Data) (float64, error) {
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+	qs, _ := Quartile(input)
+	iqr := qs.Q3 - qs.Q1
+	return iqr, nil
+}
+
+// Midhinge finds the average of the first and third quartiles
+func Midhinge(input Float64Data) (float64, error) {
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+	qs, _ := Quartile(input)
+	mh := (qs.Q1 + qs.Q3) / 2
+	return mh, nil
+}
+
+// Trimean finds the average of the median and the midhinge
+func Trimean(input Float64Data) (float64, error) {
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	c := sortedCopy(input)
+	q, _ := Quartile(c)
+
+	return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil
+}
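+
+// Worked values on eight points (a minimal sketch):
+//
+//	d := stats.Float64Data{1, 2, 3, 4, 5, 6, 7, 8}
+//	q, _ := stats.Quartile(d)             // Q1 = 2.5, Q2 = 4.5, Q3 = 6.5
+//	iqr, _ := stats.InterQuartileRange(d) // 4 (6.5 - 2.5)
+//	mh, _ := stats.Midhinge(d)            // 4.5 ((2.5 + 6.5) / 2)
+//	tm, _ := stats.Trimean(d)             // 4.5 ((2.5 + 2*4.5 + 6.5) / 4)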
diff --git a/vendor/github.com/montanaflynn/stats/ranksum.go b/vendor/github.com/montanaflynn/stats/ranksum.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc424ef4e2f673f6e8d96403703a8e8dc1e5b30c
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/ranksum.go
@@ -0,0 +1,183 @@
+package stats
+
+// import "math"
+//
+// // WilcoxonRankSum tests the null hypothesis that two sets
+// // of data are drawn from the same distribution. It does
+// // not handle ties between measurements in x and y.
+// //
+// // Parameters:
+// //    data1 Float64Data: First set of data points.
+// //    data2 Float64Data: Second set of data points.
+// //    Length of both data samples must be equal.
+// //
+// // Return:
+// //    statistic float64: The test statistic under the
+// //                       large-sample approximation that the
+// //                       rank sum statistic is normally distributed.
+// //    pvalue float64: The two-sided p-value of the test
+// //    err error: Any error from the input data parameters
+// //
+// // https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
+// func WilcoxonRankSum(data1, data2 Float64Data) (float64, float64, error) {
+//
+// 	l1 := data1.Len()
+// 	l2 := data2.Len()
+//
+// 	if l1 == 0 || l2 == 0 {
+// 		return math.NaN(), math.NaN(), EmptyInputErr
+// 	}
+//
+// 	if l1 != l2 {
+// 		return math.NaN(), math.NaN(), SizeErr
+// 	}
+//
+// 	alldata := Float64Data{}
+// 	alldata = append(alldata, data1...)
+// 	alldata = append(alldata, data2...)
+//
+// 	// ranked :=
+//
+// 	return 0.0, 0.0, nil
+// }
+//
+// //     x, y = map(np.asarray, (x, y))
+// //     n1 = len(x)
+// //     n2 = len(y)
+// //     alldata = np.concatenate((x, y))
+// //     ranked = rankdata(alldata)
+// //     x = ranked[:n1]
+// //     s = np.sum(x, axis=0)
+// //     expected = n1 * (n1+n2+1) / 2.0
+// //     z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
+// //     prob = 2 * distributions.norm.sf(abs(z))
+// //
+// //     return RanksumsResult(z, prob)
+//
+// // def rankdata(a, method='average'):
+// //     """
+// //     Assign ranks to data, dealing with ties appropriately.
+// //     Ranks begin at 1.  The `method` argument controls how ranks are assigned
+// //     to equal values.  See [1]_ for further discussion of ranking methods.
+// //     Parameters
+// //     ----------
+// //     a : array_like
+// //         The array of values to be ranked.  The array is first flattened.
+// //     method : str, optional
+// //         The method used to assign ranks to tied elements.
+// //         The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
+// //         'average':
+// //             The average of the ranks that would have been assigned to
+// //             all the tied values is assigned to each value.
+// //         'min':
+// //             The minimum of the ranks that would have been assigned to all
+// //             the tied values is assigned to each value.  (This is also
+// //             referred to as "competition" ranking.)
+// //         'max':
+// //             The maximum of the ranks that would have been assigned to all
+// //             the tied values is assigned to each value.
+// //         'dense':
+// //             Like 'min', but the rank of the next highest element is assigned
+// //             the rank immediately after those assigned to the tied elements.
+// //         'ordinal':
+// //             All values are given a distinct rank, corresponding to the order
+// //             that the values occur in `a`.
+// //         The default is 'average'.
+// //     Returns
+// //     -------
+// //     ranks : ndarray
+// //          An array of length equal to the size of `a`, containing rank
+// //          scores.
+// //     References
+// //     ----------
+// //     .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
+// //     Examples
+// //     --------
+// //     >>> from scipy.stats import rankdata
+// //     >>> rankdata([0, 2, 3, 2])
+// //     array([ 1. ,  2.5,  4. ,  2.5])
+// //     """
+// //
+// //     arr = np.ravel(np.asarray(a))
+// //     algo = 'quicksort'
+// //     sorter = np.argsort(arr, kind=algo)
+// //
+// //     inv = np.empty(sorter.size, dtype=np.intp)
+// //     inv[sorter] = np.arange(sorter.size, dtype=np.intp)
+// //
+// //
+// //     arr = arr[sorter]
+// //     obs = np.r_[True, arr[1:] != arr[:-1]]
+// //     dense = obs.cumsum()[inv]
+// //
+// //
+// //     # cumulative counts of each unique value
+// //     count = np.r_[np.nonzero(obs)[0], len(obs)]
+// //
+// //     # average method
+// //     return .5 * (count[dense] + count[dense - 1] + 1)
+//
+// type rankable interface {
+// 	Len() int
+// 	RankEqual(int, int) bool
+// }
+//
+// func StandardRank(d rankable) []float64 {
+// 	r := make([]float64, d.Len())
+// 	var k int
+// 	for i := range r {
+// 		if i == 0 || !d.RankEqual(i, i-1) {
+// 			k = i + 1
+// 		}
+// 		r[i] = float64(k)
+// 	}
+// 	return r
+// }
+//
+// func ModifiedRank(d rankable) []float64 {
+// 	r := make([]float64, d.Len())
+// 	for i := range r {
+// 		k := i + 1
+// 		for j := i + 1; j < len(r) && d.RankEqual(i, j); j++ {
+// 			k = j + 1
+// 		}
+// 		r[i] = float64(k)
+// 	}
+// 	return r
+// }
+//
+// func DenseRank(d rankable) []float64 {
+// 	r := make([]float64, d.Len())
+// 	var k int
+// 	for i := range r {
+// 		if i == 0 || !d.RankEqual(i, i-1) {
+// 			k++
+// 		}
+// 		r[i] = float64(k)
+// 	}
+// 	return r
+// }
+//
+// func OrdinalRank(d rankable) []float64 {
+// 	r := make([]float64, d.Len())
+// 	for i := range r {
+// 		r[i] = float64(i + 1)
+// 	}
+// 	return r
+// }
+//
+// func FractionalRank(d rankable) []float64 {
+// 	r := make([]float64, d.Len())
+// 	for i := 0; i < len(r); {
+// 		var j int
+// 		f := float64(i + 1)
+// 		for j = i + 1; j < len(r) && d.RankEqual(i, j); j++ {
+// 			f += float64(j + 1)
+// 		}
+// 		f /= float64(j - i)
+// 		for ; i < j; i++ {
+// 			r[i] = f
+// 		}
+// 	}
+// 	return r
+// }
diff --git a/vendor/github.com/montanaflynn/stats/regression.go b/vendor/github.com/montanaflynn/stats/regression.go
new file mode 100644
index 0000000000000000000000000000000000000000..401d9512018f17794d8b19e230981bd8a251d305
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/regression.go
@@ -0,0 +1,113 @@
+package stats
+
+import "math"
+
+// Series is a container for a series of data
+type Series []Coordinate
+
+// Coordinate holds the data in a series
+type Coordinate struct {
+	X, Y float64
+}
+
+// LinearRegression finds the least squares linear regression on a data series
+func LinearRegression(s Series) (regressions Series, err error) {
+
+	if len(s) == 0 {
+		return nil, EmptyInputErr
+	}
+
+	// Placeholder for the math to be done
+	var sum [5]float64
+
+	// Loop over data keeping index in place
+	i := 0
+	for ; i < len(s); i++ {
+		sum[0] += s[i].X
+		sum[1] += s[i].Y
+		sum[2] += s[i].X * s[i].X
+		sum[3] += s[i].X * s[i].Y
+		sum[4] += s[i].Y * s[i].Y
+	}
+
+	// Find gradient and intercept
+	f := float64(i)
+	gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0])
+	intercept := (sum[1] / f) - (gradient * sum[0] / f)
+
+	// Create the new regression series
+	for j := 0; j < len(s); j++ {
+		regressions = append(regressions, Coordinate{
+			X: s[j].X,
+			Y: s[j].X*gradient + intercept,
+		})
+	}
+
+	return regressions, nil
+}
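+
+// Illustrative fit (a minimal sketch): collinear points recover y = 2x exactly.
+//
+//	s := stats.Series{{1, 2}, {2, 4}, {3, 6}}
+//	r, _ := stats.LinearRegression(s) // {{1, 2}, {2, 4}, {3, 6}}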
+
+// ExponentialRegression returns an exponential regression on a data series
+func ExponentialRegression(s Series) (regressions Series, err error) {
+
+	if len(s) == 0 {
+		return nil, EmptyInputErr
+	}
+
+	var sum [6]float64
+
+	for i := 0; i < len(s); i++ {
+		if s[i].Y < 0 {
+			return nil, YCoordErr
+		}
+		sum[0] += s[i].X
+		sum[1] += s[i].Y
+		sum[2] += s[i].X * s[i].X * s[i].Y
+		sum[3] += s[i].Y * math.Log(s[i].Y)
+		sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y)
+		sum[5] += s[i].X * s[i].Y
+	}
+
+	denominator := (sum[1]*sum[2] - sum[5]*sum[5])
+	a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator)
+	b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator
+
+	for j := 0; j < len(s); j++ {
+		regressions = append(regressions, Coordinate{
+			X: s[j].X,
+			Y: a * math.Exp(b*s[j].X),
+		})
+	}
+
+	return regressions, nil
+}
+
+// LogarithmicRegression returns a logarithmic regression on a data series
+func LogarithmicRegression(s Series) (regressions Series, err error) {
+
+	if len(s) == 0 {
+		return nil, EmptyInputErr
+	}
+
+	var sum [4]float64
+
+	i := 0
+	for ; i < len(s); i++ {
+		sum[0] += math.Log(s[i].X)
+		sum[1] += s[i].Y * math.Log(s[i].X)
+		sum[2] += s[i].Y
+		sum[3] += math.Pow(math.Log(s[i].X), 2)
+	}
+
+	f := float64(i)
+	a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0])
+	b := (sum[2] - a*sum[0]) / f
+
+	for j := 0; j < len(s); j++ {
+		regressions = append(regressions, Coordinate{
+			X: s[j].X,
+			Y: b + a*math.Log(s[j].X),
+		})
+	}
+
+	return regressions, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/round.go b/vendor/github.com/montanaflynn/stats/round.go
new file mode 100644
index 0000000000000000000000000000000000000000..b66779c9fc7f7826f489ae3fe5c92f7d2c9f5a2d
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/round.go
@@ -0,0 +1,38 @@
+package stats
+
+import "math"
+
+// Round a float to a specific decimal place or precision
+func Round(input float64, places int) (rounded float64, err error) {
+
+	// If the float is not a number
+	if math.IsNaN(input) {
+		return math.NaN(), NaNErr
+	}
+
+	// Find out the actual sign and correct the input for later
+	sign := 1.0
+	if input < 0 {
+		sign = -1
+		input *= -1
+	}
+
+	// Use the places arg to get the amount of precision wanted
+	precision := math.Pow(10, float64(places))
+
+	// Find the decimal place we are looking to round
+	digit := input * precision
+
+	// Get the actual decimal number as a fraction to be compared
+	_, decimal := math.Modf(digit)
+
+	// If the decimal is less than .5 we round down otherwise up
+	if decimal >= 0.5 {
+		rounded = math.Ceil(digit)
+	} else {
+		rounded = math.Floor(digit)
+	}
+
+	// Finally we do the math to actually create a rounded number
+	return rounded / precision * sign, nil
+}
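+
+// Illustrative values (a minimal sketch):
+//
+//	stats.Round(1.534, 2)  // 1.53, nil
+//	stats.Round(-1.536, 2) // -1.54, nil (the sign is restored after rounding)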
diff --git a/vendor/github.com/montanaflynn/stats/sample.go b/vendor/github.com/montanaflynn/stats/sample.go
new file mode 100644
index 0000000000000000000000000000000000000000..40166af6acf5fb283e08f9737ba26ba24a6bb710
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/sample.go
@@ -0,0 +1,76 @@
+package stats
+
+import (
+	"math/rand"
+	"sort"
+)
+
+// Sample returns a sample of takenum elements from input, with or without replacement
+func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {
+
+	if input.Len() == 0 {
+		return nil, EmptyInputErr
+	}
+
+	length := input.Len()
+	if replacement {
+
+		result := Float64Data{}
+		rand.Seed(unixnano())
+
+		// On each step pick a random index and take that element
+		for i := 0; i < takenum; i++ {
+			idx := rand.Intn(length)
+			result = append(result, input[idx])
+		}
+
+		return result, nil
+
+	} else if !replacement && takenum <= length {
+
+		rand.Seed(unixnano())
+
+		// Get a permutation of the indexes
+		perm := rand.Perm(length)
+		result := Float64Data{}
+
+		// Get the elements of input at the permuted indexes
+		for _, idx := range perm[0:takenum] {
+			result = append(result, input[idx])
+		}
+
+		return result, nil
+
+	}
+
+	return nil, BoundsErr
+}
+
+// StableSample, like a stable sort, returns a sample from input while keeping the order of the original data.
+func StableSample(input Float64Data, takenum int) ([]float64, error) {
+	if input.Len() == 0 {
+		return nil, EmptyInputErr
+	}
+
+	length := input.Len()
+
+	if takenum <= length {
+
+		rand.Seed(unixnano())
+
+		perm := rand.Perm(length)
+		perm = perm[0:takenum]
+		// Sort perm before applying
+		sort.Ints(perm)
+		result := Float64Data{}
+
+		for _, idx := range perm {
+			result = append(result, input[idx])
+		}
+
+		return result, nil
+
+	}
+
+	return nil, BoundsErr
+}
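+
+// A minimal sketch of the difference: both draw two of three elements
+// without replacement, but only StableSample preserves input order.
+//
+//	d := stats.Float64Data{10, 20, 30}
+//	s1, _ := stats.Sample(d, 2, false) // e.g. [30, 10], permutation order
+//	s2, _ := stats.StableSample(d, 2)  // e.g. [10, 30], original order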
diff --git a/vendor/github.com/montanaflynn/stats/sigmoid.go b/vendor/github.com/montanaflynn/stats/sigmoid.go
new file mode 100644
index 0000000000000000000000000000000000000000..5f2559d81cd8c0bcca3b688745ddeca078adf78a
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/sigmoid.go
@@ -0,0 +1,18 @@
+package stats
+
+import "math"
+
+// Sigmoid returns the input values mapped into the range of 0 to 1
+// along the sigmoid or s-shaped curve, commonly used in
+// machine learning while training neural networks as an
+// activation function.
+func Sigmoid(input Float64Data) ([]float64, error) {
+	if input.Len() == 0 {
+		return Float64Data{}, EmptyInputErr
+	}
+	s := make([]float64, len(input))
+	for i, v := range input {
+		s[i] = 1 / (1 + math.Exp(-v))
+	}
+	return s, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/softmax.go b/vendor/github.com/montanaflynn/stats/softmax.go
new file mode 100644
index 0000000000000000000000000000000000000000..85072642bc29194b79ca911c8952274c313b8e63
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/softmax.go
@@ -0,0 +1,25 @@
+package stats
+
+import "math"
+
+// SoftMax returns the input values in the range of 0 to 1,
+// with the sum of all the probabilities equal to one. It
+// is commonly used in machine learning neural networks.
+func SoftMax(input Float64Data) ([]float64, error) {
+	if input.Len() == 0 {
+		return Float64Data{}, EmptyInputErr
+	}
+
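+	// Subtract the maximum before exponentiating so large inputs cannot overflow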
+	s := 0.0
+	c, _ := Max(input)
+	for _, e := range input {
+		s += math.Exp(e - c)
+	}
+
+	sm := make([]float64, len(input))
+	for i, v := range input {
+		sm[i] = math.Exp(v-c) / s
+	}
+
+	return sm, nil
+}
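+
+// Illustrative values (a minimal sketch; the outputs sum to one):
+//
+//	stats.SoftMax(stats.Float64Data{1, 2, 3})
+//	// [0.0900..., 0.2447..., 0.6652...], nil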
diff --git a/vendor/github.com/montanaflynn/stats/sum.go b/vendor/github.com/montanaflynn/stats/sum.go
new file mode 100644
index 0000000000000000000000000000000000000000..15b611d17a1227e2ae4e6659d747a899306a2120
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/sum.go
@@ -0,0 +1,18 @@
+package stats
+
+import "math"
+
+// Sum adds all the numbers of a slice together
+func Sum(input Float64Data) (sum float64, err error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Add em up
+	for _, n := range input {
+		sum += n
+	}
+
+	return sum, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/util.go b/vendor/github.com/montanaflynn/stats/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..881997604d231da249723e0280f7ad7a42b89f89
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/util.go
@@ -0,0 +1,43 @@
+package stats
+
+import (
+	"sort"
+	"time"
+)
+
+// float64ToInt rounds a float64 to an int
+func float64ToInt(input float64) (output int) {
+	r, _ := Round(input, 0)
+	return int(r)
+}
+
+// unixnano returns nanoseconds from UTC epoch
+func unixnano() int64 {
+	return time.Now().UTC().UnixNano()
+}
+
+// copyslice copies a slice of float64s
+func copyslice(input Float64Data) Float64Data {
+	s := make(Float64Data, input.Len())
+	copy(s, input)
+	return s
+}
+
+// sortedCopy returns a sorted copy of float64s
+func sortedCopy(input Float64Data) (copy Float64Data) {
+	copy = copyslice(input)
+	sort.Float64s(copy)
+	return
+}
+
+// sortedCopyDif returns a sorted copy of float64s
+// only if the original data isn't sorted.
+// Only use this if returned slice won't be manipulated!
+func sortedCopyDif(input Float64Data) (copy Float64Data) {
+	if sort.Float64sAreSorted(input) {
+		return input
+	}
+	copy = copyslice(input)
+	sort.Float64s(copy)
+	return
+}
diff --git a/vendor/github.com/montanaflynn/stats/variance.go b/vendor/github.com/montanaflynn/stats/variance.go
new file mode 100644
index 0000000000000000000000000000000000000000..a6445690d14a0d4a72574d437830c7195c8bc920
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/variance.go
@@ -0,0 +1,105 @@
+package stats
+
+import "math"
+
+// _variance finds the variance for both population and sample data
+func _variance(input Float64Data, sample int) (variance float64, err error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Sum the square of the mean subtracted from each number
+	m, _ := Mean(input)
+
+	for _, n := range input {
+		variance += (n - m) * (n - m)
+	}
+
+	// When getting the mean of the squared differences,
+	// "sample" tells us whether this is a sample or a
+	// population and whether to subtract one from the divisor
+	return variance / float64((input.Len() - (1 * sample))), nil
+}
+
+// Variance finds the amount of variation in the dataset
+func Variance(input Float64Data) (sdev float64, err error) {
+	return PopulationVariance(input)
+}
+
+// PopulationVariance finds the amount of variance within a population
+func PopulationVariance(input Float64Data) (pvar float64, err error) {
+
+	v, err := _variance(input, 0)
+	if err != nil {
+		return math.NaN(), err
+	}
+
+	return v, nil
+}
+
+// SampleVariance finds the amount of variance within a sample
+func SampleVariance(input Float64Data) (svar float64, err error) {
+
+	v, err := _variance(input, 1)
+	if err != nil {
+		return math.NaN(), err
+	}
+
+	return v, nil
+}
+
+// Covariance is a measure of how much two sets of data change together
+func Covariance(data1, data2 Float64Data) (float64, error) {
+
+	l1 := data1.Len()
+	l2 := data2.Len()
+
+	if l1 == 0 || l2 == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	if l1 != l2 {
+		return math.NaN(), SizeErr
+	}
+
+	m1, _ := Mean(data1)
+	m2, _ := Mean(data2)
+
+	// Incrementally average the products of deviations; the final
+	// scaling by l1/(l1-1) below yields the sample covariance
+	var ss float64
+	for i := 0; i < l1; i++ {
+		delta1 := (data1.Get(i) - m1)
+		delta2 := (data2.Get(i) - m2)
+		ss += (delta1*delta2 - ss) / float64(i+1)
+	}
+
+	return ss * float64(l1) / float64(l1-1), nil
+}
+
+// CovariancePopulation computes the covariance between two variables over the entire population.
+func CovariancePopulation(data1, data2 Float64Data) (float64, error) {
+
+	l1 := data1.Len()
+	l2 := data2.Len()
+
+	if l1 == 0 || l2 == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	if l1 != l2 {
+		return math.NaN(), SizeErr
+	}
+
+	m1, _ := Mean(data1)
+	m2, _ := Mean(data2)
+
+	var s float64
+	for i := 0; i < l1; i++ {
+		delta1 := (data1.Get(i) - m1)
+		delta2 := (data2.Get(i) - m2)
+		s += delta1 * delta2
+	}
+
+	return s / float64(l1), nil
+}
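+
+// Illustrative values (a minimal sketch): the sample divisor is n-1,
+// the population divisor is n.
+//
+//	d := stats.Float64Data{1, 2, 3, 4} // mean 2.5, squared deviations sum to 5
+//	stats.SampleVariance(d)     // 1.666..., nil (5 / 3)
+//	stats.PopulationVariance(d) // 1.25, nil (5 / 4)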
diff --git a/vendor/github.com/xdg-go/pbkdf2/.gitignore b/vendor/github.com/xdg-go/pbkdf2/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f1c181ec9c5c921245027c6b452ecfc1d3626364
--- /dev/null
+++ b/vendor/github.com/xdg-go/pbkdf2/.gitignore
@@ -0,0 +1,12 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/xdg-go/pbkdf2/LICENSE b/vendor/github.com/xdg-go/pbkdf2/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..67db8588217f266eb561f75fae738656325deac9
--- /dev/null
+++ b/vendor/github.com/xdg-go/pbkdf2/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/xdg-go/pbkdf2/README.md b/vendor/github.com/xdg-go/pbkdf2/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2824e45645fc7d6f753ae244a25784aece2b77c
--- /dev/null
+++ b/vendor/github.com/xdg-go/pbkdf2/README.md
@@ -0,0 +1,17 @@
+[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/pbkdf2.svg)](https://pkg.go.dev/github.com/xdg-go/pbkdf2)
+[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/pbkdf2)](https://goreportcard.com/report/github.com/xdg-go/pbkdf2)
+[![Github Actions](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml)
+
+# pbkdf2 – Go implementation of PBKDF2
+
+## Description
+
+Package pbkdf2 provides password-based key derivation based on
+[RFC 8018](https://tools.ietf.org/html/rfc8018).
+
+## Copyright and License
+
+Copyright 2021 by David A. Golden. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"). You may
+obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
diff --git a/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go b/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000000000000000000000000000000000000..029945ca46eb7ea18bf30ec7d5205d1f3d6e5089
--- /dev/null
+++ b/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go
@@ -0,0 +1,76 @@
+// Copyright 2021 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package pbkdf2 implements password-based key derivation using the PBKDF2
+// algorithm described in RFC 2898 and RFC 8018.
+//
+// It provides a drop-in replacement for `golang.org/x/crypto/pbkdf2`, with
+// the following benefits:
+//
+// - Released as a module with semantic versioning
+//
+// - Does not pull in dependencies for unrelated `x/crypto/*` packages
+//
+// - Supports Go 1.9+
+//
+// See https://tools.ietf.org/html/rfc8018#section-4 for security considerations
+// in the selection of a salt and iteration count.
+package pbkdf2
+
+import (
+	"crypto/hmac"
+	"encoding/binary"
+	"hash"
+)
+
+// Key generates a derived key from a password using the PBKDF2 algorithm. The
+// inputs include salt bytes, the iteration count, desired key length, and a
+// constructor for a hashing function.  For example, for a 32-byte key using
+// SHA-256:
+//
+//  key := Key([]byte("trustNo1"), salt, 10000, 32, sha256.New)
+func Key(password, salt []byte, iterCount, keyLen int, h func() hash.Hash) []byte {
+	prf := hmac.New(h, password)
+	hLen := prf.Size()
+	numBlocks := keyLen / hLen
+	// Get an extra block if keyLen is not an exact multiple of hLen.
+	if keyLen%hLen > 0 {
+		numBlocks++
+	}
+
+	Ti := make([]byte, hLen)
+	Uj := make([]byte, hLen)
+	dk := make([]byte, 0, hLen*numBlocks)
+	buf := make([]byte, 4)
+
+	for i := uint32(1); i <= uint32(numBlocks); i++ {
+		// Initialize Uj for j == 1 from salt and block index.
+		// Initialize Ti = U1.
+		binary.BigEndian.PutUint32(buf, i)
+		prf.Reset()
+		prf.Write(salt)
+		prf.Write(buf)
+		Uj = Uj[:0]
+		Uj = prf.Sum(Uj)
+
+		// Ti = U1 ^ U2 ^ ... ^ Ux
+		copy(Ti, Uj)
+		for j := 2; j <= iterCount; j++ {
+			prf.Reset()
+			prf.Write(Uj)
+			Uj = Uj[:0]
+			Uj = prf.Sum(Uj)
+			for k := range Uj {
+				Ti[k] ^= Uj[k]
+			}
+		}
+
+		// DK = concat(T1, T2, ... Tn)
+		dk = append(dk, Ti...)
+	}
+
+	return dk[0:keyLen]
+}
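+
+// A quick sanity sketch against the RFC 6070 test vector for
+// PBKDF2-HMAC-SHA1 (password "password", salt "salt", 1 iteration,
+// 20 bytes), assuming crypto/sha1 and encoding/hex are imported:
+//
+//	k := Key([]byte("password"), []byte("salt"), 1, 20, sha1.New)
+//	// hex.EncodeToString(k) == "0c60c80f961f0e71f3a9b524af6012062fe037a6"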
diff --git a/vendor/github.com/xdg-go/scram/.gitignore b/vendor/github.com/xdg-go/scram/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vendor/github.com/xdg-go/scram/CHANGELOG.md b/vendor/github.com/xdg-go/scram/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..b833be5e2ae98c035504ed0323dc0f0a921e7902
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/CHANGELOG.md
@@ -0,0 +1,26 @@
+# CHANGELOG
+
+## v1.1.2 - 2022-12-07
+
+- Bump stringprep dependency to v1.0.4 for upstream CVE fix.
+
+## v1.1.1 - 2022-03-03
+
+- Bump stringprep dependency to v1.0.3 for upstream CVE fix.
+
+## v1.1.0 - 2022-01-16
+
+- Add SHA-512 hash generator function for convenience.
+
+## v1.0.2 - 2021-03-28
+
+- Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to
+  minimize transitive dependencies and support Go 1.9+.
+
+## v1.0.1 - 2021-03-27
+
+- Bump stringprep dependency to v1.0.2 for Go 1.11 support.
+
+## v1.0.0 - 2021-03-27
+
+- First release as a Go module
diff --git a/vendor/github.com/xdg-go/scram/LICENSE b/vendor/github.com/xdg-go/scram/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..67db8588217f266eb561f75fae738656325deac9
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/xdg-go/scram/README.md b/vendor/github.com/xdg-go/scram/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a46f5cebe643d54527746fc185f890bd278d001
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/README.md
@@ -0,0 +1,72 @@
+[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/scram.svg)](https://pkg.go.dev/github.com/xdg-go/scram)
+[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/scram)](https://goreportcard.com/report/github.com/xdg-go/scram)
+[![Github Actions](https://github.com/xdg-go/scram/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/scram/actions/workflows/test.yml)
+
+# scram – Go implementation of RFC-5802
+
+## Description
+
+Package scram provides client and server implementations of the Salted
+Challenge Response Authentication Mechanism (SCRAM) described in
+[RFC-5802](https://tools.ietf.org/html/rfc5802) and
+[RFC-7677](https://tools.ietf.org/html/rfc7677).
+
+It includes both client-side and server-side support; examples of each appear below.
+
+Channel binding and extensions are not (yet) supported.
+
+## Examples
+
+### Client side
+
+    package main
+
+    import "github.com/xdg-go/scram"
+
+    func main() {
+        // Get Client with username, password and (optional) authorization ID.
+        clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "")
+        if err != nil {
+            panic(err)
+        }
+
+        // Prepare the authentication conversation. Use the empty string as the
+        // initial server message argument to start the conversation.
+        conv := clientSHA1.NewConversation()
+        var serverMsg string
+
+        // Get the first message, send it, and read the response.
+        firstMsg, err := conv.Step(serverMsg)
+        if err != nil {
+            panic(err)
+        }
+        serverMsg = sendClientMsg(firstMsg)
+
+        // Get the second message, send it, and read the response.
+        secondMsg, err := conv.Step(serverMsg)
+        if err != nil {
+            panic(err)
+        }
+        serverMsg = sendClientMsg(secondMsg)
+
+        // Validate the server's final message.  We have no further message to
+        // send so ignore that return value.
+        _, err = conv.Step(serverMsg)
+        if err != nil {
+            panic(err)
+        }
+    }
+
+    func sendClientMsg(s string) string {
+        // A real implementation would send this to a server and read a reply.
+        return ""
+    }
+
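+### Server side
+
+The server side follows the same Step pattern.  A minimal sketch, in which
+lookupCredentials, readClientMsg, and sendServerMsg are illustrative stubs
+rather than part of this package:
+
+    package main
+
+    import "github.com/xdg-go/scram"
+
+    func main() {
+        // Get Server with a callback that returns StoredCredentials for a
+        // given username.
+        serverSHA1, err := scram.SHA1.NewServer(lookupCredentials)
+        if err != nil {
+            panic(err)
+        }
+
+        // Each authentication attempt needs a fresh conversation.
+        conv := serverSHA1.NewConversation()
+
+        // Step through client messages until the conversation completes,
+        // sending each response back to the client.
+        for !conv.Done() {
+            response, err := conv.Step(readClientMsg())
+            if err != nil {
+                panic(err)
+            }
+            sendServerMsg(response)
+        }
+
+        // Valid reports whether the client proved possession of the user's
+        // credentials.
+        if !conv.Valid() {
+            panic("authentication failed")
+        }
+    }
+
+    func lookupCredentials(username string) (scram.StoredCredentials, error) {
+        // A real implementation would return credentials previously
+        // generated via Client.GetStoredCredentials and stored for the user.
+        return scram.StoredCredentials{}, nil
+    }
+
+    func readClientMsg() string {
+        // A real implementation would read a message from the client.
+        return ""
+    }
+
+    func sendServerMsg(s string) {
+        // A real implementation would send this to the client.
+    }
+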
+## Copyright and License
+
+Copyright 2018 by David A. Golden. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"). You may
+obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
diff --git a/vendor/github.com/xdg-go/scram/client.go b/vendor/github.com/xdg-go/scram/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b53021b32a2152b6ae2fc703d3b2e46749b66f1
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/client.go
@@ -0,0 +1,130 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"sync"
+
+	"github.com/xdg-go/pbkdf2"
+)
+
+// Client implements the client side of SCRAM authentication.  It holds
+// configuration values needed to initialize new client-side conversations for
+// a specific username, password and authorization ID tuple.  Client caches
+// the computationally-expensive parts of a SCRAM conversation as described in
+// RFC-5802.  If repeated authentication conversations may be required for a
+// user (e.g. disconnect/reconnect), the user's Client should be preserved.
+//
+// For security reasons, Clients have a default minimum PBKDF2 iteration count
+// of 4096.  If a server requests a smaller iteration count, an authentication
+// conversation will error.
+//
+// A Client can also be used by a server application to construct the hashed
+// authentication values to be stored for a new user.  See
+// GetStoredCredentials for more.
+type Client struct {
+	sync.RWMutex
+	username string
+	password string
+	authzID  string
+	minIters int
+	nonceGen NonceGeneratorFcn
+	hashGen  HashGeneratorFcn
+	cache    map[KeyFactors]derivedKeys
+}
+
+func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client {
+	return &Client{
+		username: username,
+		password: password,
+		authzID:  authzID,
+		minIters: 4096,
+		nonceGen: defaultNonceGenerator,
+		hashGen:  fcn,
+		cache:    make(map[KeyFactors]derivedKeys),
+	}
+}
+
+// WithMinIterations changes the minimum required PBKDF2 iteration count.
+func (c *Client) WithMinIterations(n int) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.minIters = n
+	return c
+}
+
+// WithNonceGenerator replaces the default nonce generator (base64 encoding of
+// 24 bytes from crypto/rand) with a custom generator.  This is provided for
+// testing or for users with custom nonce requirements.
+func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.nonceGen = ng
+	return c
+}
+
+// NewConversation constructs a client-side authentication conversation.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (c *Client) NewConversation() *ClientConversation {
+	c.RLock()
+	defer c.RUnlock()
+	return &ClientConversation{
+		client:   c,
+		nonceGen: c.nonceGen,
+		hashGen:  c.hashGen,
+		minIters: c.minIters,
+	}
+}
+
+func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys {
+	dk, ok := c.getCache(kf)
+	if !ok {
+		dk = c.computeKeys(kf)
+		c.setCache(kf, dk)
+	}
+	return dk
+}
+
+// GetStoredCredentials takes a salt and iteration count structure and
+// provides the values that must be stored by a server to authenticate a
+// user.  These values are what the Server credential lookup function must
+// return for a given username.
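+//
+// For example, when provisioning a new user a server might do the following
+// (a sketch: salt generation and storage are elided, and the iteration
+// count is only an illustration):
+//
+//	kf := KeyFactors{Salt: string(salt), Iters: 10000}
+//	creds := client.GetStoredCredentials(kf)
+//	// persist creds under the username for later lookup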
+func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials {
+	dk := c.getDerivedKeys(kf)
+	return StoredCredentials{
+		KeyFactors: kf,
+		StoredKey:  dk.StoredKey,
+		ServerKey:  dk.ServerKey,
+	}
+}
+
+func (c *Client) computeKeys(kf KeyFactors) derivedKeys {
+	h := c.hashGen()
+	saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen)
+	clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key"))
+
+	return derivedKeys{
+		ClientKey: clientKey,
+		StoredKey: computeHash(c.hashGen, clientKey),
+		ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")),
+	}
+}
+
+func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) {
+	c.RLock()
+	defer c.RUnlock()
+	dk, ok := c.cache[kf]
+	return dk, ok
+}
+
+func (c *Client) setCache(kf KeyFactors, dk derivedKeys) {
+	c.Lock()
+	defer c.Unlock()
+	c.cache[kf] = dk
+}
diff --git a/vendor/github.com/xdg-go/scram/client_conv.go b/vendor/github.com/xdg-go/scram/client_conv.go
new file mode 100644
index 0000000000000000000000000000000000000000..834056889e8ec7b20d345dbd1b8fb828f7ee79ad
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/client_conv.go
@@ -0,0 +1,149 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/hmac"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type clientState int
+
+const (
+	clientStarting clientState = iota
+	clientFirst
+	clientFinal
+	clientDone
+)
+
+// ClientConversation implements the client-side of an authentication
+// conversation with a server.  A new conversation must be created for
+// each authentication attempt.
+type ClientConversation struct {
+	client   *Client
+	nonceGen NonceGeneratorFcn
+	hashGen  HashGeneratorFcn
+	minIters int
+	state    clientState
+	valid    bool
+	gs2      string
+	nonce    string
+	c1b      string
+	serveSig []byte
+}
+
+// Step takes a string provided from a server (or just an empty string for the
+// very first conversation step) and attempts to move the authentication
+// conversation forward.  It returns a string to be sent to the server or an
+// error if the server message is invalid.  Calling Step after a conversation
+// completes is also an error.
+func (cc *ClientConversation) Step(challenge string) (response string, err error) {
+	switch cc.state {
+	case clientStarting:
+		cc.state = clientFirst
+		response, err = cc.firstMsg()
+	case clientFirst:
+		cc.state = clientFinal
+		response, err = cc.finalMsg(challenge)
+	case clientFinal:
+		cc.state = clientDone
+		response, err = cc.validateServer(challenge)
+	default:
+		response, err = "", errors.New("conversation already completed")
+	}
+	return
+}
+
+// Done returns true if the conversation is completed or has errored.
+func (cc *ClientConversation) Done() bool {
+	return cc.state == clientDone
+}
+
+// Valid returns true if the conversation successfully authenticated with the
+// server, including counter-validation that the server actually has the
+// user's stored credentials.
+func (cc *ClientConversation) Valid() bool {
+	return cc.valid
+}
+
+func (cc *ClientConversation) firstMsg() (string, error) {
+	// Values are cached for use in final message parameters
+	cc.gs2 = cc.gs2Header()
+	cc.nonce = cc.client.nonceGen()
+	cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce)
+
+	return cc.gs2 + cc.c1b, nil
+}
+
+func (cc *ClientConversation) finalMsg(s1 string) (string, error) {
+	msg, err := parseServerFirst(s1)
+	if err != nil {
+		return "", err
+	}
+
+	// Check nonce prefix and update
+	if !strings.HasPrefix(msg.nonce, cc.nonce) {
+		return "", errors.New("server nonce did not extend client nonce")
+	}
+	cc.nonce = msg.nonce
+
+	// Check iteration count vs minimum
+	if msg.iters < cc.minIters {
+		return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters)
+	}
+
+	// Create client-final-message-without-proof
+	c2wop := fmt.Sprintf(
+		"c=%s,r=%s",
+		base64.StdEncoding.EncodeToString([]byte(cc.gs2)),
+		cc.nonce,
+	)
+
+	// Create auth message
+	authMsg := cc.c1b + "," + s1 + "," + c2wop
+
+	// Get derived keys from client cache
+	dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters})
+
+	// Create proof as clientkey XOR clientsignature
+	clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg))
+	clientProof := xorBytes(dk.ClientKey, clientSignature)
+	proof := base64.StdEncoding.EncodeToString(clientProof)
+
+	// Cache ServerSignature for later validation
+	cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg))
+
+	return fmt.Sprintf("%s,p=%s", c2wop, proof), nil
+}
+
+func (cc *ClientConversation) validateServer(s2 string) (string, error) {
+	msg, err := parseServerFinal(s2)
+	if err != nil {
+		return "", err
+	}
+
+	if len(msg.err) > 0 {
+		return "", fmt.Errorf("server error: %s", msg.err)
+	}
+
+	if !hmac.Equal(msg.verifier, cc.serveSig) {
+		return "", errors.New("server validation failed")
+	}
+
+	cc.valid = true
+	return "", nil
+}
+
+func (cc *ClientConversation) gs2Header() string {
+	if cc.client.authzID == "" {
+		return "n,,"
+	}
+	return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID))
+}
diff --git a/vendor/github.com/xdg-go/scram/common.go b/vendor/github.com/xdg-go/scram/common.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb705cb74ecf5b19f3e5cddcadbed594308db82a
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/common.go
@@ -0,0 +1,97 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/hmac"
+	"crypto/rand"
+	"encoding/base64"
+	"strings"
+)
+
+// NonceGeneratorFcn defines a function that returns a string of high-quality
+// random printable ASCII characters EXCLUDING the comma (',') character.  The
+// default nonce generator provides Base64 encoding of 24 bytes from
+// crypto/rand.
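+//
+// For example, a fixed-value generator can make tests deterministic (never
+// use a fixed nonce in production):
+//
+//	var testNonce NonceGeneratorFcn = func() string { return "fixed-nonce" }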
+type NonceGeneratorFcn func() string
+
+// derivedKeys collects the three cryptographically derived values
+// into one struct for caching.
+type derivedKeys struct {
+	ClientKey []byte
+	StoredKey []byte
+	ServerKey []byte
+}
+
+// KeyFactors represent the two server-provided factors needed to compute
+// client credentials for authentication.  Salt is decoded bytes (i.e. not
+// base64), but in string form so that KeyFactors can be used as a map key for
+// cached credentials.
+type KeyFactors struct {
+	Salt  string
+	Iters int
+}
+
+// StoredCredentials are the values that a server must store for a given
+// username to allow authentication.  They include the salt and iteration
+// count, plus the derived values to authenticate a client and for the server
+// to authenticate itself back to the client.
+//
+// NOTE: these are specific to a given hash function.  To allow a user to
+// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of
+// StoredCredentials must be created and stored, one for each hash function.
+type StoredCredentials struct {
+	KeyFactors
+	StoredKey []byte
+	ServerKey []byte
+}
+
+// CredentialLookup is a callback to provide StoredCredentials for a given
+// username.  This is used to configure Server objects.
+//
+// NOTE: these are specific to a given hash function.  The callback provided
+// to a Server with a given hash function must provide the corresponding
+// StoredCredentials.
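+//
+// A minimal sketch of a lookup callback, assuming a hypothetical in-memory
+// store (a real server would query its credential database):
+//
+//	var store map[string]StoredCredentials
+//
+//	func lookup(username string) (StoredCredentials, error) {
+//		creds, ok := store[username]
+//		if !ok {
+//			return StoredCredentials{}, fmt.Errorf("unknown user %q", username)
+//		}
+//		return creds, nil
+//	}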
+type CredentialLookup func(string) (StoredCredentials, error)
+
+func defaultNonceGenerator() string {
+	raw := make([]byte, 24)
+	nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw)))
+	// A failed read from crypto/rand must not silently yield a predictable
+	// (all-zero) nonce, so treat it as fatal.
+	if _, err := rand.Read(raw); err != nil {
+		panic(err)
+	}
+	base64.StdEncoding.Encode(nonce, raw)
+	return string(nonce)
+}
+
+func encodeName(s string) string {
+	return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1)
+}
+
+func decodeName(s string) (string, error) {
+	// TODO Check for = not followed by 2C or 3D
+	return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil
+}
+
+func computeHash(hg HashGeneratorFcn, b []byte) []byte {
+	h := hg()
+	h.Write(b)
+	return h.Sum(nil)
+}
+
+func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte {
+	mac := hmac.New(hg, key)
+	mac.Write(data)
+	return mac.Sum(nil)
+}
+
+func xorBytes(a, b []byte) []byte {
+	// TODO check a & b are same length, or just xor to smallest
+	xor := make([]byte, len(a))
+	for i := range a {
+		xor[i] = a[i] ^ b[i]
+	}
+	return xor
+}
diff --git a/vendor/github.com/xdg-go/scram/doc.go b/vendor/github.com/xdg-go/scram/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..82e8aeed8794abe644952b5b33814dcb750ae138
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/doc.go
@@ -0,0 +1,26 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package scram provides client and server implementations of the Salted
+// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802
+// and RFC-7677.
+//
+// Usage
+//
+// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that
+// are used to construct Client or Server objects.
+//
+//     clientSHA1,   err := scram.SHA1.NewClient(username, password, authID)
+//     clientSHA256, err := scram.SHA256.NewClient(username, password, authID)
+//     clientSHA512, err := scram.SHA512.NewClient(username, password, authID)
+//
+//     serverSHA1,   err := scram.SHA1.NewServer(credentialLookupFcn)
+//     serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn)
+//     serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn)
+//
+// These objects are used to construct ClientConversation or
+// ServerConversation objects that are used to carry out authentication.
+package scram
diff --git a/vendor/github.com/xdg-go/scram/parse.go b/vendor/github.com/xdg-go/scram/parse.go
new file mode 100644
index 0000000000000000000000000000000000000000..722f6043d373a5fb86fb020d0cb8853bf52330ad
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/parse.go
@@ -0,0 +1,205 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type c1Msg struct {
+	gs2Header string
+	authzID   string
+	username  string
+	nonce     string
+	c1b       string
+}
+
+type c2Msg struct {
+	cbind []byte
+	nonce string
+	proof []byte
+	c2wop string
+}
+
+type s1Msg struct {
+	nonce string
+	salt  []byte
+	iters int
+}
+
+type s2Msg struct {
+	verifier []byte
+	err      string
+}
+
+func parseField(s, k string) (string, error) {
+	t := strings.TrimPrefix(s, k+"=")
+	if t == s {
+		return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k)
+	}
+	return t, nil
+}
+
+func parseGS2Flag(s string) (string, error) {
+	// Guard the index against an empty field in malformed input.
+	if len(s) > 0 && s[0] == 'p' {
+		return "", fmt.Errorf("channel binding requested but not supported")
+	}
+
+	if s == "n" || s == "y" {
+		return s, nil
+	}
+
+	return "", fmt.Errorf("error parsing '%s' for gs2 flag", s)
+}
+
+func parseFieldBase64(s, k string) ([]byte, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return nil, err
+	}
+
+	dec, err := base64.StdEncoding.DecodeString(raw)
+	if err != nil {
+		return nil, err
+	}
+
+	return dec, nil
+}
+
+func parseFieldInt(s, k string) (int, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return 0, err
+	}
+
+	num, err := strconv.Atoi(raw)
+	if err != nil {
+		return 0, fmt.Errorf("error parsing field '%s': %v", k, err)
+	}
+
+	return num, nil
+}
+
+func parseClientFirst(c1 string) (msg c1Msg, err error) {
+
+	fields := strings.Split(c1, ",")
+	if len(fields) < 4 {
+		err = errors.New("not enough fields in first client message")
+		return
+	}
+
+	gs2flag, err := parseGS2Flag(fields[0])
+	if err != nil {
+		return
+	}
+
+	// 'a' field is optional
+	if len(fields[1]) > 0 {
+		msg.authzID, err = parseField(fields[1], "a")
+		if err != nil {
+			return
+		}
+	}
+
+	// Recombine and save the gs2 header
+	msg.gs2Header = gs2flag + "," + msg.authzID + ","
+
+	// Check for unsupported extensions field "m".
+	if strings.HasPrefix(fields[2], "m=") {
+		err = errors.New("SCRAM message extensions are not supported")
+		return
+	}
+
+	msg.username, err = parseField(fields[2], "n")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[3], "r")
+	if err != nil {
+		return
+	}
+
+	msg.c1b = strings.Join(fields[2:], ",")
+
+	return
+}
+
+func parseClientFinal(c2 string) (msg c2Msg, err error) {
+	fields := strings.Split(c2, ",")
+	if len(fields) < 3 {
+		err = errors.New("not enough fields in final client message")
+		return
+	}
+
+	msg.cbind, err = parseFieldBase64(fields[0], "c")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[1], "r")
+	if err != nil {
+		return
+	}
+
+	// Extension fields may come between nonce and proof, so we
+	// grab the *last* field as proof.
+	msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p")
+	if err != nil {
+		return
+	}
+
+	msg.c2wop = c2[:strings.LastIndex(c2, ",")]
+
+	return
+}
+
+func parseServerFirst(s1 string) (msg s1Msg, err error) {
+
+	// Check for unsupported extensions field "m".
+	if strings.HasPrefix(s1, "m=") {
+		err = errors.New("SCRAM message extensions are not supported")
+		return
+	}
+
+	fields := strings.Split(s1, ",")
+	if len(fields) < 3 {
+		err = errors.New("not enough fields in first server message")
+		return
+	}
+
+	msg.nonce, err = parseField(fields[0], "r")
+	if err != nil {
+		return
+	}
+
+	msg.salt, err = parseFieldBase64(fields[1], "s")
+	if err != nil {
+		return
+	}
+
+	msg.iters, err = parseFieldInt(fields[2], "i")
+
+	return
+}
+
+func parseServerFinal(s2 string) (msg s2Msg, err error) {
+	fields := strings.Split(s2, ",")
+
+	msg.verifier, err = parseFieldBase64(fields[0], "v")
+	if err == nil {
+		return
+	}
+
+	msg.err, err = parseField(fields[0], "e")
+
+	return
+}
diff --git a/vendor/github.com/xdg-go/scram/scram.go b/vendor/github.com/xdg-go/scram/scram.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7b366027e2753fde300f34f1927de8db2b8c74a
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/scram.go
@@ -0,0 +1,71 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/sha512"
+	"fmt"
+	"hash"
+
+	"github.com/xdg-go/stringprep"
+)
+
+// HashGeneratorFcn abstracts a factory function that returns a hash.Hash
+// value to be used for SCRAM operations.  Generally, one would use the
+// provided package variables, `scram.SHA1`, `scram.SHA256`, and
+// `scram.SHA512`, for the most common forms of SCRAM.
+type HashGeneratorFcn func() hash.Hash
+
+// SHA1 is a function that returns a crypto/sha1 hasher and should be used to
+// create Client objects configured for SHA-1 hashing.
+var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() }
+
+// SHA256 is a function that returns a crypto/sha256 hasher and should be used
+// to create Client objects configured for SHA-256 hashing.
+var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() }
+
+// SHA512 is a function that returns a crypto/sha512 hasher and should be used
+// to create Client objects configured for SHA-512 hashing.
+var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() }
+
+// NewClient constructs a SCRAM client component based on a given hash.Hash
+// factory receiver.  This constructor will normalize the username, password
+// and authzID via the SASLprep algorithm, as recommended by RFC-5802.  If
+// SASLprep fails, the method returns an error.
+func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) {
+	var userprep, passprep, authprep string
+	var err error
+
+	if userprep, err = stringprep.SASLprep.Prepare(username); err != nil {
+		return nil, fmt.Errorf("Error SASLprepping username '%s': %v", username, err)
+	}
+	if passprep, err = stringprep.SASLprep.Prepare(password); err != nil {
+		// The password itself is deliberately omitted from the error message.
+		return nil, fmt.Errorf("Error SASLprepping password: %v", err)
+	}
+	if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil {
+		return nil, fmt.Errorf("Error SASLprepping authzID '%s': %v", authzID, err)
+	}
+
+	return newClient(userprep, passprep, authprep, f), nil
+}
+
+// NewClientUnprepped acts like NewClient, except none of the arguments will
+// be normalized via SASLprep.  This is not generally recommended, but is
+// provided for users that may have custom normalization needs.
+func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) {
+	return newClient(username, password, authzID, f), nil
+}
+
+// NewServer constructs a SCRAM server component based on a given hash.Hash
+// factory receiver.  To be maximally generic, it uses dependency injection to
+// handle credential lookup, which is the process of turning a username string
+// into a struct with stored credentials for authentication.
+func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) {
+	return newServer(cl, f)
+}
diff --git a/vendor/github.com/xdg-go/scram/server.go b/vendor/github.com/xdg-go/scram/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..b119b36156d400b71bfbac4beeea58c29c4315fc
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/server.go
@@ -0,0 +1,50 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import "sync"
+
+// Server implements the server side of SCRAM authentication.  It holds
+// configuration values needed to initialize new server-side conversations.
+// Generally, this can be persistent within an application.
+type Server struct {
+	sync.RWMutex
+	credentialCB CredentialLookup
+	nonceGen     NonceGeneratorFcn
+	hashGen      HashGeneratorFcn
+}
+
+func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) {
+	return &Server{
+		credentialCB: cl,
+		nonceGen:     defaultNonceGenerator,
+		hashGen:      fcn,
+	}, nil
+}
+
+// WithNonceGenerator replaces the default nonce generator (base64 encoding of
+// 24 bytes from crypto/rand) with a custom generator.  This is provided for
+// testing or for users with custom nonce requirements.
+func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server {
+	s.Lock()
+	defer s.Unlock()
+	s.nonceGen = ng
+	return s
+}
+
+// NewConversation constructs a server-side authentication conversation.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (s *Server) NewConversation() *ServerConversation {
+	s.RLock()
+	defer s.RUnlock()
+	return &ServerConversation{
+		nonceGen:     s.nonceGen,
+		hashGen:      s.hashGen,
+		credentialCB: s.credentialCB,
+	}
+}
diff --git a/vendor/github.com/xdg-go/scram/server_conv.go b/vendor/github.com/xdg-go/scram/server_conv.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c8838c38aea61e36a9f54ee36db0f2c40a6477f
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/server_conv.go
@@ -0,0 +1,151 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/hmac"
+	"encoding/base64"
+	"errors"
+	"fmt"
+)
+
+type serverState int
+
+const (
+	serverFirst serverState = iota
+	serverFinal
+	serverDone
+)
+
+// ServerConversation implements the server-side of an authentication
+// conversation with a client.  A new conversation must be created for
+// each authentication attempt.
+type ServerConversation struct {
+	nonceGen     NonceGeneratorFcn
+	hashGen      HashGeneratorFcn
+	credentialCB CredentialLookup
+	state        serverState
+	credential   StoredCredentials
+	valid        bool
+	gs2Header    string
+	username     string
+	authzID      string
+	nonce        string
+	c1b          string
+	s1           string
+}
+
+// Step takes a string provided from a client and attempts to move the
+// authentication conversation forward.  It returns a string to be sent to the
+// client or an error if the client message is invalid.  Calling Step after a
+// conversation completes is also an error.
+func (sc *ServerConversation) Step(challenge string) (response string, err error) {
+	switch sc.state {
+	case serverFirst:
+		sc.state = serverFinal
+		response, err = sc.firstMsg(challenge)
+	case serverFinal:
+		sc.state = serverDone
+		response, err = sc.finalMsg(challenge)
+	default:
+		response, err = "", errors.New("conversation already completed")
+	}
+	return
+}
+
+// Done returns true if the conversation is completed or has errored.
+func (sc *ServerConversation) Done() bool {
+	return sc.state == serverDone
+}
+
+// Valid returns true if the conversation successfully authenticated the
+// client.
+func (sc *ServerConversation) Valid() bool {
+	return sc.valid
+}
+
+// Username returns the client-provided username.  This is valid to call
+// if the first conversation Step() is successful.
+func (sc *ServerConversation) Username() string {
+	return sc.username
+}
+
+// AuthzID returns the (optional) client-provided authorization identity, if
+// any.  If one was not provided, it returns the empty string.  This is valid
+// to call if the first conversation Step() is successful.
+func (sc *ServerConversation) AuthzID() string {
+	return sc.authzID
+}
+
+func (sc *ServerConversation) firstMsg(c1 string) (string, error) {
+	msg, err := parseClientFirst(c1)
+	if err != nil {
+		sc.state = serverDone
+		return "", err
+	}
+
+	sc.gs2Header = msg.gs2Header
+	sc.username = msg.username
+	sc.authzID = msg.authzID
+
+	sc.credential, err = sc.credentialCB(msg.username)
+	if err != nil {
+		sc.state = serverDone
+		return "e=unknown-user", err
+	}
+
+	sc.nonce = msg.nonce + sc.nonceGen()
+	sc.c1b = msg.c1b
+	sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d",
+		sc.nonce,
+		base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)),
+		sc.credential.Iters,
+	)
+
+	return sc.s1, nil
+}
+
+// For errors, returns server error message as well as non-nil error.  Callers
+// can choose whether to send server error or not.
+func (sc *ServerConversation) finalMsg(c2 string) (string, error) {
+	msg, err := parseClientFinal(c2)
+	if err != nil {
+		return "", err
+	}
+
+	// Check channel binding matches what we expect; in this case, we expect
+	// just the gs2 header we received as we don't support channel binding
+	// with a data payload.  If we add binding, we need to independently
+	// compute the header to match here.
+	if string(msg.cbind) != sc.gs2Header {
+		return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header)
+	}
+
+	// Check nonce received matches what we sent
+	if msg.nonce != sc.nonce {
+		return "e=other-error", errors.New("nonce received did not match nonce sent")
+	}
+
+	// Create auth message
+	authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop
+
+	// Retrieve ClientKey from proof and verify it
+	clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg))
+	clientKey := xorBytes(msg.proof, clientSignature)
+	storedKey := computeHash(sc.hashGen, clientKey)
+
+	// Compare with constant-time function
+	if !hmac.Equal(storedKey, sc.credential.StoredKey) {
+		return "e=invalid-proof", errors.New("challenge proof invalid")
+	}
+
+	sc.valid = true
+
+	// Compute and return server verifier
+	serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg))
+	return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil
+}
diff --git a/vendor/github.com/xdg-go/stringprep/.gitignore b/vendor/github.com/xdg-go/stringprep/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..04b9753cdf5fa93092aaf7da03b6b0d2a10a2f3d
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md
@@ -0,0 +1,36 @@
+# CHANGELOG
+
+<a name="v1.0.4"></a>
+## [v1.0.4] - 2022-12-07
+
+### Maintenance
+
+- Bump golang.org/x/text to v0.3.8 due to CVE-2022-32149
+
+<a name="v1.0.3"></a>
+## [v1.0.3] - 2022-03-01
+
+### Maintenance
+
+- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561
+
+<a name="v1.0.2"></a>
+## [v1.0.2] - 2021-03-27
+
+### Maintenance
+
+- Change minimum Go version to 1.11
+
+<a name="v1.0.1"></a>
+## [v1.0.1] - 2021-03-24
+
+### Bug Fixes
+
+- Add go.mod file
+
+<a name="v1.0.0"></a>
+## [v1.0.0] - 2018-02-21
+
+[v1.0.4]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.4
+[v1.0.3]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.3
+[v1.0.2]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.2
+[v1.0.1]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.1
+[v1.0.0]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.0
diff --git a/vendor/github.com/xdg-go/stringprep/LICENSE b/vendor/github.com/xdg-go/stringprep/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..67db8588217f266eb561f75fae738656325deac9
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/xdg-go/stringprep/README.md b/vendor/github.com/xdg-go/stringprep/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..83ea5346dd0ab3a4504fd33a432a8973a158c07e
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/README.md
@@ -0,0 +1,28 @@
+[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/stringprep.svg)](https://pkg.go.dev/github.com/xdg-go/stringprep)
+[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/stringprep)](https://goreportcard.com/report/github.com/xdg-go/stringprep)
+[![Github Actions](https://github.com/xdg-go/stringprep/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/stringprep/actions/workflows/test.yml)
+
+# stringprep – Go implementation of RFC-3454 stringprep and RFC-4013 SASLprep
+
+## Synopsis
+
+```
+import "github.com/xdg-go/stringprep"
+
+prepped, err := stringprep.SASLprep.Prepare("TrustNô1")
+```
+
+## Description
+
+This library provides an implementation of the stringprep algorithm
+(RFC-3454) in Go, including all data tables.
+
+A pre-built SASLprep (RFC-4013) profile is provided as well.
+
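+Prepare returns an error when the input contains prohibited code points.
+For instance, SASLprep prohibits ASCII control characters, so a NUL byte
+is rejected (a short sketch):
+
+```
+_, err := stringprep.SASLprep.Prepare("bad\x00name")
+// err is non-nil; its message identifies the offending rune, e.g.
+// prohibited character (rune: '\u0000')
+```
+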
+## Copyright and License
+
+Copyright 2018 by David A. Golden. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"). You may
+obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
diff --git a/vendor/github.com/xdg-go/stringprep/bidi.go b/vendor/github.com/xdg-go/stringprep/bidi.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f6d321dfdabcd017c8a0e6e25389eb079f0d87d
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/bidi.go
@@ -0,0 +1,73 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+var errHasLCat = "BiDi string can't have runes from category L"
+var errFirstRune = "BiDi string first rune must have category R or AL"
+var errLastRune = "BiDi string last rune must have category R or AL"
+
+// Check for prohibited characters from table C.8
+func checkBiDiProhibitedRune(s string) error {
+	for _, r := range s {
+		if TableC8.Contains(r) {
+			return Error{Msg: errProhibited, Rune: r}
+		}
+	}
+	return nil
+}
+
+// Check for LCat characters from table D.2
+func checkBiDiLCat(s string) error {
+	for _, r := range s {
+		if TableD2.Contains(r) {
+			return Error{Msg: errHasLCat, Rune: r}
+		}
+	}
+	return nil
+}
+
+// Check first and last characters are in table D.1; requires non-empty string
+func checkBadFirstAndLastRandALCat(s string) error {
+	rs := []rune(s)
+	if !TableD1.Contains(rs[0]) {
+		return Error{Msg: errFirstRune, Rune: rs[0]}
+	}
+	n := len(rs) - 1
+	if !TableD1.Contains(rs[n]) {
+		return Error{Msg: errLastRune, Rune: rs[n]}
+	}
+	return nil
+}
+
+// Look for RandALCat characters from table D.1
+func hasBiDiRandALCat(s string) bool {
+	for _, r := range s {
+		if TableD1.Contains(r) {
+			return true
+		}
+	}
+	return false
+}
+
+// Check that BiDi rules are satisfied; empty strings pass this rule.
+func passesBiDiRules(s string) error {
+	if len(s) == 0 {
+		return nil
+	}
+	if err := checkBiDiProhibitedRune(s); err != nil {
+		return err
+	}
+	if hasBiDiRandALCat(s) {
+		if err := checkBiDiLCat(s); err != nil {
+			return err
+		}
+		if err := checkBadFirstAndLastRandALCat(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/xdg-go/stringprep/doc.go b/vendor/github.com/xdg-go/stringprep/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..b319e081b756b77f31b99bfdc4f836028d8daa74
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package stringprep provides data tables and algorithms for RFC-3454,
+// including errata (as of 2018-02).  It also provides a profile for
+// SASLprep as defined in RFC-4013.
+package stringprep
diff --git a/vendor/github.com/xdg-go/stringprep/error.go b/vendor/github.com/xdg-go/stringprep/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..7403e499119096b974f57e78b160ec55f2e60da5
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/error.go
@@ -0,0 +1,14 @@
+package stringprep
+
+import "fmt"
+
+// Error describes problems encountered during stringprep, including what rune
+// was problematic.
+type Error struct {
+	Msg  string
+	Rune rune
+}
+
+func (e Error) Error() string {
+	return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune)
+}
diff --git a/vendor/github.com/xdg-go/stringprep/map.go b/vendor/github.com/xdg-go/stringprep/map.go
new file mode 100644
index 0000000000000000000000000000000000000000..e56a0dd43eda15231813163ea2946c2311049e33
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/map.go
@@ -0,0 +1,21 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+// Mapping represents a stringprep mapping, from a single rune to zero or more
+// runes.
+type Mapping map[rune][]rune
+
+// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping.
+// The ok return value is false if the rune was not found.
+func (m Mapping) Map(r rune) (replacement []rune, ok bool) {
+	rs, ok := m[r]
+	if !ok {
+		return nil, false
+	}
+	return rs, true
+}
diff --git a/vendor/github.com/xdg-go/stringprep/profile.go b/vendor/github.com/xdg-go/stringprep/profile.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a73be9e548d73aa4c3ad2d8499e68863fb41b65
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/profile.go
@@ -0,0 +1,75 @@
+package stringprep
+
+import (
+	"golang.org/x/text/unicode/norm"
+)
+
+// Profile represents a stringprep profile.
+type Profile struct {
+	Mappings  []Mapping
+	Normalize bool
+	Prohibits []Set
+	CheckBiDi bool
+}
+
+var errProhibited = "prohibited character"
+
+// Prepare transforms an input string to an output string following the
+// rules defined in the profile, per RFC-3454.
+func (p Profile) Prepare(s string) (string, error) {
+	// Assume the output is roughly the same length as the input; len(s)
+	// counts bytes, so this over-allocates for multi-byte runes.
+	temp := make([]rune, 0, len(s))
+
+	// Apply maps
+	for _, r := range s {
+		rs, ok := p.applyMaps(r)
+		if ok {
+			temp = append(temp, rs...)
+		} else {
+			temp = append(temp, r)
+		}
+	}
+
+	// Normalize
+	var out string
+	if p.Normalize {
+		out = norm.NFKC.String(string(temp))
+	} else {
+		out = string(temp)
+	}
+
+	// Check prohibited
+	for _, r := range out {
+		if p.runeIsProhibited(r) {
+			return "", Error{Msg: errProhibited, Rune: r}
+		}
+	}
+
+	// Check BiDi allowed
+	if p.CheckBiDi {
+		if err := passesBiDiRules(out); err != nil {
+			return "", err
+		}
+	}
+
+	return out, nil
+}
+
+func (p Profile) applyMaps(r rune) ([]rune, bool) {
+	for _, m := range p.Mappings {
+		rs, ok := m.Map(r)
+		if ok {
+			return rs, true
+		}
+	}
+	return nil, false
+}
+
+func (p Profile) runeIsProhibited(r rune) bool {
+	for _, s := range p.Prohibits {
+		if s.Contains(r) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/xdg-go/stringprep/saslprep.go b/vendor/github.com/xdg-go/stringprep/saslprep.go
new file mode 100644
index 0000000000000000000000000000000000000000..40013488bfdae295cbc219409862a497a7580d6f
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/saslprep.go
@@ -0,0 +1,52 @@
+package stringprep
+
+var mapNonASCIISpaceToASCIISpace = Mapping{
+	0x00A0: []rune{0x0020},
+	0x1680: []rune{0x0020},
+	0x2000: []rune{0x0020},
+	0x2001: []rune{0x0020},
+	0x2002: []rune{0x0020},
+	0x2003: []rune{0x0020},
+	0x2004: []rune{0x0020},
+	0x2005: []rune{0x0020},
+	0x2006: []rune{0x0020},
+	0x2007: []rune{0x0020},
+	0x2008: []rune{0x0020},
+	0x2009: []rune{0x0020},
+	0x200A: []rune{0x0020},
+	0x200B: []rune{0x0020},
+	0x202F: []rune{0x0020},
+	0x205F: []rune{0x0020},
+	0x3000: []rune{0x0020},
+}
+
+// SASLprep is a pre-defined stringprep profile for user names and passwords
+// as described in RFC-4013.
+//
+// Because the stringprep distinction between query and stored strings was
+// intended for compatibility across profile versions, but SASLprep was never
+// updated and is now deprecated, this profile only operates in stored
+// strings mode, prohibiting unassigned code points.
+var SASLprep Profile = saslprep
+
+var saslprep = Profile{
+	Mappings: []Mapping{
+		TableB1,
+		mapNonASCIISpaceToASCIISpace,
+	},
+	Normalize: true,
+	Prohibits: []Set{
+		TableA1,
+		TableC1_2,
+		TableC2_1,
+		TableC2_2,
+		TableC3,
+		TableC4,
+		TableC5,
+		TableC6,
+		TableC7,
+		TableC8,
+		TableC9,
+	},
+	CheckBiDi: true,
+}
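
In practice the profile is used through SASLprep.Prepare. A short sketch reproducing two of the examples from RFC-4013: a soft hyphen is mapped to nothing by TableB1, while an ASCII control character is rejected as prohibited.

package main

import (
	"fmt"

	"github.com/xdg-go/stringprep"
)

func main() {
	out, err := stringprep.SASLprep.Prepare("I\u00ADX")
	fmt.Printf("%q %v\n", out, err) // "IX" <nil>

	_, err = stringprep.SASLprep.Prepare("\u0007")
	fmt.Println(err) // prohibited character (rune: '\u0007')
}
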
diff --git a/vendor/github.com/xdg-go/stringprep/set.go b/vendor/github.com/xdg-go/stringprep/set.go
new file mode 100644
index 0000000000000000000000000000000000000000..c837e28c88a37987879bb54938d14e00c79f098f
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/set.go
@@ -0,0 +1,36 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+import "sort"
+
+// RuneRange represents a closed (inclusive) range of runes [N, M]. For a
+// range consisting of a single rune, N and M are equal.
+type RuneRange [2]rune
+
+// Contains returns true if a rune is within the bounds of the RuneRange.
+func (rr RuneRange) Contains(r rune) bool {
+	return rr[0] <= r && r <= rr[1]
+}
+
+// isAbove returns true if the RuneRange begins at or above the given rune;
+// Set.Contains uses it as the sort.Search predicate below.
+func (rr RuneRange) isAbove(r rune) bool {
+	return r <= rr[0]
+}
+
+// Set represents a stringprep data table used to identify runes of a
+// particular type.
+type Set []RuneRange
+
+// Contains returns true if a rune is within any of the RuneRanges in the
+// Set.
+func (s Set) Contains(r rune) bool {
+	i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) })
+	if i < len(s) && s[i].Contains(r) {
+		return true
+	}
+	return false
+}
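
Contains performs a binary search via sort.Search, which relies on the RuneRanges in a Set being sorted in ascending order and non-overlapping, as the RFC-3454 tables in this package are. A minimal sketch with an illustrative two-range set:

package main

import (
	"fmt"

	"github.com/xdg-go/stringprep"
)

func main() {
	// Ranges must be in ascending order for the binary search to work.
	s := stringprep.Set{
		{0x0041, 0x005A}, // 'A'..'Z'
		{0x0061, 0x007A}, // 'a'..'z'
	}
	fmt.Println(s.Contains('M')) // true
	fmt.Println(s.Contains('_')) // false: 0x005F falls between the ranges
	fmt.Println(s.Contains('m')) // true
}
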
diff --git a/vendor/github.com/xdg-go/stringprep/tables.go b/vendor/github.com/xdg-go/stringprep/tables.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3fc1fa0fae2119346a797052abe87f3170ef3bc
--- /dev/null
+++ b/vendor/github.com/xdg-go/stringprep/tables.go
@@ -0,0 +1,3215 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+var tableA1 = Set{
+	RuneRange{0x0221, 0x0221},
+	RuneRange{0x0234, 0x024F},
+	RuneRange{0x02AE, 0x02AF},
+	RuneRange{0x02EF, 0x02FF},
+	RuneRange{0x0350, 0x035F},
+	RuneRange{0x0370, 0x0373},
+	RuneRange{0x0376, 0x0379},
+	RuneRange{0x037B, 0x037D},
+	RuneRange{0x037F, 0x0383},
+	RuneRange{0x038B, 0x038B},
+	RuneRange{0x038D, 0x038D},
+	RuneRange{0x03A2, 0x03A2},
+	RuneRange{0x03CF, 0x03CF},
+	RuneRange{0x03F7, 0x03FF},
+	RuneRange{0x0487, 0x0487},
+	RuneRange{0x04CF, 0x04CF},
+	RuneRange{0x04F6, 0x04F7},
+	RuneRange{0x04FA, 0x04FF},
+	RuneRange{0x0510, 0x0530},
+	RuneRange{0x0557, 0x0558},
+	RuneRange{0x0560, 0x0560},
+	RuneRange{0x0588, 0x0588},
+	RuneRange{0x058B, 0x0590},
+	RuneRange{0x05A2, 0x05A2},
+	RuneRange{0x05BA, 0x05BA},
+	RuneRange{0x05C5, 0x05CF},
+	RuneRange{0x05EB, 0x05EF},
+	RuneRange{0x05F5, 0x060B},
+	RuneRange{0x060D, 0x061A},
+	RuneRange{0x061C, 0x061E},
+	RuneRange{0x0620, 0x0620},
+	RuneRange{0x063B, 0x063F},
+	RuneRange{0x0656, 0x065F},
+	RuneRange{0x06EE, 0x06EF},
+	RuneRange{0x06FF, 0x06FF},
+	RuneRange{0x070E, 0x070E},
+	RuneRange{0x072D, 0x072F},
+	RuneRange{0x074B, 0x077F},
+	RuneRange{0x07B2, 0x0900},
+	RuneRange{0x0904, 0x0904},
+	RuneRange{0x093A, 0x093B},
+	RuneRange{0x094E, 0x094F},
+	RuneRange{0x0955, 0x0957},
+	RuneRange{0x0971, 0x0980},
+	RuneRange{0x0984, 0x0984},
+	RuneRange{0x098D, 0x098E},
+	RuneRange{0x0991, 0x0992},
+	RuneRange{0x09A9, 0x09A9},
+	RuneRange{0x09B1, 0x09B1},
+	RuneRange{0x09B3, 0x09B5},
+	RuneRange{0x09BA, 0x09BB},
+	RuneRange{0x09BD, 0x09BD},
+	RuneRange{0x09C5, 0x09C6},
+	RuneRange{0x09C9, 0x09CA},
+	RuneRange{0x09CE, 0x09D6},
+	RuneRange{0x09D8, 0x09DB},
+	RuneRange{0x09DE, 0x09DE},
+	RuneRange{0x09E4, 0x09E5},
+	RuneRange{0x09FB, 0x0A01},
+	RuneRange{0x0A03, 0x0A04},
+	RuneRange{0x0A0B, 0x0A0E},
+	RuneRange{0x0A11, 0x0A12},
+	RuneRange{0x0A29, 0x0A29},
+	RuneRange{0x0A31, 0x0A31},
+	RuneRange{0x0A34, 0x0A34},
+	RuneRange{0x0A37, 0x0A37},
+	RuneRange{0x0A3A, 0x0A3B},
+	RuneRange{0x0A3D, 0x0A3D},
+	RuneRange{0x0A43, 0x0A46},
+	RuneRange{0x0A49, 0x0A4A},
+	RuneRange{0x0A4E, 0x0A58},
+	RuneRange{0x0A5D, 0x0A5D},
+	RuneRange{0x0A5F, 0x0A65},
+	RuneRange{0x0A75, 0x0A80},
+	RuneRange{0x0A84, 0x0A84},
+	RuneRange{0x0A8C, 0x0A8C},
+	RuneRange{0x0A8E, 0x0A8E},
+	RuneRange{0x0A92, 0x0A92},
+	RuneRange{0x0AA9, 0x0AA9},
+	RuneRange{0x0AB1, 0x0AB1},
+	RuneRange{0x0AB4, 0x0AB4},
+	RuneRange{0x0ABA, 0x0ABB},
+	RuneRange{0x0AC6, 0x0AC6},
+	RuneRange{0x0ACA, 0x0ACA},
+	RuneRange{0x0ACE, 0x0ACF},
+	RuneRange{0x0AD1, 0x0ADF},
+	RuneRange{0x0AE1, 0x0AE5},
+	RuneRange{0x0AF0, 0x0B00},
+	RuneRange{0x0B04, 0x0B04},
+	RuneRange{0x0B0D, 0x0B0E},
+	RuneRange{0x0B11, 0x0B12},
+	RuneRange{0x0B29, 0x0B29},
+	RuneRange{0x0B31, 0x0B31},
+	RuneRange{0x0B34, 0x0B35},
+	RuneRange{0x0B3A, 0x0B3B},
+	RuneRange{0x0B44, 0x0B46},
+	RuneRange{0x0B49, 0x0B4A},
+	RuneRange{0x0B4E, 0x0B55},
+	RuneRange{0x0B58, 0x0B5B},
+	RuneRange{0x0B5E, 0x0B5E},
+	RuneRange{0x0B62, 0x0B65},
+	RuneRange{0x0B71, 0x0B81},
+	RuneRange{0x0B84, 0x0B84},
+	RuneRange{0x0B8B, 0x0B8D},
+	RuneRange{0x0B91, 0x0B91},
+	RuneRange{0x0B96, 0x0B98},
+	RuneRange{0x0B9B, 0x0B9B},
+	RuneRange{0x0B9D, 0x0B9D},
+	RuneRange{0x0BA0, 0x0BA2},
+	RuneRange{0x0BA5, 0x0BA7},
+	RuneRange{0x0BAB, 0x0BAD},
+	RuneRange{0x0BB6, 0x0BB6},
+	RuneRange{0x0BBA, 0x0BBD},
+	RuneRange{0x0BC3, 0x0BC5},
+	RuneRange{0x0BC9, 0x0BC9},
+	RuneRange{0x0BCE, 0x0BD6},
+	RuneRange{0x0BD8, 0x0BE6},
+	RuneRange{0x0BF3, 0x0C00},
+	RuneRange{0x0C04, 0x0C04},
+	RuneRange{0x0C0D, 0x0C0D},
+	RuneRange{0x0C11, 0x0C11},
+	RuneRange{0x0C29, 0x0C29},
+	RuneRange{0x0C34, 0x0C34},
+	RuneRange{0x0C3A, 0x0C3D},
+	RuneRange{0x0C45, 0x0C45},
+	RuneRange{0x0C49, 0x0C49},
+	RuneRange{0x0C4E, 0x0C54},
+	RuneRange{0x0C57, 0x0C5F},
+	RuneRange{0x0C62, 0x0C65},
+	RuneRange{0x0C70, 0x0C81},
+	RuneRange{0x0C84, 0x0C84},
+	RuneRange{0x0C8D, 0x0C8D},
+	RuneRange{0x0C91, 0x0C91},
+	RuneRange{0x0CA9, 0x0CA9},
+	RuneRange{0x0CB4, 0x0CB4},
+	RuneRange{0x0CBA, 0x0CBD},
+	RuneRange{0x0CC5, 0x0CC5},
+	RuneRange{0x0CC9, 0x0CC9},
+	RuneRange{0x0CCE, 0x0CD4},
+	RuneRange{0x0CD7, 0x0CDD},
+	RuneRange{0x0CDF, 0x0CDF},
+	RuneRange{0x0CE2, 0x0CE5},
+	RuneRange{0x0CF0, 0x0D01},
+	RuneRange{0x0D04, 0x0D04},
+	RuneRange{0x0D0D, 0x0D0D},
+	RuneRange{0x0D11, 0x0D11},
+	RuneRange{0x0D29, 0x0D29},
+	RuneRange{0x0D3A, 0x0D3D},
+	RuneRange{0x0D44, 0x0D45},
+	RuneRange{0x0D49, 0x0D49},
+	RuneRange{0x0D4E, 0x0D56},
+	RuneRange{0x0D58, 0x0D5F},
+	RuneRange{0x0D62, 0x0D65},
+	RuneRange{0x0D70, 0x0D81},
+	RuneRange{0x0D84, 0x0D84},
+	RuneRange{0x0D97, 0x0D99},
+	RuneRange{0x0DB2, 0x0DB2},
+	RuneRange{0x0DBC, 0x0DBC},
+	RuneRange{0x0DBE, 0x0DBF},
+	RuneRange{0x0DC7, 0x0DC9},
+	RuneRange{0x0DCB, 0x0DCE},
+	RuneRange{0x0DD5, 0x0DD5},
+	RuneRange{0x0DD7, 0x0DD7},
+	RuneRange{0x0DE0, 0x0DF1},
+	RuneRange{0x0DF5, 0x0E00},
+	RuneRange{0x0E3B, 0x0E3E},
+	RuneRange{0x0E5C, 0x0E80},
+	RuneRange{0x0E83, 0x0E83},
+	RuneRange{0x0E85, 0x0E86},
+	RuneRange{0x0E89, 0x0E89},
+	RuneRange{0x0E8B, 0x0E8C},
+	RuneRange{0x0E8E, 0x0E93},
+	RuneRange{0x0E98, 0x0E98},
+	RuneRange{0x0EA0, 0x0EA0},
+	RuneRange{0x0EA4, 0x0EA4},
+	RuneRange{0x0EA6, 0x0EA6},
+	RuneRange{0x0EA8, 0x0EA9},
+	RuneRange{0x0EAC, 0x0EAC},
+	RuneRange{0x0EBA, 0x0EBA},
+	RuneRange{0x0EBE, 0x0EBF},
+	RuneRange{0x0EC5, 0x0EC5},
+	RuneRange{0x0EC7, 0x0EC7},
+	RuneRange{0x0ECE, 0x0ECF},
+	RuneRange{0x0EDA, 0x0EDB},
+	RuneRange{0x0EDE, 0x0EFF},
+	RuneRange{0x0F48, 0x0F48},
+	RuneRange{0x0F6B, 0x0F70},
+	RuneRange{0x0F8C, 0x0F8F},
+	RuneRange{0x0F98, 0x0F98},
+	RuneRange{0x0FBD, 0x0FBD},
+	RuneRange{0x0FCD, 0x0FCE},
+	RuneRange{0x0FD0, 0x0FFF},
+	RuneRange{0x1022, 0x1022},
+	RuneRange{0x1028, 0x1028},
+	RuneRange{0x102B, 0x102B},
+	RuneRange{0x1033, 0x1035},
+	RuneRange{0x103A, 0x103F},
+	RuneRange{0x105A, 0x109F},
+	RuneRange{0x10C6, 0x10CF},
+	RuneRange{0x10F9, 0x10FA},
+	RuneRange{0x10FC, 0x10FF},
+	RuneRange{0x115A, 0x115E},
+	RuneRange{0x11A3, 0x11A7},
+	RuneRange{0x11FA, 0x11FF},
+	RuneRange{0x1207, 0x1207},
+	RuneRange{0x1247, 0x1247},
+	RuneRange{0x1249, 0x1249},
+	RuneRange{0x124E, 0x124F},
+	RuneRange{0x1257, 0x1257},
+	RuneRange{0x1259, 0x1259},
+	RuneRange{0x125E, 0x125F},
+	RuneRange{0x1287, 0x1287},
+	RuneRange{0x1289, 0x1289},
+	RuneRange{0x128E, 0x128F},
+	RuneRange{0x12AF, 0x12AF},
+	RuneRange{0x12B1, 0x12B1},
+	RuneRange{0x12B6, 0x12B7},
+	RuneRange{0x12BF, 0x12BF},
+	RuneRange{0x12C1, 0x12C1},
+	RuneRange{0x12C6, 0x12C7},
+	RuneRange{0x12CF, 0x12CF},
+	RuneRange{0x12D7, 0x12D7},
+	RuneRange{0x12EF, 0x12EF},
+	RuneRange{0x130F, 0x130F},
+	RuneRange{0x1311, 0x1311},
+	RuneRange{0x1316, 0x1317},
+	RuneRange{0x131F, 0x131F},
+	RuneRange{0x1347, 0x1347},
+	RuneRange{0x135B, 0x1360},
+	RuneRange{0x137D, 0x139F},
+	RuneRange{0x13F5, 0x1400},
+	RuneRange{0x1677, 0x167F},
+	RuneRange{0x169D, 0x169F},
+	RuneRange{0x16F1, 0x16FF},
+	RuneRange{0x170D, 0x170D},
+	RuneRange{0x1715, 0x171F},
+	RuneRange{0x1737, 0x173F},
+	RuneRange{0x1754, 0x175F},
+	RuneRange{0x176D, 0x176D},
+	RuneRange{0x1771, 0x1771},
+	RuneRange{0x1774, 0x177F},
+	RuneRange{0x17DD, 0x17DF},
+	RuneRange{0x17EA, 0x17FF},
+	RuneRange{0x180F, 0x180F},
+	RuneRange{0x181A, 0x181F},
+	RuneRange{0x1878, 0x187F},
+	RuneRange{0x18AA, 0x1DFF},
+	RuneRange{0x1E9C, 0x1E9F},
+	RuneRange{0x1EFA, 0x1EFF},
+	RuneRange{0x1F16, 0x1F17},
+	RuneRange{0x1F1E, 0x1F1F},
+	RuneRange{0x1F46, 0x1F47},
+	RuneRange{0x1F4E, 0x1F4F},
+	RuneRange{0x1F58, 0x1F58},
+	RuneRange{0x1F5A, 0x1F5A},
+	RuneRange{0x1F5C, 0x1F5C},
+	RuneRange{0x1F5E, 0x1F5E},
+	RuneRange{0x1F7E, 0x1F7F},
+	RuneRange{0x1FB5, 0x1FB5},
+	RuneRange{0x1FC5, 0x1FC5},
+	RuneRange{0x1FD4, 0x1FD5},
+	RuneRange{0x1FDC, 0x1FDC},
+	RuneRange{0x1FF0, 0x1FF1},
+	RuneRange{0x1FF5, 0x1FF5},
+	RuneRange{0x1FFF, 0x1FFF},
+	RuneRange{0x2053, 0x2056},
+	RuneRange{0x2058, 0x205E},
+	RuneRange{0x2064, 0x2069},
+	RuneRange{0x2072, 0x2073},
+	RuneRange{0x208F, 0x209F},
+	RuneRange{0x20B2, 0x20CF},
+	RuneRange{0x20EB, 0x20FF},
+	RuneRange{0x213B, 0x213C},
+	RuneRange{0x214C, 0x2152},
+	RuneRange{0x2184, 0x218F},
+	RuneRange{0x23CF, 0x23FF},
+	RuneRange{0x2427, 0x243F},
+	RuneRange{0x244B, 0x245F},
+	RuneRange{0x24FF, 0x24FF},
+	RuneRange{0x2614, 0x2615},
+	RuneRange{0x2618, 0x2618},
+	RuneRange{0x267E, 0x267F},
+	RuneRange{0x268A, 0x2700},
+	RuneRange{0x2705, 0x2705},
+	RuneRange{0x270A, 0x270B},
+	RuneRange{0x2728, 0x2728},
+	RuneRange{0x274C, 0x274C},
+	RuneRange{0x274E, 0x274E},
+	RuneRange{0x2753, 0x2755},
+	RuneRange{0x2757, 0x2757},
+	RuneRange{0x275F, 0x2760},
+	RuneRange{0x2795, 0x2797},
+	RuneRange{0x27B0, 0x27B0},
+	RuneRange{0x27BF, 0x27CF},
+	RuneRange{0x27EC, 0x27EF},
+	RuneRange{0x2B00, 0x2E7F},
+	RuneRange{0x2E9A, 0x2E9A},
+	RuneRange{0x2EF4, 0x2EFF},
+	RuneRange{0x2FD6, 0x2FEF},
+	RuneRange{0x2FFC, 0x2FFF},
+	RuneRange{0x3040, 0x3040},
+	RuneRange{0x3097, 0x3098},
+	RuneRange{0x3100, 0x3104},
+	RuneRange{0x312D, 0x3130},
+	RuneRange{0x318F, 0x318F},
+	RuneRange{0x31B8, 0x31EF},
+	RuneRange{0x321D, 0x321F},
+	RuneRange{0x3244, 0x3250},
+	RuneRange{0x327C, 0x327E},
+	RuneRange{0x32CC, 0x32CF},
+	RuneRange{0x32FF, 0x32FF},
+	RuneRange{0x3377, 0x337A},
+	RuneRange{0x33DE, 0x33DF},
+	RuneRange{0x33FF, 0x33FF},
+	RuneRange{0x4DB6, 0x4DFF},
+	RuneRange{0x9FA6, 0x9FFF},
+	RuneRange{0xA48D, 0xA48F},
+	RuneRange{0xA4C7, 0xABFF},
+	RuneRange{0xD7A4, 0xD7FF},
+	RuneRange{0xFA2E, 0xFA2F},
+	RuneRange{0xFA6B, 0xFAFF},
+	RuneRange{0xFB07, 0xFB12},
+	RuneRange{0xFB18, 0xFB1C},
+	RuneRange{0xFB37, 0xFB37},
+	RuneRange{0xFB3D, 0xFB3D},
+	RuneRange{0xFB3F, 0xFB3F},
+	RuneRange{0xFB42, 0xFB42},
+	RuneRange{0xFB45, 0xFB45},
+	RuneRange{0xFBB2, 0xFBD2},
+	RuneRange{0xFD40, 0xFD4F},
+	RuneRange{0xFD90, 0xFD91},
+	RuneRange{0xFDC8, 0xFDCF},
+	RuneRange{0xFDFD, 0xFDFF},
+	RuneRange{0xFE10, 0xFE1F},
+	RuneRange{0xFE24, 0xFE2F},
+	RuneRange{0xFE47, 0xFE48},
+	RuneRange{0xFE53, 0xFE53},
+	RuneRange{0xFE67, 0xFE67},
+	RuneRange{0xFE6C, 0xFE6F},
+	RuneRange{0xFE75, 0xFE75},
+	RuneRange{0xFEFD, 0xFEFE},
+	RuneRange{0xFF00, 0xFF00},
+	RuneRange{0xFFBF, 0xFFC1},
+	RuneRange{0xFFC8, 0xFFC9},
+	RuneRange{0xFFD0, 0xFFD1},
+	RuneRange{0xFFD8, 0xFFD9},
+	RuneRange{0xFFDD, 0xFFDF},
+	RuneRange{0xFFE7, 0xFFE7},
+	RuneRange{0xFFEF, 0xFFF8},
+	RuneRange{0x10000, 0x102FF},
+	RuneRange{0x1031F, 0x1031F},
+	RuneRange{0x10324, 0x1032F},
+	RuneRange{0x1034B, 0x103FF},
+	RuneRange{0x10426, 0x10427},
+	RuneRange{0x1044E, 0x1CFFF},
+	RuneRange{0x1D0F6, 0x1D0FF},
+	RuneRange{0x1D127, 0x1D129},
+	RuneRange{0x1D1DE, 0x1D3FF},
+	RuneRange{0x1D455, 0x1D455},
+	RuneRange{0x1D49D, 0x1D49D},
+	RuneRange{0x1D4A0, 0x1D4A1},
+	RuneRange{0x1D4A3, 0x1D4A4},
+	RuneRange{0x1D4A7, 0x1D4A8},
+	RuneRange{0x1D4AD, 0x1D4AD},
+	RuneRange{0x1D4BA, 0x1D4BA},
+	RuneRange{0x1D4BC, 0x1D4BC},
+	RuneRange{0x1D4C1, 0x1D4C1},
+	RuneRange{0x1D4C4, 0x1D4C4},
+	RuneRange{0x1D506, 0x1D506},
+	RuneRange{0x1D50B, 0x1D50C},
+	RuneRange{0x1D515, 0x1D515},
+	RuneRange{0x1D51D, 0x1D51D},
+	RuneRange{0x1D53A, 0x1D53A},
+	RuneRange{0x1D53F, 0x1D53F},
+	RuneRange{0x1D545, 0x1D545},
+	RuneRange{0x1D547, 0x1D549},
+	RuneRange{0x1D551, 0x1D551},
+	RuneRange{0x1D6A4, 0x1D6A7},
+	RuneRange{0x1D7CA, 0x1D7CD},
+	RuneRange{0x1D800, 0x1FFFD},
+	RuneRange{0x2A6D7, 0x2F7FF},
+	RuneRange{0x2FA1E, 0x2FFFD},
+	RuneRange{0x30000, 0x3FFFD},
+	RuneRange{0x40000, 0x4FFFD},
+	RuneRange{0x50000, 0x5FFFD},
+	RuneRange{0x60000, 0x6FFFD},
+	RuneRange{0x70000, 0x7FFFD},
+	RuneRange{0x80000, 0x8FFFD},
+	RuneRange{0x90000, 0x9FFFD},
+	RuneRange{0xA0000, 0xAFFFD},
+	RuneRange{0xB0000, 0xBFFFD},
+	RuneRange{0xC0000, 0xCFFFD},
+	RuneRange{0xD0000, 0xDFFFD},
+	RuneRange{0xE0000, 0xE0000},
+	RuneRange{0xE0002, 0xE001F},
+	RuneRange{0xE0080, 0xEFFFD},
+}
+
+// TableA1 represents RFC-3454 Table A.1.
+var TableA1 Set = tableA1
+
+var tableB1 = Mapping{
+	0x00AD: []rune{}, // Map to nothing
+	0x034F: []rune{}, // Map to nothing
+	0x180B: []rune{}, // Map to nothing
+	0x180C: []rune{}, // Map to nothing
+	0x180D: []rune{}, // Map to nothing
+	0x200B: []rune{}, // Map to nothing
+	0x200C: []rune{}, // Map to nothing
+	0x200D: []rune{}, // Map to nothing
+	0x2060: []rune{}, // Map to nothing
+	0xFE00: []rune{}, // Map to nothing
+	0xFE01: []rune{}, // Map to nothing
+	0xFE02: []rune{}, // Map to nothing
+	0xFE03: []rune{}, // Map to nothing
+	0xFE04: []rune{}, // Map to nothing
+	0xFE05: []rune{}, // Map to nothing
+	0xFE06: []rune{}, // Map to nothing
+	0xFE07: []rune{}, // Map to nothing
+	0xFE08: []rune{}, // Map to nothing
+	0xFE09: []rune{}, // Map to nothing
+	0xFE0A: []rune{}, // Map to nothing
+	0xFE0B: []rune{}, // Map to nothing
+	0xFE0C: []rune{}, // Map to nothing
+	0xFE0D: []rune{}, // Map to nothing
+	0xFE0E: []rune{}, // Map to nothing
+	0xFE0F: []rune{}, // Map to nothing
+	0xFEFF: []rune{}, // Map to nothing
+}
+
+// TableB1 represents RFC-3454 Table B.1.
+var TableB1 Mapping = tableB1
+
+var tableB2 = Mapping{
+	0x0041:  []rune{0x0061},                         // Case map
+	0x0042:  []rune{0x0062},                         // Case map
+	0x0043:  []rune{0x0063},                         // Case map
+	0x0044:  []rune{0x0064},                         // Case map
+	0x0045:  []rune{0x0065},                         // Case map
+	0x0046:  []rune{0x0066},                         // Case map
+	0x0047:  []rune{0x0067},                         // Case map
+	0x0048:  []rune{0x0068},                         // Case map
+	0x0049:  []rune{0x0069},                         // Case map
+	0x004A:  []rune{0x006A},                         // Case map
+	0x004B:  []rune{0x006B},                         // Case map
+	0x004C:  []rune{0x006C},                         // Case map
+	0x004D:  []rune{0x006D},                         // Case map
+	0x004E:  []rune{0x006E},                         // Case map
+	0x004F:  []rune{0x006F},                         // Case map
+	0x0050:  []rune{0x0070},                         // Case map
+	0x0051:  []rune{0x0071},                         // Case map
+	0x0052:  []rune{0x0072},                         // Case map
+	0x0053:  []rune{0x0073},                         // Case map
+	0x0054:  []rune{0x0074},                         // Case map
+	0x0055:  []rune{0x0075},                         // Case map
+	0x0056:  []rune{0x0076},                         // Case map
+	0x0057:  []rune{0x0077},                         // Case map
+	0x0058:  []rune{0x0078},                         // Case map
+	0x0059:  []rune{0x0079},                         // Case map
+	0x005A:  []rune{0x007A},                         // Case map
+	0x00B5:  []rune{0x03BC},                         // Case map
+	0x00C0:  []rune{0x00E0},                         // Case map
+	0x00C1:  []rune{0x00E1},                         // Case map
+	0x00C2:  []rune{0x00E2},                         // Case map
+	0x00C3:  []rune{0x00E3},                         // Case map
+	0x00C4:  []rune{0x00E4},                         // Case map
+	0x00C5:  []rune{0x00E5},                         // Case map
+	0x00C6:  []rune{0x00E6},                         // Case map
+	0x00C7:  []rune{0x00E7},                         // Case map
+	0x00C8:  []rune{0x00E8},                         // Case map
+	0x00C9:  []rune{0x00E9},                         // Case map
+	0x00CA:  []rune{0x00EA},                         // Case map
+	0x00CB:  []rune{0x00EB},                         // Case map
+	0x00CC:  []rune{0x00EC},                         // Case map
+	0x00CD:  []rune{0x00ED},                         // Case map
+	0x00CE:  []rune{0x00EE},                         // Case map
+	0x00CF:  []rune{0x00EF},                         // Case map
+	0x00D0:  []rune{0x00F0},                         // Case map
+	0x00D1:  []rune{0x00F1},                         // Case map
+	0x00D2:  []rune{0x00F2},                         // Case map
+	0x00D3:  []rune{0x00F3},                         // Case map
+	0x00D4:  []rune{0x00F4},                         // Case map
+	0x00D5:  []rune{0x00F5},                         // Case map
+	0x00D6:  []rune{0x00F6},                         // Case map
+	0x00D8:  []rune{0x00F8},                         // Case map
+	0x00D9:  []rune{0x00F9},                         // Case map
+	0x00DA:  []rune{0x00FA},                         // Case map
+	0x00DB:  []rune{0x00FB},                         // Case map
+	0x00DC:  []rune{0x00FC},                         // Case map
+	0x00DD:  []rune{0x00FD},                         // Case map
+	0x00DE:  []rune{0x00FE},                         // Case map
+	0x00DF:  []rune{0x0073, 0x0073},                 // Case map
+	0x0100:  []rune{0x0101},                         // Case map
+	0x0102:  []rune{0x0103},                         // Case map
+	0x0104:  []rune{0x0105},                         // Case map
+	0x0106:  []rune{0x0107},                         // Case map
+	0x0108:  []rune{0x0109},                         // Case map
+	0x010A:  []rune{0x010B},                         // Case map
+	0x010C:  []rune{0x010D},                         // Case map
+	0x010E:  []rune{0x010F},                         // Case map
+	0x0110:  []rune{0x0111},                         // Case map
+	0x0112:  []rune{0x0113},                         // Case map
+	0x0114:  []rune{0x0115},                         // Case map
+	0x0116:  []rune{0x0117},                         // Case map
+	0x0118:  []rune{0x0119},                         // Case map
+	0x011A:  []rune{0x011B},                         // Case map
+	0x011C:  []rune{0x011D},                         // Case map
+	0x011E:  []rune{0x011F},                         // Case map
+	0x0120:  []rune{0x0121},                         // Case map
+	0x0122:  []rune{0x0123},                         // Case map
+	0x0124:  []rune{0x0125},                         // Case map
+	0x0126:  []rune{0x0127},                         // Case map
+	0x0128:  []rune{0x0129},                         // Case map
+	0x012A:  []rune{0x012B},                         // Case map
+	0x012C:  []rune{0x012D},                         // Case map
+	0x012E:  []rune{0x012F},                         // Case map
+	0x0130:  []rune{0x0069, 0x0307},                 // Case map
+	0x0132:  []rune{0x0133},                         // Case map
+	0x0134:  []rune{0x0135},                         // Case map
+	0x0136:  []rune{0x0137},                         // Case map
+	0x0139:  []rune{0x013A},                         // Case map
+	0x013B:  []rune{0x013C},                         // Case map
+	0x013D:  []rune{0x013E},                         // Case map
+	0x013F:  []rune{0x0140},                         // Case map
+	0x0141:  []rune{0x0142},                         // Case map
+	0x0143:  []rune{0x0144},                         // Case map
+	0x0145:  []rune{0x0146},                         // Case map
+	0x0147:  []rune{0x0148},                         // Case map
+	0x0149:  []rune{0x02BC, 0x006E},                 // Case map
+	0x014A:  []rune{0x014B},                         // Case map
+	0x014C:  []rune{0x014D},                         // Case map
+	0x014E:  []rune{0x014F},                         // Case map
+	0x0150:  []rune{0x0151},                         // Case map
+	0x0152:  []rune{0x0153},                         // Case map
+	0x0154:  []rune{0x0155},                         // Case map
+	0x0156:  []rune{0x0157},                         // Case map
+	0x0158:  []rune{0x0159},                         // Case map
+	0x015A:  []rune{0x015B},                         // Case map
+	0x015C:  []rune{0x015D},                         // Case map
+	0x015E:  []rune{0x015F},                         // Case map
+	0x0160:  []rune{0x0161},                         // Case map
+	0x0162:  []rune{0x0163},                         // Case map
+	0x0164:  []rune{0x0165},                         // Case map
+	0x0166:  []rune{0x0167},                         // Case map
+	0x0168:  []rune{0x0169},                         // Case map
+	0x016A:  []rune{0x016B},                         // Case map
+	0x016C:  []rune{0x016D},                         // Case map
+	0x016E:  []rune{0x016F},                         // Case map
+	0x0170:  []rune{0x0171},                         // Case map
+	0x0172:  []rune{0x0173},                         // Case map
+	0x0174:  []rune{0x0175},                         // Case map
+	0x0176:  []rune{0x0177},                         // Case map
+	0x0178:  []rune{0x00FF},                         // Case map
+	0x0179:  []rune{0x017A},                         // Case map
+	0x017B:  []rune{0x017C},                         // Case map
+	0x017D:  []rune{0x017E},                         // Case map
+	0x017F:  []rune{0x0073},                         // Case map
+	0x0181:  []rune{0x0253},                         // Case map
+	0x0182:  []rune{0x0183},                         // Case map
+	0x0184:  []rune{0x0185},                         // Case map
+	0x0186:  []rune{0x0254},                         // Case map
+	0x0187:  []rune{0x0188},                         // Case map
+	0x0189:  []rune{0x0256},                         // Case map
+	0x018A:  []rune{0x0257},                         // Case map
+	0x018B:  []rune{0x018C},                         // Case map
+	0x018E:  []rune{0x01DD},                         // Case map
+	0x018F:  []rune{0x0259},                         // Case map
+	0x0190:  []rune{0x025B},                         // Case map
+	0x0191:  []rune{0x0192},                         // Case map
+	0x0193:  []rune{0x0260},                         // Case map
+	0x0194:  []rune{0x0263},                         // Case map
+	0x0196:  []rune{0x0269},                         // Case map
+	0x0197:  []rune{0x0268},                         // Case map
+	0x0198:  []rune{0x0199},                         // Case map
+	0x019C:  []rune{0x026F},                         // Case map
+	0x019D:  []rune{0x0272},                         // Case map
+	0x019F:  []rune{0x0275},                         // Case map
+	0x01A0:  []rune{0x01A1},                         // Case map
+	0x01A2:  []rune{0x01A3},                         // Case map
+	0x01A4:  []rune{0x01A5},                         // Case map
+	0x01A6:  []rune{0x0280},                         // Case map
+	0x01A7:  []rune{0x01A8},                         // Case map
+	0x01A9:  []rune{0x0283},                         // Case map
+	0x01AC:  []rune{0x01AD},                         // Case map
+	0x01AE:  []rune{0x0288},                         // Case map
+	0x01AF:  []rune{0x01B0},                         // Case map
+	0x01B1:  []rune{0x028A},                         // Case map
+	0x01B2:  []rune{0x028B},                         // Case map
+	0x01B3:  []rune{0x01B4},                         // Case map
+	0x01B5:  []rune{0x01B6},                         // Case map
+	0x01B7:  []rune{0x0292},                         // Case map
+	0x01B8:  []rune{0x01B9},                         // Case map
+	0x01BC:  []rune{0x01BD},                         // Case map
+	0x01C4:  []rune{0x01C6},                         // Case map
+	0x01C5:  []rune{0x01C6},                         // Case map
+	0x01C7:  []rune{0x01C9},                         // Case map
+	0x01C8:  []rune{0x01C9},                         // Case map
+	0x01CA:  []rune{0x01CC},                         // Case map
+	0x01CB:  []rune{0x01CC},                         // Case map
+	0x01CD:  []rune{0x01CE},                         // Case map
+	0x01CF:  []rune{0x01D0},                         // Case map
+	0x01D1:  []rune{0x01D2},                         // Case map
+	0x01D3:  []rune{0x01D4},                         // Case map
+	0x01D5:  []rune{0x01D6},                         // Case map
+	0x01D7:  []rune{0x01D8},                         // Case map
+	0x01D9:  []rune{0x01DA},                         // Case map
+	0x01DB:  []rune{0x01DC},                         // Case map
+	0x01DE:  []rune{0x01DF},                         // Case map
+	0x01E0:  []rune{0x01E1},                         // Case map
+	0x01E2:  []rune{0x01E3},                         // Case map
+	0x01E4:  []rune{0x01E5},                         // Case map
+	0x01E6:  []rune{0x01E7},                         // Case map
+	0x01E8:  []rune{0x01E9},                         // Case map
+	0x01EA:  []rune{0x01EB},                         // Case map
+	0x01EC:  []rune{0x01ED},                         // Case map
+	0x01EE:  []rune{0x01EF},                         // Case map
+	0x01F0:  []rune{0x006A, 0x030C},                 // Case map
+	0x01F1:  []rune{0x01F3},                         // Case map
+	0x01F2:  []rune{0x01F3},                         // Case map
+	0x01F4:  []rune{0x01F5},                         // Case map
+	0x01F6:  []rune{0x0195},                         // Case map
+	0x01F7:  []rune{0x01BF},                         // Case map
+	0x01F8:  []rune{0x01F9},                         // Case map
+	0x01FA:  []rune{0x01FB},                         // Case map
+	0x01FC:  []rune{0x01FD},                         // Case map
+	0x01FE:  []rune{0x01FF},                         // Case map
+	0x0200:  []rune{0x0201},                         // Case map
+	0x0202:  []rune{0x0203},                         // Case map
+	0x0204:  []rune{0x0205},                         // Case map
+	0x0206:  []rune{0x0207},                         // Case map
+	0x0208:  []rune{0x0209},                         // Case map
+	0x020A:  []rune{0x020B},                         // Case map
+	0x020C:  []rune{0x020D},                         // Case map
+	0x020E:  []rune{0x020F},                         // Case map
+	0x0210:  []rune{0x0211},                         // Case map
+	0x0212:  []rune{0x0213},                         // Case map
+	0x0214:  []rune{0x0215},                         // Case map
+	0x0216:  []rune{0x0217},                         // Case map
+	0x0218:  []rune{0x0219},                         // Case map
+	0x021A:  []rune{0x021B},                         // Case map
+	0x021C:  []rune{0x021D},                         // Case map
+	0x021E:  []rune{0x021F},                         // Case map
+	0x0220:  []rune{0x019E},                         // Case map
+	0x0222:  []rune{0x0223},                         // Case map
+	0x0224:  []rune{0x0225},                         // Case map
+	0x0226:  []rune{0x0227},                         // Case map
+	0x0228:  []rune{0x0229},                         // Case map
+	0x022A:  []rune{0x022B},                         // Case map
+	0x022C:  []rune{0x022D},                         // Case map
+	0x022E:  []rune{0x022F},                         // Case map
+	0x0230:  []rune{0x0231},                         // Case map
+	0x0232:  []rune{0x0233},                         // Case map
+	0x0345:  []rune{0x03B9},                         // Case map
+	0x037A:  []rune{0x0020, 0x03B9},                 // Additional folding
+	0x0386:  []rune{0x03AC},                         // Case map
+	0x0388:  []rune{0x03AD},                         // Case map
+	0x0389:  []rune{0x03AE},                         // Case map
+	0x038A:  []rune{0x03AF},                         // Case map
+	0x038C:  []rune{0x03CC},                         // Case map
+	0x038E:  []rune{0x03CD},                         // Case map
+	0x038F:  []rune{0x03CE},                         // Case map
+	0x0390:  []rune{0x03B9, 0x0308, 0x0301},         // Case map
+	0x0391:  []rune{0x03B1},                         // Case map
+	0x0392:  []rune{0x03B2},                         // Case map
+	0x0393:  []rune{0x03B3},                         // Case map
+	0x0394:  []rune{0x03B4},                         // Case map
+	0x0395:  []rune{0x03B5},                         // Case map
+	0x0396:  []rune{0x03B6},                         // Case map
+	0x0397:  []rune{0x03B7},                         // Case map
+	0x0398:  []rune{0x03B8},                         // Case map
+	0x0399:  []rune{0x03B9},                         // Case map
+	0x039A:  []rune{0x03BA},                         // Case map
+	0x039B:  []rune{0x03BB},                         // Case map
+	0x039C:  []rune{0x03BC},                         // Case map
+	0x039D:  []rune{0x03BD},                         // Case map
+	0x039E:  []rune{0x03BE},                         // Case map
+	0x039F:  []rune{0x03BF},                         // Case map
+	0x03A0:  []rune{0x03C0},                         // Case map
+	0x03A1:  []rune{0x03C1},                         // Case map
+	0x03A3:  []rune{0x03C3},                         // Case map
+	0x03A4:  []rune{0x03C4},                         // Case map
+	0x03A5:  []rune{0x03C5},                         // Case map
+	0x03A6:  []rune{0x03C6},                         // Case map
+	0x03A7:  []rune{0x03C7},                         // Case map
+	0x03A8:  []rune{0x03C8},                         // Case map
+	0x03A9:  []rune{0x03C9},                         // Case map
+	0x03AA:  []rune{0x03CA},                         // Case map
+	0x03AB:  []rune{0x03CB},                         // Case map
+	0x03B0:  []rune{0x03C5, 0x0308, 0x0301},         // Case map
+	0x03C2:  []rune{0x03C3},                         // Case map
+	0x03D0:  []rune{0x03B2},                         // Case map
+	0x03D1:  []rune{0x03B8},                         // Case map
+	0x03D2:  []rune{0x03C5},                         // Additional folding
+	0x03D3:  []rune{0x03CD},                         // Additional folding
+	0x03D4:  []rune{0x03CB},                         // Additional folding
+	0x03D5:  []rune{0x03C6},                         // Case map
+	0x03D6:  []rune{0x03C0},                         // Case map
+	0x03D8:  []rune{0x03D9},                         // Case map
+	0x03DA:  []rune{0x03DB},                         // Case map
+	0x03DC:  []rune{0x03DD},                         // Case map
+	0x03DE:  []rune{0x03DF},                         // Case map
+	0x03E0:  []rune{0x03E1},                         // Case map
+	0x03E2:  []rune{0x03E3},                         // Case map
+	0x03E4:  []rune{0x03E5},                         // Case map
+	0x03E6:  []rune{0x03E7},                         // Case map
+	0x03E8:  []rune{0x03E9},                         // Case map
+	0x03EA:  []rune{0x03EB},                         // Case map
+	0x03EC:  []rune{0x03ED},                         // Case map
+	0x03EE:  []rune{0x03EF},                         // Case map
+	0x03F0:  []rune{0x03BA},                         // Case map
+	0x03F1:  []rune{0x03C1},                         // Case map
+	0x03F2:  []rune{0x03C3},                         // Case map
+	0x03F4:  []rune{0x03B8},                         // Case map
+	0x03F5:  []rune{0x03B5},                         // Case map
+	0x0400:  []rune{0x0450},                         // Case map
+	0x0401:  []rune{0x0451},                         // Case map
+	0x0402:  []rune{0x0452},                         // Case map
+	0x0403:  []rune{0x0453},                         // Case map
+	0x0404:  []rune{0x0454},                         // Case map
+	0x0405:  []rune{0x0455},                         // Case map
+	0x0406:  []rune{0x0456},                         // Case map
+	0x0407:  []rune{0x0457},                         // Case map
+	0x0408:  []rune{0x0458},                         // Case map
+	0x0409:  []rune{0x0459},                         // Case map
+	0x040A:  []rune{0x045A},                         // Case map
+	0x040B:  []rune{0x045B},                         // Case map
+	0x040C:  []rune{0x045C},                         // Case map
+	0x040D:  []rune{0x045D},                         // Case map
+	0x040E:  []rune{0x045E},                         // Case map
+	0x040F:  []rune{0x045F},                         // Case map
+	0x0410:  []rune{0x0430},                         // Case map
+	0x0411:  []rune{0x0431},                         // Case map
+	0x0412:  []rune{0x0432},                         // Case map
+	0x0413:  []rune{0x0433},                         // Case map
+	0x0414:  []rune{0x0434},                         // Case map
+	0x0415:  []rune{0x0435},                         // Case map
+	0x0416:  []rune{0x0436},                         // Case map
+	0x0417:  []rune{0x0437},                         // Case map
+	0x0418:  []rune{0x0438},                         // Case map
+	0x0419:  []rune{0x0439},                         // Case map
+	0x041A:  []rune{0x043A},                         // Case map
+	0x041B:  []rune{0x043B},                         // Case map
+	0x041C:  []rune{0x043C},                         // Case map
+	0x041D:  []rune{0x043D},                         // Case map
+	0x041E:  []rune{0x043E},                         // Case map
+	0x041F:  []rune{0x043F},                         // Case map
+	0x0420:  []rune{0x0440},                         // Case map
+	0x0421:  []rune{0x0441},                         // Case map
+	0x0422:  []rune{0x0442},                         // Case map
+	0x0423:  []rune{0x0443},                         // Case map
+	0x0424:  []rune{0x0444},                         // Case map
+	0x0425:  []rune{0x0445},                         // Case map
+	0x0426:  []rune{0x0446},                         // Case map
+	0x0427:  []rune{0x0447},                         // Case map
+	0x0428:  []rune{0x0448},                         // Case map
+	0x0429:  []rune{0x0449},                         // Case map
+	0x042A:  []rune{0x044A},                         // Case map
+	0x042B:  []rune{0x044B},                         // Case map
+	0x042C:  []rune{0x044C},                         // Case map
+	0x042D:  []rune{0x044D},                         // Case map
+	0x042E:  []rune{0x044E},                         // Case map
+	0x042F:  []rune{0x044F},                         // Case map
+	0x0460:  []rune{0x0461},                         // Case map
+	0x0462:  []rune{0x0463},                         // Case map
+	0x0464:  []rune{0x0465},                         // Case map
+	0x0466:  []rune{0x0467},                         // Case map
+	0x0468:  []rune{0x0469},                         // Case map
+	0x046A:  []rune{0x046B},                         // Case map
+	0x046C:  []rune{0x046D},                         // Case map
+	0x046E:  []rune{0x046F},                         // Case map
+	0x0470:  []rune{0x0471},                         // Case map
+	0x0472:  []rune{0x0473},                         // Case map
+	0x0474:  []rune{0x0475},                         // Case map
+	0x0476:  []rune{0x0477},                         // Case map
+	0x0478:  []rune{0x0479},                         // Case map
+	0x047A:  []rune{0x047B},                         // Case map
+	0x047C:  []rune{0x047D},                         // Case map
+	0x047E:  []rune{0x047F},                         // Case map
+	0x0480:  []rune{0x0481},                         // Case map
+	0x048A:  []rune{0x048B},                         // Case map
+	0x048C:  []rune{0x048D},                         // Case map
+	0x048E:  []rune{0x048F},                         // Case map
+	0x0490:  []rune{0x0491},                         // Case map
+	0x0492:  []rune{0x0493},                         // Case map
+	0x0494:  []rune{0x0495},                         // Case map
+	0x0496:  []rune{0x0497},                         // Case map
+	0x0498:  []rune{0x0499},                         // Case map
+	0x049A:  []rune{0x049B},                         // Case map
+	0x049C:  []rune{0x049D},                         // Case map
+	0x049E:  []rune{0x049F},                         // Case map
+	0x04A0:  []rune{0x04A1},                         // Case map
+	0x04A2:  []rune{0x04A3},                         // Case map
+	0x04A4:  []rune{0x04A5},                         // Case map
+	0x04A6:  []rune{0x04A7},                         // Case map
+	0x04A8:  []rune{0x04A9},                         // Case map
+	0x04AA:  []rune{0x04AB},                         // Case map
+	0x04AC:  []rune{0x04AD},                         // Case map
+	0x04AE:  []rune{0x04AF},                         // Case map
+	0x04B0:  []rune{0x04B1},                         // Case map
+	0x04B2:  []rune{0x04B3},                         // Case map
+	0x04B4:  []rune{0x04B5},                         // Case map
+	0x04B6:  []rune{0x04B7},                         // Case map
+	0x04B8:  []rune{0x04B9},                         // Case map
+	0x04BA:  []rune{0x04BB},                         // Case map
+	0x04BC:  []rune{0x04BD},                         // Case map
+	0x04BE:  []rune{0x04BF},                         // Case map
+	0x04C1:  []rune{0x04C2},                         // Case map
+	0x04C3:  []rune{0x04C4},                         // Case map
+	0x04C5:  []rune{0x04C6},                         // Case map
+	0x04C7:  []rune{0x04C8},                         // Case map
+	0x04C9:  []rune{0x04CA},                         // Case map
+	0x04CB:  []rune{0x04CC},                         // Case map
+	0x04CD:  []rune{0x04CE},                         // Case map
+	0x04D0:  []rune{0x04D1},                         // Case map
+	0x04D2:  []rune{0x04D3},                         // Case map
+	0x04D4:  []rune{0x04D5},                         // Case map
+	0x04D6:  []rune{0x04D7},                         // Case map
+	0x04D8:  []rune{0x04D9},                         // Case map
+	0x04DA:  []rune{0x04DB},                         // Case map
+	0x04DC:  []rune{0x04DD},                         // Case map
+	0x04DE:  []rune{0x04DF},                         // Case map
+	0x04E0:  []rune{0x04E1},                         // Case map
+	0x04E2:  []rune{0x04E3},                         // Case map
+	0x04E4:  []rune{0x04E5},                         // Case map
+	0x04E6:  []rune{0x04E7},                         // Case map
+	0x04E8:  []rune{0x04E9},                         // Case map
+	0x04EA:  []rune{0x04EB},                         // Case map
+	0x04EC:  []rune{0x04ED},                         // Case map
+	0x04EE:  []rune{0x04EF},                         // Case map
+	0x04F0:  []rune{0x04F1},                         // Case map
+	0x04F2:  []rune{0x04F3},                         // Case map
+	0x04F4:  []rune{0x04F5},                         // Case map
+	0x04F8:  []rune{0x04F9},                         // Case map
+	0x0500:  []rune{0x0501},                         // Case map
+	0x0502:  []rune{0x0503},                         // Case map
+	0x0504:  []rune{0x0505},                         // Case map
+	0x0506:  []rune{0x0507},                         // Case map
+	0x0508:  []rune{0x0509},                         // Case map
+	0x050A:  []rune{0x050B},                         // Case map
+	0x050C:  []rune{0x050D},                         // Case map
+	0x050E:  []rune{0x050F},                         // Case map
+	0x0531:  []rune{0x0561},                         // Case map
+	0x0532:  []rune{0x0562},                         // Case map
+	0x0533:  []rune{0x0563},                         // Case map
+	0x0534:  []rune{0x0564},                         // Case map
+	0x0535:  []rune{0x0565},                         // Case map
+	0x0536:  []rune{0x0566},                         // Case map
+	0x0537:  []rune{0x0567},                         // Case map
+	0x0538:  []rune{0x0568},                         // Case map
+	0x0539:  []rune{0x0569},                         // Case map
+	0x053A:  []rune{0x056A},                         // Case map
+	0x053B:  []rune{0x056B},                         // Case map
+	0x053C:  []rune{0x056C},                         // Case map
+	0x053D:  []rune{0x056D},                         // Case map
+	0x053E:  []rune{0x056E},                         // Case map
+	0x053F:  []rune{0x056F},                         // Case map
+	0x0540:  []rune{0x0570},                         // Case map
+	0x0541:  []rune{0x0571},                         // Case map
+	0x0542:  []rune{0x0572},                         // Case map
+	0x0543:  []rune{0x0573},                         // Case map
+	0x0544:  []rune{0x0574},                         // Case map
+	0x0545:  []rune{0x0575},                         // Case map
+	0x0546:  []rune{0x0576},                         // Case map
+	0x0547:  []rune{0x0577},                         // Case map
+	0x0548:  []rune{0x0578},                         // Case map
+	0x0549:  []rune{0x0579},                         // Case map
+	0x054A:  []rune{0x057A},                         // Case map
+	0x054B:  []rune{0x057B},                         // Case map
+	0x054C:  []rune{0x057C},                         // Case map
+	0x054D:  []rune{0x057D},                         // Case map
+	0x054E:  []rune{0x057E},                         // Case map
+	0x054F:  []rune{0x057F},                         // Case map
+	0x0550:  []rune{0x0580},                         // Case map
+	0x0551:  []rune{0x0581},                         // Case map
+	0x0552:  []rune{0x0582},                         // Case map
+	0x0553:  []rune{0x0583},                         // Case map
+	0x0554:  []rune{0x0584},                         // Case map
+	0x0555:  []rune{0x0585},                         // Case map
+	0x0556:  []rune{0x0586},                         // Case map
+	0x0587:  []rune{0x0565, 0x0582},                 // Case map
+	0x1E00:  []rune{0x1E01},                         // Case map
+	0x1E02:  []rune{0x1E03},                         // Case map
+	0x1E04:  []rune{0x1E05},                         // Case map
+	0x1E06:  []rune{0x1E07},                         // Case map
+	0x1E08:  []rune{0x1E09},                         // Case map
+	0x1E0A:  []rune{0x1E0B},                         // Case map
+	0x1E0C:  []rune{0x1E0D},                         // Case map
+	0x1E0E:  []rune{0x1E0F},                         // Case map
+	0x1E10:  []rune{0x1E11},                         // Case map
+	0x1E12:  []rune{0x1E13},                         // Case map
+	0x1E14:  []rune{0x1E15},                         // Case map
+	0x1E16:  []rune{0x1E17},                         // Case map
+	0x1E18:  []rune{0x1E19},                         // Case map
+	0x1E1A:  []rune{0x1E1B},                         // Case map
+	0x1E1C:  []rune{0x1E1D},                         // Case map
+	0x1E1E:  []rune{0x1E1F},                         // Case map
+	0x1E20:  []rune{0x1E21},                         // Case map
+	0x1E22:  []rune{0x1E23},                         // Case map
+	0x1E24:  []rune{0x1E25},                         // Case map
+	0x1E26:  []rune{0x1E27},                         // Case map
+	0x1E28:  []rune{0x1E29},                         // Case map
+	0x1E2A:  []rune{0x1E2B},                         // Case map
+	0x1E2C:  []rune{0x1E2D},                         // Case map
+	0x1E2E:  []rune{0x1E2F},                         // Case map
+	0x1E30:  []rune{0x1E31},                         // Case map
+	0x1E32:  []rune{0x1E33},                         // Case map
+	0x1E34:  []rune{0x1E35},                         // Case map
+	0x1E36:  []rune{0x1E37},                         // Case map
+	0x1E38:  []rune{0x1E39},                         // Case map
+	0x1E3A:  []rune{0x1E3B},                         // Case map
+	0x1E3C:  []rune{0x1E3D},                         // Case map
+	0x1E3E:  []rune{0x1E3F},                         // Case map
+	0x1E40:  []rune{0x1E41},                         // Case map
+	0x1E42:  []rune{0x1E43},                         // Case map
+	0x1E44:  []rune{0x1E45},                         // Case map
+	0x1E46:  []rune{0x1E47},                         // Case map
+	0x1E48:  []rune{0x1E49},                         // Case map
+	0x1E4A:  []rune{0x1E4B},                         // Case map
+	0x1E4C:  []rune{0x1E4D},                         // Case map
+	0x1E4E:  []rune{0x1E4F},                         // Case map
+	0x1E50:  []rune{0x1E51},                         // Case map
+	0x1E52:  []rune{0x1E53},                         // Case map
+	0x1E54:  []rune{0x1E55},                         // Case map
+	0x1E56:  []rune{0x1E57},                         // Case map
+	0x1E58:  []rune{0x1E59},                         // Case map
+	0x1E5A:  []rune{0x1E5B},                         // Case map
+	0x1E5C:  []rune{0x1E5D},                         // Case map
+	0x1E5E:  []rune{0x1E5F},                         // Case map
+	0x1E60:  []rune{0x1E61},                         // Case map
+	0x1E62:  []rune{0x1E63},                         // Case map
+	0x1E64:  []rune{0x1E65},                         // Case map
+	0x1E66:  []rune{0x1E67},                         // Case map
+	0x1E68:  []rune{0x1E69},                         // Case map
+	0x1E6A:  []rune{0x1E6B},                         // Case map
+	0x1E6C:  []rune{0x1E6D},                         // Case map
+	0x1E6E:  []rune{0x1E6F},                         // Case map
+	0x1E70:  []rune{0x1E71},                         // Case map
+	0x1E72:  []rune{0x1E73},                         // Case map
+	0x1E74:  []rune{0x1E75},                         // Case map
+	0x1E76:  []rune{0x1E77},                         // Case map
+	0x1E78:  []rune{0x1E79},                         // Case map
+	0x1E7A:  []rune{0x1E7B},                         // Case map
+	0x1E7C:  []rune{0x1E7D},                         // Case map
+	0x1E7E:  []rune{0x1E7F},                         // Case map
+	0x1E80:  []rune{0x1E81},                         // Case map
+	0x1E82:  []rune{0x1E83},                         // Case map
+	0x1E84:  []rune{0x1E85},                         // Case map
+	0x1E86:  []rune{0x1E87},                         // Case map
+	0x1E88:  []rune{0x1E89},                         // Case map
+	0x1E8A:  []rune{0x1E8B},                         // Case map
+	0x1E8C:  []rune{0x1E8D},                         // Case map
+	0x1E8E:  []rune{0x1E8F},                         // Case map
+	0x1E90:  []rune{0x1E91},                         // Case map
+	0x1E92:  []rune{0x1E93},                         // Case map
+	0x1E94:  []rune{0x1E95},                         // Case map
+	0x1E96:  []rune{0x0068, 0x0331},                 // Case map
+	0x1E97:  []rune{0x0074, 0x0308},                 // Case map
+	0x1E98:  []rune{0x0077, 0x030A},                 // Case map
+	0x1E99:  []rune{0x0079, 0x030A},                 // Case map
+	0x1E9A:  []rune{0x0061, 0x02BE},                 // Case map
+	0x1E9B:  []rune{0x1E61},                         // Case map
+	0x1EA0:  []rune{0x1EA1},                         // Case map
+	0x1EA2:  []rune{0x1EA3},                         // Case map
+	0x1EA4:  []rune{0x1EA5},                         // Case map
+	0x1EA6:  []rune{0x1EA7},                         // Case map
+	0x1EA8:  []rune{0x1EA9},                         // Case map
+	0x1EAA:  []rune{0x1EAB},                         // Case map
+	0x1EAC:  []rune{0x1EAD},                         // Case map
+	0x1EAE:  []rune{0x1EAF},                         // Case map
+	0x1EB0:  []rune{0x1EB1},                         // Case map
+	0x1EB2:  []rune{0x1EB3},                         // Case map
+	0x1EB4:  []rune{0x1EB5},                         // Case map
+	0x1EB6:  []rune{0x1EB7},                         // Case map
+	0x1EB8:  []rune{0x1EB9},                         // Case map
+	0x1EBA:  []rune{0x1EBB},                         // Case map
+	0x1EBC:  []rune{0x1EBD},                         // Case map
+	0x1EBE:  []rune{0x1EBF},                         // Case map
+	0x1EC0:  []rune{0x1EC1},                         // Case map
+	0x1EC2:  []rune{0x1EC3},                         // Case map
+	0x1EC4:  []rune{0x1EC5},                         // Case map
+	0x1EC6:  []rune{0x1EC7},                         // Case map
+	0x1EC8:  []rune{0x1EC9},                         // Case map
+	0x1ECA:  []rune{0x1ECB},                         // Case map
+	0x1ECC:  []rune{0x1ECD},                         // Case map
+	0x1ECE:  []rune{0x1ECF},                         // Case map
+	0x1ED0:  []rune{0x1ED1},                         // Case map
+	0x1ED2:  []rune{0x1ED3},                         // Case map
+	0x1ED4:  []rune{0x1ED5},                         // Case map
+	0x1ED6:  []rune{0x1ED7},                         // Case map
+	0x1ED8:  []rune{0x1ED9},                         // Case map
+	0x1EDA:  []rune{0x1EDB},                         // Case map
+	0x1EDC:  []rune{0x1EDD},                         // Case map
+	0x1EDE:  []rune{0x1EDF},                         // Case map
+	0x1EE0:  []rune{0x1EE1},                         // Case map
+	0x1EE2:  []rune{0x1EE3},                         // Case map
+	0x1EE4:  []rune{0x1EE5},                         // Case map
+	0x1EE6:  []rune{0x1EE7},                         // Case map
+	0x1EE8:  []rune{0x1EE9},                         // Case map
+	0x1EEA:  []rune{0x1EEB},                         // Case map
+	0x1EEC:  []rune{0x1EED},                         // Case map
+	0x1EEE:  []rune{0x1EEF},                         // Case map
+	0x1EF0:  []rune{0x1EF1},                         // Case map
+	0x1EF2:  []rune{0x1EF3},                         // Case map
+	0x1EF4:  []rune{0x1EF5},                         // Case map
+	0x1EF6:  []rune{0x1EF7},                         // Case map
+	0x1EF8:  []rune{0x1EF9},                         // Case map
+	0x1F08:  []rune{0x1F00},                         // Case map
+	0x1F09:  []rune{0x1F01},                         // Case map
+	0x1F0A:  []rune{0x1F02},                         // Case map
+	0x1F0B:  []rune{0x1F03},                         // Case map
+	0x1F0C:  []rune{0x1F04},                         // Case map
+	0x1F0D:  []rune{0x1F05},                         // Case map
+	0x1F0E:  []rune{0x1F06},                         // Case map
+	0x1F0F:  []rune{0x1F07},                         // Case map
+	0x1F18:  []rune{0x1F10},                         // Case map
+	0x1F19:  []rune{0x1F11},                         // Case map
+	0x1F1A:  []rune{0x1F12},                         // Case map
+	0x1F1B:  []rune{0x1F13},                         // Case map
+	0x1F1C:  []rune{0x1F14},                         // Case map
+	0x1F1D:  []rune{0x1F15},                         // Case map
+	0x1F28:  []rune{0x1F20},                         // Case map
+	0x1F29:  []rune{0x1F21},                         // Case map
+	0x1F2A:  []rune{0x1F22},                         // Case map
+	0x1F2B:  []rune{0x1F23},                         // Case map
+	0x1F2C:  []rune{0x1F24},                         // Case map
+	0x1F2D:  []rune{0x1F25},                         // Case map
+	0x1F2E:  []rune{0x1F26},                         // Case map
+	0x1F2F:  []rune{0x1F27},                         // Case map
+	0x1F38:  []rune{0x1F30},                         // Case map
+	0x1F39:  []rune{0x1F31},                         // Case map
+	0x1F3A:  []rune{0x1F32},                         // Case map
+	0x1F3B:  []rune{0x1F33},                         // Case map
+	0x1F3C:  []rune{0x1F34},                         // Case map
+	0x1F3D:  []rune{0x1F35},                         // Case map
+	0x1F3E:  []rune{0x1F36},                         // Case map
+	0x1F3F:  []rune{0x1F37},                         // Case map
+	0x1F48:  []rune{0x1F40},                         // Case map
+	0x1F49:  []rune{0x1F41},                         // Case map
+	0x1F4A:  []rune{0x1F42},                         // Case map
+	0x1F4B:  []rune{0x1F43},                         // Case map
+	0x1F4C:  []rune{0x1F44},                         // Case map
+	0x1F4D:  []rune{0x1F45},                         // Case map
+	0x1F50:  []rune{0x03C5, 0x0313},                 // Case map
+	0x1F52:  []rune{0x03C5, 0x0313, 0x0300},         // Case map
+	0x1F54:  []rune{0x03C5, 0x0313, 0x0301},         // Case map
+	0x1F56:  []rune{0x03C5, 0x0313, 0x0342},         // Case map
+	0x1F59:  []rune{0x1F51},                         // Case map
+	0x1F5B:  []rune{0x1F53},                         // Case map
+	0x1F5D:  []rune{0x1F55},                         // Case map
+	0x1F5F:  []rune{0x1F57},                         // Case map
+	0x1F68:  []rune{0x1F60},                         // Case map
+	0x1F69:  []rune{0x1F61},                         // Case map
+	0x1F6A:  []rune{0x1F62},                         // Case map
+	0x1F6B:  []rune{0x1F63},                         // Case map
+	0x1F6C:  []rune{0x1F64},                         // Case map
+	0x1F6D:  []rune{0x1F65},                         // Case map
+	0x1F6E:  []rune{0x1F66},                         // Case map
+	0x1F6F:  []rune{0x1F67},                         // Case map
+	0x1F80:  []rune{0x1F00, 0x03B9},                 // Case map
+	0x1F81:  []rune{0x1F01, 0x03B9},                 // Case map
+	0x1F82:  []rune{0x1F02, 0x03B9},                 // Case map
+	0x1F83:  []rune{0x1F03, 0x03B9},                 // Case map
+	0x1F84:  []rune{0x1F04, 0x03B9},                 // Case map
+	0x1F85:  []rune{0x1F05, 0x03B9},                 // Case map
+	0x1F86:  []rune{0x1F06, 0x03B9},                 // Case map
+	0x1F87:  []rune{0x1F07, 0x03B9},                 // Case map
+	0x1F88:  []rune{0x1F00, 0x03B9},                 // Case map
+	0x1F89:  []rune{0x1F01, 0x03B9},                 // Case map
+	0x1F8A:  []rune{0x1F02, 0x03B9},                 // Case map
+	0x1F8B:  []rune{0x1F03, 0x03B9},                 // Case map
+	0x1F8C:  []rune{0x1F04, 0x03B9},                 // Case map
+	0x1F8D:  []rune{0x1F05, 0x03B9},                 // Case map
+	0x1F8E:  []rune{0x1F06, 0x03B9},                 // Case map
+	0x1F8F:  []rune{0x1F07, 0x03B9},                 // Case map
+	0x1F90:  []rune{0x1F20, 0x03B9},                 // Case map
+	0x1F91:  []rune{0x1F21, 0x03B9},                 // Case map
+	0x1F92:  []rune{0x1F22, 0x03B9},                 // Case map
+	0x1F93:  []rune{0x1F23, 0x03B9},                 // Case map
+	0x1F94:  []rune{0x1F24, 0x03B9},                 // Case map
+	0x1F95:  []rune{0x1F25, 0x03B9},                 // Case map
+	0x1F96:  []rune{0x1F26, 0x03B9},                 // Case map
+	0x1F97:  []rune{0x1F27, 0x03B9},                 // Case map
+	0x1F98:  []rune{0x1F20, 0x03B9},                 // Case map
+	0x1F99:  []rune{0x1F21, 0x03B9},                 // Case map
+	0x1F9A:  []rune{0x1F22, 0x03B9},                 // Case map
+	0x1F9B:  []rune{0x1F23, 0x03B9},                 // Case map
+	0x1F9C:  []rune{0x1F24, 0x03B9},                 // Case map
+	0x1F9D:  []rune{0x1F25, 0x03B9},                 // Case map
+	0x1F9E:  []rune{0x1F26, 0x03B9},                 // Case map
+	0x1F9F:  []rune{0x1F27, 0x03B9},                 // Case map
+	0x1FA0:  []rune{0x1F60, 0x03B9},                 // Case map
+	0x1FA1:  []rune{0x1F61, 0x03B9},                 // Case map
+	0x1FA2:  []rune{0x1F62, 0x03B9},                 // Case map
+	0x1FA3:  []rune{0x1F63, 0x03B9},                 // Case map
+	0x1FA4:  []rune{0x1F64, 0x03B9},                 // Case map
+	0x1FA5:  []rune{0x1F65, 0x03B9},                 // Case map
+	0x1FA6:  []rune{0x1F66, 0x03B9},                 // Case map
+	0x1FA7:  []rune{0x1F67, 0x03B9},                 // Case map
+	0x1FA8:  []rune{0x1F60, 0x03B9},                 // Case map
+	0x1FA9:  []rune{0x1F61, 0x03B9},                 // Case map
+	0x1FAA:  []rune{0x1F62, 0x03B9},                 // Case map
+	0x1FAB:  []rune{0x1F63, 0x03B9},                 // Case map
+	0x1FAC:  []rune{0x1F64, 0x03B9},                 // Case map
+	0x1FAD:  []rune{0x1F65, 0x03B9},                 // Case map
+	0x1FAE:  []rune{0x1F66, 0x03B9},                 // Case map
+	0x1FAF:  []rune{0x1F67, 0x03B9},                 // Case map
+	0x1FB2:  []rune{0x1F70, 0x03B9},                 // Case map
+	0x1FB3:  []rune{0x03B1, 0x03B9},                 // Case map
+	0x1FB4:  []rune{0x03AC, 0x03B9},                 // Case map
+	0x1FB6:  []rune{0x03B1, 0x0342},                 // Case map
+	0x1FB7:  []rune{0x03B1, 0x0342, 0x03B9},         // Case map
+	0x1FB8:  []rune{0x1FB0},                         // Case map
+	0x1FB9:  []rune{0x1FB1},                         // Case map
+	0x1FBA:  []rune{0x1F70},                         // Case map
+	0x1FBB:  []rune{0x1F71},                         // Case map
+	0x1FBC:  []rune{0x03B1, 0x03B9},                 // Case map
+	0x1FBE:  []rune{0x03B9},                         // Case map
+	0x1FC2:  []rune{0x1F74, 0x03B9},                 // Case map
+	0x1FC3:  []rune{0x03B7, 0x03B9},                 // Case map
+	0x1FC4:  []rune{0x03AE, 0x03B9},                 // Case map
+	0x1FC6:  []rune{0x03B7, 0x0342},                 // Case map
+	0x1FC7:  []rune{0x03B7, 0x0342, 0x03B9},         // Case map
+	0x1FC8:  []rune{0x1F72},                         // Case map
+	0x1FC9:  []rune{0x1F73},                         // Case map
+	0x1FCA:  []rune{0x1F74},                         // Case map
+	0x1FCB:  []rune{0x1F75},                         // Case map
+	0x1FCC:  []rune{0x03B7, 0x03B9},                 // Case map
+	0x1FD2:  []rune{0x03B9, 0x0308, 0x0300},         // Case map
+	0x1FD3:  []rune{0x03B9, 0x0308, 0x0301},         // Case map
+	0x1FD6:  []rune{0x03B9, 0x0342},                 // Case map
+	0x1FD7:  []rune{0x03B9, 0x0308, 0x0342},         // Case map
+	0x1FD8:  []rune{0x1FD0},                         // Case map
+	0x1FD9:  []rune{0x1FD1},                         // Case map
+	0x1FDA:  []rune{0x1F76},                         // Case map
+	0x1FDB:  []rune{0x1F77},                         // Case map
+	0x1FE2:  []rune{0x03C5, 0x0308, 0x0300},         // Case map
+	0x1FE3:  []rune{0x03C5, 0x0308, 0x0301},         // Case map
+	0x1FE4:  []rune{0x03C1, 0x0313},                 // Case map
+	0x1FE6:  []rune{0x03C5, 0x0342},                 // Case map
+	0x1FE7:  []rune{0x03C5, 0x0308, 0x0342},         // Case map
+	0x1FE8:  []rune{0x1FE0},                         // Case map
+	0x1FE9:  []rune{0x1FE1},                         // Case map
+	0x1FEA:  []rune{0x1F7A},                         // Case map
+	0x1FEB:  []rune{0x1F7B},                         // Case map
+	0x1FEC:  []rune{0x1FE5},                         // Case map
+	0x1FF2:  []rune{0x1F7C, 0x03B9},                 // Case map
+	0x1FF3:  []rune{0x03C9, 0x03B9},                 // Case map
+	0x1FF4:  []rune{0x03CE, 0x03B9},                 // Case map
+	0x1FF6:  []rune{0x03C9, 0x0342},                 // Case map
+	0x1FF7:  []rune{0x03C9, 0x0342, 0x03B9},         // Case map
+	0x1FF8:  []rune{0x1F78},                         // Case map
+	0x1FF9:  []rune{0x1F79},                         // Case map
+	0x1FFA:  []rune{0x1F7C},                         // Case map
+	0x1FFB:  []rune{0x1F7D},                         // Case map
+	0x1FFC:  []rune{0x03C9, 0x03B9},                 // Case map
+	0x20A8:  []rune{0x0072, 0x0073},                 // Additional folding
+	0x2102:  []rune{0x0063},                         // Additional folding
+	0x2103:  []rune{0x00B0, 0x0063},                 // Additional folding
+	0x2107:  []rune{0x025B},                         // Additional folding
+	0x2109:  []rune{0x00B0, 0x0066},                 // Additional folding
+	0x210B:  []rune{0x0068},                         // Additional folding
+	0x210C:  []rune{0x0068},                         // Additional folding
+	0x210D:  []rune{0x0068},                         // Additional folding
+	0x2110:  []rune{0x0069},                         // Additional folding
+	0x2111:  []rune{0x0069},                         // Additional folding
+	0x2112:  []rune{0x006C},                         // Additional folding
+	0x2115:  []rune{0x006E},                         // Additional folding
+	0x2116:  []rune{0x006E, 0x006F},                 // Additional folding
+	0x2119:  []rune{0x0070},                         // Additional folding
+	0x211A:  []rune{0x0071},                         // Additional folding
+	0x211B:  []rune{0x0072},                         // Additional folding
+	0x211C:  []rune{0x0072},                         // Additional folding
+	0x211D:  []rune{0x0072},                         // Additional folding
+	0x2120:  []rune{0x0073, 0x006D},                 // Additional folding
+	0x2121:  []rune{0x0074, 0x0065, 0x006C},         // Additional folding
+	0x2122:  []rune{0x0074, 0x006D},                 // Additional folding
+	0x2124:  []rune{0x007A},                         // Additional folding
+	0x2126:  []rune{0x03C9},                         // Case map
+	0x2128:  []rune{0x007A},                         // Additional folding
+	0x212A:  []rune{0x006B},                         // Case map
+	0x212B:  []rune{0x00E5},                         // Case map
+	0x212C:  []rune{0x0062},                         // Additional folding
+	0x212D:  []rune{0x0063},                         // Additional folding
+	0x2130:  []rune{0x0065},                         // Additional folding
+	0x2131:  []rune{0x0066},                         // Additional folding
+	0x2133:  []rune{0x006D},                         // Additional folding
+	0x213E:  []rune{0x03B3},                         // Additional folding
+	0x213F:  []rune{0x03C0},                         // Additional folding
+	0x2145:  []rune{0x0064},                         // Additional folding
+	0x2160:  []rune{0x2170},                         // Case map
+	0x2161:  []rune{0x2171},                         // Case map
+	0x2162:  []rune{0x2172},                         // Case map
+	0x2163:  []rune{0x2173},                         // Case map
+	0x2164:  []rune{0x2174},                         // Case map
+	0x2165:  []rune{0x2175},                         // Case map
+	0x2166:  []rune{0x2176},                         // Case map
+	0x2167:  []rune{0x2177},                         // Case map
+	0x2168:  []rune{0x2178},                         // Case map
+	0x2169:  []rune{0x2179},                         // Case map
+	0x216A:  []rune{0x217A},                         // Case map
+	0x216B:  []rune{0x217B},                         // Case map
+	0x216C:  []rune{0x217C},                         // Case map
+	0x216D:  []rune{0x217D},                         // Case map
+	0x216E:  []rune{0x217E},                         // Case map
+	0x216F:  []rune{0x217F},                         // Case map
+	0x24B6:  []rune{0x24D0},                         // Case map
+	0x24B7:  []rune{0x24D1},                         // Case map
+	0x24B8:  []rune{0x24D2},                         // Case map
+	0x24B9:  []rune{0x24D3},                         // Case map
+	0x24BA:  []rune{0x24D4},                         // Case map
+	0x24BB:  []rune{0x24D5},                         // Case map
+	0x24BC:  []rune{0x24D6},                         // Case map
+	0x24BD:  []rune{0x24D7},                         // Case map
+	0x24BE:  []rune{0x24D8},                         // Case map
+	0x24BF:  []rune{0x24D9},                         // Case map
+	0x24C0:  []rune{0x24DA},                         // Case map
+	0x24C1:  []rune{0x24DB},                         // Case map
+	0x24C2:  []rune{0x24DC},                         // Case map
+	0x24C3:  []rune{0x24DD},                         // Case map
+	0x24C4:  []rune{0x24DE},                         // Case map
+	0x24C5:  []rune{0x24DF},                         // Case map
+	0x24C6:  []rune{0x24E0},                         // Case map
+	0x24C7:  []rune{0x24E1},                         // Case map
+	0x24C8:  []rune{0x24E2},                         // Case map
+	0x24C9:  []rune{0x24E3},                         // Case map
+	0x24CA:  []rune{0x24E4},                         // Case map
+	0x24CB:  []rune{0x24E5},                         // Case map
+	0x24CC:  []rune{0x24E6},                         // Case map
+	0x24CD:  []rune{0x24E7},                         // Case map
+	0x24CE:  []rune{0x24E8},                         // Case map
+	0x24CF:  []rune{0x24E9},                         // Case map
+	0x3371:  []rune{0x0068, 0x0070, 0x0061},         // Additional folding
+	0x3373:  []rune{0x0061, 0x0075},                 // Additional folding
+	0x3375:  []rune{0x006F, 0x0076},                 // Additional folding
+	0x3380:  []rune{0x0070, 0x0061},                 // Additional folding
+	0x3381:  []rune{0x006E, 0x0061},                 // Additional folding
+	0x3382:  []rune{0x03BC, 0x0061},                 // Additional folding
+	0x3383:  []rune{0x006D, 0x0061},                 // Additional folding
+	0x3384:  []rune{0x006B, 0x0061},                 // Additional folding
+	0x3385:  []rune{0x006B, 0x0062},                 // Additional folding
+	0x3386:  []rune{0x006D, 0x0062},                 // Additional folding
+	0x3387:  []rune{0x0067, 0x0062},                 // Additional folding
+	0x338A:  []rune{0x0070, 0x0066},                 // Additional folding
+	0x338B:  []rune{0x006E, 0x0066},                 // Additional folding
+	0x338C:  []rune{0x03BC, 0x0066},                 // Additional folding
+	0x3390:  []rune{0x0068, 0x007A},                 // Additional folding
+	0x3391:  []rune{0x006B, 0x0068, 0x007A},         // Additional folding
+	0x3392:  []rune{0x006D, 0x0068, 0x007A},         // Additional folding
+	0x3393:  []rune{0x0067, 0x0068, 0x007A},         // Additional folding
+	0x3394:  []rune{0x0074, 0x0068, 0x007A},         // Additional folding
+	0x33A9:  []rune{0x0070, 0x0061},                 // Additional folding
+	0x33AA:  []rune{0x006B, 0x0070, 0x0061},         // Additional folding
+	0x33AB:  []rune{0x006D, 0x0070, 0x0061},         // Additional folding
+	0x33AC:  []rune{0x0067, 0x0070, 0x0061},         // Additional folding
+	0x33B4:  []rune{0x0070, 0x0076},                 // Additional folding
+	0x33B5:  []rune{0x006E, 0x0076},                 // Additional folding
+	0x33B6:  []rune{0x03BC, 0x0076},                 // Additional folding
+	0x33B7:  []rune{0x006D, 0x0076},                 // Additional folding
+	0x33B8:  []rune{0x006B, 0x0076},                 // Additional folding
+	0x33B9:  []rune{0x006D, 0x0076},                 // Additional folding
+	0x33BA:  []rune{0x0070, 0x0077},                 // Additional folding
+	0x33BB:  []rune{0x006E, 0x0077},                 // Additional folding
+	0x33BC:  []rune{0x03BC, 0x0077},                 // Additional folding
+	0x33BD:  []rune{0x006D, 0x0077},                 // Additional folding
+	0x33BE:  []rune{0x006B, 0x0077},                 // Additional folding
+	0x33BF:  []rune{0x006D, 0x0077},                 // Additional folding
+	0x33C0:  []rune{0x006B, 0x03C9},                 // Additional folding
+	0x33C1:  []rune{0x006D, 0x03C9},                 // Additional folding
+	0x33C3:  []rune{0x0062, 0x0071},                 // Additional folding
+	0x33C6:  []rune{0x0063, 0x2215, 0x006B, 0x0067}, // Additional folding
+	0x33C7:  []rune{0x0063, 0x006F, 0x002E},         // Additional folding
+	0x33C8:  []rune{0x0064, 0x0062},                 // Additional folding
+	0x33C9:  []rune{0x0067, 0x0079},                 // Additional folding
+	0x33CB:  []rune{0x0068, 0x0070},                 // Additional folding
+	0x33CD:  []rune{0x006B, 0x006B},                 // Additional folding
+	0x33CE:  []rune{0x006B, 0x006D},                 // Additional folding
+	0x33D7:  []rune{0x0070, 0x0068},                 // Additional folding
+	0x33D9:  []rune{0x0070, 0x0070, 0x006D},         // Additional folding
+	0x33DA:  []rune{0x0070, 0x0072},                 // Additional folding
+	0x33DC:  []rune{0x0073, 0x0076},                 // Additional folding
+	0x33DD:  []rune{0x0077, 0x0062},                 // Additional folding
+	0xFB00:  []rune{0x0066, 0x0066},                 // Case map
+	0xFB01:  []rune{0x0066, 0x0069},                 // Case map
+	0xFB02:  []rune{0x0066, 0x006C},                 // Case map
+	0xFB03:  []rune{0x0066, 0x0066, 0x0069},         // Case map
+	0xFB04:  []rune{0x0066, 0x0066, 0x006C},         // Case map
+	0xFB05:  []rune{0x0073, 0x0074},                 // Case map
+	0xFB06:  []rune{0x0073, 0x0074},                 // Case map
+	0xFB13:  []rune{0x0574, 0x0576},                 // Case map
+	0xFB14:  []rune{0x0574, 0x0565},                 // Case map
+	0xFB15:  []rune{0x0574, 0x056B},                 // Case map
+	0xFB16:  []rune{0x057E, 0x0576},                 // Case map
+	0xFB17:  []rune{0x0574, 0x056D},                 // Case map
+	0xFF21:  []rune{0xFF41},                         // Case map
+	0xFF22:  []rune{0xFF42},                         // Case map
+	0xFF23:  []rune{0xFF43},                         // Case map
+	0xFF24:  []rune{0xFF44},                         // Case map
+	0xFF25:  []rune{0xFF45},                         // Case map
+	0xFF26:  []rune{0xFF46},                         // Case map
+	0xFF27:  []rune{0xFF47},                         // Case map
+	0xFF28:  []rune{0xFF48},                         // Case map
+	0xFF29:  []rune{0xFF49},                         // Case map
+	0xFF2A:  []rune{0xFF4A},                         // Case map
+	0xFF2B:  []rune{0xFF4B},                         // Case map
+	0xFF2C:  []rune{0xFF4C},                         // Case map
+	0xFF2D:  []rune{0xFF4D},                         // Case map
+	0xFF2E:  []rune{0xFF4E},                         // Case map
+	0xFF2F:  []rune{0xFF4F},                         // Case map
+	0xFF30:  []rune{0xFF50},                         // Case map
+	0xFF31:  []rune{0xFF51},                         // Case map
+	0xFF32:  []rune{0xFF52},                         // Case map
+	0xFF33:  []rune{0xFF53},                         // Case map
+	0xFF34:  []rune{0xFF54},                         // Case map
+	0xFF35:  []rune{0xFF55},                         // Case map
+	0xFF36:  []rune{0xFF56},                         // Case map
+	0xFF37:  []rune{0xFF57},                         // Case map
+	0xFF38:  []rune{0xFF58},                         // Case map
+	0xFF39:  []rune{0xFF59},                         // Case map
+	0xFF3A:  []rune{0xFF5A},                         // Case map
+	0x10400: []rune{0x10428},                        // Case map
+	0x10401: []rune{0x10429},                        // Case map
+	0x10402: []rune{0x1042A},                        // Case map
+	0x10403: []rune{0x1042B},                        // Case map
+	0x10404: []rune{0x1042C},                        // Case map
+	0x10405: []rune{0x1042D},                        // Case map
+	0x10406: []rune{0x1042E},                        // Case map
+	0x10407: []rune{0x1042F},                        // Case map
+	0x10408: []rune{0x10430},                        // Case map
+	0x10409: []rune{0x10431},                        // Case map
+	0x1040A: []rune{0x10432},                        // Case map
+	0x1040B: []rune{0x10433},                        // Case map
+	0x1040C: []rune{0x10434},                        // Case map
+	0x1040D: []rune{0x10435},                        // Case map
+	0x1040E: []rune{0x10436},                        // Case map
+	0x1040F: []rune{0x10437},                        // Case map
+	0x10410: []rune{0x10438},                        // Case map
+	0x10411: []rune{0x10439},                        // Case map
+	0x10412: []rune{0x1043A},                        // Case map
+	0x10413: []rune{0x1043B},                        // Case map
+	0x10414: []rune{0x1043C},                        // Case map
+	0x10415: []rune{0x1043D},                        // Case map
+	0x10416: []rune{0x1043E},                        // Case map
+	0x10417: []rune{0x1043F},                        // Case map
+	0x10418: []rune{0x10440},                        // Case map
+	0x10419: []rune{0x10441},                        // Case map
+	0x1041A: []rune{0x10442},                        // Case map
+	0x1041B: []rune{0x10443},                        // Case map
+	0x1041C: []rune{0x10444},                        // Case map
+	0x1041D: []rune{0x10445},                        // Case map
+	0x1041E: []rune{0x10446},                        // Case map
+	0x1041F: []rune{0x10447},                        // Case map
+	0x10420: []rune{0x10448},                        // Case map
+	0x10421: []rune{0x10449},                        // Case map
+	0x10422: []rune{0x1044A},                        // Case map
+	0x10423: []rune{0x1044B},                        // Case map
+	0x10424: []rune{0x1044C},                        // Case map
+	0x10425: []rune{0x1044D},                        // Case map
+	0x1D400: []rune{0x0061},                         // Additional folding
+	0x1D401: []rune{0x0062},                         // Additional folding
+	0x1D402: []rune{0x0063},                         // Additional folding
+	0x1D403: []rune{0x0064},                         // Additional folding
+	0x1D404: []rune{0x0065},                         // Additional folding
+	0x1D405: []rune{0x0066},                         // Additional folding
+	0x1D406: []rune{0x0067},                         // Additional folding
+	0x1D407: []rune{0x0068},                         // Additional folding
+	0x1D408: []rune{0x0069},                         // Additional folding
+	0x1D409: []rune{0x006A},                         // Additional folding
+	0x1D40A: []rune{0x006B},                         // Additional folding
+	0x1D40B: []rune{0x006C},                         // Additional folding
+	0x1D40C: []rune{0x006D},                         // Additional folding
+	0x1D40D: []rune{0x006E},                         // Additional folding
+	0x1D40E: []rune{0x006F},                         // Additional folding
+	0x1D40F: []rune{0x0070},                         // Additional folding
+	0x1D410: []rune{0x0071},                         // Additional folding
+	0x1D411: []rune{0x0072},                         // Additional folding
+	0x1D412: []rune{0x0073},                         // Additional folding
+	0x1D413: []rune{0x0074},                         // Additional folding
+	0x1D414: []rune{0x0075},                         // Additional folding
+	0x1D415: []rune{0x0076},                         // Additional folding
+	0x1D416: []rune{0x0077},                         // Additional folding
+	0x1D417: []rune{0x0078},                         // Additional folding
+	0x1D418: []rune{0x0079},                         // Additional folding
+	0x1D419: []rune{0x007A},                         // Additional folding
+	0x1D434: []rune{0x0061},                         // Additional folding
+	0x1D435: []rune{0x0062},                         // Additional folding
+	0x1D436: []rune{0x0063},                         // Additional folding
+	0x1D437: []rune{0x0064},                         // Additional folding
+	0x1D438: []rune{0x0065},                         // Additional folding
+	0x1D439: []rune{0x0066},                         // Additional folding
+	0x1D43A: []rune{0x0067},                         // Additional folding
+	0x1D43B: []rune{0x0068},                         // Additional folding
+	0x1D43C: []rune{0x0069},                         // Additional folding
+	0x1D43D: []rune{0x006A},                         // Additional folding
+	0x1D43E: []rune{0x006B},                         // Additional folding
+	0x1D43F: []rune{0x006C},                         // Additional folding
+	0x1D440: []rune{0x006D},                         // Additional folding
+	0x1D441: []rune{0x006E},                         // Additional folding
+	0x1D442: []rune{0x006F},                         // Additional folding
+	0x1D443: []rune{0x0070},                         // Additional folding
+	0x1D444: []rune{0x0071},                         // Additional folding
+	0x1D445: []rune{0x0072},                         // Additional folding
+	0x1D446: []rune{0x0073},                         // Additional folding
+	0x1D447: []rune{0x0074},                         // Additional folding
+	0x1D448: []rune{0x0075},                         // Additional folding
+	0x1D449: []rune{0x0076},                         // Additional folding
+	0x1D44A: []rune{0x0077},                         // Additional folding
+	0x1D44B: []rune{0x0078},                         // Additional folding
+	0x1D44C: []rune{0x0079},                         // Additional folding
+	0x1D44D: []rune{0x007A},                         // Additional folding
+	0x1D468: []rune{0x0061},                         // Additional folding
+	0x1D469: []rune{0x0062},                         // Additional folding
+	0x1D46A: []rune{0x0063},                         // Additional folding
+	0x1D46B: []rune{0x0064},                         // Additional folding
+	0x1D46C: []rune{0x0065},                         // Additional folding
+	0x1D46D: []rune{0x0066},                         // Additional folding
+	0x1D46E: []rune{0x0067},                         // Additional folding
+	0x1D46F: []rune{0x0068},                         // Additional folding
+	0x1D470: []rune{0x0069},                         // Additional folding
+	0x1D471: []rune{0x006A},                         // Additional folding
+	0x1D472: []rune{0x006B},                         // Additional folding
+	0x1D473: []rune{0x006C},                         // Additional folding
+	0x1D474: []rune{0x006D},                         // Additional folding
+	0x1D475: []rune{0x006E},                         // Additional folding
+	0x1D476: []rune{0x006F},                         // Additional folding
+	0x1D477: []rune{0x0070},                         // Additional folding
+	0x1D478: []rune{0x0071},                         // Additional folding
+	0x1D479: []rune{0x0072},                         // Additional folding
+	0x1D47A: []rune{0x0073},                         // Additional folding
+	0x1D47B: []rune{0x0074},                         // Additional folding
+	0x1D47C: []rune{0x0075},                         // Additional folding
+	0x1D47D: []rune{0x0076},                         // Additional folding
+	0x1D47E: []rune{0x0077},                         // Additional folding
+	0x1D47F: []rune{0x0078},                         // Additional folding
+	0x1D480: []rune{0x0079},                         // Additional folding
+	0x1D481: []rune{0x007A},                         // Additional folding
+	0x1D49C: []rune{0x0061},                         // Additional folding
+	0x1D49E: []rune{0x0063},                         // Additional folding
+	0x1D49F: []rune{0x0064},                         // Additional folding
+	0x1D4A2: []rune{0x0067},                         // Additional folding
+	0x1D4A5: []rune{0x006A},                         // Additional folding
+	0x1D4A6: []rune{0x006B},                         // Additional folding
+	0x1D4A9: []rune{0x006E},                         // Additional folding
+	0x1D4AA: []rune{0x006F},                         // Additional folding
+	0x1D4AB: []rune{0x0070},                         // Additional folding
+	0x1D4AC: []rune{0x0071},                         // Additional folding
+	0x1D4AE: []rune{0x0073},                         // Additional folding
+	0x1D4AF: []rune{0x0074},                         // Additional folding
+	0x1D4B0: []rune{0x0075},                         // Additional folding
+	0x1D4B1: []rune{0x0076},                         // Additional folding
+	0x1D4B2: []rune{0x0077},                         // Additional folding
+	0x1D4B3: []rune{0x0078},                         // Additional folding
+	0x1D4B4: []rune{0x0079},                         // Additional folding
+	0x1D4B5: []rune{0x007A},                         // Additional folding
+	0x1D4D0: []rune{0x0061},                         // Additional folding
+	0x1D4D1: []rune{0x0062},                         // Additional folding
+	0x1D4D2: []rune{0x0063},                         // Additional folding
+	0x1D4D3: []rune{0x0064},                         // Additional folding
+	0x1D4D4: []rune{0x0065},                         // Additional folding
+	0x1D4D5: []rune{0x0066},                         // Additional folding
+	0x1D4D6: []rune{0x0067},                         // Additional folding
+	0x1D4D7: []rune{0x0068},                         // Additional folding
+	0x1D4D8: []rune{0x0069},                         // Additional folding
+	0x1D4D9: []rune{0x006A},                         // Additional folding
+	0x1D4DA: []rune{0x006B},                         // Additional folding
+	0x1D4DB: []rune{0x006C},                         // Additional folding
+	0x1D4DC: []rune{0x006D},                         // Additional folding
+	0x1D4DD: []rune{0x006E},                         // Additional folding
+	0x1D4DE: []rune{0x006F},                         // Additional folding
+	0x1D4DF: []rune{0x0070},                         // Additional folding
+	0x1D4E0: []rune{0x0071},                         // Additional folding
+	0x1D4E1: []rune{0x0072},                         // Additional folding
+	0x1D4E2: []rune{0x0073},                         // Additional folding
+	0x1D4E3: []rune{0x0074},                         // Additional folding
+	0x1D4E4: []rune{0x0075},                         // Additional folding
+	0x1D4E5: []rune{0x0076},                         // Additional folding
+	0x1D4E6: []rune{0x0077},                         // Additional folding
+	0x1D4E7: []rune{0x0078},                         // Additional folding
+	0x1D4E8: []rune{0x0079},                         // Additional folding
+	0x1D4E9: []rune{0x007A},                         // Additional folding
+	0x1D504: []rune{0x0061},                         // Additional folding
+	0x1D505: []rune{0x0062},                         // Additional folding
+	0x1D507: []rune{0x0064},                         // Additional folding
+	0x1D508: []rune{0x0065},                         // Additional folding
+	0x1D509: []rune{0x0066},                         // Additional folding
+	0x1D50A: []rune{0x0067},                         // Additional folding
+	0x1D50D: []rune{0x006A},                         // Additional folding
+	0x1D50E: []rune{0x006B},                         // Additional folding
+	0x1D50F: []rune{0x006C},                         // Additional folding
+	0x1D510: []rune{0x006D},                         // Additional folding
+	0x1D511: []rune{0x006E},                         // Additional folding
+	0x1D512: []rune{0x006F},                         // Additional folding
+	0x1D513: []rune{0x0070},                         // Additional folding
+	0x1D514: []rune{0x0071},                         // Additional folding
+	0x1D516: []rune{0x0073},                         // Additional folding
+	0x1D517: []rune{0x0074},                         // Additional folding
+	0x1D518: []rune{0x0075},                         // Additional folding
+	0x1D519: []rune{0x0076},                         // Additional folding
+	0x1D51A: []rune{0x0077},                         // Additional folding
+	0x1D51B: []rune{0x0078},                         // Additional folding
+	0x1D51C: []rune{0x0079},                         // Additional folding
+	0x1D538: []rune{0x0061},                         // Additional folding
+	0x1D539: []rune{0x0062},                         // Additional folding
+	0x1D53B: []rune{0x0064},                         // Additional folding
+	0x1D53C: []rune{0x0065},                         // Additional folding
+	0x1D53D: []rune{0x0066},                         // Additional folding
+	0x1D53E: []rune{0x0067},                         // Additional folding
+	0x1D540: []rune{0x0069},                         // Additional folding
+	0x1D541: []rune{0x006A},                         // Additional folding
+	0x1D542: []rune{0x006B},                         // Additional folding
+	0x1D543: []rune{0x006C},                         // Additional folding
+	0x1D544: []rune{0x006D},                         // Additional folding
+	0x1D546: []rune{0x006F},                         // Additional folding
+	0x1D54A: []rune{0x0073},                         // Additional folding
+	0x1D54B: []rune{0x0074},                         // Additional folding
+	0x1D54C: []rune{0x0075},                         // Additional folding
+	0x1D54D: []rune{0x0076},                         // Additional folding
+	0x1D54E: []rune{0x0077},                         // Additional folding
+	0x1D54F: []rune{0x0078},                         // Additional folding
+	0x1D550: []rune{0x0079},                         // Additional folding
+	0x1D56C: []rune{0x0061},                         // Additional folding
+	0x1D56D: []rune{0x0062},                         // Additional folding
+	0x1D56E: []rune{0x0063},                         // Additional folding
+	0x1D56F: []rune{0x0064},                         // Additional folding
+	0x1D570: []rune{0x0065},                         // Additional folding
+	0x1D571: []rune{0x0066},                         // Additional folding
+	0x1D572: []rune{0x0067},                         // Additional folding
+	0x1D573: []rune{0x0068},                         // Additional folding
+	0x1D574: []rune{0x0069},                         // Additional folding
+	0x1D575: []rune{0x006A},                         // Additional folding
+	0x1D576: []rune{0x006B},                         // Additional folding
+	0x1D577: []rune{0x006C},                         // Additional folding
+	0x1D578: []rune{0x006D},                         // Additional folding
+	0x1D579: []rune{0x006E},                         // Additional folding
+	0x1D57A: []rune{0x006F},                         // Additional folding
+	0x1D57B: []rune{0x0070},                         // Additional folding
+	0x1D57C: []rune{0x0071},                         // Additional folding
+	0x1D57D: []rune{0x0072},                         // Additional folding
+	0x1D57E: []rune{0x0073},                         // Additional folding
+	0x1D57F: []rune{0x0074},                         // Additional folding
+	0x1D580: []rune{0x0075},                         // Additional folding
+	0x1D581: []rune{0x0076},                         // Additional folding
+	0x1D582: []rune{0x0077},                         // Additional folding
+	0x1D583: []rune{0x0078},                         // Additional folding
+	0x1D584: []rune{0x0079},                         // Additional folding
+	0x1D585: []rune{0x007A},                         // Additional folding
+	0x1D5A0: []rune{0x0061},                         // Additional folding
+	0x1D5A1: []rune{0x0062},                         // Additional folding
+	0x1D5A2: []rune{0x0063},                         // Additional folding
+	0x1D5A3: []rune{0x0064},                         // Additional folding
+	0x1D5A4: []rune{0x0065},                         // Additional folding
+	0x1D5A5: []rune{0x0066},                         // Additional folding
+	0x1D5A6: []rune{0x0067},                         // Additional folding
+	0x1D5A7: []rune{0x0068},                         // Additional folding
+	0x1D5A8: []rune{0x0069},                         // Additional folding
+	0x1D5A9: []rune{0x006A},                         // Additional folding
+	0x1D5AA: []rune{0x006B},                         // Additional folding
+	0x1D5AB: []rune{0x006C},                         // Additional folding
+	0x1D5AC: []rune{0x006D},                         // Additional folding
+	0x1D5AD: []rune{0x006E},                         // Additional folding
+	0x1D5AE: []rune{0x006F},                         // Additional folding
+	0x1D5AF: []rune{0x0070},                         // Additional folding
+	0x1D5B0: []rune{0x0071},                         // Additional folding
+	0x1D5B1: []rune{0x0072},                         // Additional folding
+	0x1D5B2: []rune{0x0073},                         // Additional folding
+	0x1D5B3: []rune{0x0074},                         // Additional folding
+	0x1D5B4: []rune{0x0075},                         // Additional folding
+	0x1D5B5: []rune{0x0076},                         // Additional folding
+	0x1D5B6: []rune{0x0077},                         // Additional folding
+	0x1D5B7: []rune{0x0078},                         // Additional folding
+	0x1D5B8: []rune{0x0079},                         // Additional folding
+	0x1D5B9: []rune{0x007A},                         // Additional folding
+	0x1D5D4: []rune{0x0061},                         // Additional folding
+	0x1D5D5: []rune{0x0062},                         // Additional folding
+	0x1D5D6: []rune{0x0063},                         // Additional folding
+	0x1D5D7: []rune{0x0064},                         // Additional folding
+	0x1D5D8: []rune{0x0065},                         // Additional folding
+	0x1D5D9: []rune{0x0066},                         // Additional folding
+	0x1D5DA: []rune{0x0067},                         // Additional folding
+	0x1D5DB: []rune{0x0068},                         // Additional folding
+	0x1D5DC: []rune{0x0069},                         // Additional folding
+	0x1D5DD: []rune{0x006A},                         // Additional folding
+	0x1D5DE: []rune{0x006B},                         // Additional folding
+	0x1D5DF: []rune{0x006C},                         // Additional folding
+	0x1D5E0: []rune{0x006D},                         // Additional folding
+	0x1D5E1: []rune{0x006E},                         // Additional folding
+	0x1D5E2: []rune{0x006F},                         // Additional folding
+	0x1D5E3: []rune{0x0070},                         // Additional folding
+	0x1D5E4: []rune{0x0071},                         // Additional folding
+	0x1D5E5: []rune{0x0072},                         // Additional folding
+	0x1D5E6: []rune{0x0073},                         // Additional folding
+	0x1D5E7: []rune{0x0074},                         // Additional folding
+	0x1D5E8: []rune{0x0075},                         // Additional folding
+	0x1D5E9: []rune{0x0076},                         // Additional folding
+	0x1D5EA: []rune{0x0077},                         // Additional folding
+	0x1D5EB: []rune{0x0078},                         // Additional folding
+	0x1D5EC: []rune{0x0079},                         // Additional folding
+	0x1D5ED: []rune{0x007A},                         // Additional folding
+	0x1D608: []rune{0x0061},                         // Additional folding
+	0x1D609: []rune{0x0062},                         // Additional folding
+	0x1D60A: []rune{0x0063},                         // Additional folding
+	0x1D60B: []rune{0x0064},                         // Additional folding
+	0x1D60C: []rune{0x0065},                         // Additional folding
+	0x1D60D: []rune{0x0066},                         // Additional folding
+	0x1D60E: []rune{0x0067},                         // Additional folding
+	0x1D60F: []rune{0x0068},                         // Additional folding
+	0x1D610: []rune{0x0069},                         // Additional folding
+	0x1D611: []rune{0x006A},                         // Additional folding
+	0x1D612: []rune{0x006B},                         // Additional folding
+	0x1D613: []rune{0x006C},                         // Additional folding
+	0x1D614: []rune{0x006D},                         // Additional folding
+	0x1D615: []rune{0x006E},                         // Additional folding
+	0x1D616: []rune{0x006F},                         // Additional folding
+	0x1D617: []rune{0x0070},                         // Additional folding
+	0x1D618: []rune{0x0071},                         // Additional folding
+	0x1D619: []rune{0x0072},                         // Additional folding
+	0x1D61A: []rune{0x0073},                         // Additional folding
+	0x1D61B: []rune{0x0074},                         // Additional folding
+	0x1D61C: []rune{0x0075},                         // Additional folding
+	0x1D61D: []rune{0x0076},                         // Additional folding
+	0x1D61E: []rune{0x0077},                         // Additional folding
+	0x1D61F: []rune{0x0078},                         // Additional folding
+	0x1D620: []rune{0x0079},                         // Additional folding
+	0x1D621: []rune{0x007A},                         // Additional folding
+	0x1D63C: []rune{0x0061},                         // Additional folding
+	0x1D63D: []rune{0x0062},                         // Additional folding
+	0x1D63E: []rune{0x0063},                         // Additional folding
+	0x1D63F: []rune{0x0064},                         // Additional folding
+	0x1D640: []rune{0x0065},                         // Additional folding
+	0x1D641: []rune{0x0066},                         // Additional folding
+	0x1D642: []rune{0x0067},                         // Additional folding
+	0x1D643: []rune{0x0068},                         // Additional folding
+	0x1D644: []rune{0x0069},                         // Additional folding
+	0x1D645: []rune{0x006A},                         // Additional folding
+	0x1D646: []rune{0x006B},                         // Additional folding
+	0x1D647: []rune{0x006C},                         // Additional folding
+	0x1D648: []rune{0x006D},                         // Additional folding
+	0x1D649: []rune{0x006E},                         // Additional folding
+	0x1D64A: []rune{0x006F},                         // Additional folding
+	0x1D64B: []rune{0x0070},                         // Additional folding
+	0x1D64C: []rune{0x0071},                         // Additional folding
+	0x1D64D: []rune{0x0072},                         // Additional folding
+	0x1D64E: []rune{0x0073},                         // Additional folding
+	0x1D64F: []rune{0x0074},                         // Additional folding
+	0x1D650: []rune{0x0075},                         // Additional folding
+	0x1D651: []rune{0x0076},                         // Additional folding
+	0x1D652: []rune{0x0077},                         // Additional folding
+	0x1D653: []rune{0x0078},                         // Additional folding
+	0x1D654: []rune{0x0079},                         // Additional folding
+	0x1D655: []rune{0x007A},                         // Additional folding
+	0x1D670: []rune{0x0061},                         // Additional folding
+	0x1D671: []rune{0x0062},                         // Additional folding
+	0x1D672: []rune{0x0063},                         // Additional folding
+	0x1D673: []rune{0x0064},                         // Additional folding
+	0x1D674: []rune{0x0065},                         // Additional folding
+	0x1D675: []rune{0x0066},                         // Additional folding
+	0x1D676: []rune{0x0067},                         // Additional folding
+	0x1D677: []rune{0x0068},                         // Additional folding
+	0x1D678: []rune{0x0069},                         // Additional folding
+	0x1D679: []rune{0x006A},                         // Additional folding
+	0x1D67A: []rune{0x006B},                         // Additional folding
+	0x1D67B: []rune{0x006C},                         // Additional folding
+	0x1D67C: []rune{0x006D},                         // Additional folding
+	0x1D67D: []rune{0x006E},                         // Additional folding
+	0x1D67E: []rune{0x006F},                         // Additional folding
+	0x1D67F: []rune{0x0070},                         // Additional folding
+	0x1D680: []rune{0x0071},                         // Additional folding
+	0x1D681: []rune{0x0072},                         // Additional folding
+	0x1D682: []rune{0x0073},                         // Additional folding
+	0x1D683: []rune{0x0074},                         // Additional folding
+	0x1D684: []rune{0x0075},                         // Additional folding
+	0x1D685: []rune{0x0076},                         // Additional folding
+	0x1D686: []rune{0x0077},                         // Additional folding
+	0x1D687: []rune{0x0078},                         // Additional folding
+	0x1D688: []rune{0x0079},                         // Additional folding
+	0x1D689: []rune{0x007A},                         // Additional folding
+	0x1D6A8: []rune{0x03B1},                         // Additional folding
+	0x1D6A9: []rune{0x03B2},                         // Additional folding
+	0x1D6AA: []rune{0x03B3},                         // Additional folding
+	0x1D6AB: []rune{0x03B4},                         // Additional folding
+	0x1D6AC: []rune{0x03B5},                         // Additional folding
+	0x1D6AD: []rune{0x03B6},                         // Additional folding
+	0x1D6AE: []rune{0x03B7},                         // Additional folding
+	0x1D6AF: []rune{0x03B8},                         // Additional folding
+	0x1D6B0: []rune{0x03B9},                         // Additional folding
+	0x1D6B1: []rune{0x03BA},                         // Additional folding
+	0x1D6B2: []rune{0x03BB},                         // Additional folding
+	0x1D6B3: []rune{0x03BC},                         // Additional folding
+	0x1D6B4: []rune{0x03BD},                         // Additional folding
+	0x1D6B5: []rune{0x03BE},                         // Additional folding
+	0x1D6B6: []rune{0x03BF},                         // Additional folding
+	0x1D6B7: []rune{0x03C0},                         // Additional folding
+	0x1D6B8: []rune{0x03C1},                         // Additional folding
+	0x1D6B9: []rune{0x03B8},                         // Additional folding
+	0x1D6BA: []rune{0x03C3},                         // Additional folding
+	0x1D6BB: []rune{0x03C4},                         // Additional folding
+	0x1D6BC: []rune{0x03C5},                         // Additional folding
+	0x1D6BD: []rune{0x03C6},                         // Additional folding
+	0x1D6BE: []rune{0x03C7},                         // Additional folding
+	0x1D6BF: []rune{0x03C8},                         // Additional folding
+	0x1D6C0: []rune{0x03C9},                         // Additional folding
+	0x1D6D3: []rune{0x03C3},                         // Additional folding
+	0x1D6E2: []rune{0x03B1},                         // Additional folding
+	0x1D6E3: []rune{0x03B2},                         // Additional folding
+	0x1D6E4: []rune{0x03B3},                         // Additional folding
+	0x1D6E5: []rune{0x03B4},                         // Additional folding
+	0x1D6E6: []rune{0x03B5},                         // Additional folding
+	0x1D6E7: []rune{0x03B6},                         // Additional folding
+	0x1D6E8: []rune{0x03B7},                         // Additional folding
+	0x1D6E9: []rune{0x03B8},                         // Additional folding
+	0x1D6EA: []rune{0x03B9},                         // Additional folding
+	0x1D6EB: []rune{0x03BA},                         // Additional folding
+	0x1D6EC: []rune{0x03BB},                         // Additional folding
+	0x1D6ED: []rune{0x03BC},                         // Additional folding
+	0x1D6EE: []rune{0x03BD},                         // Additional folding
+	0x1D6EF: []rune{0x03BE},                         // Additional folding
+	0x1D6F0: []rune{0x03BF},                         // Additional folding
+	0x1D6F1: []rune{0x03C0},                         // Additional folding
+	0x1D6F2: []rune{0x03C1},                         // Additional folding
+	0x1D6F3: []rune{0x03B8},                         // Additional folding
+	0x1D6F4: []rune{0x03C3},                         // Additional folding
+	0x1D6F5: []rune{0x03C4},                         // Additional folding
+	0x1D6F6: []rune{0x03C5},                         // Additional folding
+	0x1D6F7: []rune{0x03C6},                         // Additional folding
+	0x1D6F8: []rune{0x03C7},                         // Additional folding
+	0x1D6F9: []rune{0x03C8},                         // Additional folding
+	0x1D6FA: []rune{0x03C9},                         // Additional folding
+	0x1D70D: []rune{0x03C3},                         // Additional folding
+	0x1D71C: []rune{0x03B1},                         // Additional folding
+	0x1D71D: []rune{0x03B2},                         // Additional folding
+	0x1D71E: []rune{0x03B3},                         // Additional folding
+	0x1D71F: []rune{0x03B4},                         // Additional folding
+	0x1D720: []rune{0x03B5},                         // Additional folding
+	0x1D721: []rune{0x03B6},                         // Additional folding
+	0x1D722: []rune{0x03B7},                         // Additional folding
+	0x1D723: []rune{0x03B8},                         // Additional folding
+	0x1D724: []rune{0x03B9},                         // Additional folding
+	0x1D725: []rune{0x03BA},                         // Additional folding
+	0x1D726: []rune{0x03BB},                         // Additional folding
+	0x1D727: []rune{0x03BC},                         // Additional folding
+	0x1D728: []rune{0x03BD},                         // Additional folding
+	0x1D729: []rune{0x03BE},                         // Additional folding
+	0x1D72A: []rune{0x03BF},                         // Additional folding
+	0x1D72B: []rune{0x03C0},                         // Additional folding
+	0x1D72C: []rune{0x03C1},                         // Additional folding
+	0x1D72D: []rune{0x03B8},                         // Additional folding
+	0x1D72E: []rune{0x03C3},                         // Additional folding
+	0x1D72F: []rune{0x03C4},                         // Additional folding
+	0x1D730: []rune{0x03C5},                         // Additional folding
+	0x1D731: []rune{0x03C6},                         // Additional folding
+	0x1D732: []rune{0x03C7},                         // Additional folding
+	0x1D733: []rune{0x03C8},                         // Additional folding
+	0x1D734: []rune{0x03C9},                         // Additional folding
+	0x1D747: []rune{0x03C3},                         // Additional folding
+	0x1D756: []rune{0x03B1},                         // Additional folding
+	0x1D757: []rune{0x03B2},                         // Additional folding
+	0x1D758: []rune{0x03B3},                         // Additional folding
+	0x1D759: []rune{0x03B4},                         // Additional folding
+	0x1D75A: []rune{0x03B5},                         // Additional folding
+	0x1D75B: []rune{0x03B6},                         // Additional folding
+	0x1D75C: []rune{0x03B7},                         // Additional folding
+	0x1D75D: []rune{0x03B8},                         // Additional folding
+	0x1D75E: []rune{0x03B9},                         // Additional folding
+	0x1D75F: []rune{0x03BA},                         // Additional folding
+	0x1D760: []rune{0x03BB},                         // Additional folding
+	0x1D761: []rune{0x03BC},                         // Additional folding
+	0x1D762: []rune{0x03BD},                         // Additional folding
+	0x1D763: []rune{0x03BE},                         // Additional folding
+	0x1D764: []rune{0x03BF},                         // Additional folding
+	0x1D765: []rune{0x03C0},                         // Additional folding
+	0x1D766: []rune{0x03C1},                         // Additional folding
+	0x1D767: []rune{0x03B8},                         // Additional folding
+	0x1D768: []rune{0x03C3},                         // Additional folding
+	0x1D769: []rune{0x03C4},                         // Additional folding
+	0x1D76A: []rune{0x03C5},                         // Additional folding
+	0x1D76B: []rune{0x03C6},                         // Additional folding
+	0x1D76C: []rune{0x03C7},                         // Additional folding
+	0x1D76D: []rune{0x03C8},                         // Additional folding
+	0x1D76E: []rune{0x03C9},                         // Additional folding
+	0x1D781: []rune{0x03C3},                         // Additional folding
+	0x1D790: []rune{0x03B1},                         // Additional folding
+	0x1D791: []rune{0x03B2},                         // Additional folding
+	0x1D792: []rune{0x03B3},                         // Additional folding
+	0x1D793: []rune{0x03B4},                         // Additional folding
+	0x1D794: []rune{0x03B5},                         // Additional folding
+	0x1D795: []rune{0x03B6},                         // Additional folding
+	0x1D796: []rune{0x03B7},                         // Additional folding
+	0x1D797: []rune{0x03B8},                         // Additional folding
+	0x1D798: []rune{0x03B9},                         // Additional folding
+	0x1D799: []rune{0x03BA},                         // Additional folding
+	0x1D79A: []rune{0x03BB},                         // Additional folding
+	0x1D79B: []rune{0x03BC},                         // Additional folding
+	0x1D79C: []rune{0x03BD},                         // Additional folding
+	0x1D79D: []rune{0x03BE},                         // Additional folding
+	0x1D79E: []rune{0x03BF},                         // Additional folding
+	0x1D79F: []rune{0x03C0},                         // Additional folding
+	0x1D7A0: []rune{0x03C1},                         // Additional folding
+	0x1D7A1: []rune{0x03B8},                         // Additional folding
+	0x1D7A2: []rune{0x03C3},                         // Additional folding
+	0x1D7A3: []rune{0x03C4},                         // Additional folding
+	0x1D7A4: []rune{0x03C5},                         // Additional folding
+	0x1D7A5: []rune{0x03C6},                         // Additional folding
+	0x1D7A6: []rune{0x03C7},                         // Additional folding
+	0x1D7A7: []rune{0x03C8},                         // Additional folding
+	0x1D7A8: []rune{0x03C9},                         // Additional folding
+	0x1D7BB: []rune{0x03C3},                         // Additional folding
+}
+
+// TableB2 represents RFC-3454 Table B.2.
+var TableB2 Mapping = tableB2
+
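+// A minimal sketch, not part of the upstream generated tables: one
+// plausible way a case-fold Mapping such as TableB2 could be applied
+// rune by rune. The helper name foldRunes is hypothetical; it assumes
+// only the Mapping type (a map[rune][]rune) declared earlier in this
+// file. For example, foldRunes("ﬁle", TableB2) returns "file" via the
+// 0xFB01 entry above.
+func foldRunes(s string, m Mapping) string {
+	out := make([]rune, 0, len(s))
+	for _, r := range s {
+		if repl, ok := m[r]; ok {
+			out = append(out, repl...) // mapped runes expand to their folded sequence
+		} else {
+			out = append(out, r) // unmapped runes pass through unchanged
+		}
+	}
+	return string(out)
+}
+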
+var tableB3 = Mapping{
+	0x0041:  []rune{0x0061},                 // Case map
+	0x0042:  []rune{0x0062},                 // Case map
+	0x0043:  []rune{0x0063},                 // Case map
+	0x0044:  []rune{0x0064},                 // Case map
+	0x0045:  []rune{0x0065},                 // Case map
+	0x0046:  []rune{0x0066},                 // Case map
+	0x0047:  []rune{0x0067},                 // Case map
+	0x0048:  []rune{0x0068},                 // Case map
+	0x0049:  []rune{0x0069},                 // Case map
+	0x004A:  []rune{0x006A},                 // Case map
+	0x004B:  []rune{0x006B},                 // Case map
+	0x004C:  []rune{0x006C},                 // Case map
+	0x004D:  []rune{0x006D},                 // Case map
+	0x004E:  []rune{0x006E},                 // Case map
+	0x004F:  []rune{0x006F},                 // Case map
+	0x0050:  []rune{0x0070},                 // Case map
+	0x0051:  []rune{0x0071},                 // Case map
+	0x0052:  []rune{0x0072},                 // Case map
+	0x0053:  []rune{0x0073},                 // Case map
+	0x0054:  []rune{0x0074},                 // Case map
+	0x0055:  []rune{0x0075},                 // Case map
+	0x0056:  []rune{0x0076},                 // Case map
+	0x0057:  []rune{0x0077},                 // Case map
+	0x0058:  []rune{0x0078},                 // Case map
+	0x0059:  []rune{0x0079},                 // Case map
+	0x005A:  []rune{0x007A},                 // Case map
+	0x00B5:  []rune{0x03BC},                 // Case map
+	0x00C0:  []rune{0x00E0},                 // Case map
+	0x00C1:  []rune{0x00E1},                 // Case map
+	0x00C2:  []rune{0x00E2},                 // Case map
+	0x00C3:  []rune{0x00E3},                 // Case map
+	0x00C4:  []rune{0x00E4},                 // Case map
+	0x00C5:  []rune{0x00E5},                 // Case map
+	0x00C6:  []rune{0x00E6},                 // Case map
+	0x00C7:  []rune{0x00E7},                 // Case map
+	0x00C8:  []rune{0x00E8},                 // Case map
+	0x00C9:  []rune{0x00E9},                 // Case map
+	0x00CA:  []rune{0x00EA},                 // Case map
+	0x00CB:  []rune{0x00EB},                 // Case map
+	0x00CC:  []rune{0x00EC},                 // Case map
+	0x00CD:  []rune{0x00ED},                 // Case map
+	0x00CE:  []rune{0x00EE},                 // Case map
+	0x00CF:  []rune{0x00EF},                 // Case map
+	0x00D0:  []rune{0x00F0},                 // Case map
+	0x00D1:  []rune{0x00F1},                 // Case map
+	0x00D2:  []rune{0x00F2},                 // Case map
+	0x00D3:  []rune{0x00F3},                 // Case map
+	0x00D4:  []rune{0x00F4},                 // Case map
+	0x00D5:  []rune{0x00F5},                 // Case map
+	0x00D6:  []rune{0x00F6},                 // Case map
+	0x00D8:  []rune{0x00F8},                 // Case map
+	0x00D9:  []rune{0x00F9},                 // Case map
+	0x00DA:  []rune{0x00FA},                 // Case map
+	0x00DB:  []rune{0x00FB},                 // Case map
+	0x00DC:  []rune{0x00FC},                 // Case map
+	0x00DD:  []rune{0x00FD},                 // Case map
+	0x00DE:  []rune{0x00FE},                 // Case map
+	0x00DF:  []rune{0x0073, 0x0073},         // Case map
+	0x0100:  []rune{0x0101},                 // Case map
+	0x0102:  []rune{0x0103},                 // Case map
+	0x0104:  []rune{0x0105},                 // Case map
+	0x0106:  []rune{0x0107},                 // Case map
+	0x0108:  []rune{0x0109},                 // Case map
+	0x010A:  []rune{0x010B},                 // Case map
+	0x010C:  []rune{0x010D},                 // Case map
+	0x010E:  []rune{0x010F},                 // Case map
+	0x0110:  []rune{0x0111},                 // Case map
+	0x0112:  []rune{0x0113},                 // Case map
+	0x0114:  []rune{0x0115},                 // Case map
+	0x0116:  []rune{0x0117},                 // Case map
+	0x0118:  []rune{0x0119},                 // Case map
+	0x011A:  []rune{0x011B},                 // Case map
+	0x011C:  []rune{0x011D},                 // Case map
+	0x011E:  []rune{0x011F},                 // Case map
+	0x0120:  []rune{0x0121},                 // Case map
+	0x0122:  []rune{0x0123},                 // Case map
+	0x0124:  []rune{0x0125},                 // Case map
+	0x0126:  []rune{0x0127},                 // Case map
+	0x0128:  []rune{0x0129},                 // Case map
+	0x012A:  []rune{0x012B},                 // Case map
+	0x012C:  []rune{0x012D},                 // Case map
+	0x012E:  []rune{0x012F},                 // Case map
+	0x0130:  []rune{0x0069, 0x0307},         // Case map
+	0x0132:  []rune{0x0133},                 // Case map
+	0x0134:  []rune{0x0135},                 // Case map
+	0x0136:  []rune{0x0137},                 // Case map
+	0x0139:  []rune{0x013A},                 // Case map
+	0x013B:  []rune{0x013C},                 // Case map
+	0x013D:  []rune{0x013E},                 // Case map
+	0x013F:  []rune{0x0140},                 // Case map
+	0x0141:  []rune{0x0142},                 // Case map
+	0x0143:  []rune{0x0144},                 // Case map
+	0x0145:  []rune{0x0146},                 // Case map
+	0x0147:  []rune{0x0148},                 // Case map
+	0x0149:  []rune{0x02BC, 0x006E},         // Case map
+	0x014A:  []rune{0x014B},                 // Case map
+	0x014C:  []rune{0x014D},                 // Case map
+	0x014E:  []rune{0x014F},                 // Case map
+	0x0150:  []rune{0x0151},                 // Case map
+	0x0152:  []rune{0x0153},                 // Case map
+	0x0154:  []rune{0x0155},                 // Case map
+	0x0156:  []rune{0x0157},                 // Case map
+	0x0158:  []rune{0x0159},                 // Case map
+	0x015A:  []rune{0x015B},                 // Case map
+	0x015C:  []rune{0x015D},                 // Case map
+	0x015E:  []rune{0x015F},                 // Case map
+	0x0160:  []rune{0x0161},                 // Case map
+	0x0162:  []rune{0x0163},                 // Case map
+	0x0164:  []rune{0x0165},                 // Case map
+	0x0166:  []rune{0x0167},                 // Case map
+	0x0168:  []rune{0x0169},                 // Case map
+	0x016A:  []rune{0x016B},                 // Case map
+	0x016C:  []rune{0x016D},                 // Case map
+	0x016E:  []rune{0x016F},                 // Case map
+	0x0170:  []rune{0x0171},                 // Case map
+	0x0172:  []rune{0x0173},                 // Case map
+	0x0174:  []rune{0x0175},                 // Case map
+	0x0176:  []rune{0x0177},                 // Case map
+	0x0178:  []rune{0x00FF},                 // Case map
+	0x0179:  []rune{0x017A},                 // Case map
+	0x017B:  []rune{0x017C},                 // Case map
+	0x017D:  []rune{0x017E},                 // Case map
+	0x017F:  []rune{0x0073},                 // Case map
+	0x0181:  []rune{0x0253},                 // Case map
+	0x0182:  []rune{0x0183},                 // Case map
+	0x0184:  []rune{0x0185},                 // Case map
+	0x0186:  []rune{0x0254},                 // Case map
+	0x0187:  []rune{0x0188},                 // Case map
+	0x0189:  []rune{0x0256},                 // Case map
+	0x018A:  []rune{0x0257},                 // Case map
+	0x018B:  []rune{0x018C},                 // Case map
+	0x018E:  []rune{0x01DD},                 // Case map
+	0x018F:  []rune{0x0259},                 // Case map
+	0x0190:  []rune{0x025B},                 // Case map
+	0x0191:  []rune{0x0192},                 // Case map
+	0x0193:  []rune{0x0260},                 // Case map
+	0x0194:  []rune{0x0263},                 // Case map
+	0x0196:  []rune{0x0269},                 // Case map
+	0x0197:  []rune{0x0268},                 // Case map
+	0x0198:  []rune{0x0199},                 // Case map
+	0x019C:  []rune{0x026F},                 // Case map
+	0x019D:  []rune{0x0272},                 // Case map
+	0x019F:  []rune{0x0275},                 // Case map
+	0x01A0:  []rune{0x01A1},                 // Case map
+	0x01A2:  []rune{0x01A3},                 // Case map
+	0x01A4:  []rune{0x01A5},                 // Case map
+	0x01A6:  []rune{0x0280},                 // Case map
+	0x01A7:  []rune{0x01A8},                 // Case map
+	0x01A9:  []rune{0x0283},                 // Case map
+	0x01AC:  []rune{0x01AD},                 // Case map
+	0x01AE:  []rune{0x0288},                 // Case map
+	0x01AF:  []rune{0x01B0},                 // Case map
+	0x01B1:  []rune{0x028A},                 // Case map
+	0x01B2:  []rune{0x028B},                 // Case map
+	0x01B3:  []rune{0x01B4},                 // Case map
+	0x01B5:  []rune{0x01B6},                 // Case map
+	0x01B7:  []rune{0x0292},                 // Case map
+	0x01B8:  []rune{0x01B9},                 // Case map
+	0x01BC:  []rune{0x01BD},                 // Case map
+	0x01C4:  []rune{0x01C6},                 // Case map
+	0x01C5:  []rune{0x01C6},                 // Case map
+	0x01C7:  []rune{0x01C9},                 // Case map
+	0x01C8:  []rune{0x01C9},                 // Case map
+	0x01CA:  []rune{0x01CC},                 // Case map
+	0x01CB:  []rune{0x01CC},                 // Case map
+	0x01CD:  []rune{0x01CE},                 // Case map
+	0x01CF:  []rune{0x01D0},                 // Case map
+	0x01D1:  []rune{0x01D2},                 // Case map
+	0x01D3:  []rune{0x01D4},                 // Case map
+	0x01D5:  []rune{0x01D6},                 // Case map
+	0x01D7:  []rune{0x01D8},                 // Case map
+	0x01D9:  []rune{0x01DA},                 // Case map
+	0x01DB:  []rune{0x01DC},                 // Case map
+	0x01DE:  []rune{0x01DF},                 // Case map
+	0x01E0:  []rune{0x01E1},                 // Case map
+	0x01E2:  []rune{0x01E3},                 // Case map
+	0x01E4:  []rune{0x01E5},                 // Case map
+	0x01E6:  []rune{0x01E7},                 // Case map
+	0x01E8:  []rune{0x01E9},                 // Case map
+	0x01EA:  []rune{0x01EB},                 // Case map
+	0x01EC:  []rune{0x01ED},                 // Case map
+	0x01EE:  []rune{0x01EF},                 // Case map
+	0x01F0:  []rune{0x006A, 0x030C},         // Case map
+	0x01F1:  []rune{0x01F3},                 // Case map
+	0x01F2:  []rune{0x01F3},                 // Case map
+	0x01F4:  []rune{0x01F5},                 // Case map
+	0x01F6:  []rune{0x0195},                 // Case map
+	0x01F7:  []rune{0x01BF},                 // Case map
+	0x01F8:  []rune{0x01F9},                 // Case map
+	0x01FA:  []rune{0x01FB},                 // Case map
+	0x01FC:  []rune{0x01FD},                 // Case map
+	0x01FE:  []rune{0x01FF},                 // Case map
+	0x0200:  []rune{0x0201},                 // Case map
+	0x0202:  []rune{0x0203},                 // Case map
+	0x0204:  []rune{0x0205},                 // Case map
+	0x0206:  []rune{0x0207},                 // Case map
+	0x0208:  []rune{0x0209},                 // Case map
+	0x020A:  []rune{0x020B},                 // Case map
+	0x020C:  []rune{0x020D},                 // Case map
+	0x020E:  []rune{0x020F},                 // Case map
+	0x0210:  []rune{0x0211},                 // Case map
+	0x0212:  []rune{0x0213},                 // Case map
+	0x0214:  []rune{0x0215},                 // Case map
+	0x0216:  []rune{0x0217},                 // Case map
+	0x0218:  []rune{0x0219},                 // Case map
+	0x021A:  []rune{0x021B},                 // Case map
+	0x021C:  []rune{0x021D},                 // Case map
+	0x021E:  []rune{0x021F},                 // Case map
+	0x0220:  []rune{0x019E},                 // Case map
+	0x0222:  []rune{0x0223},                 // Case map
+	0x0224:  []rune{0x0225},                 // Case map
+	0x0226:  []rune{0x0227},                 // Case map
+	0x0228:  []rune{0x0229},                 // Case map
+	0x022A:  []rune{0x022B},                 // Case map
+	0x022C:  []rune{0x022D},                 // Case map
+	0x022E:  []rune{0x022F},                 // Case map
+	0x0230:  []rune{0x0231},                 // Case map
+	0x0232:  []rune{0x0233},                 // Case map
+	0x0345:  []rune{0x03B9},                 // Case map
+	0x0386:  []rune{0x03AC},                 // Case map
+	0x0388:  []rune{0x03AD},                 // Case map
+	0x0389:  []rune{0x03AE},                 // Case map
+	0x038A:  []rune{0x03AF},                 // Case map
+	0x038C:  []rune{0x03CC},                 // Case map
+	0x038E:  []rune{0x03CD},                 // Case map
+	0x038F:  []rune{0x03CE},                 // Case map
+	0x0390:  []rune{0x03B9, 0x0308, 0x0301}, // Case map
+	0x0391:  []rune{0x03B1},                 // Case map
+	0x0392:  []rune{0x03B2},                 // Case map
+	0x0393:  []rune{0x03B3},                 // Case map
+	0x0394:  []rune{0x03B4},                 // Case map
+	0x0395:  []rune{0x03B5},                 // Case map
+	0x0396:  []rune{0x03B6},                 // Case map
+	0x0397:  []rune{0x03B7},                 // Case map
+	0x0398:  []rune{0x03B8},                 // Case map
+	0x0399:  []rune{0x03B9},                 // Case map
+	0x039A:  []rune{0x03BA},                 // Case map
+	0x039B:  []rune{0x03BB},                 // Case map
+	0x039C:  []rune{0x03BC},                 // Case map
+	0x039D:  []rune{0x03BD},                 // Case map
+	0x039E:  []rune{0x03BE},                 // Case map
+	0x039F:  []rune{0x03BF},                 // Case map
+	0x03A0:  []rune{0x03C0},                 // Case map
+	0x03A1:  []rune{0x03C1},                 // Case map
+	0x03A3:  []rune{0x03C3},                 // Case map
+	0x03A4:  []rune{0x03C4},                 // Case map
+	0x03A5:  []rune{0x03C5},                 // Case map
+	0x03A6:  []rune{0x03C6},                 // Case map
+	0x03A7:  []rune{0x03C7},                 // Case map
+	0x03A8:  []rune{0x03C8},                 // Case map
+	0x03A9:  []rune{0x03C9},                 // Case map
+	0x03AA:  []rune{0x03CA},                 // Case map
+	0x03AB:  []rune{0x03CB},                 // Case map
+	0x03B0:  []rune{0x03C5, 0x0308, 0x0301}, // Case map
+	0x03C2:  []rune{0x03C3},                 // Case map
+	0x03D0:  []rune{0x03B2},                 // Case map
+	0x03D1:  []rune{0x03B8},                 // Case map
+	0x03D5:  []rune{0x03C6},                 // Case map
+	0x03D6:  []rune{0x03C0},                 // Case map
+	0x03D8:  []rune{0x03D9},                 // Case map
+	0x03DA:  []rune{0x03DB},                 // Case map
+	0x03DC:  []rune{0x03DD},                 // Case map
+	0x03DE:  []rune{0x03DF},                 // Case map
+	0x03E0:  []rune{0x03E1},                 // Case map
+	0x03E2:  []rune{0x03E3},                 // Case map
+	0x03E4:  []rune{0x03E5},                 // Case map
+	0x03E6:  []rune{0x03E7},                 // Case map
+	0x03E8:  []rune{0x03E9},                 // Case map
+	0x03EA:  []rune{0x03EB},                 // Case map
+	0x03EC:  []rune{0x03ED},                 // Case map
+	0x03EE:  []rune{0x03EF},                 // Case map
+	0x03F0:  []rune{0x03BA},                 // Case map
+	0x03F1:  []rune{0x03C1},                 // Case map
+	0x03F2:  []rune{0x03C3},                 // Case map
+	0x03F4:  []rune{0x03B8},                 // Case map
+	0x03F5:  []rune{0x03B5},                 // Case map
+	0x0400:  []rune{0x0450},                 // Case map
+	0x0401:  []rune{0x0451},                 // Case map
+	0x0402:  []rune{0x0452},                 // Case map
+	0x0403:  []rune{0x0453},                 // Case map
+	0x0404:  []rune{0x0454},                 // Case map
+	0x0405:  []rune{0x0455},                 // Case map
+	0x0406:  []rune{0x0456},                 // Case map
+	0x0407:  []rune{0x0457},                 // Case map
+	0x0408:  []rune{0x0458},                 // Case map
+	0x0409:  []rune{0x0459},                 // Case map
+	0x040A:  []rune{0x045A},                 // Case map
+	0x040B:  []rune{0x045B},                 // Case map
+	0x040C:  []rune{0x045C},                 // Case map
+	0x040D:  []rune{0x045D},                 // Case map
+	0x040E:  []rune{0x045E},                 // Case map
+	0x040F:  []rune{0x045F},                 // Case map
+	0x0410:  []rune{0x0430},                 // Case map
+	0x0411:  []rune{0x0431},                 // Case map
+	0x0412:  []rune{0x0432},                 // Case map
+	0x0413:  []rune{0x0433},                 // Case map
+	0x0414:  []rune{0x0434},                 // Case map
+	0x0415:  []rune{0x0435},                 // Case map
+	0x0416:  []rune{0x0436},                 // Case map
+	0x0417:  []rune{0x0437},                 // Case map
+	0x0418:  []rune{0x0438},                 // Case map
+	0x0419:  []rune{0x0439},                 // Case map
+	0x041A:  []rune{0x043A},                 // Case map
+	0x041B:  []rune{0x043B},                 // Case map
+	0x041C:  []rune{0x043C},                 // Case map
+	0x041D:  []rune{0x043D},                 // Case map
+	0x041E:  []rune{0x043E},                 // Case map
+	0x041F:  []rune{0x043F},                 // Case map
+	0x0420:  []rune{0x0440},                 // Case map
+	0x0421:  []rune{0x0441},                 // Case map
+	0x0422:  []rune{0x0442},                 // Case map
+	0x0423:  []rune{0x0443},                 // Case map
+	0x0424:  []rune{0x0444},                 // Case map
+	0x0425:  []rune{0x0445},                 // Case map
+	0x0426:  []rune{0x0446},                 // Case map
+	0x0427:  []rune{0x0447},                 // Case map
+	0x0428:  []rune{0x0448},                 // Case map
+	0x0429:  []rune{0x0449},                 // Case map
+	0x042A:  []rune{0x044A},                 // Case map
+	0x042B:  []rune{0x044B},                 // Case map
+	0x042C:  []rune{0x044C},                 // Case map
+	0x042D:  []rune{0x044D},                 // Case map
+	0x042E:  []rune{0x044E},                 // Case map
+	0x042F:  []rune{0x044F},                 // Case map
+	0x0460:  []rune{0x0461},                 // Case map
+	0x0462:  []rune{0x0463},                 // Case map
+	0x0464:  []rune{0x0465},                 // Case map
+	0x0466:  []rune{0x0467},                 // Case map
+	0x0468:  []rune{0x0469},                 // Case map
+	0x046A:  []rune{0x046B},                 // Case map
+	0x046C:  []rune{0x046D},                 // Case map
+	0x046E:  []rune{0x046F},                 // Case map
+	0x0470:  []rune{0x0471},                 // Case map
+	0x0472:  []rune{0x0473},                 // Case map
+	0x0474:  []rune{0x0475},                 // Case map
+	0x0476:  []rune{0x0477},                 // Case map
+	0x0478:  []rune{0x0479},                 // Case map
+	0x047A:  []rune{0x047B},                 // Case map
+	0x047C:  []rune{0x047D},                 // Case map
+	0x047E:  []rune{0x047F},                 // Case map
+	0x0480:  []rune{0x0481},                 // Case map
+	0x048A:  []rune{0x048B},                 // Case map
+	0x048C:  []rune{0x048D},                 // Case map
+	0x048E:  []rune{0x048F},                 // Case map
+	0x0490:  []rune{0x0491},                 // Case map
+	0x0492:  []rune{0x0493},                 // Case map
+	0x0494:  []rune{0x0495},                 // Case map
+	0x0496:  []rune{0x0497},                 // Case map
+	0x0498:  []rune{0x0499},                 // Case map
+	0x049A:  []rune{0x049B},                 // Case map
+	0x049C:  []rune{0x049D},                 // Case map
+	0x049E:  []rune{0x049F},                 // Case map
+	0x04A0:  []rune{0x04A1},                 // Case map
+	0x04A2:  []rune{0x04A3},                 // Case map
+	0x04A4:  []rune{0x04A5},                 // Case map
+	0x04A6:  []rune{0x04A7},                 // Case map
+	0x04A8:  []rune{0x04A9},                 // Case map
+	0x04AA:  []rune{0x04AB},                 // Case map
+	0x04AC:  []rune{0x04AD},                 // Case map
+	0x04AE:  []rune{0x04AF},                 // Case map
+	0x04B0:  []rune{0x04B1},                 // Case map
+	0x04B2:  []rune{0x04B3},                 // Case map
+	0x04B4:  []rune{0x04B5},                 // Case map
+	0x04B6:  []rune{0x04B7},                 // Case map
+	0x04B8:  []rune{0x04B9},                 // Case map
+	0x04BA:  []rune{0x04BB},                 // Case map
+	0x04BC:  []rune{0x04BD},                 // Case map
+	0x04BE:  []rune{0x04BF},                 // Case map
+	0x04C1:  []rune{0x04C2},                 // Case map
+	0x04C3:  []rune{0x04C4},                 // Case map
+	0x04C5:  []rune{0x04C6},                 // Case map
+	0x04C7:  []rune{0x04C8},                 // Case map
+	0x04C9:  []rune{0x04CA},                 // Case map
+	0x04CB:  []rune{0x04CC},                 // Case map
+	0x04CD:  []rune{0x04CE},                 // Case map
+	0x04D0:  []rune{0x04D1},                 // Case map
+	0x04D2:  []rune{0x04D3},                 // Case map
+	0x04D4:  []rune{0x04D5},                 // Case map
+	0x04D6:  []rune{0x04D7},                 // Case map
+	0x04D8:  []rune{0x04D9},                 // Case map
+	0x04DA:  []rune{0x04DB},                 // Case map
+	0x04DC:  []rune{0x04DD},                 // Case map
+	0x04DE:  []rune{0x04DF},                 // Case map
+	0x04E0:  []rune{0x04E1},                 // Case map
+	0x04E2:  []rune{0x04E3},                 // Case map
+	0x04E4:  []rune{0x04E5},                 // Case map
+	0x04E6:  []rune{0x04E7},                 // Case map
+	0x04E8:  []rune{0x04E9},                 // Case map
+	0x04EA:  []rune{0x04EB},                 // Case map
+	0x04EC:  []rune{0x04ED},                 // Case map
+	0x04EE:  []rune{0x04EF},                 // Case map
+	0x04F0:  []rune{0x04F1},                 // Case map
+	0x04F2:  []rune{0x04F3},                 // Case map
+	0x04F4:  []rune{0x04F5},                 // Case map
+	0x04F8:  []rune{0x04F9},                 // Case map
+	0x0500:  []rune{0x0501},                 // Case map
+	0x0502:  []rune{0x0503},                 // Case map
+	0x0504:  []rune{0x0505},                 // Case map
+	0x0506:  []rune{0x0507},                 // Case map
+	0x0508:  []rune{0x0509},                 // Case map
+	0x050A:  []rune{0x050B},                 // Case map
+	0x050C:  []rune{0x050D},                 // Case map
+	0x050E:  []rune{0x050F},                 // Case map
+	0x0531:  []rune{0x0561},                 // Case map
+	0x0532:  []rune{0x0562},                 // Case map
+	0x0533:  []rune{0x0563},                 // Case map
+	0x0534:  []rune{0x0564},                 // Case map
+	0x0535:  []rune{0x0565},                 // Case map
+	0x0536:  []rune{0x0566},                 // Case map
+	0x0537:  []rune{0x0567},                 // Case map
+	0x0538:  []rune{0x0568},                 // Case map
+	0x0539:  []rune{0x0569},                 // Case map
+	0x053A:  []rune{0x056A},                 // Case map
+	0x053B:  []rune{0x056B},                 // Case map
+	0x053C:  []rune{0x056C},                 // Case map
+	0x053D:  []rune{0x056D},                 // Case map
+	0x053E:  []rune{0x056E},                 // Case map
+	0x053F:  []rune{0x056F},                 // Case map
+	0x0540:  []rune{0x0570},                 // Case map
+	0x0541:  []rune{0x0571},                 // Case map
+	0x0542:  []rune{0x0572},                 // Case map
+	0x0543:  []rune{0x0573},                 // Case map
+	0x0544:  []rune{0x0574},                 // Case map
+	0x0545:  []rune{0x0575},                 // Case map
+	0x0546:  []rune{0x0576},                 // Case map
+	0x0547:  []rune{0x0577},                 // Case map
+	0x0548:  []rune{0x0578},                 // Case map
+	0x0549:  []rune{0x0579},                 // Case map
+	0x054A:  []rune{0x057A},                 // Case map
+	0x054B:  []rune{0x057B},                 // Case map
+	0x054C:  []rune{0x057C},                 // Case map
+	0x054D:  []rune{0x057D},                 // Case map
+	0x054E:  []rune{0x057E},                 // Case map
+	0x054F:  []rune{0x057F},                 // Case map
+	0x0550:  []rune{0x0580},                 // Case map
+	0x0551:  []rune{0x0581},                 // Case map
+	0x0552:  []rune{0x0582},                 // Case map
+	0x0553:  []rune{0x0583},                 // Case map
+	0x0554:  []rune{0x0584},                 // Case map
+	0x0555:  []rune{0x0585},                 // Case map
+	0x0556:  []rune{0x0586},                 // Case map
+	0x0587:  []rune{0x0565, 0x0582},         // Case map
+	0x1E00:  []rune{0x1E01},                 // Case map
+	0x1E02:  []rune{0x1E03},                 // Case map
+	0x1E04:  []rune{0x1E05},                 // Case map
+	0x1E06:  []rune{0x1E07},                 // Case map
+	0x1E08:  []rune{0x1E09},                 // Case map
+	0x1E0A:  []rune{0x1E0B},                 // Case map
+	0x1E0C:  []rune{0x1E0D},                 // Case map
+	0x1E0E:  []rune{0x1E0F},                 // Case map
+	0x1E10:  []rune{0x1E11},                 // Case map
+	0x1E12:  []rune{0x1E13},                 // Case map
+	0x1E14:  []rune{0x1E15},                 // Case map
+	0x1E16:  []rune{0x1E17},                 // Case map
+	0x1E18:  []rune{0x1E19},                 // Case map
+	0x1E1A:  []rune{0x1E1B},                 // Case map
+	0x1E1C:  []rune{0x1E1D},                 // Case map
+	0x1E1E:  []rune{0x1E1F},                 // Case map
+	0x1E20:  []rune{0x1E21},                 // Case map
+	0x1E22:  []rune{0x1E23},                 // Case map
+	0x1E24:  []rune{0x1E25},                 // Case map
+	0x1E26:  []rune{0x1E27},                 // Case map
+	0x1E28:  []rune{0x1E29},                 // Case map
+	0x1E2A:  []rune{0x1E2B},                 // Case map
+	0x1E2C:  []rune{0x1E2D},                 // Case map
+	0x1E2E:  []rune{0x1E2F},                 // Case map
+	0x1E30:  []rune{0x1E31},                 // Case map
+	0x1E32:  []rune{0x1E33},                 // Case map
+	0x1E34:  []rune{0x1E35},                 // Case map
+	0x1E36:  []rune{0x1E37},                 // Case map
+	0x1E38:  []rune{0x1E39},                 // Case map
+	0x1E3A:  []rune{0x1E3B},                 // Case map
+	0x1E3C:  []rune{0x1E3D},                 // Case map
+	0x1E3E:  []rune{0x1E3F},                 // Case map
+	0x1E40:  []rune{0x1E41},                 // Case map
+	0x1E42:  []rune{0x1E43},                 // Case map
+	0x1E44:  []rune{0x1E45},                 // Case map
+	0x1E46:  []rune{0x1E47},                 // Case map
+	0x1E48:  []rune{0x1E49},                 // Case map
+	0x1E4A:  []rune{0x1E4B},                 // Case map
+	0x1E4C:  []rune{0x1E4D},                 // Case map
+	0x1E4E:  []rune{0x1E4F},                 // Case map
+	0x1E50:  []rune{0x1E51},                 // Case map
+	0x1E52:  []rune{0x1E53},                 // Case map
+	0x1E54:  []rune{0x1E55},                 // Case map
+	0x1E56:  []rune{0x1E57},                 // Case map
+	0x1E58:  []rune{0x1E59},                 // Case map
+	0x1E5A:  []rune{0x1E5B},                 // Case map
+	0x1E5C:  []rune{0x1E5D},                 // Case map
+	0x1E5E:  []rune{0x1E5F},                 // Case map
+	0x1E60:  []rune{0x1E61},                 // Case map
+	0x1E62:  []rune{0x1E63},                 // Case map
+	0x1E64:  []rune{0x1E65},                 // Case map
+	0x1E66:  []rune{0x1E67},                 // Case map
+	0x1E68:  []rune{0x1E69},                 // Case map
+	0x1E6A:  []rune{0x1E6B},                 // Case map
+	0x1E6C:  []rune{0x1E6D},                 // Case map
+	0x1E6E:  []rune{0x1E6F},                 // Case map
+	0x1E70:  []rune{0x1E71},                 // Case map
+	0x1E72:  []rune{0x1E73},                 // Case map
+	0x1E74:  []rune{0x1E75},                 // Case map
+	0x1E76:  []rune{0x1E77},                 // Case map
+	0x1E78:  []rune{0x1E79},                 // Case map
+	0x1E7A:  []rune{0x1E7B},                 // Case map
+	0x1E7C:  []rune{0x1E7D},                 // Case map
+	0x1E7E:  []rune{0x1E7F},                 // Case map
+	0x1E80:  []rune{0x1E81},                 // Case map
+	0x1E82:  []rune{0x1E83},                 // Case map
+	0x1E84:  []rune{0x1E85},                 // Case map
+	0x1E86:  []rune{0x1E87},                 // Case map
+	0x1E88:  []rune{0x1E89},                 // Case map
+	0x1E8A:  []rune{0x1E8B},                 // Case map
+	0x1E8C:  []rune{0x1E8D},                 // Case map
+	0x1E8E:  []rune{0x1E8F},                 // Case map
+	0x1E90:  []rune{0x1E91},                 // Case map
+	0x1E92:  []rune{0x1E93},                 // Case map
+	0x1E94:  []rune{0x1E95},                 // Case map
+	0x1E96:  []rune{0x0068, 0x0331},         // Case map
+	0x1E97:  []rune{0x0074, 0x0308},         // Case map
+	0x1E98:  []rune{0x0077, 0x030A},         // Case map
+	0x1E99:  []rune{0x0079, 0x030A},         // Case map
+	0x1E9A:  []rune{0x0061, 0x02BE},         // Case map
+	0x1E9B:  []rune{0x1E61},                 // Case map
+	0x1EA0:  []rune{0x1EA1},                 // Case map
+	0x1EA2:  []rune{0x1EA3},                 // Case map
+	0x1EA4:  []rune{0x1EA5},                 // Case map
+	0x1EA6:  []rune{0x1EA7},                 // Case map
+	0x1EA8:  []rune{0x1EA9},                 // Case map
+	0x1EAA:  []rune{0x1EAB},                 // Case map
+	0x1EAC:  []rune{0x1EAD},                 // Case map
+	0x1EAE:  []rune{0x1EAF},                 // Case map
+	0x1EB0:  []rune{0x1EB1},                 // Case map
+	0x1EB2:  []rune{0x1EB3},                 // Case map
+	0x1EB4:  []rune{0x1EB5},                 // Case map
+	0x1EB6:  []rune{0x1EB7},                 // Case map
+	0x1EB8:  []rune{0x1EB9},                 // Case map
+	0x1EBA:  []rune{0x1EBB},                 // Case map
+	0x1EBC:  []rune{0x1EBD},                 // Case map
+	0x1EBE:  []rune{0x1EBF},                 // Case map
+	0x1EC0:  []rune{0x1EC1},                 // Case map
+	0x1EC2:  []rune{0x1EC3},                 // Case map
+	0x1EC4:  []rune{0x1EC5},                 // Case map
+	0x1EC6:  []rune{0x1EC7},                 // Case map
+	0x1EC8:  []rune{0x1EC9},                 // Case map
+	0x1ECA:  []rune{0x1ECB},                 // Case map
+	0x1ECC:  []rune{0x1ECD},                 // Case map
+	0x1ECE:  []rune{0x1ECF},                 // Case map
+	0x1ED0:  []rune{0x1ED1},                 // Case map
+	0x1ED2:  []rune{0x1ED3},                 // Case map
+	0x1ED4:  []rune{0x1ED5},                 // Case map
+	0x1ED6:  []rune{0x1ED7},                 // Case map
+	0x1ED8:  []rune{0x1ED9},                 // Case map
+	0x1EDA:  []rune{0x1EDB},                 // Case map
+	0x1EDC:  []rune{0x1EDD},                 // Case map
+	0x1EDE:  []rune{0x1EDF},                 // Case map
+	0x1EE0:  []rune{0x1EE1},                 // Case map
+	0x1EE2:  []rune{0x1EE3},                 // Case map
+	0x1EE4:  []rune{0x1EE5},                 // Case map
+	0x1EE6:  []rune{0x1EE7},                 // Case map
+	0x1EE8:  []rune{0x1EE9},                 // Case map
+	0x1EEA:  []rune{0x1EEB},                 // Case map
+	0x1EEC:  []rune{0x1EED},                 // Case map
+	0x1EEE:  []rune{0x1EEF},                 // Case map
+	0x1EF0:  []rune{0x1EF1},                 // Case map
+	0x1EF2:  []rune{0x1EF3},                 // Case map
+	0x1EF4:  []rune{0x1EF5},                 // Case map
+	0x1EF6:  []rune{0x1EF7},                 // Case map
+	0x1EF8:  []rune{0x1EF9},                 // Case map
+	0x1F08:  []rune{0x1F00},                 // Case map
+	0x1F09:  []rune{0x1F01},                 // Case map
+	0x1F0A:  []rune{0x1F02},                 // Case map
+	0x1F0B:  []rune{0x1F03},                 // Case map
+	0x1F0C:  []rune{0x1F04},                 // Case map
+	0x1F0D:  []rune{0x1F05},                 // Case map
+	0x1F0E:  []rune{0x1F06},                 // Case map
+	0x1F0F:  []rune{0x1F07},                 // Case map
+	0x1F18:  []rune{0x1F10},                 // Case map
+	0x1F19:  []rune{0x1F11},                 // Case map
+	0x1F1A:  []rune{0x1F12},                 // Case map
+	0x1F1B:  []rune{0x1F13},                 // Case map
+	0x1F1C:  []rune{0x1F14},                 // Case map
+	0x1F1D:  []rune{0x1F15},                 // Case map
+	0x1F28:  []rune{0x1F20},                 // Case map
+	0x1F29:  []rune{0x1F21},                 // Case map
+	0x1F2A:  []rune{0x1F22},                 // Case map
+	0x1F2B:  []rune{0x1F23},                 // Case map
+	0x1F2C:  []rune{0x1F24},                 // Case map
+	0x1F2D:  []rune{0x1F25},                 // Case map
+	0x1F2E:  []rune{0x1F26},                 // Case map
+	0x1F2F:  []rune{0x1F27},                 // Case map
+	0x1F38:  []rune{0x1F30},                 // Case map
+	0x1F39:  []rune{0x1F31},                 // Case map
+	0x1F3A:  []rune{0x1F32},                 // Case map
+	0x1F3B:  []rune{0x1F33},                 // Case map
+	0x1F3C:  []rune{0x1F34},                 // Case map
+	0x1F3D:  []rune{0x1F35},                 // Case map
+	0x1F3E:  []rune{0x1F36},                 // Case map
+	0x1F3F:  []rune{0x1F37},                 // Case map
+	0x1F48:  []rune{0x1F40},                 // Case map
+	0x1F49:  []rune{0x1F41},                 // Case map
+	0x1F4A:  []rune{0x1F42},                 // Case map
+	0x1F4B:  []rune{0x1F43},                 // Case map
+	0x1F4C:  []rune{0x1F44},                 // Case map
+	0x1F4D:  []rune{0x1F45},                 // Case map
+	0x1F50:  []rune{0x03C5, 0x0313},         // Case map
+	0x1F52:  []rune{0x03C5, 0x0313, 0x0300}, // Case map
+	0x1F54:  []rune{0x03C5, 0x0313, 0x0301}, // Case map
+	0x1F56:  []rune{0x03C5, 0x0313, 0x0342}, // Case map
+	0x1F59:  []rune{0x1F51},                 // Case map
+	0x1F5B:  []rune{0x1F53},                 // Case map
+	0x1F5D:  []rune{0x1F55},                 // Case map
+	0x1F5F:  []rune{0x1F57},                 // Case map
+	0x1F68:  []rune{0x1F60},                 // Case map
+	0x1F69:  []rune{0x1F61},                 // Case map
+	0x1F6A:  []rune{0x1F62},                 // Case map
+	0x1F6B:  []rune{0x1F63},                 // Case map
+	0x1F6C:  []rune{0x1F64},                 // Case map
+	0x1F6D:  []rune{0x1F65},                 // Case map
+	0x1F6E:  []rune{0x1F66},                 // Case map
+	0x1F6F:  []rune{0x1F67},                 // Case map
+	0x1F80:  []rune{0x1F00, 0x03B9},         // Case map
+	0x1F81:  []rune{0x1F01, 0x03B9},         // Case map
+	0x1F82:  []rune{0x1F02, 0x03B9},         // Case map
+	0x1F83:  []rune{0x1F03, 0x03B9},         // Case map
+	0x1F84:  []rune{0x1F04, 0x03B9},         // Case map
+	0x1F85:  []rune{0x1F05, 0x03B9},         // Case map
+	0x1F86:  []rune{0x1F06, 0x03B9},         // Case map
+	0x1F87:  []rune{0x1F07, 0x03B9},         // Case map
+	0x1F88:  []rune{0x1F00, 0x03B9},         // Case map
+	0x1F89:  []rune{0x1F01, 0x03B9},         // Case map
+	0x1F8A:  []rune{0x1F02, 0x03B9},         // Case map
+	0x1F8B:  []rune{0x1F03, 0x03B9},         // Case map
+	0x1F8C:  []rune{0x1F04, 0x03B9},         // Case map
+	0x1F8D:  []rune{0x1F05, 0x03B9},         // Case map
+	0x1F8E:  []rune{0x1F06, 0x03B9},         // Case map
+	0x1F8F:  []rune{0x1F07, 0x03B9},         // Case map
+	0x1F90:  []rune{0x1F20, 0x03B9},         // Case map
+	0x1F91:  []rune{0x1F21, 0x03B9},         // Case map
+	0x1F92:  []rune{0x1F22, 0x03B9},         // Case map
+	0x1F93:  []rune{0x1F23, 0x03B9},         // Case map
+	0x1F94:  []rune{0x1F24, 0x03B9},         // Case map
+	0x1F95:  []rune{0x1F25, 0x03B9},         // Case map
+	0x1F96:  []rune{0x1F26, 0x03B9},         // Case map
+	0x1F97:  []rune{0x1F27, 0x03B9},         // Case map
+	0x1F98:  []rune{0x1F20, 0x03B9},         // Case map
+	0x1F99:  []rune{0x1F21, 0x03B9},         // Case map
+	0x1F9A:  []rune{0x1F22, 0x03B9},         // Case map
+	0x1F9B:  []rune{0x1F23, 0x03B9},         // Case map
+	0x1F9C:  []rune{0x1F24, 0x03B9},         // Case map
+	0x1F9D:  []rune{0x1F25, 0x03B9},         // Case map
+	0x1F9E:  []rune{0x1F26, 0x03B9},         // Case map
+	0x1F9F:  []rune{0x1F27, 0x03B9},         // Case map
+	0x1FA0:  []rune{0x1F60, 0x03B9},         // Case map
+	0x1FA1:  []rune{0x1F61, 0x03B9},         // Case map
+	0x1FA2:  []rune{0x1F62, 0x03B9},         // Case map
+	0x1FA3:  []rune{0x1F63, 0x03B9},         // Case map
+	0x1FA4:  []rune{0x1F64, 0x03B9},         // Case map
+	0x1FA5:  []rune{0x1F65, 0x03B9},         // Case map
+	0x1FA6:  []rune{0x1F66, 0x03B9},         // Case map
+	0x1FA7:  []rune{0x1F67, 0x03B9},         // Case map
+	0x1FA8:  []rune{0x1F60, 0x03B9},         // Case map
+	0x1FA9:  []rune{0x1F61, 0x03B9},         // Case map
+	0x1FAA:  []rune{0x1F62, 0x03B9},         // Case map
+	0x1FAB:  []rune{0x1F63, 0x03B9},         // Case map
+	0x1FAC:  []rune{0x1F64, 0x03B9},         // Case map
+	0x1FAD:  []rune{0x1F65, 0x03B9},         // Case map
+	0x1FAE:  []rune{0x1F66, 0x03B9},         // Case map
+	0x1FAF:  []rune{0x1F67, 0x03B9},         // Case map
+	0x1FB2:  []rune{0x1F70, 0x03B9},         // Case map
+	0x1FB3:  []rune{0x03B1, 0x03B9},         // Case map
+	0x1FB4:  []rune{0x03AC, 0x03B9},         // Case map
+	0x1FB6:  []rune{0x03B1, 0x0342},         // Case map
+	0x1FB7:  []rune{0x03B1, 0x0342, 0x03B9}, // Case map
+	0x1FB8:  []rune{0x1FB0},                 // Case map
+	0x1FB9:  []rune{0x1FB1},                 // Case map
+	0x1FBA:  []rune{0x1F70},                 // Case map
+	0x1FBB:  []rune{0x1F71},                 // Case map
+	0x1FBC:  []rune{0x03B1, 0x03B9},         // Case map
+	0x1FBE:  []rune{0x03B9},                 // Case map
+	0x1FC2:  []rune{0x1F74, 0x03B9},         // Case map
+	0x1FC3:  []rune{0x03B7, 0x03B9},         // Case map
+	0x1FC4:  []rune{0x03AE, 0x03B9},         // Case map
+	0x1FC6:  []rune{0x03B7, 0x0342},         // Case map
+	0x1FC7:  []rune{0x03B7, 0x0342, 0x03B9}, // Case map
+	0x1FC8:  []rune{0x1F72},                 // Case map
+	0x1FC9:  []rune{0x1F73},                 // Case map
+	0x1FCA:  []rune{0x1F74},                 // Case map
+	0x1FCB:  []rune{0x1F75},                 // Case map
+	0x1FCC:  []rune{0x03B7, 0x03B9},         // Case map
+	0x1FD2:  []rune{0x03B9, 0x0308, 0x0300}, // Case map
+	0x1FD3:  []rune{0x03B9, 0x0308, 0x0301}, // Case map
+	0x1FD6:  []rune{0x03B9, 0x0342},         // Case map
+	0x1FD7:  []rune{0x03B9, 0x0308, 0x0342}, // Case map
+	0x1FD8:  []rune{0x1FD0},                 // Case map
+	0x1FD9:  []rune{0x1FD1},                 // Case map
+	0x1FDA:  []rune{0x1F76},                 // Case map
+	0x1FDB:  []rune{0x1F77},                 // Case map
+	0x1FE2:  []rune{0x03C5, 0x0308, 0x0300}, // Case map
+	0x1FE3:  []rune{0x03C5, 0x0308, 0x0301}, // Case map
+	0x1FE4:  []rune{0x03C1, 0x0313},         // Case map
+	0x1FE6:  []rune{0x03C5, 0x0342},         // Case map
+	0x1FE7:  []rune{0x03C5, 0x0308, 0x0342}, // Case map
+	0x1FE8:  []rune{0x1FE0},                 // Case map
+	0x1FE9:  []rune{0x1FE1},                 // Case map
+	0x1FEA:  []rune{0x1F7A},                 // Case map
+	0x1FEB:  []rune{0x1F7B},                 // Case map
+	0x1FEC:  []rune{0x1FE5},                 // Case map
+	0x1FF2:  []rune{0x1F7C, 0x03B9},         // Case map
+	0x1FF3:  []rune{0x03C9, 0x03B9},         // Case map
+	0x1FF4:  []rune{0x03CE, 0x03B9},         // Case map
+	0x1FF6:  []rune{0x03C9, 0x0342},         // Case map
+	0x1FF7:  []rune{0x03C9, 0x0342, 0x03B9}, // Case map
+	0x1FF8:  []rune{0x1F78},                 // Case map
+	0x1FF9:  []rune{0x1F79},                 // Case map
+	0x1FFA:  []rune{0x1F7C},                 // Case map
+	0x1FFB:  []rune{0x1F7D},                 // Case map
+	0x1FFC:  []rune{0x03C9, 0x03B9},         // Case map
+	0x2126:  []rune{0x03C9},                 // Case map
+	0x212A:  []rune{0x006B},                 // Case map
+	0x212B:  []rune{0x00E5},                 // Case map
+	0x2160:  []rune{0x2170},                 // Case map
+	0x2161:  []rune{0x2171},                 // Case map
+	0x2162:  []rune{0x2172},                 // Case map
+	0x2163:  []rune{0x2173},                 // Case map
+	0x2164:  []rune{0x2174},                 // Case map
+	0x2165:  []rune{0x2175},                 // Case map
+	0x2166:  []rune{0x2176},                 // Case map
+	0x2167:  []rune{0x2177},                 // Case map
+	0x2168:  []rune{0x2178},                 // Case map
+	0x2169:  []rune{0x2179},                 // Case map
+	0x216A:  []rune{0x217A},                 // Case map
+	0x216B:  []rune{0x217B},                 // Case map
+	0x216C:  []rune{0x217C},                 // Case map
+	0x216D:  []rune{0x217D},                 // Case map
+	0x216E:  []rune{0x217E},                 // Case map
+	0x216F:  []rune{0x217F},                 // Case map
+	0x24B6:  []rune{0x24D0},                 // Case map
+	0x24B7:  []rune{0x24D1},                 // Case map
+	0x24B8:  []rune{0x24D2},                 // Case map
+	0x24B9:  []rune{0x24D3},                 // Case map
+	0x24BA:  []rune{0x24D4},                 // Case map
+	0x24BB:  []rune{0x24D5},                 // Case map
+	0x24BC:  []rune{0x24D6},                 // Case map
+	0x24BD:  []rune{0x24D7},                 // Case map
+	0x24BE:  []rune{0x24D8},                 // Case map
+	0x24BF:  []rune{0x24D9},                 // Case map
+	0x24C0:  []rune{0x24DA},                 // Case map
+	0x24C1:  []rune{0x24DB},                 // Case map
+	0x24C2:  []rune{0x24DC},                 // Case map
+	0x24C3:  []rune{0x24DD},                 // Case map
+	0x24C4:  []rune{0x24DE},                 // Case map
+	0x24C5:  []rune{0x24DF},                 // Case map
+	0x24C6:  []rune{0x24E0},                 // Case map
+	0x24C7:  []rune{0x24E1},                 // Case map
+	0x24C8:  []rune{0x24E2},                 // Case map
+	0x24C9:  []rune{0x24E3},                 // Case map
+	0x24CA:  []rune{0x24E4},                 // Case map
+	0x24CB:  []rune{0x24E5},                 // Case map
+	0x24CC:  []rune{0x24E6},                 // Case map
+	0x24CD:  []rune{0x24E7},                 // Case map
+	0x24CE:  []rune{0x24E8},                 // Case map
+	0x24CF:  []rune{0x24E9},                 // Case map
+	0xFB00:  []rune{0x0066, 0x0066},         // Case map
+	0xFB01:  []rune{0x0066, 0x0069},         // Case map
+	0xFB02:  []rune{0x0066, 0x006C},         // Case map
+	0xFB03:  []rune{0x0066, 0x0066, 0x0069}, // Case map
+	0xFB04:  []rune{0x0066, 0x0066, 0x006C}, // Case map
+	0xFB05:  []rune{0x0073, 0x0074},         // Case map
+	0xFB06:  []rune{0x0073, 0x0074},         // Case map
+	0xFB13:  []rune{0x0574, 0x0576},         // Case map
+	0xFB14:  []rune{0x0574, 0x0565},         // Case map
+	0xFB15:  []rune{0x0574, 0x056B},         // Case map
+	0xFB16:  []rune{0x057E, 0x0576},         // Case map
+	0xFB17:  []rune{0x0574, 0x056D},         // Case map
+	0xFF21:  []rune{0xFF41},                 // Case map
+	0xFF22:  []rune{0xFF42},                 // Case map
+	0xFF23:  []rune{0xFF43},                 // Case map
+	0xFF24:  []rune{0xFF44},                 // Case map
+	0xFF25:  []rune{0xFF45},                 // Case map
+	0xFF26:  []rune{0xFF46},                 // Case map
+	0xFF27:  []rune{0xFF47},                 // Case map
+	0xFF28:  []rune{0xFF48},                 // Case map
+	0xFF29:  []rune{0xFF49},                 // Case map
+	0xFF2A:  []rune{0xFF4A},                 // Case map
+	0xFF2B:  []rune{0xFF4B},                 // Case map
+	0xFF2C:  []rune{0xFF4C},                 // Case map
+	0xFF2D:  []rune{0xFF4D},                 // Case map
+	0xFF2E:  []rune{0xFF4E},                 // Case map
+	0xFF2F:  []rune{0xFF4F},                 // Case map
+	0xFF30:  []rune{0xFF50},                 // Case map
+	0xFF31:  []rune{0xFF51},                 // Case map
+	0xFF32:  []rune{0xFF52},                 // Case map
+	0xFF33:  []rune{0xFF53},                 // Case map
+	0xFF34:  []rune{0xFF54},                 // Case map
+	0xFF35:  []rune{0xFF55},                 // Case map
+	0xFF36:  []rune{0xFF56},                 // Case map
+	0xFF37:  []rune{0xFF57},                 // Case map
+	0xFF38:  []rune{0xFF58},                 // Case map
+	0xFF39:  []rune{0xFF59},                 // Case map
+	0xFF3A:  []rune{0xFF5A},                 // Case map
+	0x10400: []rune{0x10428},                // Case map
+	0x10401: []rune{0x10429},                // Case map
+	0x10402: []rune{0x1042A},                // Case map
+	0x10403: []rune{0x1042B},                // Case map
+	0x10404: []rune{0x1042C},                // Case map
+	0x10405: []rune{0x1042D},                // Case map
+	0x10406: []rune{0x1042E},                // Case map
+	0x10407: []rune{0x1042F},                // Case map
+	0x10408: []rune{0x10430},                // Case map
+	0x10409: []rune{0x10431},                // Case map
+	0x1040A: []rune{0x10432},                // Case map
+	0x1040B: []rune{0x10433},                // Case map
+	0x1040C: []rune{0x10434},                // Case map
+	0x1040D: []rune{0x10435},                // Case map
+	0x1040E: []rune{0x10436},                // Case map
+	0x1040F: []rune{0x10437},                // Case map
+	0x10410: []rune{0x10438},                // Case map
+	0x10411: []rune{0x10439},                // Case map
+	0x10412: []rune{0x1043A},                // Case map
+	0x10413: []rune{0x1043B},                // Case map
+	0x10414: []rune{0x1043C},                // Case map
+	0x10415: []rune{0x1043D},                // Case map
+	0x10416: []rune{0x1043E},                // Case map
+	0x10417: []rune{0x1043F},                // Case map
+	0x10418: []rune{0x10440},                // Case map
+	0x10419: []rune{0x10441},                // Case map
+	0x1041A: []rune{0x10442},                // Case map
+	0x1041B: []rune{0x10443},                // Case map
+	0x1041C: []rune{0x10444},                // Case map
+	0x1041D: []rune{0x10445},                // Case map
+	0x1041E: []rune{0x10446},                // Case map
+	0x1041F: []rune{0x10447},                // Case map
+	0x10420: []rune{0x10448},                // Case map
+	0x10421: []rune{0x10449},                // Case map
+	0x10422: []rune{0x1044A},                // Case map
+	0x10423: []rune{0x1044B},                // Case map
+	0x10424: []rune{0x1044C},                // Case map
+	0x10425: []rune{0x1044D},                // Case map
+}
+
+// TableB3 represents RFC-3454 Table B.3.
+var TableB3 Mapping = tableB3
+
+var tableC1_1 = Set{
+	RuneRange{0x0020, 0x0020}, // SPACE
+}
+
+// TableC1_1 represents RFC-3454 Table C.1.1.
+var TableC1_1 Set = tableC1_1
+
+var tableC1_2 = Set{
+	RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE
+	RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK
+	RuneRange{0x2000, 0x2000}, // EN QUAD
+	RuneRange{0x2001, 0x2001}, // EM QUAD
+	RuneRange{0x2002, 0x2002}, // EN SPACE
+	RuneRange{0x2003, 0x2003}, // EM SPACE
+	RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE
+	RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE
+	RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE
+	RuneRange{0x2007, 0x2007}, // FIGURE SPACE
+	RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE
+	RuneRange{0x2009, 0x2009}, // THIN SPACE
+	RuneRange{0x200A, 0x200A}, // HAIR SPACE
+	RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE
+	RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE
+	RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE
+	RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE
+}
+
+// TableC1_2 represents RFC-3454 Table C.1.2.
+var TableC1_2 Set = tableC1_2
+
+var tableC2_1 = Set{
+	RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS]
+	RuneRange{0x007F, 0x007F}, // DELETE
+}
+
+// TableC2_1 represents RFC-3454 Table C.2.1.
+var TableC2_1 Set = tableC2_1
+
+var tableC2_2 = Set{
+	RuneRange{0x0080, 0x009F},   // [CONTROL CHARACTERS]
+	RuneRange{0x06DD, 0x06DD},   // ARABIC END OF AYAH
+	RuneRange{0x070F, 0x070F},   // SYRIAC ABBREVIATION MARK
+	RuneRange{0x180E, 0x180E},   // MONGOLIAN VOWEL SEPARATOR
+	RuneRange{0x200C, 0x200C},   // ZERO WIDTH NON-JOINER
+	RuneRange{0x200D, 0x200D},   // ZERO WIDTH JOINER
+	RuneRange{0x2028, 0x2028},   // LINE SEPARATOR
+	RuneRange{0x2029, 0x2029},   // PARAGRAPH SEPARATOR
+	RuneRange{0x2060, 0x2060},   // WORD JOINER
+	RuneRange{0x2061, 0x2061},   // FUNCTION APPLICATION
+	RuneRange{0x2062, 0x2062},   // INVISIBLE TIMES
+	RuneRange{0x2063, 0x2063},   // INVISIBLE SEPARATOR
+	RuneRange{0x206A, 0x206F},   // [CONTROL CHARACTERS]
+	RuneRange{0xFEFF, 0xFEFF},   // ZERO WIDTH NO-BREAK SPACE
+	RuneRange{0xFFF9, 0xFFFC},   // [CONTROL CHARACTERS]
+	RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS]
+}
+
+// TableC2_2 represents RFC-3454 Table C.2.2.
+var TableC2_2 Set = tableC2_2
+
+var tableC3 = Set{
+	RuneRange{0xE000, 0xF8FF},     // [PRIVATE USE, PLANE 0]
+	RuneRange{0xF0000, 0xFFFFD},   // [PRIVATE USE, PLANE 15]
+	RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16]
+}
+
+// TableC3 represents RFC-3454 Table C.3.
+var TableC3 Set = tableC3
+
+var tableC4 = Set{
+	RuneRange{0xFDD0, 0xFDEF},     // [NONCHARACTER CODE POINTS]
+	RuneRange{0xFFFE, 0xFFFF},     // [NONCHARACTER CODE POINTS]
+	RuneRange{0x1FFFE, 0x1FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x2FFFE, 0x2FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x3FFFE, 0x3FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x4FFFE, 0x4FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x5FFFE, 0x5FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x6FFFE, 0x6FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x7FFFE, 0x7FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x8FFFE, 0x8FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x9FFFE, 0x9FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xAFFFE, 0xAFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xBFFFE, 0xBFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xCFFFE, 0xCFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xDFFFE, 0xDFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xEFFFE, 0xEFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xFFFFE, 0xFFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS]
+}
+
+// TableC4 represents RFC-3454 Table C.4.
+var TableC4 Set = tableC4
+
+var tableC5 = Set{
+	RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES]
+}
+
+// TableC5 represents RFC-3454 Table C.5.
+var TableC5 Set = tableC5
+
+var tableC6 = Set{
+	RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR
+	RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR
+	RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR
+	RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER
+	RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER
+}
+
+// TableC6 represents RFC-3454 Table C.6.
+var TableC6 Set = tableC6
+
+var tableC7 = Set{
+	RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS]
+}
+
+// TableC7 represents RFC-3454 Table C.7.
+var TableC7 Set = tableC7
+
+var tableC8 = Set{
+	RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK
+	RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK
+	RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK
+	RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK
+	RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING
+	RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING
+	RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING
+	RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE
+	RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE
+	RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING
+	RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING
+	RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING
+	RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING
+	RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES
+	RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES
+}
+
+// TableC8 represents RFC-3454 Table C.8.
+var TableC8 Set = tableC8
+
+var tableC9 = Set{
+	RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG
+	RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS]
+}
+
+// TableC9 represents RFC-3454 Table C.9.
+var TableC9 Set = tableC9
+
+var tableD1 = Set{
+	RuneRange{0x05BE, 0x05BE},
+	RuneRange{0x05C0, 0x05C0},
+	RuneRange{0x05C3, 0x05C3},
+	RuneRange{0x05D0, 0x05EA},
+	RuneRange{0x05F0, 0x05F4},
+	RuneRange{0x061B, 0x061B},
+	RuneRange{0x061F, 0x061F},
+	RuneRange{0x0621, 0x063A},
+	RuneRange{0x0640, 0x064A},
+	RuneRange{0x066D, 0x066F},
+	RuneRange{0x0671, 0x06D5},
+	RuneRange{0x06DD, 0x06DD},
+	RuneRange{0x06E5, 0x06E6},
+	RuneRange{0x06FA, 0x06FE},
+	RuneRange{0x0700, 0x070D},
+	RuneRange{0x0710, 0x0710},
+	RuneRange{0x0712, 0x072C},
+	RuneRange{0x0780, 0x07A5},
+	RuneRange{0x07B1, 0x07B1},
+	RuneRange{0x200F, 0x200F},
+	RuneRange{0xFB1D, 0xFB1D},
+	RuneRange{0xFB1F, 0xFB28},
+	RuneRange{0xFB2A, 0xFB36},
+	RuneRange{0xFB38, 0xFB3C},
+	RuneRange{0xFB3E, 0xFB3E},
+	RuneRange{0xFB40, 0xFB41},
+	RuneRange{0xFB43, 0xFB44},
+	RuneRange{0xFB46, 0xFBB1},
+	RuneRange{0xFBD3, 0xFD3D},
+	RuneRange{0xFD50, 0xFD8F},
+	RuneRange{0xFD92, 0xFDC7},
+	RuneRange{0xFDF0, 0xFDFC},
+	RuneRange{0xFE70, 0xFE74},
+	RuneRange{0xFE76, 0xFEFC},
+}
+
+// TableD1 represents RFC-3454 Table D.1.
+var TableD1 Set = tableD1
+
+var tableD2 = Set{
+	RuneRange{0x0041, 0x005A},
+	RuneRange{0x0061, 0x007A},
+	RuneRange{0x00AA, 0x00AA},
+	RuneRange{0x00B5, 0x00B5},
+	RuneRange{0x00BA, 0x00BA},
+	RuneRange{0x00C0, 0x00D6},
+	RuneRange{0x00D8, 0x00F6},
+	RuneRange{0x00F8, 0x0220},
+	RuneRange{0x0222, 0x0233},
+	RuneRange{0x0250, 0x02AD},
+	RuneRange{0x02B0, 0x02B8},
+	RuneRange{0x02BB, 0x02C1},
+	RuneRange{0x02D0, 0x02D1},
+	RuneRange{0x02E0, 0x02E4},
+	RuneRange{0x02EE, 0x02EE},
+	RuneRange{0x037A, 0x037A},
+	RuneRange{0x0386, 0x0386},
+	RuneRange{0x0388, 0x038A},
+	RuneRange{0x038C, 0x038C},
+	RuneRange{0x038E, 0x03A1},
+	RuneRange{0x03A3, 0x03CE},
+	RuneRange{0x03D0, 0x03F5},
+	RuneRange{0x0400, 0x0482},
+	RuneRange{0x048A, 0x04CE},
+	RuneRange{0x04D0, 0x04F5},
+	RuneRange{0x04F8, 0x04F9},
+	RuneRange{0x0500, 0x050F},
+	RuneRange{0x0531, 0x0556},
+	RuneRange{0x0559, 0x055F},
+	RuneRange{0x0561, 0x0587},
+	RuneRange{0x0589, 0x0589},
+	RuneRange{0x0903, 0x0903},
+	RuneRange{0x0905, 0x0939},
+	RuneRange{0x093D, 0x0940},
+	RuneRange{0x0949, 0x094C},
+	RuneRange{0x0950, 0x0950},
+	RuneRange{0x0958, 0x0961},
+	RuneRange{0x0964, 0x0970},
+	RuneRange{0x0982, 0x0983},
+	RuneRange{0x0985, 0x098C},
+	RuneRange{0x098F, 0x0990},
+	RuneRange{0x0993, 0x09A8},
+	RuneRange{0x09AA, 0x09B0},
+	RuneRange{0x09B2, 0x09B2},
+	RuneRange{0x09B6, 0x09B9},
+	RuneRange{0x09BE, 0x09C0},
+	RuneRange{0x09C7, 0x09C8},
+	RuneRange{0x09CB, 0x09CC},
+	RuneRange{0x09D7, 0x09D7},
+	RuneRange{0x09DC, 0x09DD},
+	RuneRange{0x09DF, 0x09E1},
+	RuneRange{0x09E6, 0x09F1},
+	RuneRange{0x09F4, 0x09FA},
+	RuneRange{0x0A05, 0x0A0A},
+	RuneRange{0x0A0F, 0x0A10},
+	RuneRange{0x0A13, 0x0A28},
+	RuneRange{0x0A2A, 0x0A30},
+	RuneRange{0x0A32, 0x0A33},
+	RuneRange{0x0A35, 0x0A36},
+	RuneRange{0x0A38, 0x0A39},
+	RuneRange{0x0A3E, 0x0A40},
+	RuneRange{0x0A59, 0x0A5C},
+	RuneRange{0x0A5E, 0x0A5E},
+	RuneRange{0x0A66, 0x0A6F},
+	RuneRange{0x0A72, 0x0A74},
+	RuneRange{0x0A83, 0x0A83},
+	RuneRange{0x0A85, 0x0A8B},
+	RuneRange{0x0A8D, 0x0A8D},
+	RuneRange{0x0A8F, 0x0A91},
+	RuneRange{0x0A93, 0x0AA8},
+	RuneRange{0x0AAA, 0x0AB0},
+	RuneRange{0x0AB2, 0x0AB3},
+	RuneRange{0x0AB5, 0x0AB9},
+	RuneRange{0x0ABD, 0x0AC0},
+	RuneRange{0x0AC9, 0x0AC9},
+	RuneRange{0x0ACB, 0x0ACC},
+	RuneRange{0x0AD0, 0x0AD0},
+	RuneRange{0x0AE0, 0x0AE0},
+	RuneRange{0x0AE6, 0x0AEF},
+	RuneRange{0x0B02, 0x0B03},
+	RuneRange{0x0B05, 0x0B0C},
+	RuneRange{0x0B0F, 0x0B10},
+	RuneRange{0x0B13, 0x0B28},
+	RuneRange{0x0B2A, 0x0B30},
+	RuneRange{0x0B32, 0x0B33},
+	RuneRange{0x0B36, 0x0B39},
+	RuneRange{0x0B3D, 0x0B3E},
+	RuneRange{0x0B40, 0x0B40},
+	RuneRange{0x0B47, 0x0B48},
+	RuneRange{0x0B4B, 0x0B4C},
+	RuneRange{0x0B57, 0x0B57},
+	RuneRange{0x0B5C, 0x0B5D},
+	RuneRange{0x0B5F, 0x0B61},
+	RuneRange{0x0B66, 0x0B70},
+	RuneRange{0x0B83, 0x0B83},
+	RuneRange{0x0B85, 0x0B8A},
+	RuneRange{0x0B8E, 0x0B90},
+	RuneRange{0x0B92, 0x0B95},
+	RuneRange{0x0B99, 0x0B9A},
+	RuneRange{0x0B9C, 0x0B9C},
+	RuneRange{0x0B9E, 0x0B9F},
+	RuneRange{0x0BA3, 0x0BA4},
+	RuneRange{0x0BA8, 0x0BAA},
+	RuneRange{0x0BAE, 0x0BB5},
+	RuneRange{0x0BB7, 0x0BB9},
+	RuneRange{0x0BBE, 0x0BBF},
+	RuneRange{0x0BC1, 0x0BC2},
+	RuneRange{0x0BC6, 0x0BC8},
+	RuneRange{0x0BCA, 0x0BCC},
+	RuneRange{0x0BD7, 0x0BD7},
+	RuneRange{0x0BE7, 0x0BF2},
+	RuneRange{0x0C01, 0x0C03},
+	RuneRange{0x0C05, 0x0C0C},
+	RuneRange{0x0C0E, 0x0C10},
+	RuneRange{0x0C12, 0x0C28},
+	RuneRange{0x0C2A, 0x0C33},
+	RuneRange{0x0C35, 0x0C39},
+	RuneRange{0x0C41, 0x0C44},
+	RuneRange{0x0C60, 0x0C61},
+	RuneRange{0x0C66, 0x0C6F},
+	RuneRange{0x0C82, 0x0C83},
+	RuneRange{0x0C85, 0x0C8C},
+	RuneRange{0x0C8E, 0x0C90},
+	RuneRange{0x0C92, 0x0CA8},
+	RuneRange{0x0CAA, 0x0CB3},
+	RuneRange{0x0CB5, 0x0CB9},
+	RuneRange{0x0CBE, 0x0CBE},
+	RuneRange{0x0CC0, 0x0CC4},
+	RuneRange{0x0CC7, 0x0CC8},
+	RuneRange{0x0CCA, 0x0CCB},
+	RuneRange{0x0CD5, 0x0CD6},
+	RuneRange{0x0CDE, 0x0CDE},
+	RuneRange{0x0CE0, 0x0CE1},
+	RuneRange{0x0CE6, 0x0CEF},
+	RuneRange{0x0D02, 0x0D03},
+	RuneRange{0x0D05, 0x0D0C},
+	RuneRange{0x0D0E, 0x0D10},
+	RuneRange{0x0D12, 0x0D28},
+	RuneRange{0x0D2A, 0x0D39},
+	RuneRange{0x0D3E, 0x0D40},
+	RuneRange{0x0D46, 0x0D48},
+	RuneRange{0x0D4A, 0x0D4C},
+	RuneRange{0x0D57, 0x0D57},
+	RuneRange{0x0D60, 0x0D61},
+	RuneRange{0x0D66, 0x0D6F},
+	RuneRange{0x0D82, 0x0D83},
+	RuneRange{0x0D85, 0x0D96},
+	RuneRange{0x0D9A, 0x0DB1},
+	RuneRange{0x0DB3, 0x0DBB},
+	RuneRange{0x0DBD, 0x0DBD},
+	RuneRange{0x0DC0, 0x0DC6},
+	RuneRange{0x0DCF, 0x0DD1},
+	RuneRange{0x0DD8, 0x0DDF},
+	RuneRange{0x0DF2, 0x0DF4},
+	RuneRange{0x0E01, 0x0E30},
+	RuneRange{0x0E32, 0x0E33},
+	RuneRange{0x0E40, 0x0E46},
+	RuneRange{0x0E4F, 0x0E5B},
+	RuneRange{0x0E81, 0x0E82},
+	RuneRange{0x0E84, 0x0E84},
+	RuneRange{0x0E87, 0x0E88},
+	RuneRange{0x0E8A, 0x0E8A},
+	RuneRange{0x0E8D, 0x0E8D},
+	RuneRange{0x0E94, 0x0E97},
+	RuneRange{0x0E99, 0x0E9F},
+	RuneRange{0x0EA1, 0x0EA3},
+	RuneRange{0x0EA5, 0x0EA5},
+	RuneRange{0x0EA7, 0x0EA7},
+	RuneRange{0x0EAA, 0x0EAB},
+	RuneRange{0x0EAD, 0x0EB0},
+	RuneRange{0x0EB2, 0x0EB3},
+	RuneRange{0x0EBD, 0x0EBD},
+	RuneRange{0x0EC0, 0x0EC4},
+	RuneRange{0x0EC6, 0x0EC6},
+	RuneRange{0x0ED0, 0x0ED9},
+	RuneRange{0x0EDC, 0x0EDD},
+	RuneRange{0x0F00, 0x0F17},
+	RuneRange{0x0F1A, 0x0F34},
+	RuneRange{0x0F36, 0x0F36},
+	RuneRange{0x0F38, 0x0F38},
+	RuneRange{0x0F3E, 0x0F47},
+	RuneRange{0x0F49, 0x0F6A},
+	RuneRange{0x0F7F, 0x0F7F},
+	RuneRange{0x0F85, 0x0F85},
+	RuneRange{0x0F88, 0x0F8B},
+	RuneRange{0x0FBE, 0x0FC5},
+	RuneRange{0x0FC7, 0x0FCC},
+	RuneRange{0x0FCF, 0x0FCF},
+	RuneRange{0x1000, 0x1021},
+	RuneRange{0x1023, 0x1027},
+	RuneRange{0x1029, 0x102A},
+	RuneRange{0x102C, 0x102C},
+	RuneRange{0x1031, 0x1031},
+	RuneRange{0x1038, 0x1038},
+	RuneRange{0x1040, 0x1057},
+	RuneRange{0x10A0, 0x10C5},
+	RuneRange{0x10D0, 0x10F8},
+	RuneRange{0x10FB, 0x10FB},
+	RuneRange{0x1100, 0x1159},
+	RuneRange{0x115F, 0x11A2},
+	RuneRange{0x11A8, 0x11F9},
+	RuneRange{0x1200, 0x1206},
+	RuneRange{0x1208, 0x1246},
+	RuneRange{0x1248, 0x1248},
+	RuneRange{0x124A, 0x124D},
+	RuneRange{0x1250, 0x1256},
+	RuneRange{0x1258, 0x1258},
+	RuneRange{0x125A, 0x125D},
+	RuneRange{0x1260, 0x1286},
+	RuneRange{0x1288, 0x1288},
+	RuneRange{0x128A, 0x128D},
+	RuneRange{0x1290, 0x12AE},
+	RuneRange{0x12B0, 0x12B0},
+	RuneRange{0x12B2, 0x12B5},
+	RuneRange{0x12B8, 0x12BE},
+	RuneRange{0x12C0, 0x12C0},
+	RuneRange{0x12C2, 0x12C5},
+	RuneRange{0x12C8, 0x12CE},
+	RuneRange{0x12D0, 0x12D6},
+	RuneRange{0x12D8, 0x12EE},
+	RuneRange{0x12F0, 0x130E},
+	RuneRange{0x1310, 0x1310},
+	RuneRange{0x1312, 0x1315},
+	RuneRange{0x1318, 0x131E},
+	RuneRange{0x1320, 0x1346},
+	RuneRange{0x1348, 0x135A},
+	RuneRange{0x1361, 0x137C},
+	RuneRange{0x13A0, 0x13F4},
+	RuneRange{0x1401, 0x1676},
+	RuneRange{0x1681, 0x169A},
+	RuneRange{0x16A0, 0x16F0},
+	RuneRange{0x1700, 0x170C},
+	RuneRange{0x170E, 0x1711},
+	RuneRange{0x1720, 0x1731},
+	RuneRange{0x1735, 0x1736},
+	RuneRange{0x1740, 0x1751},
+	RuneRange{0x1760, 0x176C},
+	RuneRange{0x176E, 0x1770},
+	RuneRange{0x1780, 0x17B6},
+	RuneRange{0x17BE, 0x17C5},
+	RuneRange{0x17C7, 0x17C8},
+	RuneRange{0x17D4, 0x17DA},
+	RuneRange{0x17DC, 0x17DC},
+	RuneRange{0x17E0, 0x17E9},
+	RuneRange{0x1810, 0x1819},
+	RuneRange{0x1820, 0x1877},
+	RuneRange{0x1880, 0x18A8},
+	RuneRange{0x1E00, 0x1E9B},
+	RuneRange{0x1EA0, 0x1EF9},
+	RuneRange{0x1F00, 0x1F15},
+	RuneRange{0x1F18, 0x1F1D},
+	RuneRange{0x1F20, 0x1F45},
+	RuneRange{0x1F48, 0x1F4D},
+	RuneRange{0x1F50, 0x1F57},
+	RuneRange{0x1F59, 0x1F59},
+	RuneRange{0x1F5B, 0x1F5B},
+	RuneRange{0x1F5D, 0x1F5D},
+	RuneRange{0x1F5F, 0x1F7D},
+	RuneRange{0x1F80, 0x1FB4},
+	RuneRange{0x1FB6, 0x1FBC},
+	RuneRange{0x1FBE, 0x1FBE},
+	RuneRange{0x1FC2, 0x1FC4},
+	RuneRange{0x1FC6, 0x1FCC},
+	RuneRange{0x1FD0, 0x1FD3},
+	RuneRange{0x1FD6, 0x1FDB},
+	RuneRange{0x1FE0, 0x1FEC},
+	RuneRange{0x1FF2, 0x1FF4},
+	RuneRange{0x1FF6, 0x1FFC},
+	RuneRange{0x200E, 0x200E},
+	RuneRange{0x2071, 0x2071},
+	RuneRange{0x207F, 0x207F},
+	RuneRange{0x2102, 0x2102},
+	RuneRange{0x2107, 0x2107},
+	RuneRange{0x210A, 0x2113},
+	RuneRange{0x2115, 0x2115},
+	RuneRange{0x2119, 0x211D},
+	RuneRange{0x2124, 0x2124},
+	RuneRange{0x2126, 0x2126},
+	RuneRange{0x2128, 0x2128},
+	RuneRange{0x212A, 0x212D},
+	RuneRange{0x212F, 0x2131},
+	RuneRange{0x2133, 0x2139},
+	RuneRange{0x213D, 0x213F},
+	RuneRange{0x2145, 0x2149},
+	RuneRange{0x2160, 0x2183},
+	RuneRange{0x2336, 0x237A},
+	RuneRange{0x2395, 0x2395},
+	RuneRange{0x249C, 0x24E9},
+	RuneRange{0x3005, 0x3007},
+	RuneRange{0x3021, 0x3029},
+	RuneRange{0x3031, 0x3035},
+	RuneRange{0x3038, 0x303C},
+	RuneRange{0x3041, 0x3096},
+	RuneRange{0x309D, 0x309F},
+	RuneRange{0x30A1, 0x30FA},
+	RuneRange{0x30FC, 0x30FF},
+	RuneRange{0x3105, 0x312C},
+	RuneRange{0x3131, 0x318E},
+	RuneRange{0x3190, 0x31B7},
+	RuneRange{0x31F0, 0x321C},
+	RuneRange{0x3220, 0x3243},
+	RuneRange{0x3260, 0x327B},
+	RuneRange{0x327F, 0x32B0},
+	RuneRange{0x32C0, 0x32CB},
+	RuneRange{0x32D0, 0x32FE},
+	RuneRange{0x3300, 0x3376},
+	RuneRange{0x337B, 0x33DD},
+	RuneRange{0x33E0, 0x33FE},
+	RuneRange{0x3400, 0x4DB5},
+	RuneRange{0x4E00, 0x9FA5},
+	RuneRange{0xA000, 0xA48C},
+	RuneRange{0xAC00, 0xD7A3},
+	RuneRange{0xD800, 0xFA2D},
+	RuneRange{0xFA30, 0xFA6A},
+	RuneRange{0xFB00, 0xFB06},
+	RuneRange{0xFB13, 0xFB17},
+	RuneRange{0xFF21, 0xFF3A},
+	RuneRange{0xFF41, 0xFF5A},
+	RuneRange{0xFF66, 0xFFBE},
+	RuneRange{0xFFC2, 0xFFC7},
+	RuneRange{0xFFCA, 0xFFCF},
+	RuneRange{0xFFD2, 0xFFD7},
+	RuneRange{0xFFDA, 0xFFDC},
+	RuneRange{0x10300, 0x1031E},
+	RuneRange{0x10320, 0x10323},
+	RuneRange{0x10330, 0x1034A},
+	RuneRange{0x10400, 0x10425},
+	RuneRange{0x10428, 0x1044D},
+	RuneRange{0x1D000, 0x1D0F5},
+	RuneRange{0x1D100, 0x1D126},
+	RuneRange{0x1D12A, 0x1D166},
+	RuneRange{0x1D16A, 0x1D172},
+	RuneRange{0x1D183, 0x1D184},
+	RuneRange{0x1D18C, 0x1D1A9},
+	RuneRange{0x1D1AE, 0x1D1DD},
+	RuneRange{0x1D400, 0x1D454},
+	RuneRange{0x1D456, 0x1D49C},
+	RuneRange{0x1D49E, 0x1D49F},
+	RuneRange{0x1D4A2, 0x1D4A2},
+	RuneRange{0x1D4A5, 0x1D4A6},
+	RuneRange{0x1D4A9, 0x1D4AC},
+	RuneRange{0x1D4AE, 0x1D4B9},
+	RuneRange{0x1D4BB, 0x1D4BB},
+	RuneRange{0x1D4BD, 0x1D4C0},
+	RuneRange{0x1D4C2, 0x1D4C3},
+	RuneRange{0x1D4C5, 0x1D505},
+	RuneRange{0x1D507, 0x1D50A},
+	RuneRange{0x1D50D, 0x1D514},
+	RuneRange{0x1D516, 0x1D51C},
+	RuneRange{0x1D51E, 0x1D539},
+	RuneRange{0x1D53B, 0x1D53E},
+	RuneRange{0x1D540, 0x1D544},
+	RuneRange{0x1D546, 0x1D546},
+	RuneRange{0x1D54A, 0x1D550},
+	RuneRange{0x1D552, 0x1D6A3},
+	RuneRange{0x1D6A8, 0x1D7C9},
+	RuneRange{0x20000, 0x2A6D6},
+	RuneRange{0x2F800, 0x2FA1D},
+	RuneRange{0xF0000, 0xFFFFD},
+	RuneRange{0x100000, 0x10FFFD},
+}
+
+// TableD2 represents RFC-3454 Table D.2.
+var TableD2 Set = tableD2
diff --git a/vendor/github.com/youmark/pkcs8/.gitignore b/vendor/github.com/youmark/pkcs8/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..836562412fe8a44fa99a515eeff68d2bc1a86daa
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/youmark/pkcs8/LICENSE b/vendor/github.com/youmark/pkcs8/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c939f448100c7f383c1de5734abf4a898151133b
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 youmark
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/youmark/pkcs8/README b/vendor/github.com/youmark/pkcs8/README
new file mode 100644
index 0000000000000000000000000000000000000000..376fcaf64e60538ff295df273d4362d556e039dd
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/README
@@ -0,0 +1 @@
+pkcs8 package: implements PKCS#8 private key parsing and conversion as defined in RFC5208 and RFC5958
diff --git a/vendor/github.com/youmark/pkcs8/README.md b/vendor/github.com/youmark/pkcs8/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef6c7625712b37b2b7afe914a418e8b77ca1070d
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/README.md
@@ -0,0 +1,22 @@
+pkcs8
+===
+OpenSSL can generate private keys in both the "traditional format" and the PKCS#8 format. Newer applications are advised to use the more secure PKCS#8 format. The Go standard crypto package provides a [function](http://golang.org/pkg/crypto/x509/#ParsePKCS8PrivateKey) to parse private keys in PKCS#8 format, but it can only handle unencrypted PKCS#8 keys. To use it, the user has to store the private key unencrypted on the file system, which is bad practice. In addition, the Go standard library lacks functions to convert RSA/ECDSA private keys into PKCS#8 format.
+
+The pkcs8 package fills this gap. It implements functions to process private keys in PKCS#8 format, as defined in [RFC5208](https://tools.ietf.org/html/rfc5208) and [RFC5958](https://tools.ietf.org/html/rfc5958). It handles both the unencrypted PrivateKeyInfo format and the EncryptedPrivateKeyInfo format with PKCS#5 (v2.0) algorithms.
+
+
+[**Godoc**](http://godoc.org/github.com/youmark/pkcs8)
+
+## Installation
+Supports Go 1.10+. Release v1.1 is the last release supporting Go 1.9.
+
+```text
+go get github.com/youmark/pkcs8
+```
+## Dependencies
+This package depends on the golang.org/x/crypto/pbkdf2 and golang.org/x/crypto/scrypt packages. Use the following commands to retrieve them:
+```text
+go get golang.org/x/crypto/pbkdf2
+go get golang.org/x/crypto/scrypt
+```
+
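+## Example
+A minimal sketch (not from the upstream docs) that round-trips an RSA key through password-encrypted PKCS#8 using `ConvertPrivateKeyToPKCS8` and `ParsePKCS8PrivateKeyRSA`:
+```go
+package main
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"fmt"
+
+	"github.com/youmark/pkcs8"
+)
+
+func main() {
+	key, _ := rsa.GenerateKey(rand.Reader, 2048)
+
+	// Encrypt with a password using the package defaults
+	// (AES-256-CBC with PBKDF2; see DefaultOpts).
+	der, err := pkcs8.ConvertPrivateKeyToPKCS8(key, []byte("password"))
+	if err != nil {
+		panic(err)
+	}
+
+	// Parse it back with the same password.
+	parsed, err := pkcs8.ParsePKCS8PrivateKeyRSA(der, []byte("password"))
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(parsed.Equal(key)) // true
+}
+```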
diff --git a/vendor/github.com/youmark/pkcs8/cipher.go b/vendor/github.com/youmark/pkcs8/cipher.go
new file mode 100644
index 0000000000000000000000000000000000000000..2946c93e89c6861931d4fedbdea9f65153c5d2fc
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/cipher.go
@@ -0,0 +1,60 @@
+package pkcs8
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"encoding/asn1"
+)
+
+type cipherWithBlock struct {
+	oid      asn1.ObjectIdentifier
+	ivSize   int
+	keySize  int
+	newBlock func(key []byte) (cipher.Block, error)
+}
+
+func (c cipherWithBlock) IVSize() int {
+	return c.ivSize
+}
+
+func (c cipherWithBlock) KeySize() int {
+	return c.keySize
+}
+
+func (c cipherWithBlock) OID() asn1.ObjectIdentifier {
+	return c.oid
+}
+
+func (c cipherWithBlock) Encrypt(key, iv, plaintext []byte) ([]byte, error) {
+	block, err := c.newBlock(key)
+	if err != nil {
+		return nil, err
+	}
+	return cbcEncrypt(block, key, iv, plaintext)
+}
+
+func (c cipherWithBlock) Decrypt(key, iv, ciphertext []byte) ([]byte, error) {
+	block, err := c.newBlock(key)
+	if err != nil {
+		return nil, err
+	}
+	return cbcDecrypt(block, key, iv, ciphertext)
+}
+
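+// cbcEncrypt CBC-encrypts plaintext after extending it with PKCS#7 padding,
+// so the block cipher always receives a whole number of blocks.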
+func cbcEncrypt(block cipher.Block, key, iv, plaintext []byte) ([]byte, error) {
+	mode := cipher.NewCBCEncrypter(block, iv)
+	paddingLen := block.BlockSize() - (len(plaintext) % block.BlockSize())
+	ciphertext := make([]byte, len(plaintext)+paddingLen)
+	copy(ciphertext, plaintext)
+	copy(ciphertext[len(plaintext):], bytes.Repeat([]byte{byte(paddingLen)}, paddingLen))
+	mode.CryptBlocks(ciphertext, ciphertext)
+	return ciphertext, nil
+}
+
+func cbcDecrypt(block cipher.Block, key, iv, ciphertext []byte) ([]byte, error) {
+	mode := cipher.NewCBCDecrypter(block, iv)
+	plaintext := make([]byte, len(ciphertext))
+	mode.CryptBlocks(plaintext, ciphertext)
+	// Strip the PKCS#7 padding: the final byte encodes the padding length.
+	if len(plaintext) == 0 {
+		return nil, errors.New("pkcs8: invalid ciphertext length")
+	}
+	padLen := int(plaintext[len(plaintext)-1])
+	if padLen == 0 || padLen > block.BlockSize() || padLen > len(plaintext) {
+		return nil, errors.New("pkcs8: invalid padding")
+	}
+	return plaintext[:len(plaintext)-padLen], nil
+}
diff --git a/vendor/github.com/youmark/pkcs8/cipher_3des.go b/vendor/github.com/youmark/pkcs8/cipher_3des.go
new file mode 100644
index 0000000000000000000000000000000000000000..5629664409dd8f67f72201df6fabca997b9da542
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/cipher_3des.go
@@ -0,0 +1,24 @@
+package pkcs8
+
+import (
+	"crypto/des"
+	"encoding/asn1"
+)
+
+var (
+	oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
+)
+
+func init() {
+	RegisterCipher(oidDESEDE3CBC, func() Cipher {
+		return TripleDESCBC
+	})
+}
+
+// TripleDESCBC is the 168-bit key 3DES cipher in CBC mode.
+var TripleDESCBC = cipherWithBlock{
+	ivSize:   des.BlockSize,
+	keySize:  24,
+	newBlock: des.NewTripleDESCipher,
+	oid:      oidDESEDE3CBC,
+}
diff --git a/vendor/github.com/youmark/pkcs8/cipher_aes.go b/vendor/github.com/youmark/pkcs8/cipher_aes.go
new file mode 100644
index 0000000000000000000000000000000000000000..c0372d1eeba49c4e1f96c468bd5b58ec1f8f2d95
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/cipher_aes.go
@@ -0,0 +1,84 @@
+package pkcs8
+
+import (
+	"crypto/aes"
+	"encoding/asn1"
+)
+
+var (
+	oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}
+	oidAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
+	oidAES192CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 22}
+	oidAES192GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 26}
+	oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
+	oidAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46}
+)
+
+func init() {
+	RegisterCipher(oidAES128CBC, func() Cipher {
+		return AES128CBC
+	})
+	RegisterCipher(oidAES128GCM, func() Cipher {
+		return AES128GCM
+	})
+	RegisterCipher(oidAES192CBC, func() Cipher {
+		return AES192CBC
+	})
+	RegisterCipher(oidAES192GCM, func() Cipher {
+		return AES192GCM
+	})
+	RegisterCipher(oidAES256CBC, func() Cipher {
+		return AES256CBC
+	})
+	RegisterCipher(oidAES256GCM, func() Cipher {
+		return AES256GCM
+	})
+}
+
+// AES128CBC is the 128-bit key AES cipher in CBC mode.
+var AES128CBC = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  16,
+	newBlock: aes.NewCipher,
+	oid:      oidAES128CBC,
+}
+
+// AES128GCM is the 128-bit key AES cipher in GCM mode.
+var AES128GCM = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  16,
+	newBlock: aes.NewCipher,
+	oid:      oidAES128GCM,
+}
+
+// AES192CBC is the 192-bit key AES cipher in CBC mode.
+var AES192CBC = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  24,
+	newBlock: aes.NewCipher,
+	oid:      oidAES192CBC,
+}
+
+// AES192GCM is the 192-bit key AES cipher in GCM mode.
+var AES192GCM = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  24,
+	newBlock: aes.NewCipher,
+	oid:      oidAES192GCM,
+}
+
+// AES256CBC is the 256-bit key AES cipher in CBC mode.
+var AES256CBC = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  32,
+	newBlock: aes.NewCipher,
+	oid:      oidAES256CBC,
+}
+
+// AES256GCM is the 256-bit key AES cipher in GCM mode.
+var AES256GCM = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  32,
+	newBlock: aes.NewCipher,
+	oid:      oidAES256GCM,
+}
diff --git a/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go b/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go
new file mode 100644
index 0000000000000000000000000000000000000000..79697dd82bb997423f987d52f40863a8ea1ba01d
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go
@@ -0,0 +1,91 @@
+package pkcs8
+
+import (
+	"crypto"
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"hash"
+
+	"golang.org/x/crypto/pbkdf2"
+)
+
+var (
+	oidPKCS5PBKDF2    = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12}
+	oidHMACWithSHA1   = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 7}
+	oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9}
+)
+
+func init() {
+	RegisterKDF(oidPKCS5PBKDF2, func() KDFParameters {
+		return new(pbkdf2Params)
+	})
+}
+
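+// newHashFromPRF maps the PBKDF2 PRF AlgorithmIdentifier to a hash
+// constructor. An absent PRF defaults to HMAC-SHA1, the PKCS#5 v2.0 default.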
+func newHashFromPRF(ai pkix.AlgorithmIdentifier) (func() hash.Hash, error) {
+	switch {
+	case len(ai.Algorithm) == 0 || ai.Algorithm.Equal(oidHMACWithSHA1):
+		return sha1.New, nil
+	case ai.Algorithm.Equal(oidHMACWithSHA256):
+		return sha256.New, nil
+	default:
+		return nil, errors.New("pkcs8: unsupported hash function")
+	}
+}
+
+func newPRFParamFromHash(h crypto.Hash) (pkix.AlgorithmIdentifier, error) {
+	switch h {
+	case crypto.SHA1:
+		return pkix.AlgorithmIdentifier{
+			Algorithm:  oidHMACWithSHA1,
+			Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil
+	case crypto.SHA256:
+		return pkix.AlgorithmIdentifier{
+			Algorithm:  oidHMACWithSHA256,
+			Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil
+	}
+	return pkix.AlgorithmIdentifier{}, errors.New("pkcs8: unsupported hash function")
+}
+
+type pbkdf2Params struct {
+	Salt           []byte
+	IterationCount int
+	PRF            pkix.AlgorithmIdentifier `asn1:"optional"`
+}
+
+func (p pbkdf2Params) DeriveKey(password []byte, size int) (key []byte, err error) {
+	h, err := newHashFromPRF(p.PRF)
+	if err != nil {
+		return nil, err
+	}
+	return pbkdf2.Key(password, p.Salt, p.IterationCount, size, h), nil
+}
+
+// PBKDF2Opts contains options for the PBKDF2 key derivation function.
+type PBKDF2Opts struct {
+	SaltSize       int
+	IterationCount int
+	HMACHash       crypto.Hash
+}
+
+func (p PBKDF2Opts) DeriveKey(password, salt []byte, size int) (
+	key []byte, params KDFParameters, err error) {
+
+	key = pbkdf2.Key(password, salt, p.IterationCount, size, p.HMACHash.New)
+	prfParam, err := newPRFParamFromHash(p.HMACHash)
+	if err != nil {
+		return nil, nil, err
+	}
+	params = pbkdf2Params{salt, p.IterationCount, prfParam}
+	return key, params, nil
+}
+
+func (p PBKDF2Opts) GetSaltSize() int {
+	return p.SaltSize
+}
+
+func (p PBKDF2Opts) OID() asn1.ObjectIdentifier {
+	return oidPKCS5PBKDF2
+}
diff --git a/vendor/github.com/youmark/pkcs8/kdf_scrypt.go b/vendor/github.com/youmark/pkcs8/kdf_scrypt.go
new file mode 100644
index 0000000000000000000000000000000000000000..36c4f4f59513727e557a3f0fe4f8926bcb728992
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/kdf_scrypt.go
@@ -0,0 +1,62 @@
+package pkcs8
+
+import (
+	"encoding/asn1"
+
+	"golang.org/x/crypto/scrypt"
+)
+
+var (
+	oidScrypt = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 4, 11}
+)
+
+func init() {
+	RegisterKDF(oidScrypt, func() KDFParameters {
+		return new(scryptParams)
+	})
+}
+
+type scryptParams struct {
+	Salt                     []byte
+	CostParameter            int
+	BlockSize                int
+	ParallelizationParameter int
+}
+
+func (p scryptParams) DeriveKey(password []byte, size int) (key []byte, err error) {
+	return scrypt.Key(password, p.Salt, p.CostParameter, p.BlockSize,
+		p.ParallelizationParameter, size)
+}
+
+// ScryptOpts contains options for the scrypt key derivation function.
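+//
+// A hypothetical sketch of selecting scrypt instead of the PBKDF2 default
+// when encrypting a key (parameter values are illustrative, not
+// recommendations):
+//
+//	opts := &Opts{
+//		Cipher: AES256CBC,
+//		KDFOpts: ScryptOpts{
+//			SaltSize:                 16,
+//			CostParameter:            1 << 15,
+//			BlockSize:                8,
+//			ParallelizationParameter: 1,
+//		},
+//	}
+//	der, err := MarshalPrivateKey(priv, password, opts)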
+type ScryptOpts struct {
+	SaltSize                 int
+	CostParameter            int
+	BlockSize                int
+	ParallelizationParameter int
+}
+
+func (p ScryptOpts) DeriveKey(password, salt []byte, size int) (
+	key []byte, params KDFParameters, err error) {
+
+	key, err = scrypt.Key(password, salt, p.CostParameter, p.BlockSize,
+		p.ParallelizationParameter, size)
+	if err != nil {
+		return nil, nil, err
+	}
+	params = scryptParams{
+		BlockSize:                p.BlockSize,
+		CostParameter:            p.CostParameter,
+		ParallelizationParameter: p.ParallelizationParameter,
+		Salt:                     salt,
+	}
+	return key, params, nil
+}
+
+func (p ScryptOpts) GetSaltSize() int {
+	return p.SaltSize
+}
+
+func (p ScryptOpts) OID() asn1.ObjectIdentifier {
+	return oidScrypt
+}
diff --git a/vendor/github.com/youmark/pkcs8/pkcs8.go b/vendor/github.com/youmark/pkcs8/pkcs8.go
new file mode 100644
index 0000000000000000000000000000000000000000..f27f62752341863ab79960398de507aa31082180
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/pkcs8.go
@@ -0,0 +1,309 @@
+// Package pkcs8 implements functions to parse and convert private keys in PKCS#8 format, as defined in RFC5208 and RFC5958.
+package pkcs8
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+)
+
+// DefaultOpts are the default options for encrypting a key if none are given.
+// The defaults can be changed by the library user.
+var DefaultOpts = &Opts{
+	Cipher: AES256CBC,
+	KDFOpts: PBKDF2Opts{
+		SaltSize:       8,
+		IterationCount: 10000,
+		HMACHash:       crypto.SHA256,
+	},
+}
+
+// KDFOpts contains options for a key derivation function.
+// An implementation of this interface must be specified when encrypting a PKCS#8 key.
+type KDFOpts interface {
+	// DeriveKey derives a key of size bytes from the given password and salt.
+	// It returns the key and the ASN.1-encodable parameters used.
+	DeriveKey(password, salt []byte, size int) (key []byte, params KDFParameters, err error)
+	// GetSaltSize returns the salt size specified.
+	GetSaltSize() int
+	// OID returns the OID of the KDF specified.
+	OID() asn1.ObjectIdentifier
+}
+
+// KDFParameters contains parameters (salt, etc.) for a key derivation function.
+// It must be an ASN.1-decodable structure.
+// An implementation of this interface is created when decoding an encrypted PKCS#8 key.
+type KDFParameters interface {
+	// DeriveKey derives a key of size bytes from the given password.
+	// It uses the salt from the decoded parameters.
+	DeriveKey(password []byte, size int) (key []byte, err error)
+}
+
+var kdfs = make(map[string]func() KDFParameters)
+
+// RegisterKDF registers a function that returns a new instance of the given KDF
+// parameters. This allows the library to support client-provided KDFs.
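+//
+// For example, a custom KDF (hypothetical names) could be registered as:
+//
+//	RegisterKDF(myKDFOID, func() KDFParameters { return new(myKDFParams) })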
+func RegisterKDF(oid asn1.ObjectIdentifier, params func() KDFParameters) {
+	kdfs[oid.String()] = params
+}
+
+// Cipher represents a cipher for encrypting the key material.
+type Cipher interface {
+	// IVSize returns the IV size of the cipher, in bytes.
+	IVSize() int
+	// KeySize returns the key size of the cipher, in bytes.
+	KeySize() int
+	// Encrypt encrypts the key material.
+	Encrypt(key, iv, plaintext []byte) ([]byte, error)
+	// Decrypt decrypts the key material.
+	Decrypt(key, iv, ciphertext []byte) ([]byte, error)
+	// OID returns the OID of the cipher specified.
+	OID() asn1.ObjectIdentifier
+}
+
+var ciphers = make(map[string]func() Cipher)
+
+// RegisterCipher registers a function that returns a new instance of the given
+// cipher. This allows the library to support client-provided ciphers.
+func RegisterCipher(oid asn1.ObjectIdentifier, cipher func() Cipher) {
+	ciphers[oid.String()] = cipher
+}
+
+// Opts contains options for encrypting a PKCS#8 key.
+type Opts struct {
+	Cipher  Cipher
+	KDFOpts KDFOpts
+}
+
+// oidPBES2 identifies the PBES2 encryption scheme (PKCS#5 v2.0) used for encrypted PKCS#8 keys.
+var (
+	oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13}
+)
+
+type encryptedPrivateKeyInfo struct {
+	EncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedData       []byte
+}
+
+type pbes2Params struct {
+	KeyDerivationFunc pkix.AlgorithmIdentifier
+	EncryptionScheme  pkix.AlgorithmIdentifier
+}
+
+type privateKeyInfo struct {
+	Version             int
+	PrivateKeyAlgorithm pkix.AlgorithmIdentifier
+	PrivateKey          []byte
+}
+
+func parseKeyDerivationFunc(keyDerivationFunc pkix.AlgorithmIdentifier) (KDFParameters, error) {
+	oid := keyDerivationFunc.Algorithm.String()
+	newParams, ok := kdfs[oid]
+	if !ok {
+		return nil, fmt.Errorf("pkcs8: unsupported KDF (OID: %s)", oid)
+	}
+	params := newParams()
+	_, err := asn1.Unmarshal(keyDerivationFunc.Parameters.FullBytes, params)
+	if err != nil {
+		return nil, errors.New("pkcs8: invalid KDF parameters")
+	}
+	return params, nil
+}
+
+func parseEncryptionScheme(encryptionScheme pkix.AlgorithmIdentifier) (Cipher, []byte, error) {
+	oid := encryptionScheme.Algorithm.String()
+	newCipher, ok := ciphers[oid]
+	if !ok {
+		return nil, nil, fmt.Errorf("pkcs8: unsupported cipher (OID: %s)", oid)
+	}
+	cipher := newCipher()
+	var iv []byte
+	if _, err := asn1.Unmarshal(encryptionScheme.Parameters.FullBytes, &iv); err != nil {
+		return nil, nil, errors.New("pkcs8: invalid cipher parameters")
+	}
+	return cipher, iv, nil
+}
+
+// ParsePrivateKey parses a DER-encoded PKCS#8 private key.
+// Password can be nil.
+// Apart from also returning the KDF parameters used, it is equivalent to ParsePKCS8PrivateKey.
+func ParsePrivateKey(der []byte, password []byte) (interface{}, KDFParameters, error) {
+	// No password provided, assume the private key is unencrypted
+	if len(password) == 0 {
+		privateKey, err := x509.ParsePKCS8PrivateKey(der)
+		return privateKey, nil, err
+	}
+
+	// Use the password provided to decrypt the private key
+	var privKey encryptedPrivateKeyInfo
+	if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+		return nil, nil, errors.New("pkcs8: only PKCS #5 v2.0 supported")
+	}
+
+	if !privKey.EncryptionAlgorithm.Algorithm.Equal(oidPBES2) {
+		return nil, nil, errors.New("pkcs8: only PBES2 supported")
+	}
+
+	var params pbes2Params
+	if _, err := asn1.Unmarshal(privKey.EncryptionAlgorithm.Parameters.FullBytes, &params); err != nil {
+		return nil, nil, errors.New("pkcs8: invalid PBES2 parameters")
+	}
+
+	cipher, iv, err := parseEncryptionScheme(params.EncryptionScheme)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	kdfParams, err := parseKeyDerivationFunc(params.KeyDerivationFunc)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	keySize := cipher.KeySize()
+	symkey, err := kdfParams.DeriveKey(password, keySize)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	encryptedKey := privKey.EncryptedData
+	decryptedKey, err := cipher.Decrypt(symkey, iv, encryptedKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	key, err := x509.ParsePKCS8PrivateKey(decryptedKey)
+	if err != nil {
+		return nil, nil, errors.New("pkcs8: incorrect password")
+	}
+	return key, kdfParams, nil
+}
+
+// MarshalPrivateKey encodes a private key into DER-encoded PKCS#8 with the given options.
+// Password can be nil.
+func MarshalPrivateKey(priv interface{}, password []byte, opts *Opts) ([]byte, error) {
+	if len(password) == 0 {
+		return x509.MarshalPKCS8PrivateKey(priv)
+	}
+
+	if opts == nil {
+		opts = DefaultOpts
+	}
+
+	// Convert private key into PKCS8 format
+	pkey, err := x509.MarshalPKCS8PrivateKey(priv)
+	if err != nil {
+		return nil, err
+	}
+
+	encAlg := opts.Cipher
+	salt := make([]byte, opts.KDFOpts.GetSaltSize())
+	_, err = rand.Read(salt)
+	if err != nil {
+		return nil, err
+	}
+	iv := make([]byte, encAlg.IVSize())
+	_, err = rand.Read(iv)
+	if err != nil {
+		return nil, err
+	}
+	key, kdfParams, err := opts.KDFOpts.DeriveKey(password, salt, encAlg.KeySize())
+	if err != nil {
+		return nil, err
+	}
+
+	encryptedKey, err := encAlg.Encrypt(key, iv, pkey)
+	if err != nil {
+		return nil, err
+	}
+
+	marshalledParams, err := asn1.Marshal(kdfParams)
+	if err != nil {
+		return nil, err
+	}
+	keyDerivationFunc := pkix.AlgorithmIdentifier{
+		Algorithm:  opts.KDFOpts.OID(),
+		Parameters: asn1.RawValue{FullBytes: marshalledParams},
+	}
+	marshalledIV, err := asn1.Marshal(iv)
+	if err != nil {
+		return nil, err
+	}
+	encryptionScheme := pkix.AlgorithmIdentifier{
+		Algorithm:  encAlg.OID(),
+		Parameters: asn1.RawValue{FullBytes: marshalledIV},
+	}
+
+	encryptionAlgorithmParams := pbes2Params{
+		EncryptionScheme:  encryptionScheme,
+		KeyDerivationFunc: keyDerivationFunc,
+	}
+	marshalledEncryptionAlgorithmParams, err := asn1.Marshal(encryptionAlgorithmParams)
+	if err != nil {
+		return nil, err
+	}
+	encryptionAlgorithm := pkix.AlgorithmIdentifier{
+		Algorithm:  oidPBES2,
+		Parameters: asn1.RawValue{FullBytes: marshalledEncryptionAlgorithmParams},
+	}
+
+	encryptedPkey := encryptedPrivateKeyInfo{
+		EncryptionAlgorithm: encryptionAlgorithm,
+		EncryptedData:       encryptedKey,
+	}
+
+	return asn1.Marshal(encryptedPkey)
+}
+
+// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter.
+func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) {
+	var password []byte
+	if len(v) > 0 {
+		password = v[0]
+	}
+	privateKey, _, err := ParsePrivateKey(der, password)
+	return privateKey, err
+}
+
+// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter.
+func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) {
+	key, err := ParsePKCS8PrivateKey(der, v...)
+	if err != nil {
+		return nil, err
+	}
+	typedKey, ok := key.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("key block is not of type RSA")
+	}
+	return typedKey, nil
+}
+
+// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter.
+func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) {
+	key, err := ParsePKCS8PrivateKey(der, v...)
+	if err != nil {
+		return nil, err
+	}
+	typedKey, ok := key.(*ecdsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("key block is not of type ECDSA")
+	}
+	return typedKey, nil
+}
+
+// ConvertPrivateKeyToPKCS8 converts the private key into PKCS#8 format.
+// To encrypt the private key, the password of []byte type should be provided as the second parameter.
+//
+// The only supported key types are RSA and ECDSA (*rsa.PrivateKey or *ecdsa.PrivateKey for priv)
+func ConvertPrivateKeyToPKCS8(priv interface{}, v ...[]byte) ([]byte, error) {
+	var password []byte
+	if len(v) > 0 {
+		password = v[0]
+	}
+	return MarshalPrivateKey(priv, password, nil)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go
new file mode 100644
index 0000000000000000000000000000000000000000..a0d81858261161005d82cabf0671bbfc415ec118
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go
@@ -0,0 +1,50 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bson // import "go.mongodb.org/mongo-driver/bson"
+
+import (
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// Zeroer allows custom struct types to report whether they are the zero
+// value. Types that do not implement Zeroer, or whose IsZero method returns
+// false, are considered to be non-zero.
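+//
+// For example (a sketch), a struct can report itself as zero when its
+// identifier is unset:
+//
+//	type Product struct{ ID string }
+//
+//	func (p Product) IsZero() bool { return p.ID == "" }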
+type Zeroer interface {
+	IsZero() bool
+}
+
+// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters,
+// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead.
+//
+// A D should not be constructed with duplicate key names, as that can cause undefined server behavior.
+//
+// Example usage:
+//
+//	bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+type D = primitive.D
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E = primitive.E
+
+// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not
+// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be
+// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead.
+//
+// Example usage:
+//
+//	bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+type M = primitive.M
+
+// An A is an ordered representation of a BSON array.
+//
+// Example usage:
+//
+//	bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}}
+type A = primitive.A
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..652aa48b8536612451c9d32a89d88265e948fbcd
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// ArrayCodec is the Codec used for bsoncore.Array values.
+//
+// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0.
+type ArrayCodec struct{}
+
+var defaultArrayCodec = NewArrayCodec()
+
+// NewArrayCodec returns an ArrayCodec.
+//
+// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See
+// [ArrayCodec] for more details.
+func NewArrayCodec() *ArrayCodec {
+	return &ArrayCodec{}
+}
+
+// EncodeValue is the ValueEncoder for bsoncore.Array values.
+func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCoreArray {
+		return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
+	}
+
+	arr := val.Interface().(bsoncore.Array)
+	return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr)
+}
+
+// DecodeValue is the ValueDecoder for bsoncore.Array values.
+func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tCoreArray {
+		return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0)
+	arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr)
+	val.Set(reflect.ValueOf(arr))
+	return err
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
new file mode 100644
index 0000000000000000000000000000000000000000..0693bd432feeeb149dda8ed78fe98797abd624f6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
@@ -0,0 +1,382 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec"
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+var (
+	emptyValue = reflect.Value{}
+)
+
+// Marshaler is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead.
+type Marshaler interface {
+	MarshalBSON() ([]byte, error)
+}
+
+// ValueMarshaler is an interface implemented by types that can marshal
+// themselves into a BSON value as bytes. The type must be the valid type for
+// the bytes returned. The bytes and byte type together must be valid if the
+// error is nil.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead.
+type ValueMarshaler interface {
+	MarshalBSONValue() (bsontype.Type, []byte, error)
+}
+
+// Unmarshaler is an interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The BSON bytes can be assumed to be
+// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
+// after returning.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead.
+type Unmarshaler interface {
+	UnmarshalBSON([]byte) error
+}
+
+// ValueUnmarshaler is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead.
+type ValueUnmarshaler interface {
+	UnmarshalBSONValue(bsontype.Type, []byte) error
+}
+
+// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be
+// encoded by the ValueEncoder.
+type ValueEncoderError struct {
+	Name     string
+	Types    []reflect.Type
+	Kinds    []reflect.Kind
+	Received reflect.Value
+}
+
+func (vee ValueEncoderError) Error() string {
+	typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds))
+	for _, t := range vee.Types {
+		typeKinds = append(typeKinds, t.String())
+	}
+	for _, k := range vee.Kinds {
+		if k == reflect.Map {
+			typeKinds = append(typeKinds, "map[string]*")
+			continue
+		}
+		typeKinds = append(typeKinds, k.String())
+	}
+	received := vee.Received.Kind().String()
+	if vee.Received.IsValid() {
+		received = vee.Received.Type().String()
+	}
+	return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be
+// decoded by the ValueDecoder.
+type ValueDecoderError struct {
+	Name     string
+	Types    []reflect.Type
+	Kinds    []reflect.Kind
+	Received reflect.Value
+}
+
+func (vde ValueDecoderError) Error() string {
+	typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds))
+	for _, t := range vde.Types {
+		typeKinds = append(typeKinds, t.String())
+	}
+	for _, k := range vde.Kinds {
+		if k == reflect.Map {
+			typeKinds = append(typeKinds, "map[string]*")
+			continue
+		}
+		typeKinds = append(typeKinds, k.String())
+	}
+	received := vde.Received.Kind().String()
+	if vde.Received.IsValid() {
+		received = vde.Received.Type().String()
+	}
+	return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// EncodeContext is the contextual information required for a Codec to encode a
+// value.
+type EncodeContext struct {
+	*Registry
+
+	// MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64,
+	// uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits)
+	// that can represent the integer value.
+	//
+	// Deprecated: Use bson.Encoder.IntMinSize instead.
+	MinSize bool
+
+	errorOnInlineDuplicates bool
+	stringifyMapKeysWithFmt bool
+	nilMapAsEmpty           bool
+	nilSliceAsEmpty         bool
+	nilByteSliceAsEmpty     bool
+	omitZeroStruct          bool
+	useJSONStructTags       bool
+}
+
+// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in
+// the marshaled BSON when the "inline" struct tag option is set.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
+func (ec *EncodeContext) ErrorOnInlineDuplicates() {
+	ec.errorOnInlineDuplicates = true
+}
+
+// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name
+// strings using fmt.Sprintf() instead of the default string conversion logic.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
+func (ec *EncodeContext) StringifyMapKeysWithFmt() {
+	ec.stringifyMapKeysWithFmt = true
+}
+
+// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON
+// null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
+func (ec *EncodeContext) NilMapAsEmpty() {
+	ec.nilMapAsEmpty = true
+}
+
+// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON
+// null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
+func (ec *EncodeContext) NilSliceAsEmpty() {
+	ec.nilSliceAsEmpty = true
+}
+
+// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values
+// instead of BSON null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
+func (ec *EncodeContext) NilByteSliceAsEmpty() {
+	ec.nilByteSliceAsEmpty = true
+}
+
+// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{})
+// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set.
+//
+// Note that the Encoder only examines exported struct fields when determining if a struct is the
+// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
+func (ec *EncodeContext) OmitZeroStruct() {
+	ec.omitZeroStruct = true
+}
+
+// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead.
+func (ec *EncodeContext) UseJSONStructTags() {
+	ec.useJSONStructTags = true
+}
+
+// DecodeContext is the contextual information required for a Codec to decode a
+// value.
+type DecodeContext struct {
+	*Registry
+
+	// Truncate, if true, instructs decoders to truncate the fractional part of BSON "double"
+	// values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64,
+	// uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to
+	// BSON "decimal128" values.
+	//
+	// Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead.
+	Truncate bool
+
+	// Ancestor is the type of a containing document. This is mainly used to determine what type
+	// should be used when decoding an embedded document into an empty interface. For example, if
+	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
+	// will be decoded into a bson.M.
+	//
+	// Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead.
+	Ancestor reflect.Type
+
+	// defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
+	// usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is
+	// set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshaling will result in an
+	// error. DocumentType overrides the Ancestor field.
+	defaultDocumentType reflect.Type
+
+	binaryAsSlice     bool
+	useJSONStructTags bool
+	useLocalTimeZone  bool
+	zeroMaps          bool
+	zeroStructs       bool
+}
+
+// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
+// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (dc *DecodeContext) BinaryAsSlice() {
+	dc.binaryAsSlice = true
+}
+
+// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
+func (dc *DecodeContext) UseJSONStructTags() {
+	dc.useJSONStructTags = true
+}
+
+// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
+// of the UTC timezone.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
+func (dc *DecodeContext) UseLocalTimeZone() {
+	dc.useLocalTimeZone = true
+}
+
+// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
+// passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
+func (dc *DecodeContext) ZeroMaps() {
+	dc.zeroMaps = true
+}
+
+// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
+// value passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (dc *DecodeContext) ZeroStructs() {
+	dc.zeroStructs = true
+}
+
+// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead.
+func (dc *DecodeContext) DefaultDocumentM() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
+}
+
+// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead.
+func (dc *DecodeContext) DefaultDocumentD() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
+}
+
+// ValueCodec is an interface for encoding and decoding a reflect.Value.
+//
+// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead.
+type ValueCodec interface {
+	ValueEncoder
+	ValueDecoder
+}
+
+// ValueEncoder is the interface implemented by types that can handle the encoding of a value.
+type ValueEncoder interface {
+	EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+}
+
+// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueEncoder.
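+//
+// For example (a sketch), an encoder that always writes BSON null:
+//
+//	var enc ValueEncoder = ValueEncoderFunc(func(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+//		return vw.WriteNull()
+//	})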
+type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+
+// EncodeValue implements the ValueEncoder interface.
+func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	return fn(ec, vw, val)
+}
+
+// ValueDecoder is the interface implemented by types that can handle the decoding of a value.
+type ValueDecoder interface {
+	DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+}
+
+// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueDecoder.
+type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+
+// DecodeValue implements the ValueDecoder interface.
+func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	return fn(dc, vr, val)
+}
+
+// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.
+type typeDecoder interface {
+	decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+}
+
+// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder.
+type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+
+func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	return fn(dc, vr, t)
+}
+
+// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder.
+type decodeAdapter struct {
+	ValueDecoderFunc
+	typeDecoderFunc
+}
+
+var _ ValueDecoder = decodeAdapter{}
+var _ typeDecoder = decodeAdapter{}
+
+// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type
+// t and calls decoder.DecodeValue on it.
+func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	td, _ := decoder.(typeDecoder)
+	return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true)
+}
+
+func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) {
+	if td != nil {
+		val, err := td.decodeType(dc, vr, t)
+		if err == nil && convert && val.Type() != t {
+			// This conversion step is necessary for slices and maps. If a user declares variables like:
+			//
+			// type myBool bool
+			// var m map[string]myBool
+			//
+			// and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present
+			// because we'll try to assign a value of type bool to one of type myBool.
+			val = val.Convert(t)
+		}
+		return val, err
+	}
+
+	val := reflect.New(t).Elem()
+	err := vd.DecodeValue(dc, vr, val)
+	return val, err
+}
+
+// CodecZeroer is the interface implemented by Codecs that can also determine if
+// a value of the type that would be encoded is zero.
+//
+// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver
+// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to
+// nil instead.
+type CodecZeroer interface {
+	IsTypeZero(interface{}) bool
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..0134b5a94bec3dc1abd81b7cb2858c632213c517
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
@@ -0,0 +1,138 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ByteSliceCodec is the Codec used for []byte values.
+//
+// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver
+// 2.0. To configure the byte slice encode and decode behavior, use the
+// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice
+// encode and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to encode nil byte slices as empty
+// BSON binary values, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//	    NilByteSliceAsEmpty: true,
+//	})
+//
+// See the deprecation notice for each field in ByteSliceCodec for the
+// corresponding settings.
+type ByteSliceCodec struct {
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values
+	// instead of BSON null.
+	//
+	// Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty
+	// instead.
+	EncodeNilAsEmpty bool
+}
+
+var (
+	defaultByteSliceCodec = NewByteSliceCodec()
+
+	// Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be
+	// used by collection type decoders (e.g. map, slice, etc.) to set individual values in a
+	// collection.
+	_ typeDecoder = defaultByteSliceCodec
+)
+
+// NewByteSliceCodec returns a ByteSliceCodec with options opts.
+//
+// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See
+// [ByteSliceCodec] for more details.
+func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec {
+	byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...)
+	codec := ByteSliceCodec{}
+	if byteSliceOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for []byte.
+func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
+
+func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tByteSlice {
+		return emptyValue, ValueDecoderError{
+			Name:     "ByteSliceDecodeValue",
+			Types:    []reflect.Type{tByteSlice},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var data []byte
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.String:
+		str, err := vr.ReadString()
+		if err != nil {
+			return emptyValue, err
+		}
+		data = []byte(str)
+	case bsontype.Symbol:
+		sym, err := vr.ReadSymbol()
+		if err != nil {
+			return emptyValue, err
+		}
+		data = []byte(sym)
+	case bsontype.Binary:
+		var subtype byte
+		data, subtype, err = vr.ReadBinary()
+		if err != nil {
+			return emptyValue, err
+		}
+		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+			return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"}
+		}
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(data), nil
+}
+
+// DecodeValue is the ValueDecoder for []byte.
+func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tByteSlice {
+		return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+
+	elem, err := bsc.decodeType(dc, vr, tByteSlice)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..844b50299f2fed9e66683727511fd282d1cb6b5c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go
@@ -0,0 +1,166 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"sync"
+	"sync/atomic"
+)
+
+// Runtime check that the kind encoder and decoder caches can store any valid
+// reflect.Kind constant.
+func init() {
+	if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" {
+		panic("The capacity of kindEncoderCache is too small.\n" +
+			"This is due to a new type being added to reflect.Kind.")
+	}
+}
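+
+// Note (illustrative): len(entries) is reflect.UnsafePointer+1 == 27, and an
+// out-of-range reflect.Kind stringifies as "kind27". If a future Go release
+// defines a 28th kind, reflect.Kind(27) would gain a real name, the comparison
+// above would fail, and the panic signals that the fixed-size caches below
+// must grow.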
+
+// statically assert array size
+var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer]
+var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer]
+
+type typeEncoderCache struct {
+	cache sync.Map // map[reflect.Type]ValueEncoder
+}
+
+func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) {
+	c.cache.Store(rt, enc)
+}
+
+func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) {
+	if v, _ := c.cache.Load(rt); v != nil {
+		return v.(ValueEncoder), true
+	}
+	return nil, false
+}
+
+func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder {
+	if v, loaded := c.cache.LoadOrStore(rt, enc); loaded {
+		enc = v.(ValueEncoder)
+	}
+	return enc
+}
+
+func (c *typeEncoderCache) Clone() *typeEncoderCache {
+	cc := new(typeEncoderCache)
+	c.cache.Range(func(k, v interface{}) bool {
+		if k != nil && v != nil {
+			cc.cache.Store(k, v)
+		}
+		return true
+	})
+	return cc
+}
+
+type typeDecoderCache struct {
+	cache sync.Map // map[reflect.Type]ValueDecoder
+}
+
+func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) {
+	c.cache.Store(rt, dec)
+}
+
+func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) {
+	if v, _ := c.cache.Load(rt); v != nil {
+		return v.(ValueDecoder), true
+	}
+	return nil, false
+}
+
+func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder {
+	if v, loaded := c.cache.LoadOrStore(rt, dec); loaded {
+		dec = v.(ValueDecoder)
+	}
+	return dec
+}
+
+func (c *typeDecoderCache) Clone() *typeDecoderCache {
+	cc := new(typeDecoderCache)
+	c.cache.Range(func(k, v interface{}) bool {
+		if k != nil && v != nil {
+			cc.cache.Store(k, v)
+		}
+		return true
+	})
+	return cc
+}
+
+// atomic.Value requires that all calls to Store() have the same concrete type
+// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type
+// is always the same (since different concrete types may implement the
+// ValueEncoder interface).
+type kindEncoderCacheEntry struct {
+	enc ValueEncoder
+}
+
+type kindEncoderCache struct {
+	entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry
+}
+
+func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) {
+	if enc != nil && rt < reflect.Kind(len(c.entries)) {
+		c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc})
+	}
+}
+
+func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) {
+	if rt < reflect.Kind(len(c.entries)) {
+		if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok {
+			return ent.enc, ent.enc != nil
+		}
+	}
+	return nil, false
+}
+
+func (c *kindEncoderCache) Clone() *kindEncoderCache {
+	cc := new(kindEncoderCache)
+	for i, v := range c.entries {
+		if val := v.Load(); val != nil {
+			cc.entries[i].Store(val)
+		}
+	}
+	return cc
+}
+
+// atomic.Value requires that all calls to Store() have the same concrete type
+// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type
+// is always the same (since different concrete types may implement the
+// ValueDecoder interface).
+type kindDecoderCacheEntry struct {
+	dec ValueDecoder
+}
+
+type kindDecoderCache struct {
+	entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry
+}
+
+func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) {
+	if rt < reflect.Kind(len(c.entries)) {
+		c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec})
+	}
+}
+
+func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) {
+	if rt < reflect.Kind(len(c.entries)) {
+		if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok {
+			return ent.dec, ent.dec != nil
+		}
+	}
+	return nil, false
+}
+
+func (c *kindDecoderCache) Clone() *kindDecoderCache {
+	cc := new(kindDecoderCache)
+	for i, v := range c.entries {
+		if val := v.Load(); val != nil {
+			cc.entries[i].Store(val)
+		}
+	}
+	return cc
+}
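+
+// Note (illustrative): the Clone methods above take shallow snapshots of the
+// stored codecs, so a clone starts with the same entries but does not observe
+// later Store calls on the original, letting a cache copied for a new registry
+// evolve independently.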
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb8180f25cccb8ae069fda5ff9e37b8659764104
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+)
+
+// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder.
+type condAddrEncoder struct {
+	canAddrEnc ValueEncoder
+	elseEnc    ValueEncoder
+}
+
+var _ ValueEncoder = (*condAddrEncoder)(nil)
+
+// newCondAddrEncoder returns a condAddrEncoder.
+func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder {
+	encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+	return &encoder
+}
+
+// EncodeValue is the ValueEncoderFunc for a value that may be addressable.
+func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.CanAddr() {
+		return cae.canAddrEnc.EncodeValue(ec, vw, val)
+	}
+	if cae.elseEnc != nil {
+		return cae.elseEnc.EncodeValue(ec, vw, val)
+	}
+	return ErrNoEncoder{Type: val.Type()}
+}
+
+// condAddrDecoder is the decoder used when a pointer to the value has a decoder.
+type condAddrDecoder struct {
+	canAddrDec ValueDecoder
+	elseDec    ValueDecoder
+}
+
+var _ ValueDecoder = (*condAddrDecoder)(nil)
+
+// newCondAddrDecoder returns a condAddrDecoder.
+func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder {
+	decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec}
+	return &decoder
+}
+
+// DecodeValue is the ValueDecoderFunc for a value that may be addressable.
+func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if val.CanAddr() {
+		return cad.canAddrDec.DecodeValue(dc, vr, val)
+	}
+	if cad.elseDec != nil {
+		return cad.elseDec.DecodeValue(dc, vr, val)
+	}
+	return ErrNoDecoder{Type: val.Type()}
+}
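+
+// Note (illustrative): these conditional codecs typically come into play when
+// only *T, not T, implements an encoding or decoding interface. Addressable
+// values take the canAddr branch, where a pointer can be formed; everything
+// else falls through to the else codec or to ErrNoEncoder/ErrNoDecoder.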
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc4a7b1dbf587eb81d1a2200e35598b5b2d8ebdf
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
@@ -0,0 +1,1807 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var (
+	defaultValueDecoders DefaultValueDecoders
+	errCannotTruncate    = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled")
+)
+
+type decodeBinaryError struct {
+	subtype  byte
+	typeName string
+}
+
+func (d decodeBinaryError) Error() string {
+	return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype)
+}
+
+func newDefaultStructCodec() *StructCodec {
+	codec, err := NewStructCodec(DefaultStructTagParser)
+	if err != nil {
+		// This function is called from the codec registration path, so errors can't be propagated. If there's an error
+		// constructing the StructCodec, we panic to avoid losing it.
+		panic(fmt.Errorf("error creating default StructCodec: %w", err))
+	}
+	return codec
+}
+
+// DefaultValueDecoders is a namespace type for the default ValueDecoders used
+// when creating a registry.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+type DefaultValueDecoders struct{}
+
+// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
+// the provided RegistryBuilder.
+//
+// There is no support for decoding map[string]interface{} because there is no decoder for
+// interface{}, so users must either register this decoder themselves or use the
+// EmptyInterfaceDecoder available in the bson package.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterDefaultDecoders must not be nil"))
+	}
+
+	intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType}
+	floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType}
+
+	rb.
+		RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)).
+		RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}).
+		RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}).
+		RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}).
+		RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}).
+		RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}).
+		RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}).
+		RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}).
+		RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}).
+		RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}).
+		RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}).
+		RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}).
+		RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec).
+		RegisterTypeDecoder(tTime, defaultTimeCodec).
+		RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec).
+		RegisterTypeDecoder(tCoreArray, defaultArrayCodec).
+		RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}).
+		RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}).
+		RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}).
+		RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}).
+		RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)).
+		RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}).
+		RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}).
+		RegisterDefaultDecoder(reflect.Int, intDecoder).
+		RegisterDefaultDecoder(reflect.Int8, intDecoder).
+		RegisterDefaultDecoder(reflect.Int16, intDecoder).
+		RegisterDefaultDecoder(reflect.Int32, intDecoder).
+		RegisterDefaultDecoder(reflect.Int64, intDecoder).
+		RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec).
+		RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec).
+		RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec).
+		RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec).
+		RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec).
+		RegisterDefaultDecoder(reflect.Float32, floatDecoder).
+		RegisterDefaultDecoder(reflect.Float64, floatDecoder).
+		RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)).
+		RegisterDefaultDecoder(reflect.Map, defaultMapCodec).
+		RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec).
+		RegisterDefaultDecoder(reflect.String, defaultStringCodec).
+		RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()).
+		RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()).
+		RegisterTypeMapEntry(bsontype.Double, tFloat64).
+		RegisterTypeMapEntry(bsontype.String, tString).
+		RegisterTypeMapEntry(bsontype.Array, tA).
+		RegisterTypeMapEntry(bsontype.Binary, tBinary).
+		RegisterTypeMapEntry(bsontype.Undefined, tUndefined).
+		RegisterTypeMapEntry(bsontype.ObjectID, tOID).
+		RegisterTypeMapEntry(bsontype.Boolean, tBool).
+		RegisterTypeMapEntry(bsontype.DateTime, tDateTime).
+		RegisterTypeMapEntry(bsontype.Regex, tRegex).
+		RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer).
+		RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript).
+		RegisterTypeMapEntry(bsontype.Symbol, tSymbol).
+		RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope).
+		RegisterTypeMapEntry(bsontype.Int32, tInt32).
+		RegisterTypeMapEntry(bsontype.Int64, tInt64).
+		RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp).
+		RegisterTypeMapEntry(bsontype.Decimal128, tDecimal).
+		RegisterTypeMapEntry(bsontype.MinKey, tMinKey).
+		RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey).
+		RegisterTypeMapEntry(bsontype.Type(0), tD).
+		RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD).
+		RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)).
+		RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue))
+}
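+
+// Note (illustrative sketch, not part of this file): wiring these decoders into
+// a registry by hand looks roughly like
+//
+//	rb := NewRegistryBuilder()
+//	DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+//	registry := rb.Build()
+//
+// although bson.NewRegistry is the supported way to obtain a registry with all
+// defaults registered.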
+
+// DDecodeValue is the ValueDecoderFunc for primitive.D instances.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || !val.CanSet() || val.Type() != tD {
+		return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+		dc.Ancestor = tD
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	default:
+		return fmt.Errorf("cannot decode %v into a primitive.D", vrType)
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	decoder, err := dc.LookupDecoder(tEmpty)
+	if err != nil {
+		return err
+	}
+	tEmptyTypeDecoder, _ := decoder.(typeDecoder)
+
+	// Use the elements in the provided value if it's non-nil. Otherwise, allocate a new D instance.
+	var elems primitive.D
+	if !val.IsNil() {
+		val.SetLen(0)
+		elems = val.Interface().(primitive.D)
+	} else {
+		elems = make(primitive.D, 0)
+	}
+
+	for {
+		key, elemVr, err := dr.ReadElement()
+		if errors.Is(err, bsonrw.ErrEOD) {
+			break
+		} else if err != nil {
+			return err
+		}
+
+		// Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty.
+		elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false)
+		if err != nil {
+			return err
+		}
+
+		elems = append(elems, primitive.E{Key: key, Value: elem.Interface()})
+	}
+
+	val.Set(reflect.ValueOf(elems))
+	return nil
+}
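+
+// Note (illustrative sketch): decoding into primitive.D preserves key order,
+// which a Go map cannot. Assuming data holds the marshaled document {"a": 1}:
+//
+//	var d primitive.D
+//	err := bson.Unmarshal(data, &d)
+//	// on success, d == primitive.D{{Key: "a", Value: int32(1)}}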
+
+func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t.Kind() != reflect.Bool {
+		return emptyValue, ValueDecoderError{
+			Name:     "BooleanDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Bool},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var b bool
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return emptyValue, err
+		}
+		b = (i32 != 0)
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+		b = (i64 != 0)
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return emptyValue, err
+		}
+		b = (f64 != 0)
+	case bsontype.Boolean:
+		b, err = vr.ReadBoolean()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(b), nil
+}
+
+// BooleanDecodeValue is the ValueDecoderFunc for bool types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool {
+		return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+	}
+
+	elem, err := dvd.booleanDecodeType(dctx, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.SetBool(elem.Bool())
+	return nil
+}
+
+func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	var i64 int64
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return emptyValue, err
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		i64, err = vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return emptyValue, err
+		}
+		if !dc.Truncate && math.Floor(f64) != f64 {
+			return emptyValue, errCannotTruncate
+		}
+		if f64 > float64(math.MaxInt64) {
+			return emptyValue, fmt.Errorf("%g overflows int64", f64)
+		}
+		i64 = int64(f64)
+	case bsontype.Boolean:
+		b, err := vr.ReadBoolean()
+		if err != nil {
+			return emptyValue, err
+		}
+		if b {
+			i64 = 1
+		}
+	case bsontype.Null:
+		if err = vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err = vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType)
+	}
+
+	switch t.Kind() {
+	case reflect.Int8:
+		if i64 < math.MinInt8 || i64 > math.MaxInt8 {
+			return emptyValue, fmt.Errorf("%d overflows int8", i64)
+		}
+
+		return reflect.ValueOf(int8(i64)), nil
+	case reflect.Int16:
+		if i64 < math.MinInt16 || i64 > math.MaxInt16 {
+			return emptyValue, fmt.Errorf("%d overflows int16", i64)
+		}
+
+		return reflect.ValueOf(int16(i64)), nil
+	case reflect.Int32:
+		if i64 < math.MinInt32 || i64 > math.MaxInt32 {
+			return emptyValue, fmt.Errorf("%d overflows int32", i64)
+		}
+
+		return reflect.ValueOf(int32(i64)), nil
+	case reflect.Int64:
+		return reflect.ValueOf(i64), nil
+	case reflect.Int:
+		if i64 > math.MaxInt { // Can we fit this inside an int?
+			return emptyValue, fmt.Errorf("%d overflows int", i64)
+		}
+
+		return reflect.ValueOf(int(i64)), nil
+	default:
+		return emptyValue, ValueDecoderError{
+			Name:     "IntDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+			Received: reflect.Zero(t),
+		}
+	}
+}
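+
+// Note (illustrative): with dc.Truncate unset, only doubles that are already
+// whole numbers decode into integer kinds: 3.0 succeeds, 3.14 returns
+// errCannotTruncate, and any double above math.MaxInt64 overflows regardless.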
+
+// IntDecodeValue is the ValueDecoderFunc for int types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "IntDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+			Received: val,
+		}
+	}
+
+	elem, err := dvd.intDecodeType(dc, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.SetInt(elem.Int())
+	return nil
+}
+
+// UintDecodeValue is the ValueDecoderFunc for uint types.
+//
+// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var i64 int64
+	var err error
+	switch vr.Type() {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		i64, err = vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		if !dc.Truncate && math.Floor(f64) != f64 {
+			return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled")
+		}
+		if f64 > float64(math.MaxInt64) {
+			return fmt.Errorf("%g overflows int64", f64)
+		}
+		i64 = int64(f64)
+	case bsontype.Boolean:
+		b, err := vr.ReadBoolean()
+		if err != nil {
+			return err
+		}
+		if b {
+			i64 = 1
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
+	}
+
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: val,
+		}
+	}
+
+	switch val.Kind() {
+	case reflect.Uint8:
+		if i64 < 0 || i64 > math.MaxUint8 {
+			return fmt.Errorf("%d overflows uint8", i64)
+		}
+	case reflect.Uint16:
+		if i64 < 0 || i64 > math.MaxUint16 {
+			return fmt.Errorf("%d overflows uint16", i64)
+		}
+	case reflect.Uint32:
+		if i64 < 0 || i64 > math.MaxUint32 {
+			return fmt.Errorf("%d overflows uint32", i64)
+		}
+	case reflect.Uint64:
+		if i64 < 0 {
+			return fmt.Errorf("%d overflows uint64", i64)
+		}
+	case reflect.Uint:
+		if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside an uint?
+			return fmt.Errorf("%d overflows uint", i64)
+		}
+	default:
+		return ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: val,
+		}
+	}
+
+	val.SetUint(uint64(i64))
+	return nil
+}
+
+func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	var f float64
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return emptyValue, err
+		}
+		f = float64(i32)
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+		f = float64(i64)
+	case bsontype.Double:
+		f, err = vr.ReadDouble()
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Boolean:
+		b, err := vr.ReadBoolean()
+		if err != nil {
+			return emptyValue, err
+		}
+		if b {
+			f = 1
+		}
+	case bsontype.Null:
+		if err = vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err = vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType)
+	}
+
+	switch t.Kind() {
+	case reflect.Float32:
+		if !dc.Truncate && float64(float32(f)) != f {
+			return emptyValue, errCannotTruncate
+		}
+
+		return reflect.ValueOf(float32(f)), nil
+	case reflect.Float64:
+		return reflect.ValueOf(f), nil
+	default:
+		return emptyValue, ValueDecoderError{
+			Name:     "FloatDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Float32, reflect.Float64},
+			Received: reflect.Zero(t),
+		}
+	}
+}
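+
+// Note (illustrative): the float32 branch rejects lossy narrowing unless
+// Truncate is set: 1.5 round-trips through float32 exactly and succeeds, while
+// 0.1 does not and yields errCannotTruncate.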
+
+// FloatDecodeValue is the ValueDecoderFunc for float types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "FloatDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Float32, reflect.Float64},
+			Received: val,
+		}
+	}
+
+	elem, err := dvd.floatDecodeType(ec, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.SetFloat(elem.Float())
+	return nil
+}
+
+// StringDecodeValue is the ValueDecoderFunc for string types.
+//
+// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var str string
+	var err error
+	switch vr.Type() {
+	// TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed.
+	case bsontype.String:
+		str, err = vr.ReadString()
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a string type", vr.Type())
+	}
+	if !val.CanSet() || val.Kind() != reflect.String {
+		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+	}
+
+	val.SetString(str)
+	return nil
+}
+
+func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tJavaScript {
+		return emptyValue, ValueDecoderError{
+			Name:     "JavaScriptDecodeValue",
+			Types:    []reflect.Type{tJavaScript},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var js string
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.JavaScript:
+		js, err = vr.ReadJavascript()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.JavaScript(js)), nil
+}
+
+// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tJavaScript {
+		return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+	}
+
+	elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript)
+	if err != nil {
+		return err
+	}
+
+	val.SetString(elem.String())
+	return nil
+}
+
+func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tSymbol {
+		return emptyValue, ValueDecoderError{
+			Name:     "SymbolDecodeValue",
+			Types:    []reflect.Type{tSymbol},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var symbol string
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.String:
+		symbol, err = vr.ReadString()
+	case bsontype.Symbol:
+		symbol, err = vr.ReadSymbol()
+	case bsontype.Binary:
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return emptyValue, err
+		}
+
+		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+			return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"}
+		}
+		symbol = string(data)
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.Symbol(symbol)), nil
+}
+
+// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tSymbol {
+		return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+	}
+
+	elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol)
+	if err != nil {
+		return err
+	}
+
+	val.SetString(elem.String())
+	return nil
+}
+
+func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tBinary {
+		return emptyValue, ValueDecoderError{
+			Name:     "BinaryDecodeValue",
+			Types:    []reflect.Type{tBinary},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var data []byte
+	var subtype byte
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Binary:
+		data, subtype, err = vr.ReadBinary()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil
+}
+
+// BinaryDecodeValue is the ValueDecoderFunc for Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tBinary {
+		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val}
+	}
+
+	elem, err := dvd.binaryDecodeType(dc, vr, tBinary)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tUndefined {
+		return emptyValue, ValueDecoderError{
+			Name:     "UndefinedDecodeValue",
+			Types:    []reflect.Type{tUndefined},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type())
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.Undefined{}), nil
+}
+
+// UndefinedDecodeValue is the ValueDecoderFunc for Undefined.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tUndefined {
+		return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+	}
+
+	elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// Accept both the raw 12-byte string format and the pretty-printed 24-character hex string format.
+func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tOID {
+		return emptyValue, ValueDecoderError{
+			Name:     "ObjectIDDecodeValue",
+			Types:    []reflect.Type{tOID},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var oid primitive.ObjectID
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.ObjectID:
+		oid, err = vr.ReadObjectID()
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.String:
+		str, err := vr.ReadString()
+		if err != nil {
+			return emptyValue, err
+		}
+		if oid, err = primitive.ObjectIDFromHex(str); err == nil {
+			break
+		}
+		if len(str) != 12 {
+			return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str))
+		}
+		byteArr := []byte(str)
+		copy(oid[:], byteArr)
+	case bsontype.Null:
+		if err = vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err = vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType)
+	}
+
+	return reflect.ValueOf(oid), nil
+}
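+
+// Note (illustrative): a 24-character hex string such as
+// "5ef7fdd91c19e3222b41b839" parses via primitive.ObjectIDFromHex, while any
+// other string must be exactly 12 bytes long and is copied verbatim into the
+// ObjectID.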
+
+// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tOID {
+		return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val}
+	}
+
+	elem, err := dvd.objectIDDecodeType(dc, vr, tOID)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tDateTime {
+		return emptyValue, ValueDecoderError{
+			Name:     "DateTimeDecodeValue",
+			Types:    []reflect.Type{tDateTime},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var dt int64
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.DateTime:
+		dt, err = vr.ReadDateTime()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.DateTime(dt)), nil
+}
+
+// DateTimeDecodeValue is the ValueDecoderFunc for DateTime.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDateTime {
+		return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+	}
+
+	elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tNull {
+		return emptyValue, ValueDecoderError{
+			Name:     "NullDecodeValue",
+			Types:    []reflect.Type{tNull},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type())
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.Null{}), nil
+}
+
+// NullDecodeValue is the ValueDecoderFunc for Null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tNull {
+		return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val}
+	}
+
+	elem, err := dvd.nullDecodeType(dc, vr, tNull)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tRegex {
+		return emptyValue, ValueDecoderError{
+			Name:     "RegexDecodeValue",
+			Types:    []reflect.Type{tRegex},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var pattern, options string
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Regex:
+		pattern, options, err = vr.ReadRegex()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil
+}
+
+// RegexDecodeValue is the ValueDecoderFunc for Regex.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRegex {
+		return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val}
+	}
+
+	elem, err := dvd.regexDecodeType(dc, vr, tRegex)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tDBPointer {
+		return emptyValue, ValueDecoderError{
+			Name:     "DBPointerDecodeValue",
+			Types:    []reflect.Type{tDBPointer},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var ns string
+	var pointer primitive.ObjectID
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.DBPointer:
+		ns, pointer, err = vr.ReadDBPointer()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil
+}
+
+// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDBPointer {
+		return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+	}
+
+	elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) {
+	if reflectType != tTimestamp {
+		return emptyValue, ValueDecoderError{
+			Name:     "TimestampDecodeValue",
+			Types:    []reflect.Type{tTimestamp},
+			Received: reflect.Zero(reflectType),
+		}
+	}
+
+	var t, incr uint32
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Timestamp:
+		t, incr, err = vr.ReadTimestamp()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil
+}
+
+// TimestampDecodeValue is the ValueDecoderFunc for Timestamp.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tTimestamp {
+		return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+	}
+
+	elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tMinKey {
+		return emptyValue, ValueDecoderError{
+			Name:     "MinKeyDecodeValue",
+			Types:    []reflect.Type{tMinKey},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.MinKey:
+		err = vr.ReadMinKey()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type())
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.MinKey{}), nil
+}
+
+// MinKeyDecodeValue is the ValueDecoderFunc for MinKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tMinKey {
+		return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+	}
+
+	elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tMaxKey {
+		return emptyValue, ValueDecoderError{
+			Name:     "MaxKeyDecodeValue",
+			Types:    []reflect.Type{tMaxKey},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.MaxKey:
+		err = vr.ReadMaxKey()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type())
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(primitive.MaxKey{}), nil
+}
+
+// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tMaxKey {
+		return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+	}
+
+	elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tDecimal {
+		return emptyValue, ValueDecoderError{
+			Name:     "Decimal128DecodeValue",
+			Types:    []reflect.Type{tDecimal},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var d128 primitive.Decimal128
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Decimal128:
+		d128, err = vr.ReadDecimal128()
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type())
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(d128), nil
+}
+
+// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDecimal {
+		return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+
+	elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tJSONNumber {
+		return emptyValue, ValueDecoderError{
+			Name:     "JSONNumberDecodeValue",
+			Types:    []reflect.Type{tJSONNumber},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var jsonNum json.Number
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return emptyValue, err
+		}
+		jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64))
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return emptyValue, err
+		}
+		jsonNum = json.Number(strconv.FormatInt(int64(i32), 10))
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+		jsonNum = json.Number(strconv.FormatInt(i64, 10))
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(jsonNum), nil
+}
+
+// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tJSONNumber {
+		return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+
+	elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tURL {
+		return emptyValue, ValueDecoderError{
+			Name:     "URLDecodeValue",
+			Types:    []reflect.Type{tURL},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	urlPtr := &url.URL{}
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.String:
+		var str string // Declare str here to avoid shadowing err during the ReadString call.
+		str, err = vr.ReadString()
+		if err != nil {
+			return emptyValue, err
+		}
+
+		urlPtr, err = url.Parse(str)
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(urlPtr).Elem(), nil
+}
+
+// URLDecodeValue is the ValueDecoderFunc for url.URL.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tURL {
+		return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+
+	elem, err := dvd.urlDecodeType(dc, vr, tURL)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// TimeDecodeValue is the ValueDecoderFunc for time.Time.
+//
+// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.DateTime {
+		return fmt.Errorf("cannot decode %v into a time.Time", vr.Type())
+	}
+
+	dt, err := vr.ReadDateTime()
+	if err != nil {
+		return err
+	}
+
+	if !val.CanSet() || val.Type() != tTime {
+		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+
+	val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC()))
+	return nil
+}
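+
+// Note (illustrative): dt is milliseconds since the Unix epoch, so
+// time.Unix(dt/1000, dt%1000*1000000) splits it into whole seconds plus the
+// remainder expressed as nanoseconds; e.g. dt == 1500 becomes 1s + 500ms.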
+
+// ByteSliceDecodeValue is the ValueDecoderFunc for []byte.
+//
+// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null {
+		return fmt.Errorf("cannot decode %v into a []byte", vr.Type())
+	}
+
+	if !val.CanSet() || val.Type() != tByteSlice {
+		return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	}
+
+	data, subtype, err := vr.ReadBinary()
+	if err != nil {
+		return err
+	}
+	if subtype != 0x00 {
+		return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype)
+	}
+
+	val.Set(reflect.ValueOf(data))
+	return nil
+}
+
+// MapDecodeValue is the ValueDecoderFunc for map[string]* types.
+//
+// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeMap(val.Type()))
+	}
+
+	eType := val.Type().Elem()
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return err
+	}
+
+	if eType == tEmpty {
+		dc.Ancestor = val.Type()
+	}
+
+	keyType := val.Type().Key()
+	for {
+		key, vr, err := dr.ReadElement()
+		if errors.Is(err, bsonrw.ErrEOD) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		elem := reflect.New(eType).Elem()
+
+		err = decoder.DecodeValue(dc, vr, elem)
+		if err != nil {
+			return err
+		}
+
+		val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem)
+	}
+	return nil
+}
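+
+// Note (illustrative): map keys are converted from string to the map's key
+// type via reflect.Value.Convert, so a named string key type such as
+// map[MyKey]T (with type MyKey string) decodes without extra configuration.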
+
+// ArrayDecodeValue is the ValueDecoderFunc for array types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Array:
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	case bsontype.Binary:
+		if val.Type().Elem() != tByte {
+			return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType)
+		}
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return err
+		}
+		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+			return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
+		}
+
+		if len(data) > val.Len() {
+			return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type())
+		}
+
+		for idx, elem := range data {
+			val.Index(idx).Set(reflect.ValueOf(elem))
+		}
+		return nil
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Undefined:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadUndefined()
+	default:
+		return fmt.Errorf("cannot decode %v into an array", vrType)
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		elemsFunc = dvd.decodeD
+	default:
+		elemsFunc = dvd.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if len(elems) > val.Len() {
+		return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems))
+	}
+
+	for idx, elem := range elems {
+		val.Index(idx).Set(elem)
+	}
+
+	return nil
+}
+
+// SliceDecodeValue is the ValueDecoderFunc for slice types.
+//
+// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Slice {
+		return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Array:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a slice", vr.Type())
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		dc.Ancestor = val.Type()
+		elemsFunc = dvd.decodeD
+	default:
+		elemsFunc = dvd.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
+	}
+
+	val.SetLen(0)
+	val.Set(reflect.Append(val, elems...))
+
+	return nil
+}
+
+// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) {
+		return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+	}
+
+	if val.Kind() == reflect.Ptr && val.IsNil() {
+		if !val.CanSet() {
+			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+		}
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	if !val.Type().Implements(tValueUnmarshaler) {
+		if !val.CanAddr() {
+			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+		}
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+	}
+
+	t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
+	m, ok := val.Interface().(ValueUnmarshaler)
+	if !ok {
+		// NB: this error should be unreachable due to the above checks
+		return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+	}
+	return m.UnmarshalBSONValue(t, src)
+}
+
+// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) {
+		return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+	}
+
+	if val.Kind() == reflect.Ptr && val.IsNil() {
+		if !val.CanSet() {
+			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+		}
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	_, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
+	// If the target Go value is a pointer and the BSON field value is empty, set the value to the
+	// zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to
+	// change the pointer value from within the function (only the value at the pointer address),
+	// so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON
+	// field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches
+	// the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and
+	// the JSON field value is "null".
+	if val.Kind() == reflect.Ptr && len(src) == 0 {
+		val.Set(reflect.Zero(val.Type()))
+		return nil
+	}
+
+	if !val.Type().Implements(tUnmarshaler) {
+		if !val.CanAddr() {
+			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+		}
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+	}
+
+	m, ok := val.Interface().(Unmarshaler)
+	if !ok {
+		// NB: this error should be unreachable due to the above checks
+		return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+	}
+	return m.UnmarshalBSON(src)
+}
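+
+// As an illustrative sketch of the pointer/null behavior described above
+// (myType is a hypothetical type implementing bson.Unmarshaler and raw is a
+// BSON document encoding {"field": null}):
+//
+//	var doc struct {
+//		Field *myType `bson:"field"`
+//	}
+//	_ = bson.Unmarshal(raw, &doc) // doc.Field stays nil; UnmarshalBSON is not called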
+
+// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}.
+//
+// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tEmpty {
+		return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	rtype, err := dc.LookupTypeMapEntry(vr.Type())
+	if err != nil {
+		switch vr.Type() {
+		case bsontype.EmbeddedDocument:
+			if dc.Ancestor != nil {
+				rtype = dc.Ancestor
+				break
+			}
+			rtype = tD
+		case bsontype.Null:
+			val.Set(reflect.Zero(val.Type()))
+			return vr.ReadNull()
+		default:
+			return err
+		}
+	}
+
+	decoder, err := dc.LookupDecoder(rtype)
+	if err != nil {
+		return err
+	}
+
+	elem := reflect.New(rtype).Elem()
+	err = decoder.DecodeValue(dc, vr, elem)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tCoreDocument {
+		return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0)
+
+	cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr)
+	val.Set(reflect.ValueOf(cdoc))
+	return err
+}
+
+func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) {
+	elems := make([]reflect.Value, 0)
+
+	ar, err := vr.ReadArray()
+	if err != nil {
+		return nil, err
+	}
+
+	eType := val.Type().Elem()
+
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return nil, err
+	}
+	eTypeDecoder, _ := decoder.(typeDecoder)
+
+	idx := 0
+	for {
+		vr, err := ar.ReadValue()
+		if errors.Is(err, bsonrw.ErrEOA) {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
+		if err != nil {
+			return nil, newDecodeError(strconv.Itoa(idx), err)
+		}
+		elems = append(elems, elem)
+		idx++
+	}
+
+	return elems, nil
+}
+
+func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) {
+	var cws primitive.CodeWithScope
+
+	code, dr, err := vr.ReadCodeWithScope()
+	if err != nil {
+		return cws, err
+	}
+
+	scope := reflect.New(tD).Elem()
+	elems, err := dvd.decodeElemsFromDocumentReader(dc, dr)
+	if err != nil {
+		return cws, err
+	}
+
+	scope.Set(reflect.MakeSlice(tD, 0, len(elems)))
+	scope.Set(reflect.Append(scope, elems...))
+
+	cws = primitive.CodeWithScope{
+		Code:  primitive.JavaScript(code),
+		Scope: scope.Interface().(primitive.D),
+	}
+	return cws, nil
+}
+
+func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tCodeWithScope {
+		return emptyValue, ValueDecoderError{
+			Name:     "CodeWithScopeDecodeValue",
+			Types:    []reflect.Type{tCodeWithScope},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var cws primitive.CodeWithScope
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.CodeWithScope:
+		cws, err = dvd.readCodeWithScope(dc, vr)
+	case bsontype.Null:
+		err = vr.ReadNull()
+	case bsontype.Undefined:
+		err = vr.ReadUndefined()
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType)
+	}
+	if err != nil {
+		return emptyValue, err
+	}
+
+	return reflect.ValueOf(cws), nil
+}
+
+// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tCodeWithScope {
+		return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) {
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	default:
+		return nil, fmt.Errorf("cannot decode %v into a D", vr.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return nil, err
+	}
+
+	return dvd.decodeElemsFromDocumentReader(dc, dr)
+}
+
+func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) {
+	decoder, err := dc.LookupDecoder(tEmpty)
+	if err != nil {
+		return nil, err
+	}
+
+	elems := make([]reflect.Value, 0)
+	for {
+		key, vr, err := dr.ReadElement()
+		if errors.Is(err, bsonrw.ErrEOD) {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		val := reflect.New(tEmpty).Elem()
+		err = decoder.DecodeValue(dc, vr, val)
+		if err != nil {
+			return nil, newDecodeError(key, err)
+		}
+
+		elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()}))
+	}
+
+	return elems, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
new file mode 100644
index 0000000000000000000000000000000000000000..4751ae995e7e3fed20527b6ae41ec0be84fe30e8
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
@@ -0,0 +1,856 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var defaultValueEncoders DefaultValueEncoders
+
+var bvwPool = bsonrw.NewBSONValueWriterPool()
+
+var errInvalidValue = errors.New("cannot encode invalid element")
+
+var sliceWriterPool = sync.Pool{
+	New: func() interface{} {
+		sw := make(bsonrw.SliceWriter, 0)
+		return &sw
+	},
+}
+
+func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error {
+	vw, err := dw.WriteDocumentElement(e.Key)
+	if err != nil {
+		return err
+	}
+
+	if e.Value == nil {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DefaultValueEncoders is a namespace type for the default ValueEncoders used
+// when creating a registry.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+type DefaultValueEncoders struct{}
+
+// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with
+// the provided RegistryBuilder.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
+	}
+	rb.
+		RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec).
+		RegisterTypeEncoder(tTime, defaultTimeCodec).
+		RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec).
+		RegisterTypeEncoder(tCoreArray, defaultArrayCodec).
+		RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)).
+		RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)).
+		RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)).
+		RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)).
+		RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)).
+		RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)).
+		RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)).
+		RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)).
+		RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)).
+		RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)).
+		RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)).
+		RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)).
+		RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)).
+		RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)).
+		RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)).
+		RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)).
+		RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)).
+		RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)).
+		RegisterDefaultEncoder(reflect.Map, defaultMapCodec).
+		RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec).
+		RegisterDefaultEncoder(reflect.String, defaultStringCodec).
+		RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()).
+		RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()).
+		RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)).
+		RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)).
+		RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue))
+}
+
+// BooleanEncodeValue is the ValueEncoderFunc for bool types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Bool {
+		return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+	}
+	return vw.WriteBoolean(val.Bool())
+}
+
+func fitsIn32Bits(i int64) bool {
+	return math.MinInt32 <= i && i <= math.MaxInt32
+}
+
+// IntEncodeValue is the ValueEncoderFunc for int types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Int8, reflect.Int16, reflect.Int32:
+		return vw.WriteInt32(int32(val.Int()))
+	case reflect.Int:
+		i64 := val.Int()
+		if fitsIn32Bits(i64) {
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	case reflect.Int64:
+		i64 := val.Int()
+		if ec.MinSize && fitsIn32Bits(i64) {
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	}
+
+	return ValueEncoderError{
+		Name:     "IntEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+		Received: val,
+	}
+}
+
+// UintEncodeValue is the ValueEncoderFunc for uint types.
+//
+// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Uint8, reflect.Uint16:
+		return vw.WriteInt32(int32(val.Uint()))
+	case reflect.Uint, reflect.Uint32, reflect.Uint64:
+		u64 := val.Uint()
+		if ec.MinSize && u64 <= math.MaxInt32 {
+			return vw.WriteInt32(int32(u64))
+		}
+		if u64 > math.MaxInt64 {
+			return fmt.Errorf("%d overflows int64", u64)
+		}
+		return vw.WriteInt64(int64(u64))
+	}
+
+	return ValueEncoderError{
+		Name:     "UintEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+		Received: val,
+	}
+}
+
+// FloatEncodeValue is the ValueEncoderFunc for float types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Float32, reflect.Float64:
+		return vw.WriteDouble(val.Float())
+	}
+
+	return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+}
+
+// StringEncodeValue is the ValueEncoderFunc for string types.
+//
+// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.String {
+		return ValueEncoderError{
+			Name:     "StringEncodeValue",
+			Kinds:    []reflect.Kind{reflect.String},
+			Received: val,
+		}
+	}
+
+	return vw.WriteString(val.String())
+}
+
+// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tOID {
+		return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
+	}
+	return vw.WriteObjectID(val.Interface().(primitive.ObjectID))
+}
+
+// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDecimal {
+		return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+	return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
+}
+
+// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJSONNumber {
+		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+	jsnum := val.Interface().(json.Number)
+
+	// Attempt int first, then float64
+	if i64, err := jsnum.Int64(); err == nil {
+		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
+	}
+
+	f64, err := jsnum.Float64()
+	if err != nil {
+		return err
+	}
+
+	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
+}
+
+// URLEncodeValue is the ValueEncoderFunc for url.URL.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tURL {
+		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+	u := val.Interface().(url.URL)
+	return vw.WriteString(u.String())
+}
+
+// TimeEncodeValue is the ValueEncoderFunc for time.Time.
+//
+// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	dt := primitive.NewDateTimeFromTime(tt)
+	return vw.WriteDateTime(int64(dt))
+}
+
+// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
+//
+// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
+//
+// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() {
+	// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return dve.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key already exists; it is mainly used for inline maps in the
+// struct codec.
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		if collisionFn != nil && collisionFn(key.String()) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+
+		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := dw.WriteDocumentElement(key.String())
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for array types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type().Elem() == tE {
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for idx := 0; idx < val.Len(); idx++ {
+			e := val.Index(idx).Interface().(primitive.E)
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	// If we have a []byte we want to treat it as a binary instead of as an array.
+	if val.Type().Elem() == tByte {
+		var byteSlice []byte
+		for idx := 0; idx < val.Len(); idx++ {
+			byteSlice = append(byteSlice, val.Index(idx).Interface().(byte))
+		}
+		return vw.WriteBinary(byteSlice)
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// SliceEncodeValue is the ValueEncoderFunc for slice types.
+//
+// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type().ConvertibleTo(tD) {
+		d := val.Convert(tD).Interface().(primitive.D)
+
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for _, e := range d {
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) {
+	if origEncoder != nil || (currVal.Kind() != reflect.Interface) {
+		return origEncoder, currVal, nil
+	}
+	currVal = currVal.Elem()
+	if !currVal.IsValid() {
+		return nil, currVal, errInvalidValue
+	}
+	currEncoder, err := ec.LookupEncoder(currVal.Type())
+
+	return currEncoder, currVal, err
+}
+
+// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}.
+//
+// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type())
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	// Either val or a pointer to val must implement ValueMarshaler
+	switch {
+	case !val.IsValid():
+		return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+	case val.Type().Implements(tValueMarshaler):
+		// If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer
+		if isImplementationNil(val, tValueMarshaler) {
+			return vw.WriteNull()
+		}
+	case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr():
+		val = val.Addr()
+	default:
+		return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+	}
+
+	m, ok := val.Interface().(ValueMarshaler)
+	if !ok {
+		return vw.WriteNull()
+	}
+	t, data, err := m.MarshalBSONValue()
+	if err != nil {
+		return err
+	}
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
+}
+
+// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	// Either val or a pointer to val must implement Marshaler
+	switch {
+	case !val.IsValid():
+		return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+	case val.Type().Implements(tMarshaler):
+		// If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer
+		if isImplementationNil(val, tMarshaler) {
+			return vw.WriteNull()
+		}
+	case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr():
+		val = val.Addr()
+	default:
+		return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+	}
+
+	m, ok := val.Interface().(Marshaler)
+	if !ok {
+		return vw.WriteNull()
+	}
+	data, err := m.MarshalBSON()
+	if err != nil {
+		return err
+	}
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
+}
+
+// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	// Either val or a pointer to val must implement Proxy
+	switch {
+	case !val.IsValid():
+		return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+	case val.Type().Implements(tProxy):
+		// If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer
+		if isImplementationNil(val, tProxy) {
+			return vw.WriteNull()
+		}
+	case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr():
+		val = val.Addr()
+	default:
+		return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+	}
+
+	m, ok := val.Interface().(Proxy)
+	if !ok {
+		return vw.WriteNull()
+	}
+	v, err := m.ProxyBSON()
+	if err != nil {
+		return err
+	}
+	if v == nil {
+		encoder, err := ec.LookupEncoder(nil)
+		if err != nil {
+			return err
+		}
+		return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil))
+	}
+	vv := reflect.ValueOf(v)
+	switch vv.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		vv = vv.Elem()
+	}
+	encoder, err := ec.LookupEncoder(vv.Type())
+	if err != nil {
+		return err
+	}
+	return encoder.EncodeValue(ec, vw, vv)
+}
+
+// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJavaScript {
+		return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+	}
+
+	return vw.WriteJavascript(val.String())
+}
+
+// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tSymbol {
+		return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+	}
+
+	return vw.WriteSymbol(val.String())
+}
+
+// BinaryEncodeValue is the ValueEncoderFunc for Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tBinary {
+		return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
+	}
+	b := val.Interface().(primitive.Binary)
+
+	return vw.WriteBinaryWithSubtype(b.Data, b.Subtype)
+}
+
+// UndefinedEncodeValue is the ValueEncoderFunc for Undefined.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tUndefined {
+		return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+	}
+
+	return vw.WriteUndefined()
+}
+
+// DateTimeEncodeValue is the ValueEncoderFunc for DateTime.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDateTime {
+		return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+	}
+
+	return vw.WriteDateTime(val.Int())
+}
+
+// NullEncodeValue is the ValueEncoderFunc for Null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tNull {
+		return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
+	}
+
+	return vw.WriteNull()
+}
+
+// RegexEncodeValue is the ValueEncoderFunc for Regex.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRegex {
+		return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
+	}
+
+	regex := val.Interface().(primitive.Regex)
+
+	return vw.WriteRegex(regex.Pattern, regex.Options)
+}
+
+// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDBPointer {
+		return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+	}
+
+	dbp := val.Interface().(primitive.DBPointer)
+
+	return vw.WriteDBPointer(dbp.DB, dbp.Pointer)
+}
+
+// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTimestamp {
+		return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+	}
+
+	ts := val.Interface().(primitive.Timestamp)
+
+	return vw.WriteTimestamp(ts.T, ts.I)
+}
+
+// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMinKey {
+		return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+	}
+
+	return vw.WriteMinKey()
+}
+
+// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMaxKey {
+		return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+	}
+
+	return vw.WriteMaxKey()
+}
+
+// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCoreDocument {
+		return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+	}
+
+	cdoc := val.Interface().(bsoncore.Document)
+
+	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc)
+}
+
+// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCodeWithScope {
+		return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	cws := val.Interface().(primitive.CodeWithScope)
+
+	dw, err := vw.WriteCodeWithScope(string(cws.Code))
+	if err != nil {
+		return err
+	}
+
+	sw := sliceWriterPool.Get().(*bsonrw.SliceWriter)
+	defer sliceWriterPool.Put(sw)
+	*sw = (*sw)[:0]
+
+	scopeVW := bvwPool.Get(sw)
+	defer bvwPool.Put(scopeVW)
+
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw)
+	if err != nil {
+		return err
+	}
+	return dw.WriteDocumentEnd()
+}
+
+// isImplementationNil reports whether val is a nil pointer whose concrete (non-pointer) type implements inter.
+func isImplementationNil(val reflect.Value, inter reflect.Type) bool {
+	vt := val.Type()
+	for vt.Kind() == reflect.Ptr {
+		vt = vt.Elem()
+	}
+	return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4613e5a1ec79d76a3c2ff8702afd680c07948709
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
@@ -0,0 +1,95 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsoncodec provides a system for encoding values to BSON representations and decoding
+// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
+// BSON representations. The types in this package enable a flexible system for handling this
+// encoding and decoding.
+//
+// The codec system is composed of two parts:
+//
+// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
+// representations.
+//
+// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
+// retrieving them.
+//
+// # ValueEncoders and ValueDecoders
+//
+// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
+// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
+// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
+// is provided to allow use of a function with the correct signature as a ValueEncoder. An
+// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and
+// to provide configuration information.
+//
+// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
+// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
+// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
+// instance is provided and serves similar functionality to the EncodeContext.
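+//
+// As a hedged sketch (Celsius and celsiusEncoder are illustrative names, not
+// part of the driver), an ordinary function can be adapted into a ValueEncoder:
+//
+//	// Encode a hypothetical Celsius type (kind float64) as a BSON double.
+//	celsiusEncoder := bsoncodec.ValueEncoderFunc(func(
+//		ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value,
+//	) error {
+//		return vw.WriteDouble(val.Float())
+//	})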
+//
+// # Registry
+//
+// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
+// documentation for examples of registering various custom encoders and decoders. A Registry
+// supports four main types of registrations:
+//
+// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
+// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
+// whose type matches the registered type exactly.
+// If the registered type is an interface, the codec will be invoked when encoding or decoding
+// values whose type is the interface, but not for values with concrete types that implement the
+// interface.
+//
+// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
+// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
+// will be invoked when encoding or decoding values whose types implement the interface. An example
+// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
+// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
+//
+// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type
+// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
+// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
+// respectively, when decoding into a bson.D. The following code would change the behavior so these
+// values decode as Go int instances instead:
+//
+//	intType := reflect.TypeOf(int(0))
+//	registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
+//
+// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and
+// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
+// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
+// match a registered type or hook encoder/decoder first. These methods should be used to change the
+// behavior for all values for a specific kind.
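+//
+// For example (a sketch; rb is assumed to be a *RegistryBuilder and
+// myStringDecoder a ValueDecoder defined elsewhere), a kind decoder registered
+// for reflect.String applies to every type of kind string that no type or hook
+// codec claims first:
+//
+//	rb.RegisterDefaultDecoder(reflect.String, myStringDecoder)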
+//
+// # Registry Lookup Procedure
+//
+// When looking up an encoder in a Registry, the precedence rules are as follows:
+//
+// 1. A type encoder registered for the exact type of the value.
+//
+// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
+// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
+// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
+// constructed using bson.NewRegistry have driver-defined hooks registered for the
+// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
+// precedence over any new hooks.
+//
+// 3. A kind encoder registered for the value's kind.
+//
+// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
+// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
+// will be returned if no decoder is found.
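+//
+// A short sketch of these rules (timeEncoder and structEncoder are assumed
+// ValueEncoders, rb a *RegistryBuilder):
+//
+//	rb.RegisterTypeEncoder(reflect.TypeOf(time.Time{}), timeEncoder) // exact type: rule 1
+//	rb.RegisterDefaultEncoder(reflect.Struct, structEncoder)         // kind: rule 3
+//
+// Encoding a time.Time value selects timeEncoder, while encoding any other
+// struct value falls through to structEncoder.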
+//
+// # DefaultValueEncoders and DefaultValueDecoders
+//
+// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
+// ValueDecoders for handling a wide range of Go types, including all of the types within the
+// primitive package. To make registering these codecs easier, a helper method on each type is
+// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders, and
+// for the DefaultValueDecoders type it is called RegisterDefaultDecoders; the latter also
+// handles registering the type map entries for each BSON type.
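+//
+// A minimal sketch of wiring them up with the deprecated builder API:
+//
+//	rb := bsoncodec.NewRegistryBuilder()
+//	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+//	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+//	reg := rb.Build() // *Registry with the default codecs and type map entries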
+package bsoncodec
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..098368f0711f13119fded493a0d71dd350807b97
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
@@ -0,0 +1,173 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// EmptyInterfaceCodec is the Codec used for interface{} values.
+//
+// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go
+// Driver 2.0. To configure the empty interface encode and decode behavior, use
+// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface
+// encode and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to unmarshal BSON binary field
+// values as a Go byte slice, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//	    BinaryAsSlice: true,
+//	})
+//
+// See the deprecation notice for each field in EmptyInterfaceCodec for the
+// corresponding settings.
+type EmptyInterfaceCodec struct {
+	// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
+	// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+	//
+	// Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead.
+	DecodeBinaryAsSlice bool
+}
+
+var (
+	defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
+
+	// Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
+	// to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+	// collection.
+	_ typeDecoder = defaultEmptyInterfaceCodec
+)
+
+// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
+//
+// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See
+// [EmptyInterfaceCodec] for more details.
+func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
+	interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
+
+	codec := EmptyInterfaceCodec{}
+	if interfaceOpt.DecodeBinaryAsSlice != nil {
+		codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type())
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) {
+	isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument
+	if isDocument {
+		if dc.defaultDocumentType != nil {
+			// If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return
+			// that type.
+			return dc.defaultDocumentType, nil
+		}
+		if dc.Ancestor != nil {
+			// Using ancestor information rather than looking up the type map entry forces consistent decoding.
+			// If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry
+			// has been registered.
+			return dc.Ancestor, nil
+		}
+	}
+
+	rtype, err := dc.LookupTypeMapEntry(valueType)
+	if err == nil {
+		return rtype, nil
+	}
+
+	if isDocument {
+		// For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument,
+		// depending on the original valueType.
+		var lookupType bsontype.Type
+		switch valueType {
+		case bsontype.Type(0):
+			lookupType = bsontype.EmbeddedDocument
+		case bsontype.EmbeddedDocument:
+			lookupType = bsontype.Type(0)
+		}
+
+		rtype, err = dc.LookupTypeMapEntry(lookupType)
+		if err == nil {
+			return rtype, nil
+		}
+	}
+
+	return nil, err
+}
+
+func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tEmpty {
+		return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)}
+	}
+
+	rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type())
+	if err != nil {
+		switch vr.Type() {
+		case bsontype.Null:
+			return reflect.Zero(t), vr.ReadNull()
+		default:
+			return emptyValue, err
+		}
+	}
+
+	decoder, err := dc.LookupDecoder(rtype)
+	if err != nil {
+		return emptyValue, err
+	}
+
+	elem, err := decodeTypeOrValue(decoder, dc, vr, rtype)
+	if err != nil {
+		return emptyValue, err
+	}
+
+	if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary {
+		binElem := elem.Interface().(primitive.Binary)
+		if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld {
+			elem = reflect.ValueOf(binElem.Data)
+		}
+	}
+
+	return elem, nil
+}
+
+// DecodeValue is the ValueDecoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tEmpty {
+		return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	elem, err := eic.decodeType(dc, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7e00ffa8d16cefd96bde94d0157d07856b39be2
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
@@ -0,0 +1,343 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var defaultMapCodec = NewMapCodec()
+
+// MapCodec is the Codec used for map values.
+//
+// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To
+// configure the map encode and decode behavior, use the configuration methods
+// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and
+// decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON
+// documents, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//	    NilMapAsEmpty: true,
+//	})
+//
+// See the deprecation notice for each field in MapCodec for the corresponding
+// settings.
+type MapCodec struct {
+	// DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination
+	// value passed to Decode before unmarshaling BSON documents into them.
+	//
+	// Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead.
+	DecodeZerosMap bool
+
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
+	// BSON null.
+	//
+	// Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead.
+	EncodeNilAsEmpty bool
+
+	// EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
+	// strings using fmt.Sprintf() instead of the default string conversion logic.
+	//
+	// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or
+	// options.BSONOptions.StringifyMapKeysWithFmt instead.
+	EncodeKeysWithStringer bool
+}
+
+// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
+// This applies to types used as map keys and is similar to encoding.TextMarshaler.
+type KeyMarshaler interface {
+	MarshalKey() (key string, err error)
+}
+
+// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation
+// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler.
+//
+// UnmarshalKey must be able to decode the form generated by MarshalKey.
+// UnmarshalKey must copy the text if it wishes to retain the text
+// after returning.
+type KeyUnmarshaler interface {
+	UnmarshalKey(key string) error
+}
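+
+// For illustration only (not part of the driver): a hypothetical userID key
+// type that round-trips through both interfaces.
+//
+//	type userID int64
+//
+//	func (u userID) MarshalKey() (string, error) {
+//		return strconv.FormatInt(int64(u), 10), nil
+//	}
+//
+//	func (u *userID) UnmarshalKey(key string) error {
+//		n, err := strconv.ParseInt(key, 10, 64)
+//		*u = userID(n)
+//		return err
+//	}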
+
+// NewMapCodec returns a MapCodec with options opts.
+//
+// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See
+// [MapCodec] for more details.
+func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
+	mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
+
+	codec := MapCodec{}
+	if mapOpt.DecodeZerosMap != nil {
+		codec.DecodeZerosMap = *mapOpt.DecodeZerosMap
+	}
+	if mapOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty
+	}
+	if mapOpt.EncodeKeysWithStringer != nil {
+		codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer
+	}
+	return &codec
+}
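+
+// Construction sketch using the (also deprecated) bsonoptions package; the
+// supported path is the BSONOptions example in the MapCodec docs above:
+//
+//	mc := bsoncodec.NewMapCodec(
+//		bsonoptions.MapCodec().SetEncodeNilAsEmpty(true),
+//	)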
+
+// EncodeValue is the ValueEncoder for map[*]* types.
+func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return mc.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
+func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt)
+		if err != nil {
+			return err
+		}
+
+		if collisionFn != nil && collisionFn(keyStr) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+
+		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := dw.WriteDocumentElement(keyStr)
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// DecodeValue is the ValueDecoder for map[string/decimal]* types.
+func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) {
+		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Undefined:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadUndefined()
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeMap(val.Type()))
+	}
+
+	if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) {
+		clearMap(val)
+	}
+
+	eType := val.Type().Elem()
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return err
+	}
+	eTypeDecoder, _ := decoder.(typeDecoder)
+
+	if eType == tEmpty {
+		dc.Ancestor = val.Type()
+	}
+
+	keyType := val.Type().Key()
+
+	for {
+		key, vr, err := dr.ReadElement()
+		if errors.Is(err, bsonrw.ErrEOD) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		k, err := mc.decodeKey(key, keyType)
+		if err != nil {
+			return err
+		}
+
+		elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
+		if err != nil {
+			return newDecodeError(key, err)
+		}
+
+		val.SetMapIndex(k, elem)
+	}
+	return nil
+}
+
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
+func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) {
+	if mc.EncodeKeysWithStringer || encodeKeysWithStringer {
+		return fmt.Sprint(val), nil
+	}
+
+	// keys of any string type are used directly
+	if val.Kind() == reflect.String {
+		return val.String(), nil
+	}
+	// KeyMarshalers are marshaled
+	if km, ok := val.Interface().(KeyMarshaler); ok {
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return "", nil
+		}
+		buf, err := km.MarshalKey()
+		if err == nil {
+			return buf, nil
+		}
+		return "", err
+	}
+	// keys that implement encoding.TextMarshaler are marshaled.
+	if km, ok := val.Interface().(encoding.TextMarshaler); ok {
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return "", nil
+		}
+
+		buf, err := km.MarshalText()
+		if err != nil {
+			return "", err
+		}
+
+		return string(buf), nil
+	}
+
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil
+	}
+	return "", fmt.Errorf("unsupported key type: %v", val.Type())
+}
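+
+// Illustrative key conversions with EncodeKeysWithStringer disabled:
+//
+//	map[string]int{"a": 1} -> {"a": 1}
+//	map[int32]int{7: 1}    -> {"7": 1}
+//	map[bool]int{true: 1}  -> error: unsupported key type: bool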
+
+var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) {
+	keyVal := reflect.ValueOf(key)
+	var err error
+	switch {
+	// First, if EncodeKeysWithStringer is not enabled, try to decode with KeyUnmarshaler.
+	case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType):
+		keyVal = reflect.New(keyType)
+		v := keyVal.Interface().(KeyUnmarshaler)
+		err = v.UnmarshalKey(key)
+		keyVal = keyVal.Elem()
+	// Try to decode encoding.TextUnmarshalers.
+	case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
+		keyVal = reflect.New(keyType)
+		v := keyVal.Interface().(encoding.TextUnmarshaler)
+		err = v.UnmarshalText([]byte(key))
+		keyVal = keyVal.Elem()
+	// Otherwise, go to type specific behavior
+	default:
+		switch keyType.Kind() {
+		case reflect.String:
+			keyVal = reflect.ValueOf(key).Convert(keyType)
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, parseErr := strconv.ParseInt(key, 10, 64)
+			if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) {
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
+			}
+			keyVal = reflect.ValueOf(n).Convert(keyType)
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, parseErr := strconv.ParseUint(key, 10, 64)
+			if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) {
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
+				break
+			}
+			keyVal = reflect.ValueOf(n).Convert(keyType)
+		case reflect.Float32, reflect.Float64:
+			if mc.EncodeKeysWithStringer {
+				parsed, err := strconv.ParseFloat(key, 64)
+				if err != nil {
+					return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err)
+				}
+				keyVal = reflect.ValueOf(parsed)
+				break
+			}
+			fallthrough
+		default:
+			return keyVal, fmt.Errorf("unsupported key type: %v", keyType)
+		}
+	}
+	return keyVal, err
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
new file mode 100644
index 0000000000000000000000000000000000000000..fbd9f0a9e9722817dfb02a25d4a2a72df5449303
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import "fmt"
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid transition in a
+// ValueReader or ValueWriter state machine occurs.
+type TransitionError struct {
+	parent      mode
+	current     mode
+	destination mode
+}
+
+func (te TransitionError) Error() string {
+	if te.destination == mode(0) {
+		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
+	}
+	if te.parent == mode(0) {
+		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
+	}
+	return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..ddfa4a33e180893f44fcf7ce2dcf583334e7fb97
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var _ ValueEncoder = &PointerCodec{}
+var _ ValueDecoder = &PointerCodec{}
+
+// PointerCodec is the Codec used for pointers.
+//
+// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To
+// override the default pointer encode and decode behavior, create a new registry
+// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new
+// encoder and decoder for pointers.
+//
+// For example,
+//
+//	reg := bson.NewRegistry()
+//	reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder)
+//	reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder)
+type PointerCodec struct {
+	ecache typeEncoderCache
+	dcache typeDecoderCache
+}
+
+// NewPointerCodec returns a PointerCodec that has been initialized.
+//
+// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See
+// [PointerCodec] for more details.
+func NewPointerCodec() *PointerCodec {
+	return &PointerCodec{}
+}
+
+// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
+// or looking up an encoder for the type of value the pointer points to.
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.Ptr {
+		if !val.IsValid() {
+			return vw.WriteNull()
+		}
+		return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	typ := val.Type()
+	if v, ok := pc.ecache.Load(typ); ok {
+		if v == nil {
+			return ErrNoEncoder{Type: typ}
+		}
+		return v.EncodeValue(ec, vw, val.Elem())
+	}
+	// TODO(charlie): handle concurrent requests for the same type
+	enc, err := ec.LookupEncoder(typ.Elem())
+	enc = pc.ecache.LoadOrStore(typ, enc)
+	if err != nil {
+		return err
+	}
+	return enc.EncodeValue(ec, vw, val.Elem())
+}
+
+// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
+// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Ptr {
+		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	typ := val.Type()
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(typ))
+		return vr.ReadNull()
+	}
+	if vr.Type() == bsontype.Undefined {
+		val.Set(reflect.Zero(typ))
+		return vr.ReadUndefined()
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.New(typ.Elem()))
+	}
+
+	if v, ok := pc.dcache.Load(typ); ok {
+		if v == nil {
+			return ErrNoDecoder{Type: typ}
+		}
+		return v.DecodeValue(dc, vr, val.Elem())
+	}
+	// TODO(charlie): handle concurrent requests for the same type
+	dec, err := dc.LookupDecoder(typ.Elem())
+	dec = pc.dcache.LoadOrStore(typ, dec)
+	if err != nil {
+		return err
+	}
+	return dec.DecodeValue(dc, vr, val.Elem())
+}
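+
+// Null-handling sketch with a hypothetical struct: decoding BSON null into a
+// pointer field sets the pointer to nil, while a concrete value allocates and
+// fills it.
+//
+//	var out struct {
+//		Name *string `bson:"name"`
+//	}
+//	// {"name": null} -> out.Name == nil
+//	// {"name": "go"} -> *out.Name == "go"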
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 0000000000000000000000000000000000000000..4cf2b01ab482718fe7a10f936cdce97c430dc1ee
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process, and that
+// value will be encoded in place for the implementer.
+type Proxy interface {
+	ProxyBSON() (interface{}, error)
+}
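+
+// A hypothetical implementer, shown for illustration: encoding a celsius
+// value writes the document returned by ProxyBSON in its place.
+//
+//	type celsius float64
+//
+//	func (c celsius) ProxyBSON() (interface{}, error) {
+//		return bson.D{{Key: "unit", Value: "C"}, {Key: "value", Value: float64(c)}}, nil
+//	}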
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
new file mode 100644
index 0000000000000000000000000000000000000000..196c491bbbf15c045b2a1deb551350aa8e908aac
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,524 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
+//
+// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+//
+// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+//
+// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
+type ErrNoEncoder struct {
+	Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+	if ene.Type == nil {
+		return "no encoder found for <nil>"
+	}
+	return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+//
+// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
+type ErrNoDecoder struct {
+	Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+	return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+//
+// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
+type ErrNoTypeMapEntry struct {
+	Type bsontype.Type
+}
+
+func (entme ErrNoTypeMapEntry) Error() string {
+	return "no type map entry found for " + entme.Type.String()
+}
+
+// ErrNotInterface is returned when the provided type is not an interface.
+//
+// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
+var ErrNotInterface = errors.New("The provided type is not an interface")
+
+// A RegistryBuilder is used to build a Registry. This type is not goroutine
+// safe.
+//
+// Deprecated: Use Registry instead.
+type RegistryBuilder struct {
+	registry *Registry
+}
+
+// NewRegistryBuilder creates a new empty RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func NewRegistryBuilder() *RegistryBuilder {
+	return &RegistryBuilder{
+		registry: NewRegistry(),
+	}
+}
+
+// RegisterCodec will register the provided ValueCodec for the provided type.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
+	rb.RegisterTypeEncoder(t, codec)
+	rb.RegisterTypeDecoder(t, codec)
+	return rb
+}
+
+// RegisterTypeEncoder will register the provided ValueEncoder for the provided type.
+//
+// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
+// will not be called when marshaling a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder instead.
+func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterTypeEncoder(t, enc)
+	return rb
+}
+
+// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
+// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
+func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterInterfaceEncoder(t, enc)
+	return rb
+}
+
+// RegisterTypeDecoder will register the provided ValueDecoder for the provided type.
+//
+// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
+// It will not be called when unmarshaling into a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterTypeDecoder(t, dec)
+	return rb
+}
+
+// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when
+// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterInterfaceDecoder(t, dec)
+	return rb
+}
+
+// RegisterEncoder registers the provided type and encoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead.
+func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	if t == tEmpty {
+		rb.registry.RegisterTypeEncoder(t, enc)
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		rb.registry.RegisterInterfaceEncoder(t, enc)
+	default:
+		rb.registry.RegisterTypeEncoder(t, enc)
+	}
+	return rb
+}
+
+// RegisterDecoder registers the provided type and decoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	if t == nil {
+		rb.registry.RegisterTypeDecoder(t, dec)
+		return rb
+	}
+	if t == tEmpty {
+		rb.registry.RegisterTypeDecoder(t, dec)
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		rb.registry.RegisterInterfaceDecoder(t, dec)
+	default:
+		rb.registry.RegisterTypeDecoder(t, dec)
+	}
+	return rb
+}
+
+// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
+// kind.
+//
+// Deprecated: Use Registry.RegisterKindEncoder instead.
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterKindEncoder(kind, enc)
+	return rb
+}
+
+// RegisterDefaultDecoder will register the provided ValueDecoder to the
+// provided kind.
+//
+// Deprecated: Use Registry.RegisterKindDecoder instead.
+func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterKindDecoder(kind, dec)
+	return rb
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
+// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
+// to decode to bson.Raw, use the following code:
+//
+//	rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
+//
+// Deprecated: Use Registry.RegisterTypeMapEntry instead.
+func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
+	rb.registry.RegisterTypeMapEntry(bt, rt)
+	return rb
+}
+
+// Build creates a Registry from the current state of this RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func (rb *RegistryBuilder) Build() *Registry {
+	r := &Registry{
+		interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
+		interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
+		typeEncoders:      rb.registry.typeEncoders.Clone(),
+		typeDecoders:      rb.registry.typeDecoders.Clone(),
+		kindEncoders:      rb.registry.kindEncoders.Clone(),
+		kindDecoders:      rb.registry.kindDecoders.Clone(),
+	}
+	rb.registry.typeMap.Range(func(k, v interface{}) bool {
+		if k != nil && v != nil {
+			r.typeMap.Store(k, v)
+		}
+		return true
+	})
+	return r
+}
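+
+// A typical builder flow, kept for reference while the builder API is
+// deprecated; t and myCodec stand in for caller-supplied values:
+//
+//	reg := bsoncodec.NewRegistryBuilder().
+//		RegisterTypeEncoder(t, myCodec).
+//		RegisterTypeDecoder(t, myCodec).
+//		Build()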
+
+// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
+// type passed around, and Encoders and Decoders are constructed from it.
+type Registry struct {
+	interfaceEncoders []interfaceValueEncoder
+	interfaceDecoders []interfaceValueDecoder
+	typeEncoders      *typeEncoderCache
+	typeDecoders      *typeDecoderCache
+	kindEncoders      *kindEncoderCache
+	kindDecoders      *kindDecoderCache
+	typeMap           sync.Map // map[bsontype.Type]reflect.Type
+}
+
+// NewRegistry creates a new empty Registry.
+func NewRegistry() *Registry {
+	return &Registry{
+		typeEncoders: new(typeEncoderCache),
+		typeDecoders: new(typeDecoderCache),
+		kindEncoders: new(kindEncoderCache),
+		kindDecoders: new(kindDecoderCache),
+	}
+}
+
+// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
+//
+// The type will be used as provided, so an encoder can be registered for a type and a different
+// encoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is
+// that interface. It will not be called when marshaling a non-interface type that implements the
+// interface. To get the latter behavior, call RegisterHookEncoder instead.
+//
+// RegisterTypeEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
+	r.typeEncoders.Store(valueType, enc)
+}
+
+// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
+//
+// The type will be used as provided, so a decoder can be registered for a type and a different
+// decoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that
+// is that interface. It will not be called when unmarshaling into a non-interface type that
+// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
+//
+// RegisterTypeDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
+	r.typeDecoders.Store(valueType, dec)
+}
+
+// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
+//
+// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+//	type MyInt int32
+//
+// To define an encoder for MyInt and int32, use RegisterKindEncoder like
+//
+//	reg.RegisterKindEncoder(reflect.Int32, myEncoder)
+//
+// RegisterKindEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
+	r.kindEncoders.Store(kind, enc)
+}
+
+// RegisterKindDecoder registers the provided ValueDecoder for the provided kind.
+//
+// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+//	type MyInt int32
+//
+// To define a decoder for MyInt and int32, use RegisterKindDecoder like
+//
+//	reg.RegisterKindDecoder(reflect.Int32, myDecoder)
+//
+// RegisterKindDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
+	r.kindDecoders.Store(kind, dec)
+}
+
+// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
+// be called when marshaling a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface
+// (i.e. iface.Kind() != reflect.Interface), this method will panic.
+//
+// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
+	if iface.Kind() != reflect.Interface {
+		panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", iface, iface.Kind())
+		panic(panicStr)
+	}
+
+	for idx, encoder := range r.interfaceEncoders {
+		if encoder.i == iface {
+			r.interfaceEncoders[idx].ve = enc
+			return
+		}
+	}
+
+	r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
+}
+
+// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will
+// be called when unmarshaling into a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
+// this method will panic.
+//
+// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
+	if iface.Kind() != reflect.Interface {
+		panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", iface, iface.Kind())
+		panic(panicStr)
+	}
+
+	for idx, decoder := range r.interfaceDecoders {
+		if decoder.i == iface {
+			r.interfaceDecoders[idx].vd = dec
+			return
+		}
+	}
+
+	r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
+// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
+// to decode to bson.Raw, use the following code:
+//
+//	reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
+func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) {
+	r.typeMap.Store(bt, rt)
+}
+
+// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup
+// order:
+//
+// 1. An encoder registered for the exact type. If the given type is an interface, an encoder
+// registered using RegisterTypeEncoder for that interface will be selected.
+//
+// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type
+// or by a pointer to the type.
+//
+// 3. An encoder registered using RegisterKindEncoder for the kind of value.
+//
+// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for
+// concurrent use by multiple goroutines after all codecs and encoders are registered.
+func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) {
+	if valueType == nil {
+		return nil, ErrNoEncoder{Type: valueType}
+	}
+	enc, found := r.lookupTypeEncoder(valueType)
+	if found {
+		if enc == nil {
+			return nil, ErrNoEncoder{Type: valueType}
+		}
+		return enc, nil
+	}
+
+	enc, found = r.lookupInterfaceEncoder(valueType, true)
+	if found {
+		return r.typeEncoders.LoadOrStore(valueType, enc), nil
+	}
+
+	if v, ok := r.kindEncoders.Load(valueType.Kind()); ok {
+		return r.storeTypeEncoder(valueType, v), nil
+	}
+	return nil, ErrNoEncoder{Type: valueType}
+}
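+
+// Usage sketch of the lookup order above:
+//
+//	enc, err := reg.LookupEncoder(reflect.TypeOf(time.Time{}))
+//	if err != nil {
+//		// no type, interface, or kind encoder matched; err is ErrNoEncoder
+//	}
+//	_ = enc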
+
+func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder {
+	return r.typeEncoders.LoadOrStore(rt, enc)
+}
+
+func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) {
+	return r.typeEncoders.Load(rt)
+}
+
+func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) {
+	if valueType == nil {
+		return nil, false
+	}
+	for _, ienc := range r.interfaceEncoders {
+		if valueType.Implements(ienc.i) {
+			return ienc.ve, true
+		}
+		if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) {
+			// if *t implements an interface, this will catch if t implements an interface further
+			// ahead in interfaceEncoders
+			defaultEnc, found := r.lookupInterfaceEncoder(valueType, false)
+			if !found {
+				defaultEnc, _ = r.kindEncoders.Load(valueType.Kind())
+			}
+			return newCondAddrEncoder(ienc.ve, defaultEnc), true
+		}
+	}
+	return nil, false
+}
+
+// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup
+// order:
+//
+// 1. A decoder registered for the exact type. If the given type is an interface, a decoder
+// registered using RegisterTypeDecoder for that interface will be selected.
+//
+// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by
+// a pointer to the type.
+//
+// 3. A decoder registered using RegisterKindDecoder for the kind of value.
+//
+// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for
+// concurrent use by multiple goroutines after all codecs and decoders are registered.
+func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) {
+	if valueType == nil {
+		return nil, ErrNilType
+	}
+	dec, found := r.lookupTypeDecoder(valueType)
+	if found {
+		if dec == nil {
+			return nil, ErrNoDecoder{Type: valueType}
+		}
+		return dec, nil
+	}
+
+	dec, found = r.lookupInterfaceDecoder(valueType, true)
+	if found {
+		return r.storeTypeDecoder(valueType, dec), nil
+	}
+
+	if v, ok := r.kindDecoders.Load(valueType.Kind()); ok {
+		return r.storeTypeDecoder(valueType, v), nil
+	}
+	return nil, ErrNoDecoder{Type: valueType}
+}
+
+func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) {
+	return r.typeDecoders.Load(valueType)
+}
+
+func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder {
+	return r.typeDecoders.LoadOrStore(typ, dec)
+}
+
+func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) {
+	for _, idec := range r.interfaceDecoders {
+		if valueType.Implements(idec.i) {
+			return idec.vd, true
+		}
+		if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) {
+			// if *t implements an interface, this will catch if t implements an interface further
+			// ahead in interfaceDecoders
+			defaultDec, found := r.lookupInterfaceDecoder(valueType, false)
+			if !found {
+				defaultDec, _ = r.kindDecoders.Load(valueType.Kind())
+			}
+			return newCondAddrDecoder(idec.vd, defaultDec), true
+		}
+	}
+	return nil, false
+}
+
+// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON
+// type. If no type is found, ErrNoTypeMapEntry is returned.
+//
+// LookupTypeMapEntry should not be called concurrently with any other Registry method.
+func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
+	v, ok := r.typeMap.Load(bt)
+	if v == nil || !ok {
+		return nil, ErrNoTypeMapEntry{Type: bt}
+	}
+	return v.(reflect.Type), nil
+}
+
+type interfaceValueEncoder struct {
+	i  reflect.Type
+	ve ValueEncoder
+}
+
+type interfaceValueDecoder struct {
+	i  reflect.Type
+	vd ValueDecoder
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..14c9fd25646ed9d1d3ddda85c6914b686dc690b3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
@@ -0,0 +1,214 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+var defaultSliceCodec = NewSliceCodec()
+
+// SliceCodec is the Codec used for slice values.
+//
+// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To
+// configure the slice encode and decode behavior, use the configuration methods
+// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and
+// decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to marshal nil Go slices as empty
+// BSON arrays, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//	    NilSliceAsEmpty: true,
+//	})
+//
+// See the deprecation notice for each field in SliceCodec for the corresponding
+// settings.
+type SliceCodec struct {
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
+	// BSON null.
+	//
+	// Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
+	EncodeNilAsEmpty bool
+}
+
+// NewSliceCodec returns a SliceCodec with options opts.
+//
+// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See
+// [SliceCodec] for more details.
+func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
+	sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
+
+	codec := SliceCodec{}
+	if sliceOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for slice types.
+func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
+		return vw.WriteNull()
+	}
+
+	// If we have a []byte we want to treat it as a binary instead of as an array.
+	if val.Type().Elem() == tByte {
+		byteSlice := make([]byte, val.Len())
+		reflect.Copy(reflect.ValueOf(byteSlice), val)
+		return vw.WriteBinary(byteSlice)
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type() == tD || val.Type().ConvertibleTo(tD) {
+		d := val.Convert(tD).Interface().(primitive.D)
+
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for _, e := range d {
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
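+
+// Illustrative results of the special cases above:
+//
+//	[]byte{0x68, 0x69}           -> BSON binary (subtype 0x00), not an array
+//	bson.D{{Key: "k", Value: 1}} -> embedded document {"k": 1}
+//	[]int{1, 2}                  -> array [1, 2]
+//	[]int(nil)                   -> null, unless EncodeNilAsEmpty or
+//	                                BSONOptions.NilSliceAsEmpty is set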
+
+// DecodeValue is the ValueDecoder for slice types.
+func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Slice {
+		return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Array:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Undefined:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadUndefined()
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	case bsontype.Binary:
+		if val.Type().Elem() != tByte {
+			return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType)
+		}
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return err
+		}
+		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+			return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
+		}
+
+		if val.IsNil() {
+			val.Set(reflect.MakeSlice(val.Type(), 0, len(data)))
+		}
+		val.SetLen(0)
+		val.Set(reflect.AppendSlice(val, reflect.ValueOf(data)))
+		return nil
+	case bsontype.String:
+		if sliceType := val.Type().Elem(); sliceType != tByte {
+			return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType)
+		}
+		str, err := vr.ReadString()
+		if err != nil {
+			return err
+		}
+		byteStr := []byte(str)
+
+		if val.IsNil() {
+			val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr)))
+		}
+		val.SetLen(0)
+		val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr)))
+		return nil
+	default:
+		return fmt.Errorf("cannot decode %v into a slice", vrType)
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		dc.Ancestor = val.Type()
+		elemsFunc = defaultValueDecoders.decodeD
+	default:
+		elemsFunc = defaultValueDecoders.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
+	}
+
+	val.SetLen(0)
+	val.Set(reflect.Append(val, elems...))
+
+	return nil
+}
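+
+// Decoding sketch, assuming raw holds a marshaled document {"data": "hi"}:
+//
+//	var out struct {
+//		Data []byte `bson:"data"`
+//	}
+//	err := bson.Unmarshal(raw, &out)
+//	// out.Data == []byte("hi"), via the bsontype.String case above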
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..a8f885a854f4d43b542375118e3befb5774ef4ab
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
@@ -0,0 +1,140 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// StringCodec is the Codec used for string values.
+//
+// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To
+// override the default string encode and decode behavior, create a new registry
+// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new
+// encoder and decoder for strings.
+//
+// For example,
+//
+//	reg := bson.NewRegistry()
+//	reg.RegisterKindEncoder(reflect.String, myStringEncoder)
+//	reg.RegisterKindDecoder(reflect.String, myStringDecoder)
+type StringCodec struct {
+	// DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation.
+	// If false, a string made from the raw object ID bytes will be used. Defaults to true.
+	//
+	// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
+	DecodeObjectIDAsHex bool
+}
+
+var (
+	defaultStringCodec = NewStringCodec()
+
+	// Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be
+	// used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+	// collection.
+	_ typeDecoder = defaultStringCodec
+)
+
+// NewStringCodec returns a StringCodec with options opts.
+//
+// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See
+// [StringCodec] for more details.
+func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec {
+	stringOpt := bsonoptions.MergeStringCodecOptions(opts...)
+	return &StringCodec{*stringOpt.DecodeObjectIDAsHex}
+}
+
+// EncodeValue is the ValueEncoder for string types.
+func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.String {
+		return ValueEncoderError{
+			Name:     "StringEncodeValue",
+			Kinds:    []reflect.Kind{reflect.String},
+			Received: val,
+		}
+	}
+
+	return vw.WriteString(val.String())
+}
+
+func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t.Kind() != reflect.String {
+		return emptyValue, ValueDecoderError{
+			Name:     "StringDecodeValue",
+			Kinds:    []reflect.Kind{reflect.String},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var str string
+	var err error
+	switch vr.Type() {
+	case bsontype.String:
+		str, err = vr.ReadString()
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.ObjectID:
+		oid, err := vr.ReadObjectID()
+		if err != nil {
+			return emptyValue, err
+		}
+		if sc.DecodeObjectIDAsHex {
+			str = oid.Hex()
+		} else {
+			// TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string.
+			byteArray := [12]byte(oid)
+			str = string(byteArray[:])
+		}
+	case bsontype.Symbol:
+		str, err = vr.ReadSymbol()
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Binary:
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return emptyValue, err
+		}
+		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+			return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"}
+		}
+		str = string(data)
+	case bsontype.Null:
+		if err = vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err = vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type())
+	}
+
+	return reflect.ValueOf(str), nil
+}
+
+// DecodeValue is the ValueDecoder for string types.
+func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.String {
+		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+	}
+
+	elem, err := sc.decodeType(dctx, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.SetString(elem.String())
+	return nil
+}
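+
+// With the default DecodeObjectIDAsHex (true), an ObjectID decodes into a
+// string field as its 24-character hex form:
+//
+//	var out struct {
+//		ID string `bson:"_id"`
+//	}
+//	// {"_id": ObjectId("65b0...")} -> out.ID == "65b0..." after Unmarshal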
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8d9690c139eb11f65cf6603fca6a1a380ea8e39
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
@@ -0,0 +1,736 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type.
+type DecodeError struct {
+	keys    []string
+	wrapped error
+}
+
+// Unwrap returns the underlying error
+func (de *DecodeError) Unwrap() error {
+	return de.wrapped
+}
+
+// Error implements the error interface.
+func (de *DecodeError) Error() string {
+	// The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the
+	// stack of BSON keys, so we call de.Keys(), which reverses them.
+	keyPath := strings.Join(de.Keys(), ".")
+	return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped)
+}
+
+// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down
+// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be
+// a string, the keys slice will be ["a", "b", "c"].
+func (de *DecodeError) Keys() []string {
+	reversedKeys := make([]string, 0, len(de.keys))
+	for idx := len(de.keys) - 1; idx >= 0; idx-- {
+		reversedKeys = append(reversedKeys, de.keys[idx])
+	}
+
+	return reversedKeys
+}
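+
+// Retrieval sketch: callers can unwrap a DecodeError to recover the key path.
+//
+//	var de *bsoncodec.DecodeError
+//	if errors.As(err, &de) {
+//		fmt.Println(strings.Join(de.Keys(), ".")) // e.g. "a.b.c"
+//	}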
+
+// Zeroer allows custom struct types to report whether they are the zero
+// value. Struct types that don't implement Zeroer, or whose IsZero method
+// returns false, are considered non-zero.
+type Zeroer interface {
+	IsZero() bool
+}
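+
+// A hypothetical implementer: with IsZero defined this way, a Window field
+// tagged with omitempty is omitted whenever both bounds are zero.
+//
+//	type Window struct{ Min, Max int }
+//
+//	func (w Window) IsZero() bool { return w.Min == 0 && w.Max == 0 }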
+
+// StructCodec is the Codec used for struct values.
+//
+// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0.
+// To configure the struct encode and decode behavior, use the configuration
+// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode
+// and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to omit zero-value structs when
+// using the "omitempty" struct tag, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//	    OmitZeroStruct: true,
+//	})
+//
+// See the deprecation notice for each field in StructCodec for the corresponding
+// settings.
+type StructCodec struct {
+	cache  sync.Map // map[reflect.Type]*structDescription
+	parser StructTagParser
+
+	// DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the
+	// destination value passed to Decode before unmarshaling BSON documents into them.
+	//
+	// Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead.
+	DecodeZeroStruct bool
+
+	// DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the
+	// destination value passed to Decode before unmarshaling BSON documents into them.
+	//
+	// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
+	DecodeDeepZeroInline bool
+
+	// EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g.
+	// MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag
+	// option is set.
+	//
+	// Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead.
+	EncodeOmitDefaultStruct bool
+
+	// AllowUnexportedFields allows encoding and decoding values from un-exported struct fields.
+	//
+	// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
+	// supported in Go Driver 2.0.
+	AllowUnexportedFields bool
+
+	// OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is
+	// a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The
+	// default value is true.
+	//
+	// Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or
+	// options.BSONOptions.ErrorOnInlineDuplicates instead.
+	OverwriteDuplicatedInlinedFields bool
+}
+
+var _ ValueEncoder = &StructCodec{}
+var _ ValueDecoder = &StructCodec{}
+
+// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
+//
+// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See
+// [StructCodec] for more details.
+func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) {
+	if p == nil {
+		return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
+	}
+
+	structOpt := bsonoptions.MergeStructCodecOptions(opts...)
+
+	codec := &StructCodec{
+		parser: p,
+	}
+
+	if structOpt.DecodeZeroStruct != nil {
+		codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct
+	}
+	if structOpt.DecodeDeepZeroInline != nil {
+		codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline
+	}
+	if structOpt.EncodeOmitDefaultStruct != nil {
+		codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct
+	}
+	if structOpt.OverwriteDuplicatedInlinedFields != nil {
+		codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields
+	}
+	if structOpt.AllowUnexportedFields != nil {
+		codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields
+	}
+
+	return codec, nil
+}
+
+// EncodeValue handles encoding generic struct types.
+func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Struct {
+		return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+	}
+
+	sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates)
+	if err != nil {
+		return err
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+	var rv reflect.Value
+	for _, desc := range sd.fl {
+		if desc.inline == nil {
+			rv = val.Field(desc.idx)
+		} else {
+			rv, err = fieldByIndexErr(val, desc.inline)
+			if err != nil {
+				continue
+			}
+		}
+
+		desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv)
+
+		if err != nil && !errors.Is(err, errInvalidValue) {
+			return err
+		}
+
+		if errors.Is(err, errInvalidValue) {
+			if desc.omitEmpty {
+				continue
+			}
+			vw2, err := dw.WriteDocumentElement(desc.name)
+			if err != nil {
+				return err
+			}
+			err = vw2.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if desc.encoder == nil {
+			return ErrNoEncoder{Type: rv.Type()}
+		}
+
+		encoder := desc.encoder
+
+		var empty bool
+		if cz, ok := encoder.(CodecZeroer); ok {
+			empty = cz.IsTypeZero(rv.Interface())
+		} else if rv.Kind() == reflect.Interface {
+			// isEmpty will not treat an interface rv as an interface, so we need to check for the
+			// nil interface separately.
+			empty = rv.IsNil()
+		} else {
+			empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct)
+		}
+		if desc.omitEmpty && empty {
+			continue
+		}
+
+		vw2, err := dw.WriteDocumentElement(desc.name)
+		if err != nil {
+			return err
+		}
+
+		ectx := EncodeContext{
+			Registry:                ec.Registry,
+			MinSize:                 desc.minSize || ec.MinSize,
+			errorOnInlineDuplicates: ec.errorOnInlineDuplicates,
+			stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt,
+			nilMapAsEmpty:           ec.nilMapAsEmpty,
+			nilSliceAsEmpty:         ec.nilSliceAsEmpty,
+			nilByteSliceAsEmpty:     ec.nilByteSliceAsEmpty,
+			omitZeroStruct:          ec.omitZeroStruct,
+			useJSONStructTags:       ec.useJSONStructTags,
+		}
+		err = encoder.EncodeValue(ectx, vw2, rv)
+		if err != nil {
+			return err
+		}
+	}
+
+	if sd.inlineMap >= 0 {
+		rv := val.Field(sd.inlineMap)
+		collisionFn := func(key string) bool {
+			_, exists := sd.fm[key]
+			return exists
+		}
+
+		return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn)
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+func newDecodeError(key string, original error) error {
+	var de *DecodeError
+	if !errors.As(original, &de) {
+		return &DecodeError{
+			keys:    []string{key},
+			wrapped: original,
+		}
+	}
+
+	de.keys = append(de.keys, key)
+	return de
+}
+
+// DecodeValue implements the Codec interface.
+// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr.
+// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared.
+func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Struct {
+		return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	case bsontype.Null:
+		if err := vr.ReadNull(); err != nil {
+			return err
+		}
+
+		val.Set(reflect.Zero(val.Type()))
+		return nil
+	case bsontype.Undefined:
+		if err := vr.ReadUndefined(); err != nil {
+			return err
+		}
+
+		val.Set(reflect.Zero(val.Type()))
+		return nil
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
+	}
+
+	sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false)
+	if err != nil {
+		return err
+	}
+
+	if sc.DecodeZeroStruct || dc.zeroStructs {
+		val.Set(reflect.Zero(val.Type()))
+	}
+	if sc.DecodeDeepZeroInline && sd.inline {
+		val.Set(deepZero(val.Type()))
+	}
+
+	var decoder ValueDecoder
+	var inlineMap reflect.Value
+	if sd.inlineMap >= 0 {
+		inlineMap = val.Field(sd.inlineMap)
+		decoder, err = dc.LookupDecoder(inlineMap.Type().Elem())
+		if err != nil {
+			return err
+		}
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	for {
+		name, vr, err := dr.ReadElement()
+		if errors.Is(err, bsonrw.ErrEOD) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		fd, exists := sd.fm[name]
+		if !exists {
+			// If the original name isn't found in the struct description, try again with
+			// the name lowercased. This can match when no BSON tag is specified, because
+			// describeStruct lowercases all field names by default.
+			fd, exists = sd.fm[strings.ToLower(name)]
+		}
+
+		if !exists {
+			if sd.inlineMap < 0 {
+				// Skip unknown fields, matching the default behavior of encoding/json
+				// (which errors on unknown fields only when a flag is set).
+				err = vr.Skip()
+				if err != nil {
+					return err
+				}
+				continue
+			}
+
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+
+			elem := reflect.New(inlineMap.Type().Elem()).Elem()
+			dc.Ancestor = inlineMap.Type()
+			err = decoder.DecodeValue(dc, vr, elem)
+			if err != nil {
+				return err
+			}
+			inlineMap.SetMapIndex(reflect.ValueOf(name), elem)
+			continue
+		}
+
+		var field reflect.Value
+		if fd.inline == nil {
+			field = val.Field(fd.idx)
+		} else {
+			field, err = getInlineField(val, fd.inline)
+			if err != nil {
+				return err
+			}
+		}
+
+		if !field.CanSet() { // Being settable implies being addressable, so CanSet is the stricter check.
+			innerErr := fmt.Errorf("field %v is not settable", field)
+			return newDecodeError(fd.name, innerErr)
+		}
+		if field.Kind() == reflect.Ptr && field.IsNil() {
+			field.Set(reflect.New(field.Type().Elem()))
+		}
+		field = field.Addr()
+
+		dctx := DecodeContext{
+			Registry:            dc.Registry,
+			Truncate:            fd.truncate || dc.Truncate,
+			defaultDocumentType: dc.defaultDocumentType,
+			binaryAsSlice:       dc.binaryAsSlice,
+			useJSONStructTags:   dc.useJSONStructTags,
+			useLocalTimeZone:    dc.useLocalTimeZone,
+			zeroMaps:            dc.zeroMaps,
+			zeroStructs:         dc.zeroStructs,
+		}
+
+		if fd.decoder == nil {
+			return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()})
+		}
+
+		err = fd.decoder.DecodeValue(dctx, vr, field.Elem())
+		if err != nil {
+			return newDecodeError(fd.name, err)
+		}
+	}
+
+	return nil
+}
+
+func isEmpty(v reflect.Value, omitZeroStruct bool) bool {
+	kind := v.Kind()
+	if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) {
+		return v.Interface().(Zeroer).IsZero()
+	}
+	switch kind {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Struct:
+		if !omitZeroStruct {
+			return false
+		}
+		vt := v.Type()
+		if vt == tTime {
+			return v.Interface().(time.Time).IsZero()
+		}
+		numField := vt.NumField()
+		for i := 0; i < numField; i++ {
+			ff := vt.Field(i)
+			if ff.PkgPath != "" && !ff.Anonymous {
+				continue // Private field
+			}
+			if !isEmpty(v.Field(i), omitZeroStruct) {
+				return false
+			}
+		}
+		return true
+	}
+	return !v.IsValid() || v.IsZero()
+}
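+
+// For example, with omitZeroStruct enabled a struct value such as
+//
+//	type point struct{ X, Y int }
+//
+// is empty only when every exported field is itself empty: point{} is empty,
+// point{X: 1} is not. time.Time is special-cased and is empty exactly when
+// IsZero reports true. (Illustrative type; not part of this file.)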
+
+type structDescription struct {
+	fm        map[string]fieldDescription
+	fl        []fieldDescription
+	inlineMap int
+	inline    bool
+}
+
+type fieldDescription struct {
+	name      string // BSON key name
+	fieldName string // struct field name
+	idx       int
+	omitEmpty bool
+	minSize   bool
+	truncate  bool
+	inline    []int
+	encoder   ValueEncoder
+	decoder   ValueDecoder
+}
+
+type byIndex []fieldDescription
+
+func (bi byIndex) Len() int { return len(bi) }
+
+func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] }
+
+func (bi byIndex) Less(i, j int) bool {
+	// If a field is inlined, its index in the top level struct is stored at inline[0]
+	iIdx, jIdx := bi[i].idx, bi[j].idx
+	if len(bi[i].inline) > 0 {
+		iIdx = bi[i].inline[0]
+	}
+	if len(bi[j].inline) > 0 {
+		jIdx = bi[j].inline[0]
+	}
+	if iIdx != jIdx {
+		return iIdx < jIdx
+	}
+	for k, biik := range bi[i].inline {
+		if k >= len(bi[j].inline) {
+			return false
+		}
+		if biik != bi[j].inline[k] {
+			return biik < bi[j].inline[k]
+		}
+	}
+	return len(bi[i].inline) < len(bi[j].inline)
+}
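+
+// For example, a top-level field at index 2 sorts before an inlined field
+// reached through the index path [3 0]; two inlined fields are compared
+// element by element along their paths, with the shorter path winning a tie.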
+
+func (sc *StructCodec) describeStruct(
+	r *Registry,
+	t reflect.Type,
+	useJSONStructTags bool,
+	errorOnDuplicates bool,
+) (*structDescription, error) {
+	// We need to analyze the struct, including getting the tags, collecting
+	// information about inlining, and create a map of the field name to the field.
+	if v, ok := sc.cache.Load(t); ok {
+		return v.(*structDescription), nil
+	}
+	// TODO(charlie): Only describe the struct once when called
+	// concurrently with the same type.
+	ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates)
+	if err != nil {
+		return nil, err
+	}
+	if v, loaded := sc.cache.LoadOrStore(t, ds); loaded {
+		ds = v.(*structDescription)
+	}
+	return ds, nil
+}
+
+func (sc *StructCodec) describeStructSlow(
+	r *Registry,
+	t reflect.Type,
+	useJSONStructTags bool,
+	errorOnDuplicates bool,
+) (*structDescription, error) {
+	numFields := t.NumField()
+	sd := &structDescription{
+		fm:        make(map[string]fieldDescription, numFields),
+		fl:        make([]fieldDescription, 0, numFields),
+		inlineMap: -1,
+	}
+
+	var fields []fieldDescription
+	for i := 0; i < numFields; i++ {
+		sf := t.Field(i)
+		if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
+			// field is private or unexported fields aren't allowed, ignore
+			continue
+		}
+
+		sfType := sf.Type
+		encoder, err := r.LookupEncoder(sfType)
+		if err != nil {
+			encoder = nil
+		}
+		decoder, err := r.LookupDecoder(sfType)
+		if err != nil {
+			decoder = nil
+		}
+
+		description := fieldDescription{
+			fieldName: sf.Name,
+			idx:       i,
+			encoder:   encoder,
+			decoder:   decoder,
+		}
+
+		var stags StructTags
+		// If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser
+		// instead of the parser defined on the codec.
+		if useJSONStructTags {
+			stags, err = JSONFallbackStructTagParser.ParseStructTags(sf)
+		} else {
+			stags, err = sc.parser.ParseStructTags(sf)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if stags.Skip {
+			continue
+		}
+		description.name = stags.Name
+		description.omitEmpty = stags.OmitEmpty
+		description.minSize = stags.MinSize
+		description.truncate = stags.Truncate
+
+		if stags.Inline {
+			sd.inline = true
+			switch sfType.Kind() {
+			case reflect.Map:
+				if sd.inlineMap >= 0 {
+					return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
+				}
+				if sfType.Key() != tString {
+					return nil, errors.New("(struct " + t.String() + ") inline map must have string keys")
+				}
+				sd.inlineMap = description.idx
+			case reflect.Ptr:
+				sfType = sfType.Elem()
+				if sfType.Kind() != reflect.Struct {
+					return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+				}
+				fallthrough
+			case reflect.Struct:
+				inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates)
+				if err != nil {
+					return nil, err
+				}
+				for _, fd := range inlinesf.fl {
+					if fd.inline == nil {
+						fd.inline = []int{i, fd.idx}
+					} else {
+						fd.inline = append([]int{i}, fd.inline...)
+					}
+					fields = append(fields, fd)
+				}
+			default:
+				return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+			}
+			continue
+		}
+		fields = append(fields, description)
+	}
+
+	// Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name
+	sort.Slice(fields, func(i, j int) bool {
+		x := fields
+		// Sort fields by name, breaking ties with depth, then with index sequence.
+		if x[i].name != x[j].name {
+			return x[i].name < x[j].name
+		}
+		if len(x[i].inline) != len(x[j].inline) {
+			return len(x[i].inline) < len(x[j].inline)
+		}
+		return byIndex(x).Less(i, j)
+	})
+
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			sd.fl = append(sd.fl, fi)
+			sd.fm[name] = fi
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates {
+			return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name)
+		}
+		sd.fl = append(sd.fl, dominant)
+		sd.fm[name] = dominant
+	}
+
+	sort.Sort(byIndex(sd.fl))
+
+	return sd, nil
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's inlining rules. If there are multiple top-level
+// fields, the boolean will be false: This condition is an error in Go
+// and we skip all the fields.
+func dominantField(fields []fieldDescription) (fieldDescription, bool) {
+	// The fields are sorted in increasing index-length order, so the first field
+	// is the dominant one. We need only check for the error case: two fields at
+	// the same depth.
+	if len(fields) > 1 &&
+		len(fields[0].inline) == len(fields[1].inline) {
+		return fieldDescription{}, false
+	}
+	return fields[0], true
+}
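+
+// For example, if an outer struct and a struct it inlines both map a field to
+// the key "name", the outer field has the shorter index path and dominates;
+// two fields mapping to "name" at the same depth cannot be resolved and the
+// boolean result is false.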
+
+func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) {
+	defer func() {
+		if recovered := recover(); recovered != nil {
+			switch r := recovered.(type) {
+			case string:
+				err = fmt.Errorf("%s", r)
+			case error:
+				err = r
+			}
+		}
+	}()
+
+	result = v.FieldByIndex(index)
+	return
+}
+
+func getInlineField(val reflect.Value, index []int) (reflect.Value, error) {
+	field, err := fieldByIndexErr(val, index)
+	if err == nil {
+		return field, nil
+	}
+
+	// If the field's parent is a nil pointer, allocate it first and retry.
+	inlineParent := index[:len(index)-1]
+	var fParent reflect.Value
+	if fParent, err = fieldByIndexErr(val, inlineParent); err != nil {
+		fParent, err = getInlineField(val, inlineParent)
+		if err != nil {
+			return fParent, err
+		}
+	}
+	fParent.Set(reflect.New(fParent.Type().Elem()))
+
+	return fieldByIndexErr(val, index)
+}
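+
+// For example (illustrative types, not part of this file), decoding into
+//
+//	type Outer struct {
+//	    Inner *Inner `bson:",inline"`
+//	}
+//
+// with Inner == nil fails the first fieldByIndexErr lookup, so getInlineField
+// allocates the nil parent pointer and retries before the target field is
+// addressed.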
+
+// deepZero returns a recursive zero value for st.
+func deepZero(st reflect.Type) (result reflect.Value) {
+	if st.Kind() == reflect.Struct {
+		numField := st.NumField()
+		for i := 0; i < numField; i++ {
+			if result == emptyValue {
+				result = reflect.Indirect(reflect.New(st))
+			}
+			f := result.Field(i)
+			if f.CanInterface() {
+				if f.Type().Kind() == reflect.Struct {
+					result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem())))
+				}
+			}
+		}
+	}
+	return result
+}
+
+// recursivePointerTo calls reflect.New(v.Type()) recursively, allocating pointers for the struct fields inside v as well.
+func recursivePointerTo(v reflect.Value) reflect.Value {
+	v = reflect.Indirect(v)
+	result := reflect.New(v.Type())
+	if v.Kind() == reflect.Struct {
+		for i := 0; i < v.NumField(); i++ {
+			if f := v.Field(i); f.Kind() == reflect.Ptr {
+				if f.Elem().Kind() == reflect.Struct {
+					result.Elem().Field(i).Set(recursivePointerTo(f))
+				}
+			}
+		}
+	}
+
+	return result
+}
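+
+// As a rough end-to-end sketch (assuming the driver's default registry), a
+// struct such as
+//
+//	type User struct {
+//	    ID    primitive.ObjectID `bson:"_id,omitempty"`
+//	    Name  string             `bson:"name"`
+//	    Email string             `bson:"email,omitempty"`
+//	}
+//
+// is encoded by StructCodec.EncodeValue as a document with keys "_id", "name",
+// and "email", where the omitempty fields are skipped whenever isEmpty reports
+// them empty.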
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..18d85bfb031297d65c4cfa0c5bdb416abcb0bed6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"strings"
+)
+
+// StructTagParser returns the struct tags for a given struct field.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParser interface {
+	ParseStructTags(reflect.StructField) (StructTags, error)
+}
+
+// StructTagParserFunc is an adapter that allows a generic function to be used
+// as a StructTagParser.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParserFunc func(reflect.StructField) (StructTags, error)
+
+// ParseStructTags implements the StructTagParser interface.
+func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
+	return stpf(sf)
+}
+
+// StructTags represents the struct tag fields that the StructCodec uses during
+// the encoding and decoding process.
+//
+// In the case of a struct, the lowercased field name is used as the key for each exported
+// field but this behavior may be changed using a struct tag. The tag may also contain flags to
+// adjust the marshalling behavior for the field.
+//
+// The properties are defined below:
+//
+//	OmitEmpty  Only include the field if it's not set to the zero value for the type or to
+//	           empty slices or maps.
+//
+//	MinSize    Marshal an integer of a type larger than 32 bits as an int32 when that is
+//	           feasible while preserving the numeric value.
+//
+//	Truncate   When unmarshaling a BSON double, it is permitted to lose precision to fit within
+//	           a float32.
+//
+//	Inline     Inline the field, which must be a struct or a map, causing all of its fields
+//	           or keys to be processed as if they were part of the outer struct. For maps,
+//	           keys must not conflict with the bson keys of other struct fields.
+//
+//	Skip       This struct field should be skipped. This is usually denoted by parsing a "-"
+//	           for the name.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTags struct {
+	Name      string
+	OmitEmpty bool
+	MinSize   bool
+	Truncate  bool
+	Inline    bool
+	Skip      bool
+}
+
+// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
+// It will handle the bson struct tag. See the documentation for StructTags to see
+// what each of the returned fields means.
+//
+// If there is no name in the struct tag fields, the struct field name is lowercased.
+// The tag formats accepted are:
+//
+//	"[<key>][,<flag1>[,<flag2>]]"
+//
+//	`(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// An example:
+//
+//	type T struct {
+//	    A bool
+//	    B int    "myb"
+//	    C string "myc,omitempty"
+//	    D string `bson:",omitempty" json:"jsonkey"`
+//	    E int64  ",minsize"
+//	    F int64  "myf,omitempty,minsize"
+//	}
+//
+// A struct tag either consisting entirely of '-' or with a bson key with a
+// value consisting entirely of '-' will return a StructTags with Skip true and
+// the remaining fields will be their default values.
+//
+// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
+	key := strings.ToLower(sf.Name)
+	tag, ok := sf.Tag.Lookup("bson")
+	if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
+		tag = string(sf.Tag)
+	}
+	return parseTags(key, tag)
+}
+
+func parseTags(key string, tag string) (StructTags, error) {
+	var st StructTags
+	if tag == "-" {
+		st.Skip = true
+		return st, nil
+	}
+
+	for idx, str := range strings.Split(tag, ",") {
+		if idx == 0 && str != "" {
+			key = str
+		}
+		switch str {
+		case "omitempty":
+			st.OmitEmpty = true
+		case "minsize":
+			st.MinSize = true
+		case "truncate":
+			st.Truncate = true
+		case "inline":
+			st.Inline = true
+		}
+	}
+
+	st.Name = key
+
+	return st, nil
+}
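+
+// For example (illustrative calls to the unexported parseTags):
+//
+//	parseTags("foo", "bar,omitempty,minsize") // => StructTags{Name: "bar", OmitEmpty: true, MinSize: true}
+//	parseTags("foo", ",inline")               // => StructTags{Name: "foo", Inline: true}
+//	parseTags("foo", "-")                     // => StructTags{Skip: true}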
+
+// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser
+// but will also fallback to parsing the json tag instead on a field where the
+// bson tag isn't available.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and
+// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
+var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
+	key := strings.ToLower(sf.Name)
+	tag, ok := sf.Tag.Lookup("bson")
+	if !ok {
+		tag, ok = sf.Tag.Lookup("json")
+	}
+	if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
+		tag = string(sf.Tag)
+	}
+
+	return parseTags(key, tag)
+}
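+
+// For example, a field declared as
+//
+//	Email string `json:"email,omitempty"`
+//
+// has no bson tag, so JSONFallbackStructTagParser parses the json tag instead
+// and yields StructTags{Name: "email", OmitEmpty: true}.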
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..22fb762c415102cd8059ec781e6ea265a8c18940
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
@@ -0,0 +1,151 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+const (
+	timeFormatString = "2006-01-02T15:04:05.999Z07:00"
+)
+
+// TimeCodec is the Codec used for time.Time values.
+//
+// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0.
+// To configure the time.Time encode and decode behavior, use the configuration
+// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode
+// and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to decode time.Time values in the
+// local time zone, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//	    UseLocalTimeZone: true,
+//	})
+//
+// See the deprecation notice for each field in TimeCodec for the corresponding
+// settings.
+type TimeCodec struct {
+	// UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
+	//
+	// Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone
+	// instead.
+	UseLocalTimeZone bool
+}
+
+var (
+	defaultTimeCodec = NewTimeCodec()
+
+	// Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used
+	// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
+	_ typeDecoder = defaultTimeCodec
+)
+
+// NewTimeCodec returns a TimeCodec with options opts.
+//
+// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See
+// [TimeCodec] for more details.
+func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
+	timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)
+
+	codec := TimeCodec{}
+	if timeOpt.UseLocalTimeZone != nil {
+		codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone
+	}
+	return &codec
+}
+
+func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tTime {
+		return emptyValue, ValueDecoderError{
+			Name:     "TimeDecodeValue",
+			Types:    []reflect.Type{tTime},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var timeVal time.Time
+	switch vrType := vr.Type(); vrType {
+	case bsontype.DateTime:
+		dt, err := vr.ReadDateTime()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(dt/1000, dt%1000*1000000)
+	case bsontype.String:
+		// assume strings are in timeFormatString (RFC 3339 with millisecond precision)
+		timeStr, err := vr.ReadString()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal, err = time.Parse(timeFormatString, timeStr)
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(i64/1000, i64%1000*1000000)
+	case bsontype.Timestamp:
+		t, _, err := vr.ReadTimestamp()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(int64(t), 0)
+	case bsontype.Null:
+		if err := vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err := vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
+	}
+
+	if !tc.UseLocalTimeZone && !dc.useLocalTimeZone {
+		timeVal = timeVal.UTC()
+	}
+	return reflect.ValueOf(timeVal), nil
+}
+
+// DecodeValue is the ValueDecoderFunc for time.Time.
+func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tTime {
+		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+
+	elem, err := tc.decodeType(dc, vr, tTime)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// EncodeValue is the ValueEncoderFunc for time.Time.
+func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	dt := primitive.NewDateTimeFromTime(tt)
+	return vw.WriteDateTime(int64(dt))
+}
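+
+// As a sketch of the round trip: EncodeValue stores a time.Time as a BSON
+// datetime with millisecond precision, so a value such as
+//
+//	time.Date(2023, 5, 1, 12, 0, 0, 123456789, time.UTC)
+//
+// decodes back as 12:00:00.123 UTC; sub-millisecond precision is truncated by
+// primitive.NewDateTimeFromTime.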
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..6ade17b7d3fd6fdb4ab39fed8ce23d12cac82ee8
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
@@ -0,0 +1,58 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"net/url"
+	"reflect"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var tBool = reflect.TypeOf(false)
+var tFloat64 = reflect.TypeOf(float64(0))
+var tInt32 = reflect.TypeOf(int32(0))
+var tInt64 = reflect.TypeOf(int64(0))
+var tString = reflect.TypeOf("")
+var tTime = reflect.TypeOf(time.Time{})
+
+var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
+var tByteSlice = reflect.TypeOf([]byte(nil))
+var tByte = reflect.TypeOf(byte(0x00))
+var tURL = reflect.TypeOf(url.URL{})
+var tJSONNumber = reflect.TypeOf(json.Number(""))
+
+var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem()
+var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
+var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
+var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()
+var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem()
+
+var tBinary = reflect.TypeOf(primitive.Binary{})
+var tUndefined = reflect.TypeOf(primitive.Undefined{})
+var tOID = reflect.TypeOf(primitive.ObjectID{})
+var tDateTime = reflect.TypeOf(primitive.DateTime(0))
+var tNull = reflect.TypeOf(primitive.Null{})
+var tRegex = reflect.TypeOf(primitive.Regex{})
+var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
+var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
+var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
+var tSymbol = reflect.TypeOf(primitive.Symbol(""))
+var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
+var tDecimal = reflect.TypeOf(primitive.Decimal128{})
+var tMinKey = reflect.TypeOf(primitive.MinKey{})
+var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
+var tD = reflect.TypeOf(primitive.D{})
+var tA = reflect.TypeOf(primitive.A{})
+var tE = reflect.TypeOf(primitive.E{})
+
+var tCoreDocument = reflect.TypeOf(bsoncore.Document{})
+var tCoreArray = reflect.TypeOf(bsoncore.Array{})
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
new file mode 100644
index 0000000000000000000000000000000000000000..39b07135b18268d6f25d3a74ddaeebec9a4b7999
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
@@ -0,0 +1,202 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// UIntCodec is the Codec used for uint values.
+//
+// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To
+// configure the uint encode and decode behavior, use the configuration methods
+// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and
+// decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to marshal Go uint values as the
+// minimum BSON int size that can represent the value, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//	    IntMinSize: true,
+//	})
+//
+// See the deprecation notice for each field in UIntCodec for the corresponding
+// settings.
+type UIntCodec struct {
+	// EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the
+	// minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value.
+	//
+	// Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead.
+	EncodeToMinSize bool
+}
+
+var (
+	defaultUIntCodec = NewUIntCodec()
+
+	// Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used
+	// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
+	_ typeDecoder = defaultUIntCodec
+)
+
+// NewUIntCodec returns a UIntCodec with options opts.
+//
+// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See
+// [UIntCodec] for more details.
+func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec {
+	uintOpt := bsonoptions.MergeUIntCodecOptions(opts...)
+
+	codec := UIntCodec{}
+	if uintOpt.EncodeToMinSize != nil {
+		codec.EncodeToMinSize = *uintOpt.EncodeToMinSize
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for uint types.
+func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Uint8, reflect.Uint16:
+		return vw.WriteInt32(int32(val.Uint()))
+	case reflect.Uint, reflect.Uint32, reflect.Uint64:
+		u64 := val.Uint()
+
+		// If ec.MinSize is set, or EncodeToMinSize is set and val is not a uint64, write val as an int32 when it fits.
+		useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64)
+
+		if u64 <= math.MaxInt32 && useMinSize {
+			return vw.WriteInt32(int32(u64))
+		}
+		if u64 > math.MaxInt64 {
+			return fmt.Errorf("%d overflows int64", u64)
+		}
+		return vw.WriteInt64(int64(u64))
+	}
+
+	return ValueEncoderError{
+		Name:     "UintEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+		Received: val,
+	}
+}
+
+func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	var i64 int64
+	var err error
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return emptyValue, err
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		i64, err = vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return emptyValue, err
+		}
+		if !dc.Truncate && math.Floor(f64) != f64 {
+			return emptyValue, errCannotTruncate
+		}
+		if f64 > float64(math.MaxInt64) {
+			return emptyValue, fmt.Errorf("%g overflows int64", f64)
+		}
+		i64 = int64(f64)
+	case bsontype.Boolean:
+		b, err := vr.ReadBoolean()
+		if err != nil {
+			return emptyValue, err
+		}
+		if b {
+			i64 = 1
+		}
+	case bsontype.Null:
+		if err = vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err = vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType)
+	}
+
+	switch t.Kind() {
+	case reflect.Uint8:
+		if i64 < 0 || i64 > math.MaxUint8 {
+			return emptyValue, fmt.Errorf("%d overflows uint8", i64)
+		}
+
+		return reflect.ValueOf(uint8(i64)), nil
+	case reflect.Uint16:
+		if i64 < 0 || i64 > math.MaxUint16 {
+			return emptyValue, fmt.Errorf("%d overflows uint16", i64)
+		}
+
+		return reflect.ValueOf(uint16(i64)), nil
+	case reflect.Uint32:
+		if i64 < 0 || i64 > math.MaxUint32 {
+			return emptyValue, fmt.Errorf("%d overflows uint32", i64)
+		}
+
+		return reflect.ValueOf(uint32(i64)), nil
+	case reflect.Uint64:
+		if i64 < 0 {
+			return emptyValue, fmt.Errorf("%d overflows uint64", i64)
+		}
+
+		return reflect.ValueOf(uint64(i64)), nil
+	case reflect.Uint:
+		if i64 < 0 {
+			return emptyValue, fmt.Errorf("%d overflows uint", i64)
+		}
+		v := uint64(i64)
+		if v > math.MaxUint { // Check that the value fits in a platform-sized uint.
+			return emptyValue, fmt.Errorf("%d overflows uint", i64)
+		}
+
+		return reflect.ValueOf(uint(v)), nil
+	default:
+		return emptyValue, ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: reflect.Zero(t),
+		}
+	}
+}
+
+// DecodeValue is the ValueDecoder for uint types.
+func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: val,
+		}
+	}
+
+	elem, err := uic.decodeType(dc, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.SetUint(elem.Uint())
+	return nil
+}
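+
+// For example, with ec.MinSize set, uint(42) is written as a BSON int32, while
+// a uint64 value above math.MaxInt64 cannot be represented and EncodeValue
+// returns an "overflows int64" error.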
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..996bd17127a58df69e70a5a2756d2135a304ea3a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type ByteSliceCodecOptions struct {
+	EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
+}
+
+// ByteSliceCodec creates a new *ByteSliceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func ByteSliceCodec() *ByteSliceCodecOptions {
+	return &ByteSliceCodecOptions{}
+}
+
+// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
+func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions {
+	bs.EncodeNilAsEmpty = &b
+	return bs
+}
+
+// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions {
+	bs := ByteSliceCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeNilAsEmpty != nil {
+			bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+		}
+	}
+
+	return bs
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..c40973c8d436c7f3e6ec0fb9fa92740e173d1685
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
@@ -0,0 +1,8 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonoptions defines the optional configurations for the BSON codecs.
+package bsonoptions
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..f522c7e03feff434b35c82767c03f1753d0d43ae
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type EmptyInterfaceCodecOptions struct {
+	DecodeBinaryAsSlice *bool // Specifies if Old and Generic subtype binary values should decode to []byte instead of primitive.Binary. Defaults to false.
+}
+
+// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
+	return &EmptyInterfaceCodecOptions{}
+}
+
+// SetDecodeBinaryAsSlice specifies if Old and Generic subtype binary values should decode to []byte instead of primitive.Binary. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
+	e.DecodeBinaryAsSlice = &b
+	return e
+}
+
+// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions {
+	e := EmptyInterfaceCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.DecodeBinaryAsSlice != nil {
+			e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice
+		}
+	}
+
+	return e
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7a7c1d9804b7504a3e3d7ead65f94aa9239bb38
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
@@ -0,0 +1,82 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// MapCodecOptions represents all possible options for map encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type MapCodecOptions struct {
+	DecodeZerosMap   *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false.
+	EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false.
+	// Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must
+	// either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a
+	// string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the
+	// encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override
+	// TextMarshaler/TextUnmarshaler. Defaults to false.
+	EncodeKeysWithStringer *bool
+}
+
+// MapCodec creates a new *MapCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func MapCodec() *MapCodecOptions {
+	return &MapCodecOptions{}
+}
+
+// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
+func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions {
+	t.DecodeZerosMap = &b
+	return t
+}
+
+// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
+func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions {
+	t.EncodeNilAsEmpty = &b
+	return t
+}
+
+// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the
+// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key
+// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with
+// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer
+// will override TextMarshaler/TextUnmarshaler. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
+func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions {
+	t.EncodeKeysWithStringer = &b
+	return t
+}
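+
+// For example, a map key type that implements both fmt.Stringer and
+// encoding.TextMarshaler is encoded via TextMarshaler by default, but via
+// fmt.Sprint when EncodeKeysWithStringer is true.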
+
+// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions {
+	s := MapCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.DecodeZerosMap != nil {
+			s.DecodeZerosMap = opt.DecodeZerosMap
+		}
+		if opt.EncodeNilAsEmpty != nil {
+			s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+		}
+		if opt.EncodeKeysWithStringer != nil {
+			s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c1e4f35ba1d2bc82ea15f2752239c8580098fef
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// SliceCodecOptions represents all possible options for slice encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type SliceCodecOptions struct {
+	EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
+}
+
+// SliceCodec creates a new *SliceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func SliceCodec() *SliceCodecOptions {
+	return &SliceCodecOptions{}
+}
+
+// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
+func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions {
+	s.EncodeNilAsEmpty = &b
+	return s
+}
+
+// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions {
+	s := SliceCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeNilAsEmpty != nil {
+			s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8b76f996e491085bd0bca8cec6ef6b192a9791c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultDecodeOIDAsHex = true
+
+// StringCodecOptions represents all possible options for string encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type StringCodecOptions struct {
+	DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true.
+}
+
+// StringCodec creates a new *StringCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StringCodec() *StringCodecOptions {
+	return &StringCodecOptions{}
+}
+
+// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made
+// from the raw object ID bytes will be used. Defaults to true.
+//
+// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
+func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions {
+	t.DecodeObjectIDAsHex = &b
+	return t
+}
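+
+// For example, with the default (true), an ObjectID decodes into a string as
+// its 24-character hex form; with false, the raw 12 object ID bytes are used
+// as the string value directly.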
+
+// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions {
+	s := &StringCodecOptions{&defaultDecodeOIDAsHex}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.DecodeObjectIDAsHex != nil {
+			s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..1cbfa32e8b406f91f9640de436c36909c4b77905
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
@@ -0,0 +1,107 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultOverwriteDuplicatedInlinedFields = true
+
+// StructCodecOptions represents all possible options for struct encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type StructCodecOptions struct {
+	DecodeZeroStruct                 *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
+	DecodeDeepZeroInline             *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+	EncodeOmitDefaultStruct          *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false.
+	AllowUnexportedFields            *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+	OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true.
+}
+
+// StructCodec creates a new *StructCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StructCodec() *StructCodecOptions {
+	return &StructCodecOptions{}
+}
+
+// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
+	t.DecodeZeroStruct = &b
+	return t
+}
+
+// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+//
+// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
+	t.DecodeDeepZeroInline = &b
+	return t
+}
+
+// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
+// its values set to their default value. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
+func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
+	t.EncodeOmitDefaultStruct = &b
+	return t
+}
+
+// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the
+// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
+// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
+// there are duplicate keys after the struct is inlined. Defaults to true.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
+func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
+	t.OverwriteDuplicatedInlinedFields = &b
+	return t
+}
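+
+// For example, if a struct and a struct it inlines both define the bson key
+// "id", the default (true) keeps the outer field and drops the inlined one,
+// while false makes encoding and decoding fail with a duplicated-key error.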
+
+// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+//
+// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
+// supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
+	t.AllowUnexportedFields = &b
+	return t
+}
+
+// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions {
+	s := &StructCodecOptions{
+		OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields,
+	}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.DecodeZeroStruct != nil {
+			s.DecodeZeroStruct = opt.DecodeZeroStruct
+		}
+		if opt.DecodeDeepZeroInline != nil {
+			s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline
+		}
+		if opt.EncodeOmitDefaultStruct != nil {
+			s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct
+		}
+		if opt.OverwriteDuplicatedInlinedFields != nil {
+			s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields
+		}
+		if opt.AllowUnexportedFields != nil {
+			s.AllowUnexportedFields = opt.AllowUnexportedFields
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f38433d226531a52a2b1ff5d45665ac90ba4785
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// TimeCodecOptions represents all possible options for time.Time encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type TimeCodecOptions struct {
+	UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false.
+}
+
+// TimeCodec creates a new *TimeCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func TimeCodec() *TimeCodecOptions {
+	return &TimeCodecOptions{}
+}
+
+// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
+func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions {
+	t.UseLocalTimeZone = &b
+	return t
+}
+
+// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions {
+	t := TimeCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.UseLocalTimeZone != nil {
+			t.UseLocalTimeZone = opt.UseLocalTimeZone
+		}
+	}
+
+	return t
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..5091e4d9633fc514eba601f6a01c751dd98475ee
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// UIntCodecOptions represents all possible options for uint encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type UIntCodecOptions struct {
+	EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded to the minimum-size bsontype. Defaults to false.
+}
+
+// UIntCodec creates a new *UIntCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func UIntCodec() *UIntCodecOptions {
+	return &UIntCodecOptions{}
+}
+
+// SetEncodeToMinSize specifies if all uints except uint64 should be encoded to the minimum-size bsontype. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead.
+func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions {
+	u.EncodeToMinSize = &b
+	return u
+}
+
+// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions {
+	u := UIntCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeToMinSize != nil {
+			u.EncodeToMinSize = opt.EncodeToMinSize
+		}
+	}
+
+	return u
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e25570b8556350cdea66a0e23f3a0b6905730d4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
@@ -0,0 +1,489 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Copier is a type that allows copying between ValueReaders, ValueWriters, and
+// []byte values.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+type Copier struct{}
+
+// NewCopier creates a new Copier.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func NewCopier() Copier {
+	return Copier{}
+}
+
+// CopyDocument handles copying a document from src to dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func CopyDocument(dst ValueWriter, src ValueReader) error {
+	return Copier{}.CopyDocument(dst, src)
+}
+
+// CopyDocument handles copying one document from the src to the dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
+	dr, err := src.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return c.copyDocumentCore(dw, dr)
+}
+
+// CopyArrayFromBytes copies the values from a BSON array represented as a
+// []byte to a ValueWriter.
+//
+// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error {
+	aw, err := dst.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	err = c.CopyBytesToArrayWriter(aw, src)
+	if err != nil {
+		return err
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+// CopyDocumentFromBytes copies the values from a BSON document represented as a
+// []byte to a ValueWriter.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	err = c.CopyBytesToDocumentWriter(dw, src)
+	if err != nil {
+		return err
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+type writeElementFn func(key string) (ValueWriter, error)
+
+// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an
+// ArrayWriter.
+//
+// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go
+// Driver 2.0.
+func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error {
+	wef := func(_ string) (ValueWriter, error) {
+		return dst.WriteArrayElement()
+	}
+
+	return c.copyBytesToValueWriter(src, wef)
+}
+
+// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
+// DocumentWriter.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
+	wef := func(key string) (ValueWriter, error) {
+		return dst.WriteDocumentElement(key)
+	}
+
+	return c.copyBytesToValueWriter(src, wef)
+}
+
+func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error {
+	// TODO(skriptble): Create error types here. Anything that is a tag should be a property.
+	length, rem, ok := bsoncore.ReadLength(src)
+	if !ok {
+		return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
+	}
+	if len(src) < int(length) {
+		return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length)
+	}
+	rem = rem[:length-4]
+
+	var t bsontype.Type
+	var key string
+	var val bsoncore.Value
+	for {
+		t, rem, ok = bsoncore.ReadType(rem)
+		if !ok {
+			return io.EOF
+		}
+		if t == bsontype.Type(0) {
+			if len(rem) != 0 {
+				return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem)
+			}
+			break
+		}
+
+		key, rem, ok = bsoncore.ReadKey(rem)
+		if !ok {
+			return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
+		}
+
+		// write as either array element or document element using writeElementFn
+		vw, err := wef(key)
+		if err != nil {
+			return err
+		}
+
+		val, rem, ok = bsoncore.ReadValue(rem, t)
+		if !ok {
+			return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t)
+		}
+		err = c.CopyValueFromBytes(vw, t, val.Data)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
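+
+// For illustration (not upstream documentation): for the document {"a": 1}
+// with an int32 value, copyBytesToValueWriter walks the following bytes.
+//
+//	0x0c 0x00 0x00 0x00   // total length = 12, little-endian
+//	0x10                  // element type byte: int32
+//	0x61 0x00             // key "a" plus NUL terminator
+//	0x01 0x00 0x00 0x00   // little-endian value 1
+//	0x00                  // document terminator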
+
+// CopyDocumentToBytes copies an entire document from the ValueReader and
+// returns it as bytes.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
+	return c.AppendDocumentBytes(nil, src)
+}
+
+// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
+// append the result to dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		_, dst, err := br.ReadValueBytes(dst)
+		return dst, err
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer putValueWriter(vw)
+
+	vw.reset(dst)
+
+	err := c.CopyDocument(vw, src)
+	dst = vw.buf
+	return dst, err
+}
+
+// AppendArrayBytes copies an array from the ValueReader to dst.
+//
+// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		_, dst, err := br.ReadValueBytes(dst)
+		return dst, err
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer putValueWriter(vw)
+
+	vw.reset(dst)
+
+	err := c.copyArray(vw, src)
+	dst = vw.buf
+	return dst, err
+}
+
+// CopyValueFromBytes will write the value represented by t and src to dst.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead.
+func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
+	if wvb, ok := dst.(BytesWriter); ok {
+		return wvb.WriteValueBytes(t, src)
+	}
+
+	vr := vrPool.Get().(*valueReader)
+	defer vrPool.Put(vr)
+
+	vr.reset(src)
+	vr.pushElement(t)
+
+	return c.CopyValue(dst, vr)
+}
+
+// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
+// []byte.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead.
+func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
+	return c.AppendValueBytes(nil, src)
+}
+
+// AppendValueBytes functions the same as CopyValueToBytes, but will append the
+// result to dst.
+//
+// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
+// Driver 2.0.
+func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		return br.ReadValueBytes(dst)
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer putValueWriter(vw)
+
+	start := len(dst)
+
+	vw.reset(dst)
+	vw.push(mElement)
+
+	err := c.CopyValue(vw, src)
+	if err != nil {
+		return 0, dst, err
+	}
+
+	return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil
+}
+
+// CopyValue will copy a single value from src to dst.
+//
+// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error {
+	var err error
+	switch src.Type() {
+	case bsontype.Double:
+		var f64 float64
+		f64, err = src.ReadDouble()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDouble(f64)
+	case bsontype.String:
+		var str string
+		str, err = src.ReadString()
+		if err != nil {
+			return err
+		}
+		err = dst.WriteString(str)
+	case bsontype.EmbeddedDocument:
+		err = c.CopyDocument(dst, src)
+	case bsontype.Array:
+		err = c.copyArray(dst, src)
+	case bsontype.Binary:
+		var data []byte
+		var subtype byte
+		data, subtype, err = src.ReadBinary()
+		if err != nil {
+			break
+		}
+		err = dst.WriteBinaryWithSubtype(data, subtype)
+	case bsontype.Undefined:
+		err = src.ReadUndefined()
+		if err != nil {
+			break
+		}
+		err = dst.WriteUndefined()
+	case bsontype.ObjectID:
+		var oid primitive.ObjectID
+		oid, err = src.ReadObjectID()
+		if err != nil {
+			break
+		}
+		err = dst.WriteObjectID(oid)
+	case bsontype.Boolean:
+		var b bool
+		b, err = src.ReadBoolean()
+		if err != nil {
+			break
+		}
+		err = dst.WriteBoolean(b)
+	case bsontype.DateTime:
+		var dt int64
+		dt, err = src.ReadDateTime()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDateTime(dt)
+	case bsontype.Null:
+		err = src.ReadNull()
+		if err != nil {
+			break
+		}
+		err = dst.WriteNull()
+	case bsontype.Regex:
+		var pattern, options string
+		pattern, options, err = src.ReadRegex()
+		if err != nil {
+			break
+		}
+		err = dst.WriteRegex(pattern, options)
+	case bsontype.DBPointer:
+		var ns string
+		var pointer primitive.ObjectID
+		ns, pointer, err = src.ReadDBPointer()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDBPointer(ns, pointer)
+	case bsontype.JavaScript:
+		var js string
+		js, err = src.ReadJavascript()
+		if err != nil {
+			break
+		}
+		err = dst.WriteJavascript(js)
+	case bsontype.Symbol:
+		var symbol string
+		symbol, err = src.ReadSymbol()
+		if err != nil {
+			break
+		}
+		err = dst.WriteSymbol(symbol)
+	case bsontype.CodeWithScope:
+		var code string
+		var srcScope DocumentReader
+		code, srcScope, err = src.ReadCodeWithScope()
+		if err != nil {
+			break
+		}
+
+		var dstScope DocumentWriter
+		dstScope, err = dst.WriteCodeWithScope(code)
+		if err != nil {
+			break
+		}
+		err = c.copyDocumentCore(dstScope, srcScope)
+	case bsontype.Int32:
+		var i32 int32
+		i32, err = src.ReadInt32()
+		if err != nil {
+			break
+		}
+		err = dst.WriteInt32(i32)
+	case bsontype.Timestamp:
+		var t, i uint32
+		t, i, err = src.ReadTimestamp()
+		if err != nil {
+			break
+		}
+		err = dst.WriteTimestamp(t, i)
+	case bsontype.Int64:
+		var i64 int64
+		i64, err = src.ReadInt64()
+		if err != nil {
+			break
+		}
+		err = dst.WriteInt64(i64)
+	case bsontype.Decimal128:
+		var d128 primitive.Decimal128
+		d128, err = src.ReadDecimal128()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDecimal128(d128)
+	case bsontype.MinKey:
+		err = src.ReadMinKey()
+		if err != nil {
+			break
+		}
+		err = dst.WriteMinKey()
+	case bsontype.MaxKey:
+		err = src.ReadMaxKey()
+		if err != nil {
+			break
+		}
+		err = dst.WriteMaxKey()
+	default:
+		err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type())
+	}
+
+	return err
+}
+
+func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
+	ar, err := src.ReadArray()
+	if err != nil {
+		return err
+	}
+
+	aw, err := dst.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	for {
+		vr, err := ar.ReadValue()
+		if errors.Is(err, ErrEOA) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
+	for {
+		key, vr, err := dr.ReadElement()
+		if errors.Is(err, ErrEOD) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := dw.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
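+
+// Illustrative usage sketch (not part of the upstream file), assuming doc
+// holds a valid raw BSON document: Copier round-trips it through the
+// ValueReader and ValueWriter interfaces.
+//
+//	vr := NewBSONDocumentReader(doc)
+//	out, err := Copier{}.CopyDocumentToBytes(vr) // out is a copy of doc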
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..750b0d2af51e421ab5b32e9527892146d8cdf457
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
@@ -0,0 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonrw contains abstractions for reading and writing
+// BSON and BSON-like types from sources.
+package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw"
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0702d9d3025fad6c35224a1f6745e74d61e3a6c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
@@ -0,0 +1,806 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+const maxNestingDepth = 200
+
+// ErrInvalidJSON indicates the JSON input is invalid
+var ErrInvalidJSON = errors.New("invalid JSON input")
+
+type jsonParseState byte
+
+const (
+	jpsStartState jsonParseState = iota
+	jpsSawBeginObject
+	jpsSawEndObject
+	jpsSawBeginArray
+	jpsSawEndArray
+	jpsSawColon
+	jpsSawComma
+	jpsSawKey
+	jpsSawValue
+	jpsDoneState
+	jpsInvalidState
+)
+
+type jsonParseMode byte
+
+const (
+	jpmInvalidMode jsonParseMode = iota
+	jpmObjectMode
+	jpmArrayMode
+)
+
+type extJSONValue struct {
+	t bsontype.Type
+	v interface{}
+}
+
+type extJSONObject struct {
+	keys   []string
+	values []*extJSONValue
+}
+
+type extJSONParser struct {
+	js *jsonScanner
+	s  jsonParseState
+	m  []jsonParseMode
+	k  string
+	v  *extJSONValue
+
+	err       error
+	canonical bool
+	depth     int
+	maxDepth  int
+
+	emptyObject bool
+	relaxedUUID bool
+}
+
+// newExtJSONParser returns a new extended JSON parser, ready to begin
+// parsing from the first character of the given JSON input. It will not
+// perform any read-ahead and will therefore not report any errors about
+// malformed JSON at this point.
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser {
+	return &extJSONParser{
+		js:        &jsonScanner{r: r},
+		s:         jpsStartState,
+		m:         []jsonParseMode{},
+		canonical: canonical,
+		maxDepth:  maxNestingDepth,
+	}
+}
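+
+// For illustration (not upstream documentation): callers such as the extended
+// JSON ValueReader drive the parser in roughly this order.
+//
+//	t, err := ejp.peekType()      // BSON type of the next value
+//	key, vt, err := ejp.readKey() // within a document: next key and value type
+//	v, err := ejp.readValue(vt)   // consume the value just reported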
+
+// peekType examines the next value and returns its BSON Type
+func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
+	var t bsontype.Type
+	var err error
+	initialState := ejp.s
+
+	ejp.advanceState()
+	switch ejp.s {
+	case jpsSawValue:
+		t = ejp.v.t
+	case jpsSawBeginArray:
+		t = bsontype.Array
+	case jpsInvalidState:
+		err = ejp.err
+	case jpsSawComma:
+		// in array mode, seeing a comma means we need to progress again to actually observe a type
+		if ejp.peekMode() == jpmArrayMode {
+			return ejp.peekType()
+		}
+	case jpsSawEndArray:
+		// this would only be a valid state if we were in array mode, so return end-of-array error
+		err = ErrEOA
+	case jpsSawBeginObject:
+		// peek key to determine type
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawEndObject: // empty embedded document
+			t = bsontype.EmbeddedDocument
+			ejp.emptyObject = true
+		case jpsInvalidState:
+			err = ejp.err
+		case jpsSawKey:
+			if initialState == jpsStartState {
+				return bsontype.EmbeddedDocument, nil
+			}
+			t = wrapperKeyBSONType(ejp.k)
+
+			// if $uuid is encountered, parse as binary subtype 4
+			if ejp.k == "$uuid" {
+				ejp.relaxedUUID = true
+				t = bsontype.Binary
+			}
+
+			switch t {
+			case bsontype.JavaScript:
+				// just saw $code, need to check for $scope at same level
+				_, err = ejp.readValue(bsontype.JavaScript)
+				if err != nil {
+					break
+				}
+
+				switch ejp.s {
+				case jpsSawEndObject: // type is TypeJavaScript
+				case jpsSawComma:
+					ejp.advanceState()
+
+					if ejp.s == jpsSawKey && ejp.k == "$scope" {
+						t = bsontype.CodeWithScope
+					} else {
+						err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k)
+					}
+				case jpsInvalidState:
+					err = ejp.err
+				default:
+					err = ErrInvalidJSON
+				}
+			case bsontype.CodeWithScope:
+				err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope")
+			}
+		}
+	}
+
+	return t, err
+}
+
+// readKey parses the next key and its type and returns them
+func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) {
+	if ejp.emptyObject {
+		ejp.emptyObject = false
+		return "", 0, ErrEOD
+	}
+
+	// advance to key (or return with error)
+	switch ejp.s {
+	case jpsStartState:
+		ejp.advanceState()
+		if ejp.s == jpsSawBeginObject {
+			ejp.advanceState()
+		}
+	case jpsSawBeginObject:
+		ejp.advanceState()
+	case jpsSawValue, jpsSawEndObject, jpsSawEndArray:
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawBeginObject, jpsSawComma:
+			ejp.advanceState()
+		case jpsSawEndObject:
+			return "", 0, ErrEOD
+		case jpsDoneState:
+			return "", 0, io.EOF
+		case jpsInvalidState:
+			return "", 0, ejp.err
+		default:
+			return "", 0, ErrInvalidJSON
+		}
+	case jpsSawKey: // do nothing (key was peeked before)
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// read key
+	var key string
+
+	switch ejp.s {
+	case jpsSawKey:
+		key = ejp.k
+	case jpsSawEndObject:
+		return "", 0, ErrEOD
+	case jpsInvalidState:
+		return "", 0, ejp.err
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// check for colon
+	ejp.advanceState()
+	if err := ensureColon(ejp.s, key); err != nil {
+		return "", 0, err
+	}
+
+	// peek at the value to determine type
+	t, err := ejp.peekType()
+	if err != nil {
+		return "", 0, err
+	}
+
+	return key, t, nil
+}
+
+// readValue returns the value corresponding to the Type returned by peekType
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
+	if ejp.s == jpsInvalidState {
+		return nil, ejp.err
+	}
+
+	var v *extJSONValue
+
+	switch t {
+	case bsontype.Null, bsontype.Boolean, bsontype.String:
+		if ejp.s != jpsSawValue {
+			return nil, invalidRequestError(t.String())
+		}
+		v = ejp.v
+	case bsontype.Int32, bsontype.Int64, bsontype.Double:
+		// relaxed version allows these to be literal number values
+		if ejp.s == jpsSawValue {
+			v = ejp.v
+			break
+		}
+		fallthrough
+	case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+
+			v = ejp.v
+
+			// read end object
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("} after value", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer:
+		if ejp.s != jpsSawKey {
+			return nil, invalidRequestError(t.String())
+		}
+		// read colon
+		ejp.advanceState()
+		if err := ensureColon(ejp.s, ejp.k); err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if t == bsontype.Binary && ejp.s == jpsSawValue {
+			// convert relaxed $uuid format
+			if ejp.relaxedUUID {
+				defer func() { ejp.relaxedUUID = false }()
+				uuid, err := ejp.v.parseSymbol()
+				if err != nil {
+					return nil, err
+				}
+
+				// RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as
+				// appearing at byte indexes 8, 13, 18, and 23.
+				//
+				// See https://tools.ietf.org/html/rfc4122#section-3
+				valid := len(uuid) == 36 &&
+					string(uuid[8]) == "-" &&
+					string(uuid[13]) == "-" &&
+					string(uuid[18]) == "-" &&
+					string(uuid[23]) == "-"
+				if !valid {
+					return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens")
+				}
+
+				// remove hyphens
+				uuidNoHyphens := strings.ReplaceAll(uuid, "-", "")
+				if len(uuidNoHyphens) != 32 {
+					return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens")
+				}
+
+				// convert hex to bytes
+				bytes, err := hex.DecodeString(uuidNoHyphens)
+				if err != nil {
+					return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err)
+				}
+
+				ejp.advanceState()
+				if ejp.s != jpsSawEndObject {
+					return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary)
+				}
+
+				base64 := &extJSONValue{
+					t: bsontype.String,
+					v: base64.StdEncoding.EncodeToString(bytes),
+				}
+				subType := &extJSONValue{
+					t: bsontype.String,
+					v: "04",
+				}
+
+				v = &extJSONValue{
+					t: bsontype.EmbeddedDocument,
+					v: &extJSONObject{
+						keys:   []string{"base64", "subType"},
+						values: []*extJSONValue{base64, subType},
+					},
+				}
+
+				break
+			}
+
+			// convert legacy $binary format
+			base64 := ejp.v
+
+			ejp.advanceState()
+			if ejp.s != jpsSawComma {
+				return nil, invalidJSONErrorForType(",", bsontype.Binary)
+			}
+
+			ejp.advanceState()
+			key, t, err := ejp.readKey()
+			if err != nil {
+				return nil, err
+			}
+			if key != "$type" {
+				return nil, invalidJSONErrorForType("$type", bsontype.Binary)
+			}
+
+			subType, err := ejp.readValue(t)
+			if err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary)
+			}
+
+			v = &extJSONValue{
+				t: bsontype.EmbeddedDocument,
+				v: &extJSONObject{
+					keys:   []string{"base64", "subType"},
+					values: []*extJSONValue{base64, subType},
+				},
+			}
+			break
+		}
+
+		// read KV pairs
+		if ejp.s != jpsSawBeginObject {
+			return nil, invalidJSONErrorForType("{", t)
+		}
+
+		keys, vals, err := ejp.readObject(2, true)
+		if err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if ejp.s != jpsSawEndObject {
+			return nil, invalidJSONErrorForType("2 key-value pairs and then }", t)
+		}
+
+		v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+
+	case bsontype.DateTime:
+		switch ejp.s {
+		case jpsSawValue:
+			v = ejp.v
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			switch ejp.s {
+			case jpsSawBeginObject:
+				keys, vals, err := ejp.readObject(1, true)
+				if err != nil {
+					return nil, err
+				}
+				v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+			case jpsSawValue:
+				if ejp.canonical {
+					return nil, invalidJSONError("{")
+				}
+				v = ejp.v
+			default:
+				if ejp.canonical {
+					return nil, invalidJSONErrorForType("object", t)
+				}
+				return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t)
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("value and then }", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.JavaScript:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+			v = ejp.v
+
+			// read end object or comma and just return
+			ejp.advanceState()
+		case jpsSawEndObject:
+			v = ejp.v
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.CodeWithScope:
+		if ejp.s == jpsSawKey && ejp.k == "$scope" {
+			v = ejp.v // this is the $code string from earlier
+
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read {
+			ejp.advanceState()
+			if ejp.s != jpsSawBeginObject {
+				return nil, invalidJSONError("$scope to be embedded document")
+			}
+		} else {
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.EmbeddedDocument, bsontype.Array:
+		return nil, invalidRequestError(t.String())
+	}
+
+	return v, nil
+}
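+
+// Illustrative example (not upstream documentation): both extended JSON forms
+// handled above decode to the same subtype-4 binary value.
+//
+//	{"$uuid": "00112233-4455-6677-8899-aabbccddeeff"}
+//	{"$binary": {"base64": "ABEiM0RVZneImaq7zN3u/w==", "subType": "04"}}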
+
+// readObject is a utility method for reading full objects of known (or expected) size.
+// It is useful for extended JSON types such as binary, datetime, regex, and timestamp.
+func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) {
+	keys := make([]string, numKeys)
+	vals := make([]*extJSONValue, numKeys)
+
+	if !started {
+		ejp.advanceState()
+		if ejp.s != jpsSawBeginObject {
+			return nil, nil, invalidJSONError("{")
+		}
+	}
+
+	for i := 0; i < numKeys; i++ {
+		key, t, err := ejp.readKey()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		switch ejp.s {
+		case jpsSawKey:
+			v, err := ejp.readValue(t)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			keys[i] = key
+			vals[i] = v
+		case jpsSawValue:
+			keys[i] = key
+			vals[i] = ejp.v
+		default:
+			return nil, nil, invalidJSONError("value")
+		}
+	}
+
+	ejp.advanceState()
+	if ejp.s != jpsSawEndObject {
+		return nil, nil, invalidJSONError("}")
+	}
+
+	return keys, vals, nil
+}
+
+// advanceState reads the next JSON token from the scanner and transitions
+// from the current state based on that token's type
+func (ejp *extJSONParser) advanceState() {
+	if ejp.s == jpsDoneState || ejp.s == jpsInvalidState {
+		return
+	}
+
+	jt, err := ejp.js.nextToken()
+
+	if err != nil {
+		ejp.err = err
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	valid := ejp.validateToken(jt.t)
+	if !valid {
+		ejp.err = unexpectedTokenError(jt)
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	switch jt.t {
+	case jttBeginObject:
+		ejp.s = jpsSawBeginObject
+		ejp.pushMode(jpmObjectMode)
+		ejp.depth++
+
+		if ejp.depth > ejp.maxDepth {
+			ejp.err = nestingDepthError(jt.p, ejp.depth)
+			ejp.s = jpsInvalidState
+		}
+	case jttEndObject:
+		ejp.s = jpsSawEndObject
+		ejp.depth--
+
+		if ejp.popMode() != jpmObjectMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttBeginArray:
+		ejp.s = jpsSawBeginArray
+		ejp.pushMode(jpmArrayMode)
+	case jttEndArray:
+		ejp.s = jpsSawEndArray
+
+		if ejp.popMode() != jpmArrayMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttColon:
+		ejp.s = jpsSawColon
+	case jttComma:
+		ejp.s = jpsSawComma
+	case jttEOF:
+		ejp.s = jpsDoneState
+		if len(ejp.m) != 0 {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttString:
+		switch ejp.s {
+		case jpsSawComma:
+			if ejp.peekMode() == jpmArrayMode {
+				ejp.s = jpsSawValue
+				ejp.v = extendJSONToken(jt)
+				return
+			}
+			fallthrough
+		case jpsSawBeginObject:
+			ejp.s = jpsSawKey
+			ejp.k = jt.v.(string)
+			return
+		}
+		fallthrough
+	default:
+		ejp.s = jpsSawValue
+		ejp.v = extendJSONToken(jt)
+	}
+}
+
+var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{
+	jpsStartState: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+		jttEOF:         true,
+	},
+	jpsSawBeginObject: {
+		jttEndObject: true,
+		jttString:    true,
+	},
+	jpsSawEndObject: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawBeginArray: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttEndArray:    true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawEndArray: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawColon: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawComma: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawKey: {
+		jttColon: true,
+	},
+	jpsSawValue: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsDoneState:    {},
+	jpsInvalidState: {},
+}
+
+func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool {
+	switch ejp.s {
+	case jpsSawEndObject:
+		// if we are at depth zero and the next token is a '{',
+		// we can consider it valid only if we are not in array mode.
+		if jtt == jttBeginObject && ejp.depth == 0 {
+			return ejp.peekMode() != jpmArrayMode
+		}
+	case jpsSawComma:
+		switch ejp.peekMode() {
+		// the only valid next token after a comma inside a document is a string (a key)
+		case jpmObjectMode:
+			return jtt == jttString
+		case jpmInvalidMode:
+			return false
+		}
+	}
+
+	_, ok := jpsValidTransitionTokens[ejp.s][jtt]
+	return ok
+}
+
+// ensureExtValueType returns true if the current value has the expected
+// value type for single-key extended JSON types. For example,
+// {"$numberInt": v} v must be TypeString
+func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool {
+	switch t {
+	case bsontype.MinKey, bsontype.MaxKey:
+		return ejp.v.t == bsontype.Int32
+	case bsontype.Undefined:
+		return ejp.v.t == bsontype.Boolean
+	case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID:
+		return ejp.v.t == bsontype.String
+	default:
+		return false
+	}
+}
+
+func (ejp *extJSONParser) pushMode(m jsonParseMode) {
+	ejp.m = append(ejp.m, m)
+}
+
+func (ejp *extJSONParser) popMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	m := ejp.m[l-1]
+	ejp.m = ejp.m[:l-1]
+
+	return m
+}
+
+func (ejp *extJSONParser) peekMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	return ejp.m[l-1]
+}
+
+func extendJSONToken(jt *jsonToken) *extJSONValue {
+	var t bsontype.Type
+
+	switch jt.t {
+	case jttInt32:
+		t = bsontype.Int32
+	case jttInt64:
+		t = bsontype.Int64
+	case jttDouble:
+		t = bsontype.Double
+	case jttString:
+		t = bsontype.String
+	case jttBool:
+		t = bsontype.Boolean
+	case jttNull:
+		t = bsontype.Null
+	default:
+		return nil
+	}
+
+	return &extJSONValue{t: t, v: jt.v}
+}
+
+func ensureColon(s jsonParseState, key string) error {
+	if s != jpsSawColon {
+		return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key)
+	}
+
+	return nil
+}
+
+func invalidRequestError(s string) error {
+	return fmt.Errorf("invalid request to read %s", s)
+}
+
+func invalidJSONError(expected string) error {
+	return fmt.Errorf("invalid JSON input; expected %s", expected)
+}
+
+func invalidJSONErrorForType(expected string, t bsontype.Type) error {
+	return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t)
+}
+
+func unexpectedTokenError(jt *jsonToken) error {
+	switch jt.t {
+	case jttInt32, jttInt64, jttDouble:
+		return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p)
+	case jttString:
+		return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p)
+	case jttBool:
+		return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p)
+	case jttNull:
+		return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p)
+	case jttEOF:
+		return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p)
+	default:
+		return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p)
+	}
+}
+
+func nestingDepthError(p, depth int) error {
+	return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..59ddfc4485830ac68ecd0b0cf74a6a7b5fffbb1f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
@@ -0,0 +1,653 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+type ExtJSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
+	return &ExtJSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses r as the underlying ExtJSON source.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
+	vr := bvrp.pool.Get().(*extJSONValueReader)
+	return vr.reset(r, canonical)
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader, nothing
+// is inserted into the pool and ok will be false.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*extJSONValueReader)
+	if !ok {
+		return false
+	}
+
+	bvr, _ = bvr.reset(nil, false)
+	bvrp.pool.Put(bvr)
+	return true
+}
+
+type ejvrState struct {
+	mode  mode
+	vType bsontype.Type
+	depth int
+}
+
+// extJSONValueReader is for reading extended JSON.
+type extJSONValueReader struct {
+	p *extJSONParser
+
+	stack []ejvrState
+	frame int
+}
+
+// NewExtJSONValueReader creates a new ValueReader from a given io.Reader.
+// It will interpret the JSON of r as canonical or relaxed according to the
+// given canonical flag.
+func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) {
+	return newExtJSONValueReader(r, canonical)
+}
+
+func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	ejvr := new(extJSONValueReader)
+	return ejvr.reset(r, canonical)
+}
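+
+// Illustrative usage sketch (not part of the upstream file), assuming the
+// canonical flag matches the flavor of the input:
+//
+//	r := strings.NewReader(`{"x": {"$numberInt": "42"}}`)
+//	vr, err := NewExtJSONValueReader(r, true)
+//	// vr satisfies ValueReader and can feed, e.g., Copier{}.CopyDocumentToBytes.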
+
+func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	p := newExtJSONParser(r, canonical)
+	typ, err := p.peekType()
+
+	if err != nil {
+		return nil, ErrInvalidJSON
+	}
+
+	var m mode
+	switch typ {
+	case bsontype.EmbeddedDocument:
+		m = mTopLevel
+	case bsontype.Array:
+		m = mArray
+	default:
+		m = mValue
+	}
+
+	stack := make([]ejvrState, 1, 5)
+	stack[0] = ejvrState{
+		mode:  m,
+		vType: typ,
+	}
+	return &extJSONValueReader{
+		p:     p,
+		stack: stack,
+	}, nil
+}
+
+func (ejvr *extJSONValueReader) advanceFrame() {
+	if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack
+		length := len(ejvr.stack)
+		if length+1 >= cap(ejvr.stack) {
+			// double it
+			buf := make([]ejvrState, 2*cap(ejvr.stack)+1)
+			copy(buf, ejvr.stack)
+			ejvr.stack = buf
+		}
+		ejvr.stack = ejvr.stack[:length+1]
+	}
+	ejvr.frame++
+
+	// Clean the stack
+	ejvr.stack[ejvr.frame].mode = 0
+	ejvr.stack[ejvr.frame].vType = 0
+	ejvr.stack[ejvr.frame].depth = 0
+}
+
+func (ejvr *extJSONValueReader) pushDocument() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mDocument
+	ejvr.stack[ejvr.frame].depth = ejvr.p.depth
+}
+
+func (ejvr *extJSONValueReader) pushCodeWithScope() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mCodeWithScope
+}
+
+func (ejvr *extJSONValueReader) pushArray() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mArray
+}
+
+func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = m
+	ejvr.stack[ejvr.frame].vType = t
+}
+
+func (ejvr *extJSONValueReader) pop() {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		ejvr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvr.frame -= 2 // we pop twice to jump over the element frame: mDocument -> mElement -> mDocument/mTopLevel/etc.
+	}
+}
+
+func (ejvr *extJSONValueReader) skipObject() {
+	// read entire object until depth returns to 0 (last ending } or ] seen)
+	depth := 1
+	for depth > 0 {
+		ejvr.p.advanceState()
+
+		// If object is empty, lower depth and continue. When emptyObject is true, the
+		// parser has already read both the opening and closing brackets of an empty
+		// object ("{}"), so the next valid token will be part of the parent document,
+		// not part of the nested document.
+		//
+		// If there is a comma, there are remaining fields, emptyObject must be set back
+		// to false, and comma must be skipped with advanceState().
+		if ejvr.p.emptyObject {
+			if ejvr.p.s == jpsSawComma {
+				ejvr.p.emptyObject = false
+				ejvr.p.advanceState()
+			}
+			depth--
+			continue
+		}
+
+		switch ejvr.p.s {
+		case jpsSawBeginObject, jpsSawBeginArray:
+			depth++
+		case jpsSawEndObject, jpsSawEndArray:
+			depth--
+		}
+	}
+}
+
+func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvr.stack[ejvr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if ejvr.frame != 0 {
+		te.parent = ejvr.stack[ejvr.frame-1].mode
+	}
+	return te
+}
+
+func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t)
+}
+
+func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != t {
+			return ejvr.typeError(t)
+		}
+	default:
+		modes := []mode{mElement, mValue}
+		if addModes != nil {
+			modes = append(modes, addModes...)
+		}
+		return ejvr.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) Type() bsontype.Type {
+	return ejvr.stack[ejvr.frame].vType
+}
+
+func (ejvr *extJSONValueReader) Skip() error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+	default:
+		return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	defer ejvr.pop()
+
+	t := ejvr.stack[ejvr.frame].vType
+	switch t {
+	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+		// read entire array, doc or CodeWithScope
+		ejvr.skipObject()
+	default:
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel: // allow reading array from top level
+	case mArray:
+		return ejvr, nil
+	default:
+		if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil {
+			return nil, err
+		}
+	}
+
+	ejvr.pushArray()
+
+	return ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Binary)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	b, btype, err = v.parseBinary()
+
+	ejvr.pop()
+	return b, btype, err
+}
+
+func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) {
+	if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Boolean)
+	if err != nil {
+		return false, err
+	}
+
+	if v.t != bsontype.Boolean {
+		return false, fmt.Errorf("expected type bool, but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return v.v.(bool), nil
+}
+
+func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel:
+		return ejvr, nil
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument {
+			return nil, ejvr.typeError(bsontype.EmbeddedDocument)
+		}
+
+		ejvr.pushDocument()
+		return ejvr, nil
+	default:
+		return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+	}
+}
+
+func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+	if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+		return "", nil, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.CodeWithScope)
+	if err != nil {
+		return "", nil, err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pushCodeWithScope()
+	return code, ejvr, err
+}
+
+func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DBPointer)
+	if err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	ns, oid, err = v.parseDBPointer()
+
+	ejvr.pop()
+	return ns, oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DateTime)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDateTime()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
+	if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Decimal128)
+	if err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	d, err := v.parseDecimal128()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Double)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDouble()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int32)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt32()
+
+	ejvr.pop()
+	return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int64)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt64()
+
+	ejvr.pop()
+	return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.JavaScript)
+	if err != nil {
+		return "", err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pop()
+	return code, err
+}
+
+func (ejvr *extJSONValueReader) ReadMaxKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MaxKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("max")
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadMinKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MinKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("min")
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadNull() error {
+	if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Null)
+	if err != nil {
+		return err
+	}
+
+	if v.t != bsontype.Null {
+		return fmt.Errorf("expected type null but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return nil
+}
+
+func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
+	if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.ObjectID)
+	if err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	oid, err := v.parseObjectID()
+
+	ejvr.pop()
+	return oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+		return "", "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Regex)
+	if err != nil {
+		return "", "", err
+	}
+
+	pattern, options, err = v.parseRegex()
+
+	ejvr.pop()
+	return pattern, options, err
+}
+
+func (ejvr *extJSONValueReader) ReadString() (string, error) {
+	if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.String)
+	if err != nil {
+		return "", err
+	}
+
+	if v.t != bsontype.String {
+		return "", fmt.Errorf("expected type string but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return v.v.(string), nil
+}
+
+func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Symbol)
+	if err != nil {
+		return "", err
+	}
+
+	symbol, err = v.parseSymbol()
+
+	ejvr.pop()
+	return symbol, err
+}
+
+func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
+		return 0, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Timestamp)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	t, i, err = v.parseTimestamp()
+
+	ejvr.pop()
+	return t, i, err
+}
+
+func (ejvr *extJSONValueReader) ReadUndefined() error {
+	if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Undefined)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseUndefined()
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel, mDocument, mCodeWithScope:
+	default:
+		return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
+	}
+
+	name, t, err := ejvr.p.readKey()
+
+	if err != nil {
+		if errors.Is(err, ErrEOD) {
+			if ejvr.stack[ejvr.frame].mode == mCodeWithScope {
+				_, err := ejvr.p.peekType()
+				if err != nil {
+					return "", nil, err
+				}
+			}
+
+			ejvr.pop()
+		}
+
+		return "", nil, err
+	}
+
+	ejvr.push(mElement, t)
+	return name, ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mArray:
+	default:
+		return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+	}
+
+	t, err := ejvr.p.peekType()
+	if err != nil {
+		if errors.Is(err, ErrEOA) {
+			ejvr.pop()
+		}
+
+		return nil, err
+	}
+
+	ejvr.push(mValue, t)
+	return ejvr, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba39c9601fb935b8f7028ae89cd50049e9bb6513
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsonrw
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
new file mode 100644
index 0000000000000000000000000000000000000000..af6ae7b76bfad9c877629b4f9ef47d2d96a1d515
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
@@ -0,0 +1,492 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func wrapperKeyBSONType(key string) bsontype.Type {
+	switch key {
+	case "$numberInt":
+		return bsontype.Int32
+	case "$numberLong":
+		return bsontype.Int64
+	case "$oid":
+		return bsontype.ObjectID
+	case "$symbol":
+		return bsontype.Symbol
+	case "$numberDouble":
+		return bsontype.Double
+	case "$numberDecimal":
+		return bsontype.Decimal128
+	case "$binary":
+		return bsontype.Binary
+	case "$code":
+		return bsontype.JavaScript
+	case "$scope":
+		return bsontype.CodeWithScope
+	case "$timestamp":
+		return bsontype.Timestamp
+	case "$regularExpression":
+		return bsontype.Regex
+	case "$dbPointer":
+		return bsontype.DBPointer
+	case "$date":
+		return bsontype.DateTime
+	case "$minKey":
+		return bsontype.MinKey
+	case "$maxKey":
+		return bsontype.MaxKey
+	case "$undefined":
+		return bsontype.Undefined
+	}
+
+	return bsontype.EmbeddedDocument
+}
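+
+// For example (illustrative): wrapperKeyBSONType("$numberLong") reports
+// bsontype.Int64, so a parser peeking at
+//
+//	{"$numberLong": "42"}
+//
+// treats the object as a wrapped int64, while an unrecognized first key falls
+// through to bsontype.EmbeddedDocument.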
+
+func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return nil, 0, fmt.Errorf("$binary value should be object, but instead is %s", ejv.t)
+	}
+
+	binObj := ejv.v.(*extJSONObject)
+	bFound := false
+	stFound := false
+
+	for i, key := range binObj.keys {
+		val := binObj.values[i]
+
+		switch key {
+		case "base64":
+			if bFound {
+				return nil, 0, errors.New("duplicate base64 key in $binary")
+			}
+
+			if val.t != bsontype.String {
+				return nil, 0, fmt.Errorf("$binary base64 value should be string, but instead is %s", val.t)
+			}
+
+			base64Bytes, err := base64.StdEncoding.DecodeString(val.v.(string))
+			if err != nil {
+				return nil, 0, fmt.Errorf("invalid $binary base64 string: %s", val.v.(string))
+			}
+
+			b = base64Bytes
+			bFound = true
+		case "subType":
+			if stFound {
+				return nil, 0, errors.New("duplicate subType key in $binary")
+			}
+
+			if val.t != bsontype.String {
+				return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", val.t)
+			}
+
+			i, err := strconv.ParseUint(val.v.(string), 16, 8)
+			if err != nil {
+				return nil, 0, fmt.Errorf("invalid $binary subType string: %q: %w", val.v.(string), err)
+			}
+
+			subType = byte(i)
+			stFound = true
+		default:
+			return nil, 0, fmt.Errorf("invalid key in $binary object: %s", key)
+		}
+	}
+
+	if !bFound {
+		return nil, 0, errors.New("missing base64 field in $binary object")
+	}
+
+	if !stFound {
+		return nil, 0, errors.New("missing subType field in $binary object")
+	}
+
+	return b, subType, nil
+}
+
+func (ejv *extJSONValue) parseDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return "", primitive.NilObjectID, fmt.Errorf("$dbPointer value should be object, but instead is %s", ejv.t)
+	}
+
+	dbpObj := ejv.v.(*extJSONObject)
+	oidFound := false
+	nsFound := false
+
+	for i, key := range dbpObj.keys {
+		val := dbpObj.values[i]
+
+		switch key {
+		case "$ref":
+			if nsFound {
+				return "", primitive.NilObjectID, errors.New("duplicate $ref key in $dbPointer")
+			}
+
+			if val.t != bsontype.String {
+				return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $ref value should be string, but instead is %s", val.t)
+			}
+
+			ns = val.v.(string)
+			nsFound = true
+		case "$id":
+			if oidFound {
+				return "", primitive.NilObjectID, errors.New("duplicate $id key in $dbPointer")
+			}
+
+			if val.t != bsontype.String {
+				return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $id value should be string, but instead is %s", val.t)
+			}
+
+			oid, err = primitive.ObjectIDFromHex(val.v.(string))
+			if err != nil {
+				return "", primitive.NilObjectID, err
+			}
+
+			oidFound = true
+		default:
+			return "", primitive.NilObjectID, fmt.Errorf("invalid key in $dbPointer object: %s", key)
+		}
+	}
+
+	if !nsFound {
+		return "", oid, errors.New("missing $ref field in $dbPointer object")
+	}
+
+	if !oidFound {
+		return "", oid, errors.New("missing $id field in $dbPointer object")
+	}
+
+	return ns, oid, nil
+}
+
+const (
+	rfc3339Milli = "2006-01-02T15:04:05.999Z07:00"
+)
+
+var (
+	timeFormats = []string{rfc3339Milli, "2006-01-02T15:04:05.999Z0700"}
+)
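+
+// For example, both "2021-01-02T15:04:05.321Z" and "2021-01-02T15:04:05.321+0100"
+// are accepted: rfc3339Milli allows "Z" or a colon-separated UTC offset, while
+// the second format allows a colon-free offset.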
+
+func (ejv *extJSONValue) parseDateTime() (int64, error) {
+	switch ejv.t {
+	case bsontype.Int32:
+		return int64(ejv.v.(int32)), nil
+	case bsontype.Int64:
+		return ejv.v.(int64), nil
+	case bsontype.String:
+		return parseDatetimeString(ejv.v.(string))
+	case bsontype.EmbeddedDocument:
+		return parseDatetimeObject(ejv.v.(*extJSONObject))
+	default:
+		return 0, fmt.Errorf("$date value should be string or object, but instead is %s", ejv.t)
+	}
+}
+
+func parseDatetimeString(data string) (int64, error) {
+	var t time.Time
+	var err error
+	// try acceptable time formats until one matches
+	for _, format := range timeFormats {
+		t, err = time.Parse(format, data)
+		if err == nil {
+			break
+		}
+	}
+	if err != nil {
+		return 0, fmt.Errorf("invalid $date value string: %s", data)
+	}
+
+	return int64(primitive.NewDateTimeFromTime(t)), nil
+}
+
+func parseDatetimeObject(data *extJSONObject) (d int64, err error) {
+	dFound := false
+
+	for i, key := range data.keys {
+		val := data.values[i]
+
+		switch key {
+		case "$numberLong":
+			if dFound {
+				return 0, errors.New("duplicate $numberLong key in $date")
+			}
+
+			if val.t != bsontype.String {
+				return 0, fmt.Errorf("$date $numberLong field should be string, but instead is %s", val.t)
+			}
+
+			d, err = val.parseInt64()
+			if err != nil {
+				return 0, err
+			}
+			dFound = true
+		default:
+			return 0, fmt.Errorf("invalid key in $date object: %s", key)
+		}
+	}
+
+	if !dFound {
+		return 0, errors.New("missing $numberLong field in $date object")
+	}
+
+	return d, nil
+}
+
+func (ejv *extJSONValue) parseDecimal128() (primitive.Decimal128, error) {
+	if ejv.t != bsontype.String {
+		return primitive.Decimal128{}, fmt.Errorf("$numberDecimal value should be string, but instead is %s", ejv.t)
+	}
+
+	d, err := primitive.ParseDecimal128(ejv.v.(string))
+	if err != nil {
+		return primitive.Decimal128{}, fmt.Errorf("$invalid $numberDecimal string: %s", ejv.v.(string))
+	}
+
+	return d, nil
+}
+
+func (ejv *extJSONValue) parseDouble() (float64, error) {
+	if ejv.t == bsontype.Double {
+		return ejv.v.(float64), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberDouble value should be string, but instead is %s", ejv.t)
+	}
+
+	switch ejv.v.(string) {
+	case "Infinity":
+		return math.Inf(1), nil
+	case "-Infinity":
+		return math.Inf(-1), nil
+	case "NaN":
+		return math.NaN(), nil
+	}
+
+	f, err := strconv.ParseFloat(ejv.v.(string), 64)
+	if err != nil {
+		return 0, err
+	}
+
+	return f, nil
+}
+
+func (ejv *extJSONValue) parseInt32() (int32, error) {
+	if ejv.t == bsontype.Int32 {
+		return ejv.v.(int32), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberInt value should be string, but instead is %s", ejv.t)
+	}
+
+	i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	if i < math.MinInt32 || i > math.MaxInt32 {
+		return 0, fmt.Errorf("$numberInt value should be int32 but instead is int64: %d", i)
+	}
+
+	return int32(i), nil
+}
+
+func (ejv *extJSONValue) parseInt64() (int64, error) {
+	if ejv.t == bsontype.Int64 {
+		return ejv.v.(int64), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberLong value should be string, but instead is %s", ejv.t)
+	}
+
+	i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	return i, nil
+}
+
+func (ejv *extJSONValue) parseJavascript() (code string, err error) {
+	if ejv.t != bsontype.String {
+		return "", fmt.Errorf("$code value should be string, but instead is %s", ejv.t)
+	}
+
+	return ejv.v.(string), nil
+}
+
+func (ejv *extJSONValue) parseMinMaxKey(minmax string) error {
+	if ejv.t != bsontype.Int32 {
+		return fmt.Errorf("$%sKey value should be int32, but instead is %s", minmax, ejv.t)
+	}
+
+	if ejv.v.(int32) != 1 {
+		return fmt.Errorf("$%sKey value must be 1, but instead is %d", minmax, ejv.v.(int32))
+	}
+
+	return nil
+}
+
+func (ejv *extJSONValue) parseObjectID() (primitive.ObjectID, error) {
+	if ejv.t != bsontype.String {
+		return primitive.NilObjectID, fmt.Errorf("$oid value should be string, but instead is %s", ejv.t)
+	}
+
+	return primitive.ObjectIDFromHex(ejv.v.(string))
+}
+
+func (ejv *extJSONValue) parseRegex() (pattern, options string, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return "", "", fmt.Errorf("$regularExpression value should be object, but instead is %s", ejv.t)
+	}
+
+	regexObj := ejv.v.(*extJSONObject)
+	patFound := false
+	optFound := false
+
+	for i, key := range regexObj.keys {
+		val := regexObj.values[i]
+
+		switch key {
+		case "pattern":
+			if patFound {
+				return "", "", errors.New("duplicate pattern key in $regularExpression")
+			}
+
+			if val.t != bsontype.String {
+				return "", "", fmt.Errorf("$regularExpression pattern value should be string, but instead is %s", val.t)
+			}
+
+			pattern = val.v.(string)
+			patFound = true
+		case "options":
+			if optFound {
+				return "", "", errors.New("duplicate options key in $regularExpression")
+			}
+
+			if val.t != bsontype.String {
+				return "", "", fmt.Errorf("$regularExpression options value should be string, but instead is %s", val.t)
+			}
+
+			options = val.v.(string)
+			optFound = true
+		default:
+			return "", "", fmt.Errorf("invalid key in $regularExpression object: %s", key)
+		}
+	}
+
+	if !patFound {
+		return "", "", errors.New("missing pattern field in $regularExpression object")
+	}
+
+	if !optFound {
+		return "", "", errors.New("missing options field in $regularExpression object")
+
+	}
+
+	return pattern, options, nil
+}
+
+func (ejv *extJSONValue) parseSymbol() (string, error) {
+	if ejv.t != bsontype.String {
+		return "", fmt.Errorf("$symbol value should be string, but instead is %s", ejv.t)
+	}
+
+	return ejv.v.(string), nil
+}
+
+func (ejv *extJSONValue) parseTimestamp() (t, i uint32, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return 0, 0, fmt.Errorf("$timestamp value should be object, but instead is %s", ejv.t)
+	}
+
+	handleKey := func(key string, val *extJSONValue, flag bool) (uint32, error) {
+		if flag {
+			return 0, fmt.Errorf("duplicate %s key in $timestamp", key)
+		}
+
+		switch val.t {
+		case bsontype.Int32:
+			value := val.v.(int32)
+
+			if value < 0 {
+				return 0, fmt.Errorf("$timestamp %s number should be uint32: %d", key, value)
+			}
+
+			return uint32(value), nil
+		case bsontype.Int64:
+			value := val.v.(int64)
+			if value < 0 || value > int64(math.MaxUint32) {
+				return 0, fmt.Errorf("$timestamp %s number should be uint32: %d", key, value)
+			}
+
+			return uint32(value), nil
+		default:
+			return 0, fmt.Errorf("$timestamp %s value should be uint32, but instead is %s", key, val.t)
+		}
+	}
+
+	tsObj := ejv.v.(*extJSONObject)
+	tFound := false
+	iFound := false
+
+	for j, key := range tsObj.keys {
+		val := tsObj.values[j]
+
+		switch key {
+		case "t":
+			if t, err = handleKey(key, val, tFound); err != nil {
+				return 0, 0, err
+			}
+
+			tFound = true
+		case "i":
+			if i, err = handleKey(key, val, iFound); err != nil {
+				return 0, 0, err
+			}
+
+			iFound = true
+		default:
+			return 0, 0, fmt.Errorf("invalid key in $timestamp object: %s", key)
+		}
+	}
+
+	if !tFound {
+		return 0, 0, errors.New("missing t field in $timestamp object")
+	}
+
+	if !iFound {
+		return 0, 0, errors.New("missing i field in $timestamp object")
+	}
+
+	return t, i, nil
+}
+
+func (ejv *extJSONValue) parseUndefined() error {
+	if ejv.t != bsontype.Boolean {
+		return fmt.Errorf("undefined value should be boolean, but instead is %s", ejv.t)
+	}
+
+	if !ejv.v.(bool) {
+		return fmt.Errorf("$undefined balue boolean should be true, but instead is %v", ejv.v.(bool))
+	}
+
+	return nil
+}
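
The parsers above back the driver's public extended JSON decoding entry point. A minimal round-trip sketch through that public API (field names and values are invented for illustration; bson.UnmarshalExtJSON is the existing wrapper that ultimately exercises these helpers):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// $numberInt and $oid wrappers are decoded by the parse* helpers above.
	ext := []byte(`{"count":{"$numberInt":"42"},"id":{"$oid":"5d505646cf6d4fe581014ab2"}}`)

	var doc bson.M
	// The second argument selects canonical extended JSON parsing.
	if err := bson.UnmarshalExtJSON(ext, true, &doc); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", doc["count"], doc["count"]) // int32 42
}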
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..57781ff9f317e470cb549adbe461280162086f3d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
@@ -0,0 +1,750 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters.
+//
+// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
+type ExtJSONValueWriterPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON.
+//
+// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
+func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool {
+	return &ExtJSONValueWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueWriter)
+			},
+		},
+	}
+}
+
+// Get retrieves an ExtJSON ValueWriter from the pool and resets it to use w as the destination.
+//
+// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
+func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter {
+	vw := bvwp.pool.Get().(*extJSONValueWriter)
+	if writer, ok := w.(*SliceWriter); ok {
+		vw.reset(*writer, canonical, escapeHTML)
+		vw.w = writer
+		return vw
+	}
+	vw.buf = vw.buf[:0]
+	vw.w = w
+	return vw
+}
+
+// Put inserts a ValueWriter into the pool. If the ValueWriter is not an ExtJSON ValueWriter, nothing
+// happens and ok will be false.
+//
+// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
+func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
+	bvw, ok := vw.(*extJSONValueWriter)
+	if !ok {
+		return false
+	}
+
+	if _, ok := bvw.w.(*SliceWriter); ok {
+		bvw.buf = nil
+	}
+	bvw.w = nil
+
+	bvwp.pool.Put(bvw)
+	return true
+}
+
+type ejvwState struct {
+	mode mode
+}
+
+type extJSONValueWriter struct {
+	w   io.Writer
+	buf []byte
+
+	stack      []ejvwState
+	frame      int64
+	canonical  bool
+	escapeHTML bool
+	newlines   bool
+}
+
+// NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w.
+func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter, error) {
+	if w == nil {
+		return nil, errNilWriter
+	}
+
+	// Enable newlines for all Extended JSON value writers created by NewExtJSONValueWriter. We
+	// expect these value writers to be used with an Encoder, which should add newlines after
+	// encoded Extended JSON documents.
+	return newExtJSONWriter(w, canonical, escapeHTML, true), nil
+}
+
+func newExtJSONWriter(w io.Writer, canonical, escapeHTML, newlines bool) *extJSONValueWriter {
+	stack := make([]ejvwState, 1, 5)
+	stack[0] = ejvwState{mode: mTopLevel}
+
+	return &extJSONValueWriter{
+		w:          w,
+		buf:        []byte{},
+		stack:      stack,
+		canonical:  canonical,
+		escapeHTML: escapeHTML,
+		newlines:   newlines,
+	}
+}
+
+func newExtJSONWriterFromSlice(buf []byte, canonical, escapeHTML bool) *extJSONValueWriter {
+	stack := make([]ejvwState, 1, 5)
+	stack[0] = ejvwState{mode: mTopLevel}
+
+	return &extJSONValueWriter{
+		buf:        buf,
+		stack:      stack,
+		canonical:  canonical,
+		escapeHTML: escapeHTML,
+	}
+}
+
+func (ejvw *extJSONValueWriter) reset(buf []byte, canonical, escapeHTML bool) {
+	if ejvw.stack == nil {
+		ejvw.stack = make([]ejvwState, 1, 5)
+	}
+
+	ejvw.stack = ejvw.stack[:1]
+	ejvw.stack[0] = ejvwState{mode: mTopLevel}
+	ejvw.canonical = canonical
+	ejvw.escapeHTML = escapeHTML
+	ejvw.frame = 0
+	ejvw.buf = buf
+	ejvw.w = nil
+}
+
+func (ejvw *extJSONValueWriter) advanceFrame() {
+	if ejvw.frame+1 >= int64(len(ejvw.stack)) { // We need to grow the stack
+		length := len(ejvw.stack)
+		if length+1 >= cap(ejvw.stack) {
+			// double it
+			buf := make([]ejvwState, 2*cap(ejvw.stack)+1)
+			copy(buf, ejvw.stack)
+			ejvw.stack = buf
+		}
+		ejvw.stack = ejvw.stack[:length+1]
+	}
+	ejvw.frame++
+}
+
+func (ejvw *extJSONValueWriter) push(m mode) {
+	ejvw.advanceFrame()
+
+	ejvw.stack[ejvw.frame].mode = m
+}
+
+func (ejvw *extJSONValueWriter) pop() {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mElement, mValue:
+		ejvw.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+	}
+}
+
+func (ejvw *extJSONValueWriter) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvw.stack[ejvw.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "write",
+	}
+	if ejvw.frame != 0 {
+		te.parent = ejvw.stack[ejvw.frame-1].mode
+	}
+	return te
+}
+
+func (ejvw *extJSONValueWriter) ensureElementValue(destination mode, callerName string, addmodes ...mode) error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mElement, mValue:
+	default:
+		modes := []mode{mElement, mValue}
+		if addmodes != nil {
+			modes = append(modes, addmodes...)
+		}
+		return ejvw.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) writeExtendedSingleValue(key string, value string, quotes bool) {
+	var s string
+	if quotes {
+		s = fmt.Sprintf(`{"$%s":"%s"}`, key, value)
+	} else {
+		s = fmt.Sprintf(`{"$%s":%s}`, key, value)
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte(s)...)
+}
+
+func (ejvw *extJSONValueWriter) WriteArray() (ArrayWriter, error) {
+	if err := ejvw.ensureElementValue(mArray, "WriteArray"); err != nil {
+		return nil, err
+	}
+
+	ejvw.buf = append(ejvw.buf, '[')
+
+	ejvw.push(mArray)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteBinary(b []byte) error {
+	return ejvw.WriteBinaryWithSubtype(b, 0x00)
+}
+
+func (ejvw *extJSONValueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteBinaryWithSubtype"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$binary":{"base64":"`)
+	buf.WriteString(base64.StdEncoding.EncodeToString(b))
+	buf.WriteString(fmt.Sprintf(`","subType":"%02x"}},`, btype))
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteBoolean(b bool) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteBoolean"); err != nil {
+		return err
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte(strconv.FormatBool(b))...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
+	if err := ejvw.ensureElementValue(mCodeWithScope, "WriteCodeWithScope"); err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$code":`)
+	writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
+	buf.WriteString(`,"$scope":{`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.push(mCodeWithScope)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDBPointer"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$dbPointer":{"$ref":"`)
+	buf.WriteString(ns)
+	buf.WriteString(`","$id":{"$oid":"`)
+	buf.WriteString(oid.Hex())
+	buf.WriteString(`"}}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDateTime(dt int64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDateTime"); err != nil {
+		return err
+	}
+
+	t := time.Unix(dt/1e3, dt%1e3*1e6).UTC()
+
+	if ejvw.canonical || t.Year() < 1970 || t.Year() > 9999 {
+		s := fmt.Sprintf(`{"$numberLong":"%d"}`, dt)
+		ejvw.writeExtendedSingleValue("date", s, false)
+	} else {
+		ejvw.writeExtendedSingleValue("date", t.Format(rfc3339Milli), true)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDecimal128(d primitive.Decimal128) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDecimal128"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("numberDecimal", d.String(), true)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocument() (DocumentWriter, error) {
+	if ejvw.stack[ejvw.frame].mode == mTopLevel {
+		ejvw.buf = append(ejvw.buf, '{')
+		return ejvw, nil
+	}
+
+	if err := ejvw.ensureElementValue(mDocument, "WriteDocument", mTopLevel); err != nil {
+		return nil, err
+	}
+
+	ejvw.buf = append(ejvw.buf, '{')
+	ejvw.push(mDocument)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDouble(f float64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDouble"); err != nil {
+		return err
+	}
+
+	s := formatDouble(f)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberDouble", s, true)
+	} else {
+		switch s {
+		case "Infinity":
+			fallthrough
+		case "-Infinity":
+			fallthrough
+		case "NaN":
+			s = fmt.Sprintf(`{"$numberDouble":"%s"}`, s)
+		}
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteInt32(i int32) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteInt32"); err != nil {
+		return err
+	}
+
+	s := strconv.FormatInt(int64(i), 10)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberInt", s, true)
+	} else {
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteInt64(i int64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteInt64"); err != nil {
+		return err
+	}
+
+	s := strconv.FormatInt(i, 10)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberLong", s, true)
+	} else {
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteJavascript(code string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteJavascript"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
+
+	ejvw.writeExtendedSingleValue("code", buf.String(), false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteMaxKey() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteMaxKey"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("maxKey", "1", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteMinKey() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteMinKey"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("minKey", "1", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteNull() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteNull"); err != nil {
+		return err
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte("null")...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteObjectID(oid primitive.ObjectID) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteObjectID"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("oid", oid.Hex(), true)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteRegex(pattern string, options string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteRegex"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$regularExpression":{"pattern":`)
+	writeStringWithEscapes(pattern, &buf, ejvw.escapeHTML)
+	buf.WriteString(`,"options":"`)
+	buf.WriteString(sortStringAlphebeticAscending(options))
+	buf.WriteString(`"}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteString(s string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteString"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(s, &buf, ejvw.escapeHTML)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteSymbol(symbol string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteSymbol"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(symbol, &buf, ejvw.escapeHTML)
+
+	ejvw.writeExtendedSingleValue("symbol", buf.String(), false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteTimestamp(t uint32, i uint32) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteTimestamp"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$timestamp":{"t":`)
+	buf.WriteString(strconv.FormatUint(uint64(t), 10))
+	buf.WriteString(`,"i":`)
+	buf.WriteString(strconv.FormatUint(uint64(i), 10))
+	buf.WriteString(`}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteUndefined() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteUndefined"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("undefined", "true", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mDocument, mTopLevel, mCodeWithScope:
+		var buf bytes.Buffer
+		writeStringWithEscapes(key, &buf, ejvw.escapeHTML)
+
+		ejvw.buf = append(ejvw.buf, []byte(fmt.Sprintf(`%s:`, buf.String()))...)
+		ejvw.push(mElement)
+	default:
+		return nil, ejvw.invalidTransitionErr(mElement, "WriteDocumentElement", []mode{mDocument, mTopLevel, mCodeWithScope})
+	}
+
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocumentEnd() error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mDocument, mTopLevel, mCodeWithScope:
+	default:
+		return fmt.Errorf("incorrect mode to end document: %s", ejvw.stack[ejvw.frame].mode)
+	}
+
+	// close the document
+	if ejvw.buf[len(ejvw.buf)-1] == ',' {
+		ejvw.buf[len(ejvw.buf)-1] = '}'
+	} else {
+		ejvw.buf = append(ejvw.buf, '}')
+	}
+
+	switch ejvw.stack[ejvw.frame].mode {
+	case mCodeWithScope:
+		ejvw.buf = append(ejvw.buf, '}')
+		fallthrough
+	case mDocument:
+		ejvw.buf = append(ejvw.buf, ',')
+	case mTopLevel:
+		// If the value writer has newlines enabled, end top-level documents with a newline so that
+		// multiple documents encoded to the same writer are separated by newlines. That matches the
+		// Go json.Encoder behavior and also works with bsonrw.NewExtJSONValueReader.
+		if ejvw.newlines {
+			ejvw.buf = append(ejvw.buf, '\n')
+		}
+		if ejvw.w != nil {
+			if _, err := ejvw.w.Write(ejvw.buf); err != nil {
+				return err
+			}
+			ejvw.buf = ejvw.buf[:0]
+		}
+	}
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteArrayElement() (ValueWriter, error) {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mArray:
+		ejvw.push(mValue)
+	default:
+		return nil, ejvw.invalidTransitionErr(mValue, "WriteArrayElement", []mode{mArray})
+	}
+
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteArrayEnd() error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mArray:
+		// close the array
+		if ejvw.buf[len(ejvw.buf)-1] == ',' {
+			ejvw.buf[len(ejvw.buf)-1] = ']'
+		} else {
+			ejvw.buf = append(ejvw.buf, ']')
+		}
+
+		ejvw.buf = append(ejvw.buf, ',')
+
+		ejvw.pop()
+	default:
+		return fmt.Errorf("incorrect mode to end array: %s", ejvw.stack[ejvw.frame].mode)
+	}
+
+	return nil
+}
+
+func formatDouble(f float64) string {
+	var s string
+	switch {
+	case math.IsInf(f, 1):
+		s = "Infinity"
+	case math.IsInf(f, -1):
+		s = "-Infinity"
+	case math.IsNaN(f):
+		s = "NaN"
+	default:
+			// Print exactly one decimal place for integers; otherwise, print as many
+			// as necessary to exactly represent the value.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, 'E') && !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
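+
+// For example (relaxed output): formatDouble(3) yields "3.0", formatDouble(0.5)
+// yields "0.5", and formatDouble(1e21) yields "1E+21", so integral doubles stay
+// distinguishable from int32/int64 values.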
+
+var hexChars = "0123456789abcdef"
+
+func writeStringWithEscapes(s string, buf *bytes.Buffer, escapeHTML bool) {
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			case '\t':
+				buf.WriteByte('\\')
+				buf.WriteByte('t')
+			case '\b':
+				buf.WriteByte('\\')
+				buf.WriteByte('b')
+			case '\f':
+				buf.WriteByte('\\')
+				buf.WriteByte('f')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hexChars[b>>4])
+				buf.WriteByte(hexChars[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hexChars[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+}
+
+type sortableString []rune
+
+func (ss sortableString) Len() int {
+	return len(ss)
+}
+
+func (ss sortableString) Less(i, j int) bool {
+	return ss[i] < ss[j]
+}
+
+func (ss sortableString) Swap(i, j int) {
+	ss[i], ss[j] = ss[j], ss[i]
+}
+
+func sortStringAlphebeticAscending(s string) string {
+	ss := sortableString([]rune(s))
+	sort.Sort(ss)
+	return string([]rune(ss))
+}
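
On the encoding side, this value writer backs the public bson.MarshalExtJSON helper. A brief sketch contrasting canonical and relaxed output (the document contents are invented for illustration):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc := bson.D{{Key: "n", Value: int32(42)}}

	// canonical=true produces type-preserving wrappers via WriteInt32 et al.
	canonical, _ := bson.MarshalExtJSON(doc, true, false)
	fmt.Println(string(canonical)) // {"n":{"$numberInt":"42"}}

	// canonical=false (relaxed) writes plain JSON numbers where it can.
	relaxed, _ := bson.MarshalExtJSON(doc, false, false)
	fmt.Println(string(relaxed)) // {"n":42}
}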
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go
new file mode 100644
index 0000000000000000000000000000000000000000..97828919164822474409cf513e6ad0f2fc3df25d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go
@@ -0,0 +1,533 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+)
+
+type jsonTokenType byte
+
+const (
+	jttBeginObject jsonTokenType = iota
+	jttEndObject
+	jttBeginArray
+	jttEndArray
+	jttColon
+	jttComma
+	jttInt32
+	jttInt64
+	jttDouble
+	jttString
+	jttBool
+	jttNull
+	jttEOF
+)
+
+type jsonToken struct {
+	t jsonTokenType
+	v interface{}
+	p int
+}
+
+type jsonScanner struct {
+	r           io.Reader
+	buf         []byte
+	pos         int
+	lastReadErr error
+}
+
+// nextToken returns the next JSON token if one exists. A token is a structural
+// character of the JSON grammar, a number, a string, or a literal.
+func (js *jsonScanner) nextToken() (*jsonToken, error) {
+	c, err := js.readNextByte()
+
+	// keep reading until a non-space is encountered (break on read error or EOF)
+	for isWhiteSpace(c) && err == nil {
+		c, err = js.readNextByte()
+	}
+
+	if errors.Is(err, io.EOF) {
+		return &jsonToken{t: jttEOF}, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	// switch on the character
+	switch c {
+	case '{':
+		return &jsonToken{t: jttBeginObject, v: byte('{'), p: js.pos - 1}, nil
+	case '}':
+		return &jsonToken{t: jttEndObject, v: byte('}'), p: js.pos - 1}, nil
+	case '[':
+		return &jsonToken{t: jttBeginArray, v: byte('['), p: js.pos - 1}, nil
+	case ']':
+		return &jsonToken{t: jttEndArray, v: byte(']'), p: js.pos - 1}, nil
+	case ':':
+		return &jsonToken{t: jttColon, v: byte(':'), p: js.pos - 1}, nil
+	case ',':
+		return &jsonToken{t: jttComma, v: byte(','), p: js.pos - 1}, nil
+	case '"': // RFC-8259 only allows for double quotes (") not single (')
+		return js.scanString()
+	default:
+		// check if it's a number
+		switch {
+		case c == '-' || isDigit(c):
+			return js.scanNumber(c)
+		case c == 't' || c == 'f' || c == 'n':
+			// maybe a literal
+			return js.scanLiteral(c)
+		default:
+			return nil, fmt.Errorf("invalid JSON input. Position: %d. Character: %c", js.pos-1, c)
+		}
+	}
+}
+
+// readNextByte attempts to read the next byte from the buffer. If the buffer
+// has been exhausted, this function calls readIntoBuf, thus refilling the
+// buffer and resetting the read position to 0
+func (js *jsonScanner) readNextByte() (byte, error) {
+	if js.pos >= len(js.buf) {
+		err := js.readIntoBuf()
+
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	b := js.buf[js.pos]
+	js.pos++
+
+	return b, nil
+}
+
+// readNNextBytes reads n bytes into dst, starting at offset
+func (js *jsonScanner) readNNextBytes(dst []byte, n, offset int) error {
+	var err error
+
+	for i := 0; i < n; i++ {
+		dst[i+offset], err = js.readNextByte()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// readIntoBuf reads up to 512 bytes from the scanner's io.Reader into the buffer
+func (js *jsonScanner) readIntoBuf() error {
+	if js.lastReadErr != nil {
+		js.buf = js.buf[:0]
+		js.pos = 0
+		return js.lastReadErr
+	}
+
+	if cap(js.buf) == 0 {
+		js.buf = make([]byte, 0, 512)
+	}
+
+	n, err := js.r.Read(js.buf[:cap(js.buf)])
+	if err != nil {
+		js.lastReadErr = err
+		if n > 0 {
+			err = nil
+		}
+	}
+	js.buf = js.buf[:n]
+	js.pos = 0
+
+	return err
+}
+
+func isWhiteSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+func isDigit(c byte) bool {
+	return unicode.IsDigit(rune(c))
+}
+
+func isValueTerminator(c byte) bool {
+	return c == ',' || c == '}' || c == ']' || isWhiteSpace(c)
+}
+
+// getu4 decodes the 4-byte hex sequence from the beginning of s, returning the
+// hex value as a rune, or -1 if the sequence is not valid hexadecimal. Note that
+// the "\u" from the unicode escape sequence should not be present.
+// It is copied and lightly modified from the Go JSON decode function at
+// https://github.com/golang/go/blob/1b0a0316802b8048d69da49dc23c5a5ab08e8ae8/src/encoding/json/decode.go#L1169-L1188
+func getu4(s []byte) rune {
+	if len(s) < 4 {
+		return -1
+	}
+	var r rune
+	for _, c := range s[:4] {
+		switch {
+		case '0' <= c && c <= '9':
+			c -= '0'
+		case 'a' <= c && c <= 'f':
+			c = c - 'a' + 10
+		case 'A' <= c && c <= 'F':
+			c = c - 'A' + 10
+		default:
+			return -1
+		}
+		r = r*16 + rune(c)
+	}
+	return r
+}
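+
+// For example, getu4([]byte("0041")) returns 'A' (U+0041); a non-hex input
+// such as getu4([]byte("00zz")) returns -1.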
+
+// scanString reads from an opening '"' to a closing '"' and handles escaped characters
+func (js *jsonScanner) scanString() (*jsonToken, error) {
+	var b bytes.Buffer
+	var c byte
+	var err error
+
+	p := js.pos - 1
+
+	for {
+		c, err = js.readNextByte()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return nil, errors.New("end of input in JSON string")
+			}
+			return nil, err
+		}
+
+	evalNextChar:
+		switch c {
+		case '\\':
+			c, err = js.readNextByte()
+			if err != nil {
+				if errors.Is(err, io.EOF) {
+					return nil, errors.New("end of input in JSON string")
+				}
+				return nil, err
+			}
+
+		evalNextEscapeChar:
+			switch c {
+			case '"', '\\', '/':
+				b.WriteByte(c)
+			case 'b':
+				b.WriteByte('\b')
+			case 'f':
+				b.WriteByte('\f')
+			case 'n':
+				b.WriteByte('\n')
+			case 'r':
+				b.WriteByte('\r')
+			case 't':
+				b.WriteByte('\t')
+			case 'u':
+				us := make([]byte, 4)
+				err = js.readNNextBytes(us, 4, 0)
+				if err != nil {
+					return nil, fmt.Errorf("invalid unicode sequence in JSON string: %s", us)
+				}
+
+				rn := getu4(us)
+
+				// If the rune we just decoded is the high or low value of a possible surrogate pair,
+				// try to decode the next sequence as the low value of a surrogate pair. We're
+				// expecting the next sequence to be another Unicode escape sequence (e.g. "\uDD1E"),
+				// but need to handle cases where the input is not a valid surrogate pair.
+				// For more context on unicode surrogate pairs, see:
+				// https://www.christianfscott.com/rust-chars-vs-go-runes/
+				// https://www.unicode.org/glossary/#high_surrogate_code_point
+				if utf16.IsSurrogate(rn) {
+					c, err = js.readNextByte()
+					if err != nil {
+						if errors.Is(err, io.EOF) {
+							return nil, errors.New("end of input in JSON string")
+						}
+						return nil, err
+					}
+
+					// If the next value isn't the beginning of a backslash escape sequence, write
+					// the Unicode replacement character for the surrogate value and goto the
+					// beginning of the next char eval block.
+					if c != '\\' {
+						b.WriteRune(unicode.ReplacementChar)
+						goto evalNextChar
+					}
+
+					c, err = js.readNextByte()
+					if err != nil {
+						if errors.Is(err, io.EOF) {
+							return nil, errors.New("end of input in JSON string")
+						}
+						return nil, err
+					}
+
+					// If the next value isn't the beginning of a unicode escape sequence, write the
+					// Unicode replacement character for the surrogate value and goto the beginning
+					// of the next escape char eval block.
+					if c != 'u' {
+						b.WriteRune(unicode.ReplacementChar)
+						goto evalNextEscapeChar
+					}
+
+					err = js.readNNextBytes(us, 4, 0)
+					if err != nil {
+						return nil, fmt.Errorf("invalid unicode sequence in JSON string: %s", us)
+					}
+
+					rn2 := getu4(us)
+
+					// Try to decode the pair of runes as a utf16 surrogate pair. If that fails, write
+					// the Unicode replacement character for the surrogate value and the 2nd decoded rune.
+					if rnPair := utf16.DecodeRune(rn, rn2); rnPair != unicode.ReplacementChar {
+						b.WriteRune(rnPair)
+					} else {
+						b.WriteRune(unicode.ReplacementChar)
+						b.WriteRune(rn2)
+					}
+
+					break
+				}
+
+				b.WriteRune(rn)
+			default:
+				return nil, fmt.Errorf("invalid escape sequence in JSON string '\\%c'", c)
+			}
+		case '"':
+			return &jsonToken{t: jttString, v: b.String(), p: p}, nil
+		default:
+			b.WriteByte(c)
+		}
+	}
+}
+
+// scanLiteral reads an unquoted sequence of characters and determines if it is one of
+// three valid JSON literals (true, false, null); if so, it returns the appropriate
+// jsonToken; otherwise, it returns an error
+func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) {
+	p := js.pos - 1
+
+	lit := make([]byte, 4)
+	lit[0] = first
+
+	err := js.readNNextBytes(lit, 3, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	c5, err := js.readNextByte()
+
+	switch {
+	case bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)):
+		js.pos = int(math.Max(0, float64(js.pos-1)))
+		return &jsonToken{t: jttBool, v: true, p: p}, nil
+	case bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)):
+		js.pos = int(math.Max(0, float64(js.pos-1)))
+		return &jsonToken{t: jttNull, v: nil, p: p}, nil
+	case bytes.Equal([]byte("fals"), lit):
+		if c5 == 'e' {
+			c5, err = js.readNextByte()
+
+			if isValueTerminator(c5) || errors.Is(err, io.EOF) {
+				js.pos = int(math.Max(0, float64(js.pos-1)))
+				return &jsonToken{t: jttBool, v: false, p: p}, nil
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("invalid JSON literal. Position: %d, literal: %s", p, lit)
+}
+
+type numberScanState byte
+
+const (
+	nssSawLeadingMinus numberScanState = iota
+	nssSawLeadingZero
+	nssSawIntegerDigits
+	nssSawDecimalPoint
+	nssSawFractionDigits
+	nssSawExponentLetter
+	nssSawExponentSign
+	nssSawExponentDigits
+	nssDone
+	nssInvalid
+)
+
+// scanNumber reads a JSON number (according to RFC-8259)
+func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
+	var b bytes.Buffer
+	var s numberScanState
+	var c byte
+	var err error
+
+	t := jttInt64 // assume it's an int64 until the type can be determined
+	start := js.pos - 1
+
+	b.WriteByte(first)
+
+	switch first {
+	case '-':
+		s = nssSawLeadingMinus
+	case '0':
+		s = nssSawLeadingZero
+	default:
+		s = nssSawIntegerDigits
+	}
+
+	for {
+		c, err = js.readNextByte()
+
+		if err != nil && !errors.Is(err, io.EOF) {
+			return nil, err
+		}
+
+		switch s {
+		case nssSawLeadingMinus:
+			switch c {
+			case '0':
+				s = nssSawLeadingZero
+				b.WriteByte(c)
+			default:
+				if isDigit(c) {
+					s = nssSawIntegerDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawLeadingZero:
+			switch c {
+			case '.':
+				s = nssSawDecimalPoint
+				b.WriteByte(c)
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || errors.Is(err, io.EOF) {
+					s = nssDone
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawIntegerDigits:
+			switch c {
+			case '.':
+				s = nssSawDecimalPoint
+				b.WriteByte(c)
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				switch {
+				case isWhiteSpace(c) || errors.Is(err, io.EOF):
+					s = nssDone
+				case isDigit(c):
+					s = nssSawIntegerDigits
+					b.WriteByte(c)
+				default:
+					s = nssInvalid
+				}
+			}
+		case nssSawDecimalPoint:
+			t = jttDouble
+			if isDigit(c) {
+				s = nssSawFractionDigits
+				b.WriteByte(c)
+			} else {
+				s = nssInvalid
+			}
+		case nssSawFractionDigits:
+			switch c {
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				switch {
+				case isWhiteSpace(c) || errors.Is(err, io.EOF):
+					s = nssDone
+				case isDigit(c):
+					s = nssSawFractionDigits
+					b.WriteByte(c)
+				default:
+					s = nssInvalid
+				}
+			}
+		case nssSawExponentLetter:
+			t = jttDouble
+			switch c {
+			case '+', '-':
+				s = nssSawExponentSign
+				b.WriteByte(c)
+			default:
+				if isDigit(c) {
+					s = nssSawExponentDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawExponentSign:
+			if isDigit(c) {
+				s = nssSawExponentDigits
+				b.WriteByte(c)
+			} else {
+				s = nssInvalid
+			}
+		case nssSawExponentDigits:
+			switch c {
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				switch {
+				case isWhiteSpace(c) || errors.Is(err, io.EOF):
+					s = nssDone
+				case isDigit(c):
+					s = nssSawExponentDigits
+					b.WriteByte(c)
+				default:
+					s = nssInvalid
+				}
+			}
+		}
+
+		switch s {
+		case nssInvalid:
+			return nil, fmt.Errorf("invalid JSON number. Position: %d", start)
+		case nssDone:
+			js.pos = int(math.Max(0, float64(js.pos-1)))
+			if t != jttDouble {
+				v, err := strconv.ParseInt(b.String(), 10, 64)
+				if err == nil {
+					if v < math.MinInt32 || v > math.MaxInt32 {
+						return &jsonToken{t: jttInt64, v: v, p: start}, nil
+					}
+
+					return &jsonToken{t: jttInt32, v: int32(v), p: start}, nil
+				}
+			}
+
+			v, err := strconv.ParseFloat(b.String(), 64)
+			if err != nil {
+				return nil, err
+			}
+
+			return &jsonToken{t: jttDouble, v: v, p: start}, nil
+		}
+	}
+}
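
The scanner is unexported and is consumed by this package's extended JSON value reader. A small sketch showing how scanned number tokens surface through the public reader API (input invented for illustration; 42 would scan as an int32 token, 2147483648 as int64, and 4.2 as a double):

package main

import (
	"fmt"
	"strings"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	vr, err := bsonrw.NewExtJSONValueReader(strings.NewReader(`{"n": 4.2}`), false)
	if err != nil {
		panic(err)
	}
	dr, _ := vr.ReadDocument()
	_, val, _ := dr.ReadElement()
	f, _ := val.ReadDouble() // the scanner classified 4.2 as jttDouble
	fmt.Println(f)           // 4.2
}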
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go
new file mode 100644
index 0000000000000000000000000000000000000000..617b5e2212ae9a215e67f83a2696129ed0845947
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+)
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+func (m mode) TypeString() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "Document"
+	case mArray:
+		str = "Array"
+	case mValue:
+		str = "Value"
+	case mElement:
+		str = "Element"
+	case mCodeWithScope:
+		str = "CodeWithScope"
+	case mSpacer:
+		str = "CodeWithScopeSpacer"
+	default:
+		str = "Unknown"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid transition in a
+// ValueReader or ValueWriter state machine occurs. The action field records
+// whether the failing operation was a read or a write.
+type TransitionError struct {
+	name        string
+	parent      mode
+	current     mode
+	destination mode
+	modes       []mode
+	action      string
+}
+
+func (te TransitionError) Error() string {
+	errString := fmt.Sprintf("%s can only %s", te.name, te.action)
+	if te.destination != mode(0) {
+		errString = fmt.Sprintf("%s a %s", errString, te.destination.TypeString())
+	}
+	errString = fmt.Sprintf("%s while positioned on a", errString)
+	for ind, m := range te.modes {
+		if ind != 0 && len(te.modes) > 2 {
+			errString = fmt.Sprintf("%s,", errString)
+		}
+		if ind == len(te.modes)-1 && len(te.modes) > 1 {
+			errString = fmt.Sprintf("%s or", errString)
+		}
+		errString = fmt.Sprintf("%s %s", errString, m.TypeString())
+	}
+	errString = fmt.Sprintf("%s but is positioned on a %s", errString, te.current.TypeString())
+	if te.parent != mode(0) {
+		errString = fmt.Sprintf("%s with parent %s", errString, te.parent.TypeString())
+	}
+	return errString
+}
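
This mode machine is shared by all readers and writers in the package. A TransitionError is easiest to see by driving a writer through an invalid state change; a minimal sketch using the package's BSON value writer constructor (the exact error text is illustrative, not normative):

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	var buf bytes.Buffer
	vw, err := bsonrw.NewBSONValueWriter(&buf)
	if err != nil {
		panic(err)
	}

	// Writing a scalar at the top level is an invalid transition: the writer
	// is in TopLevel mode, but WriteInt32 requires Element or Value mode.
	if err := vw.WriteInt32(42); err != nil {
		fmt.Println(err) // a bsonrw.TransitionError
	}
}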
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..324b10b616776e33db528c29beb61e3c01287b7e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ArrayReader is implemented by types that allow reading values from a BSON
+// array.
+type ArrayReader interface {
+	ReadValue() (ValueReader, error)
+}
+
+// DocumentReader is implemented by types that allow reading elements from a
+// BSON document.
+type DocumentReader interface {
+	ReadElement() (string, ValueReader, error)
+}
+
+// ValueReader is a generic interface used to read values from BSON. This type
+// is implemented by several types with different underlying representations of
+// BSON, such as a bson.Document, raw BSON bytes, or extended JSON.
+type ValueReader interface {
+	Type() bsontype.Type
+	Skip() error
+
+	ReadArray() (ArrayReader, error)
+	ReadBinary() (b []byte, btype byte, err error)
+	ReadBoolean() (bool, error)
+	ReadDocument() (DocumentReader, error)
+	ReadCodeWithScope() (code string, dr DocumentReader, err error)
+	ReadDBPointer() (ns string, oid primitive.ObjectID, err error)
+	ReadDateTime() (int64, error)
+	ReadDecimal128() (primitive.Decimal128, error)
+	ReadDouble() (float64, error)
+	ReadInt32() (int32, error)
+	ReadInt64() (int64, error)
+	ReadJavascript() (code string, err error)
+	ReadMaxKey() error
+	ReadMinKey() error
+	ReadNull() error
+	ReadObjectID() (primitive.ObjectID, error)
+	ReadRegex() (pattern, options string, err error)
+	ReadString() (string, error)
+	ReadSymbol() (symbol string, err error)
+	ReadTimestamp() (t, i uint32, err error)
+	ReadUndefined() error
+}
+
+// BytesReader is a generic interface used to read BSON bytes from a
+// ValueReader. This interface is meant to be a superset of ValueReader, so that
+// types that implement ValueReader may also implement this interface.
+//
+// The bytes of the value will be appended to dst.
+//
+// Deprecated: BytesReader will not be supported in Go Driver 2.0.
+type BytesReader interface {
+	ReadValueBytes(dst []byte) (bsontype.Type, []byte, error)
+}
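
These interfaces are the abstraction the codecs program against, independent of whether the underlying bytes are raw BSON or extended JSON. A minimal sketch walking a raw document with the reader implemented in the files that follow (document contents invented for illustration):

package main

import (
	"errors"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	raw, _ := bson.Marshal(bson.D{{Key: "a", Value: int32(1)}, {Key: "b", Value: "two"}})

	vr := bsonrw.NewBSONDocumentReader(raw)
	dr, err := vr.ReadDocument()
	if err != nil {
		panic(err)
	}

	for {
		key, val, err := dr.ReadElement()
		if errors.Is(err, bsonrw.ErrEOD) {
			break // end of document
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(key, val.Type())
		_ = val.Skip() // skip the value to advance to the next element
	}
}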
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..0e07d505586c3010ccfe191f53e7e04d7a5cb051
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go
@@ -0,0 +1,888 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+var _ ValueReader = (*valueReader)(nil)
+
+var vrPool = sync.Pool{
+	New: func() interface{} {
+		return new(valueReader)
+	},
+}
+
+// BSONValueReaderPool is a pool for ValueReaders that read BSON.
+//
+// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
+type BSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewBSONValueReaderPool instantiates a new BSONValueReaderPool.
+//
+// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
+func NewBSONValueReaderPool() *BSONValueReaderPool {
+	return &BSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(valueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses src as the underlying BSON.
+//
+// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader {
+	vr := bvrp.pool.Get().(*valueReader)
+	vr.reset(src)
+	return vr
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader, nothing
+// is inserted into the pool and ok will be false.
+//
+// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*valueReader)
+	if !ok {
+		return false
+	}
+
+	bvr.reset(nil)
+	bvrp.pool.Put(bvr)
+	return true
+}
+
+// ErrEOA is the error returned when the end of a BSON array has been reached.
+var ErrEOA = errors.New("end of array")
+
+// ErrEOD is the error returned when the end of a BSON document has been reached.
+var ErrEOD = errors.New("end of document")
+
+type vrState struct {
+	mode  mode
+	vType bsontype.Type
+	end   int64
+}
+
+// valueReader is for reading BSON values.
+type valueReader struct {
+	offset int64
+	d      []byte
+
+	stack []vrState
+	frame int64
+}
+
+// NewBSONDocumentReader returns a ValueReader using b for the underlying BSON
+// representation. Parameter b must be a BSON Document.
+func NewBSONDocumentReader(b []byte) ValueReader {
+	// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes a []byte while the
+	// TODO writer takes an io.Writer. We should have two versions of each, one that takes a []byte and one that takes an
+	// TODO io.Reader or io.Writer. The []byte version will need to return a thing that can return the finished []byte since
+	// TODO it might be reallocated when appended to.
+	return newValueReader(b)
+}
+
+// NewBSONValueReader returns a ValueReader that starts in the Value mode instead of in top
+// level document mode. This enables the creation of a ValueReader for a single BSON value.
+func NewBSONValueReader(t bsontype.Type, val []byte) ValueReader {
+	stack := make([]vrState, 1, 5)
+	stack[0] = vrState{
+		mode:  mValue,
+		vType: t,
+	}
+	return &valueReader{
+		d:     val,
+		stack: stack,
+	}
+}
+
+func newValueReader(b []byte) *valueReader {
+	stack := make([]vrState, 1, 5)
+	stack[0] = vrState{
+		mode: mTopLevel,
+	}
+	return &valueReader{
+		d:     b,
+		stack: stack,
+	}
+}
+
+func (vr *valueReader) reset(b []byte) {
+	if vr.stack == nil {
+		vr.stack = make([]vrState, 1, 5)
+	}
+	vr.stack = vr.stack[:1]
+	vr.stack[0] = vrState{mode: mTopLevel}
+	vr.d = b
+	vr.offset = 0
+	vr.frame = 0
+}
+
+func (vr *valueReader) advanceFrame() {
+	if vr.frame+1 >= int64(len(vr.stack)) { // We need to grow the stack
+		length := len(vr.stack)
+		if length+1 >= cap(vr.stack) {
+			// double it
+			buf := make([]vrState, 2*cap(vr.stack)+1)
+			copy(buf, vr.stack)
+			vr.stack = buf
+		}
+		vr.stack = vr.stack[:length+1]
+	}
+	vr.frame++
+
+	// Clean the stack
+	vr.stack[vr.frame].mode = 0
+	vr.stack[vr.frame].vType = 0
+	vr.stack[vr.frame].end = 0
+}
+
+func (vr *valueReader) pushDocument() error {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mDocument
+
+	size, err := vr.readLength()
+	if err != nil {
+		return err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return nil
+}
+
+func (vr *valueReader) pushArray() error {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mArray
+
+	size, err := vr.readLength()
+	if err != nil {
+		return err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return nil
+}
+
+func (vr *valueReader) pushElement(t bsontype.Type) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mElement
+	vr.stack[vr.frame].vType = t
+}
+
+func (vr *valueReader) pushValue(t bsontype.Type) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mValue
+	vr.stack[vr.frame].vType = t
+}
+
+func (vr *valueReader) pushCodeWithScope() (int64, error) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mCodeWithScope
+
+	size, err := vr.readLength()
+	if err != nil {
+		return 0, err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return int64(size), nil
+}
+
+func (vr *valueReader) pop() {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+		vr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		vr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc...
+	}
+}
+
+func (vr *valueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     vr.stack[vr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if vr.frame != 0 {
+		te.parent = vr.stack[vr.frame-1].mode
+	}
+	return te
+}
+
+func (vr *valueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", vr.stack[vr.frame].vType, t)
+}
+
+func (vr *valueReader) invalidDocumentLengthError() error {
+	return fmt.Errorf("document is invalid, end byte is at %d, but null byte found at %d", vr.stack[vr.frame].end, vr.offset)
+}
+
+func (vr *valueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string) error {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+		if vr.stack[vr.frame].vType != t {
+			return vr.typeError(t)
+		}
+	default:
+		return vr.invalidTransitionErr(destination, callerName, []mode{mElement, mValue})
+	}
+
+	return nil
+}
+
+func (vr *valueReader) Type() bsontype.Type {
+	return vr.stack[vr.frame].vType
+}
+
+func (vr *valueReader) nextElementLength() (int32, error) {
+	var length int32
+	var err error
+	switch vr.stack[vr.frame].vType {
+	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+		length, err = vr.peekLength()
+	case bsontype.Binary:
+		length, err = vr.peekLength()
+		length += 4 + 1 // binary length + subtype byte
+	case bsontype.Boolean:
+		length = 1
+	case bsontype.DBPointer:
+		length, err = vr.peekLength()
+		length += 4 + 12 // string length + ObjectID length
+	case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
+		length = 8
+	case bsontype.Decimal128:
+		length = 16
+	case bsontype.Int32:
+		length = 4
+	case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
+		length, err = vr.peekLength()
+		length += 4
+	case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
+		length = 0
+	case bsontype.ObjectID:
+		length = 12
+	case bsontype.Regex:
+		regex := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+		if regex < 0 {
+			err = io.EOF
+			break
+		}
+		pattern := bytes.IndexByte(vr.d[vr.offset+int64(regex)+1:], 0x00)
+		if pattern < 0 {
+			err = io.EOF
+			break
+		}
+		length = int32(int64(regex) + 1 + int64(pattern) + 1)
+	default:
+		return 0, fmt.Errorf("attempted to read bytes of unknown BSON type %v", vr.stack[vr.frame].vType)
+	}
+
+	return length, err
+}
+
+func (vr *valueReader) ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel:
+		length, err := vr.peekLength()
+		if err != nil {
+			return bsontype.Type(0), nil, err
+		}
+		dst, err = vr.appendBytes(dst, length)
+		if err != nil {
+			return bsontype.Type(0), nil, err
+		}
+		return bsontype.Type(0), dst, nil
+	case mElement, mValue:
+		length, err := vr.nextElementLength()
+		if err != nil {
+			return bsontype.Type(0), dst, err
+		}
+
+		dst, err = vr.appendBytes(dst, length)
+		t := vr.stack[vr.frame].vType
+		vr.pop()
+		return t, dst, err
+	default:
+		return bsontype.Type(0), nil, vr.invalidTransitionErr(0, "ReadValueBytes", []mode{mElement, mValue})
+	}
+}
+
+func (vr *valueReader) Skip() error {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+	default:
+		return vr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	length, err := vr.nextElementLength()
+	if err != nil {
+		return err
+	}
+
+	err = vr.skipBytes(length)
+	vr.pop()
+	return err
+}
+
+func (vr *valueReader) ReadArray() (ArrayReader, error) {
+	if err := vr.ensureElementValue(bsontype.Array, mArray, "ReadArray"); err != nil {
+		return nil, err
+	}
+
+	err := vr.pushArray()
+	if err != nil {
+		return nil, err
+	}
+
+	return vr, nil
+}
+
+func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := vr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	length, err := vr.readLength()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	btype, err = vr.readByte()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Check length in case it is an old binary without a length.
+	if btype == 0x02 && length > 4 {
+		length, err = vr.readLength()
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
+	b, err = vr.readBytes(length)
+	if err != nil {
+		return nil, 0, err
+	}
+	// Make a copy of the returned byte slice because it's just a subslice from the valueReader's
+	// buffer and is not safe to return in the unmarshaled value.
+	cp := make([]byte, len(b))
+	copy(cp, b)
+
+	vr.pop()
+	return cp, btype, nil
+}
+
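+// Illustrative sketch (not in the upstream source): an "old" binary (subtype
+// 0x02) wraps its payload in a second length prefix. For the two-byte payload
+// "hi" the value bytes are
+//
+//	06 00 00 00 02 02 00 00 00 68 69
+//
+// so ReadBinary reads the outer length (6), the subtype (0x02), the inner
+// length (2), and finally the payload itself.
+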
+func (vr *valueReader) ReadBoolean() (bool, error) {
+	if err := vr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	b, err := vr.readByte()
+	if err != nil {
+		return false, err
+	}
+
+	if b > 1 {
+		return false, fmt.Errorf("invalid byte for boolean, %b", b)
+	}
+
+	vr.pop()
+	return b == 1, nil
+}
+
+func (vr *valueReader) ReadDocument() (DocumentReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel:
+		// read size
+		size, err := vr.readLength()
+		if err != nil {
+			return nil, err
+		}
+		if int(size) != len(vr.d) {
+			return nil, fmt.Errorf("invalid document length")
+		}
+		vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+		return vr, nil
+	case mElement, mValue:
+		if vr.stack[vr.frame].vType != bsontype.EmbeddedDocument {
+			return nil, vr.typeError(bsontype.EmbeddedDocument)
+		}
+	default:
+		return nil, vr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+	}
+
+	err := vr.pushDocument()
+	if err != nil {
+		return nil, err
+	}
+
+	return vr, nil
+}
+
+func (vr *valueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+	if err := vr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+		return "", nil, err
+	}
+
+	totalLength, err := vr.readLength()
+	if err != nil {
+		return "", nil, err
+	}
+	strLength, err := vr.readLength()
+	if err != nil {
+		return "", nil, err
+	}
+	if strLength <= 0 {
+		return "", nil, fmt.Errorf("invalid string length: %d", strLength)
+	}
+	strBytes, err := vr.readBytes(strLength)
+	if err != nil {
+		return "", nil, err
+	}
+	code = string(strBytes[:len(strBytes)-1])
+
+	size, err := vr.pushCodeWithScope()
+	if err != nil {
+		return "", nil, err
+	}
+
+	// The total length should equal:
+	// 4 (the total length field) + 4 (the string length field) + strLength + (scope document length)
+	componentsLength := int64(4+strLength+4) + size
+	if int64(totalLength) != componentsLength {
+		return "", nil, fmt.Errorf(
+			"length of CodeWithScope does not match lengths of components; total: %d; components: %d",
+			totalLength, componentsLength,
+		)
+	}
+	return code, vr, nil
+}
+
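+// Illustrative sketch (not in the upstream source): for the code "x" with an
+// empty scope document, the value bytes are
+//
+//	0f 00 00 00 02 00 00 00 78 00 05 00 00 00 00
+//
+// that is, the total length (15), the string length (2), "x" plus its null
+// terminator, and the 5-byte empty scope document, which is exactly the
+// componentsLength check above.
+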
+func (vr *valueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if err := vr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+		return "", oid, err
+	}
+
+	ns, err = vr.readString()
+	if err != nil {
+		return "", oid, err
+	}
+
+	oidbytes, err := vr.readBytes(12)
+	if err != nil {
+		return "", oid, err
+	}
+
+	copy(oid[:], oidbytes)
+
+	vr.pop()
+	return ns, oid, nil
+}
+
+func (vr *valueReader) ReadDateTime() (int64, error) {
+	if err := vr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+		return 0, err
+	}
+
+	i, err := vr.readi64()
+	if err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return i, nil
+}
+
+func (vr *valueReader) ReadDecimal128() (primitive.Decimal128, error) {
+	if err := vr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	b, err := vr.readBytes(16)
+	if err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	l := binary.LittleEndian.Uint64(b[0:8])
+	h := binary.LittleEndian.Uint64(b[8:16])
+
+	vr.pop()
+	return primitive.NewDecimal128(h, l), nil
+}
+
+func (vr *valueReader) ReadDouble() (float64, error) {
+	if err := vr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+		return 0, err
+	}
+
+	u, err := vr.readu64()
+	if err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return math.Float64frombits(u), nil
+}
+
+func (vr *valueReader) ReadInt32() (int32, error) {
+	if err := vr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return vr.readi32()
+}
+
+func (vr *valueReader) ReadInt64() (int64, error) {
+	if err := vr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return vr.readi64()
+}
+
+func (vr *valueReader) ReadJavascript() (code string, err error) {
+	if err := vr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadMaxKey() error {
+	if err := vr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadMinKey() error {
+	if err := vr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadNull() error {
+	if err := vr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadObjectID() (primitive.ObjectID, error) {
+	if err := vr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	oidbytes, err := vr.readBytes(12)
+	if err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	var oid primitive.ObjectID
+	copy(oid[:], oidbytes)
+
+	vr.pop()
+	return oid, nil
+}
+
+func (vr *valueReader) ReadRegex() (string, string, error) {
+	if err := vr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+		return "", "", err
+	}
+
+	pattern, err := vr.readCString()
+	if err != nil {
+		return "", "", err
+	}
+
+	options, err := vr.readCString()
+	if err != nil {
+		return "", "", err
+	}
+
+	vr.pop()
+	return pattern, options, nil
+}
+
+func (vr *valueReader) ReadString() (string, error) {
+	if err := vr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadSymbol() (symbol string, err error) {
+	if err := vr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadTimestamp() (t uint32, i uint32, err error) {
+	if err := vr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
+		return 0, 0, err
+	}
+
+	i, err = vr.readu32()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	t, err = vr.readu32()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	vr.pop()
+	return t, i, nil
+}
+
+func (vr *valueReader) ReadUndefined() error {
+	if err := vr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadElement() (string, ValueReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel, mDocument, mCodeWithScope:
+	default:
+		return "", nil, vr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
+	}
+
+	t, err := vr.readByte()
+	if err != nil {
+		return "", nil, err
+	}
+
+	if t == 0 {
+		if vr.offset != vr.stack[vr.frame].end {
+			return "", nil, vr.invalidDocumentLengthError()
+		}
+
+		vr.pop()
+		return "", nil, ErrEOD
+	}
+
+	name, err := vr.readCString()
+	if err != nil {
+		return "", nil, err
+	}
+
+	vr.pushElement(bsontype.Type(t))
+	return name, vr, nil
+}
+
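+// Illustrative sketch (not in the upstream source): the document {"a": 1},
+// with 1 stored as an int32, is encoded as
+//
+//	0c 00 00 00 10 61 00 01 00 00 00 00
+//
+// ReadElement consumes the type byte (0x10) and the C-string key ("a"); a
+// type byte of 0x00 at the frame's end offset signals ErrEOD instead.
+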
+func (vr *valueReader) ReadValue() (ValueReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mArray:
+	default:
+		return nil, vr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+	}
+
+	t, err := vr.readByte()
+	if err != nil {
+		return nil, err
+	}
+
+	if t == 0 {
+		if vr.offset != vr.stack[vr.frame].end {
+			return nil, vr.invalidDocumentLengthError()
+		}
+
+		vr.pop()
+		return nil, ErrEOA
+	}
+
+	if err := vr.skipCString(); err != nil {
+		return nil, err
+	}
+
+	vr.pushValue(bsontype.Type(t))
+	return vr, nil
+}
+
+// readBytes reads length bytes from the valueReader starting at the current offset. Note that the
+// returned byte slice is a subslice from the valueReader buffer and must be converted or copied
+// before returning in an unmarshaled value.
+func (vr *valueReader) readBytes(length int32) ([]byte, error) {
+	if length < 0 {
+		return nil, fmt.Errorf("invalid length: %d", length)
+	}
+
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return nil, io.EOF
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+
+	return vr.d[start : start+int64(length)], nil
+}
+
+func (vr *valueReader) appendBytes(dst []byte, length int32) ([]byte, error) {
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return nil, io.EOF
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+	return append(dst, vr.d[start:start+int64(length)]...), nil
+}
+
+func (vr *valueReader) skipBytes(length int32) error {
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return io.EOF
+	}
+
+	vr.offset += int64(length)
+	return nil
+}
+
+func (vr *valueReader) readByte() (byte, error) {
+	if vr.offset+1 > int64(len(vr.d)) {
+		return 0x0, io.EOF
+	}
+
+	vr.offset++
+	return vr.d[vr.offset-1], nil
+}
+
+func (vr *valueReader) skipCString() error {
+	idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+	if idx < 0 {
+		return io.EOF
+	}
+	vr.offset += int64(idx) + 1
+	return nil
+}
+
+func (vr *valueReader) readCString() (string, error) {
+	idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+	if idx < 0 {
+		return "", io.EOF
+	}
+	start := vr.offset
+	// idx does not include the null byte
+	vr.offset += int64(idx) + 1
+	return string(vr.d[start : start+int64(idx)]), nil
+}
+
+func (vr *valueReader) readString() (string, error) {
+	length, err := vr.readLength()
+	if err != nil {
+		return "", err
+	}
+
+	if int64(length)+vr.offset > int64(len(vr.d)) {
+		return "", io.EOF
+	}
+
+	if length <= 0 {
+		return "", fmt.Errorf("invalid string length: %d", length)
+	}
+
+	if vr.d[vr.offset+int64(length)-1] != 0x00 {
+		return "", fmt.Errorf("string does not end with null byte, but with %v", vr.d[vr.offset+int64(length)-1])
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+	return string(vr.d[start : start+int64(length)-1]), nil
+}
+
+func (vr *valueReader) peekLength() (int32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil
+}
+
+func (vr *valueReader) readLength() (int32, error) { return vr.readi32() }
+
+func (vr *valueReader) readi32() (int32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 4
+	return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil
+}
+
+func (vr *valueReader) readu32() (uint32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 4
+	return binary.LittleEndian.Uint32(vr.d[idx:]), nil
+}
+
+func (vr *valueReader) readi64() (int64, error) {
+	if vr.offset+8 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 8
+	return int64(binary.LittleEndian.Uint64(vr.d[idx:])), nil
+}
+
+func (vr *valueReader) readu64() (uint64, error) {
+	if vr.offset+8 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 8
+	return binary.LittleEndian.Uint64(vr.d[idx:]), nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..501c6d7f979443db3d2e0baf7eca78dc45004bee
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
@@ -0,0 +1,640 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var _ ValueWriter = (*valueWriter)(nil)
+
+var vwPool = sync.Pool{
+	New: func() interface{} {
+		return new(valueWriter)
+	},
+}
+
+func putValueWriter(vw *valueWriter) {
+	if vw != nil {
+		vw.w = nil // don't leak the writer
+		vwPool.Put(vw)
+	}
+}
+
+// BSONValueWriterPool is a pool for BSON ValueWriters.
+//
+// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
+type BSONValueWriterPool struct {
+	pool sync.Pool
+}
+
+// NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON.
+//
+// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
+func NewBSONValueWriterPool() *BSONValueWriterPool {
+	return &BSONValueWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(valueWriter)
+			},
+		},
+	}
+}
+
+// Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination.
+//
+// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
+func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter {
+	vw := bvwp.pool.Get().(*valueWriter)
+
+	// TODO: Having to call reset here with the same buffer doesn't really make sense.
+	vw.reset(vw.buf)
+	vw.buf = vw.buf[:0]
+	vw.w = w
+	return vw
+}
+
+// GetAtModeElement retrieves a ValueWriterFlusher from the pool and resets it to use w as the destination.
+//
+// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
+func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlusher {
+	vw := bvwp.Get(w).(*valueWriter)
+	vw.push(mElement)
+	return vw
+}
+
+// Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing
+// happens and ok will be false.
+//
+// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
+func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
+	bvw, ok := vw.(*valueWriter)
+	if !ok {
+		return false
+	}
+
+	bvwp.pool.Put(bvw)
+	return true
+}
+
+// This is here so that during testing we can change it and not require
+// allocating a 4GB slice.
+var maxSize = math.MaxInt32
+
+var errNilWriter = errors.New("cannot create a ValueWriter from a nil io.Writer")
+
+type errMaxDocumentSizeExceeded struct {
+	size int64
+}
+
+func (mdse errMaxDocumentSizeExceeded) Error() string {
+	return fmt.Sprintf("document size (%d) is larger than the max int32", mdse.size)
+}
+
+type vwMode int
+
+const (
+	_ vwMode = iota
+	vwTopLevel
+	vwDocument
+	vwArray
+	vwValue
+	vwElement
+	vwCodeWithScope
+)
+
+func (vm vwMode) String() string {
+	var str string
+
+	switch vm {
+	case vwTopLevel:
+		str = "TopLevel"
+	case vwDocument:
+		str = "DocumentMode"
+	case vwArray:
+		str = "ArrayMode"
+	case vwValue:
+		str = "ValueMode"
+	case vwElement:
+		str = "ElementMode"
+	case vwCodeWithScope:
+		str = "CodeWithScopeMode"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+type vwState struct {
+	mode   mode
+	key    string
+	arrkey int
+	start  int32
+}
+
+type valueWriter struct {
+	w   io.Writer
+	buf []byte
+
+	stack []vwState
+	frame int64
+}
+
+func (vw *valueWriter) advanceFrame() {
+	vw.frame++
+	if vw.frame >= int64(len(vw.stack)) {
+		vw.stack = append(vw.stack, vwState{})
+	}
+}
+
+func (vw *valueWriter) push(m mode) {
+	vw.advanceFrame()
+
+	// Clean the stack
+	vw.stack[vw.frame] = vwState{mode: m}
+
+	switch m {
+	case mDocument, mArray, mCodeWithScope:
+		vw.reserveLength() // WARN: this is not needed
+	}
+}
+
+func (vw *valueWriter) reserveLength() {
+	vw.stack[vw.frame].start = int32(len(vw.buf))
+	vw.buf = append(vw.buf, 0x00, 0x00, 0x00, 0x00)
+}
+
+func (vw *valueWriter) pop() {
+	switch vw.stack[vw.frame].mode {
+	case mElement, mValue:
+		vw.frame--
+	case mDocument, mArray, mCodeWithScope:
+		vw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+	}
+}
+
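+// Illustrative sketch (not in the upstream source): while writing
+// {"a": {"b": true}} the stack grows as
+//
+//	mTopLevel -> mElement("a") -> mDocument -> mElement("b")
+//
+// WriteBoolean pops a single frame (mElement -> mDocument), while the inner
+// WriteDocumentEnd pops two, jumping from mDocument over mElement("a") back
+// to mTopLevel.
+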
+// NewBSONValueWriter creates a ValueWriter that writes BSON to w.
+//
+// This ValueWriter will only write entire documents to the io.Writer and it
+// will buffer the document as it is built.
+func NewBSONValueWriter(w io.Writer) (ValueWriter, error) {
+	if w == nil {
+		return nil, errNilWriter
+	}
+	return newValueWriter(w), nil
+}
+
+func newValueWriter(w io.Writer) *valueWriter {
+	vw := new(valueWriter)
+	stack := make([]vwState, 1, 5)
+	stack[0] = vwState{mode: mTopLevel}
+	vw.w = w
+	vw.stack = stack
+
+	return vw
+}
+
+// TODO: only used in tests
+func newValueWriterFromSlice(buf []byte) *valueWriter {
+	vw := new(valueWriter)
+	stack := make([]vwState, 1, 5)
+	stack[0] = vwState{mode: mTopLevel}
+	vw.stack = stack
+	vw.buf = buf
+
+	return vw
+}
+
+func (vw *valueWriter) reset(buf []byte) {
+	if vw.stack == nil {
+		vw.stack = make([]vwState, 1, 5)
+	}
+	vw.stack = vw.stack[:1]
+	vw.stack[0] = vwState{mode: mTopLevel}
+	vw.buf = buf
+	vw.frame = 0
+	vw.w = nil
+}
+
+func (vw *valueWriter) invalidTransitionError(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     vw.stack[vw.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "write",
+	}
+	if vw.frame != 0 {
+		te.parent = vw.stack[vw.frame-1].mode
+	}
+	return te
+}
+
+func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error {
+	frame := &vw.stack[vw.frame]
+	switch frame.mode {
+	case mElement:
+		key := frame.key
+		if !isValidCString(key) {
+			return errors.New("BSON element key cannot contain null bytes")
+		}
+		vw.appendHeader(t, key)
+	case mValue:
+		vw.appendIntHeader(t, frame.arrkey)
+	default:
+		modes := []mode{mElement, mValue}
+		if addmodes != nil {
+			modes = append(modes, addmodes...)
+		}
+		return vw.invalidTransitionError(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (vw *valueWriter) WriteValueBytes(t bsontype.Type, b []byte) error {
+	if err := vw.writeElementHeader(t, mode(0), "WriteValueBytes"); err != nil {
+		return err
+	}
+	vw.buf = append(vw.buf, b...)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteArray() (ArrayWriter, error) {
+	if err := vw.writeElementHeader(bsontype.Array, mArray, "WriteArray"); err != nil {
+		return nil, err
+	}
+
+	vw.push(mArray)
+
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteBinary(b []byte) error {
+	return vw.WriteBinaryWithSubtype(b, 0x00)
+}
+
+func (vw *valueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
+	if err := vw.writeElementHeader(bsontype.Binary, mode(0), "WriteBinaryWithSubtype"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendBinary(vw.buf, btype, b)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteBoolean(b bool) error {
+	if err := vw.writeElementHeader(bsontype.Boolean, mode(0), "WriteBoolean"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendBoolean(vw.buf, b)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
+	if err := vw.writeElementHeader(bsontype.CodeWithScope, mCodeWithScope, "WriteCodeWithScope"); err != nil {
+		return nil, err
+	}
+
+	// CodeWithScope is different from other types because we need an extra
+	// frame on the stack. In the EndDocument code, we write the document
+	// length, pop, write the code with scope length, and pop. To simplify the
+	// pop code, we push a spacer frame that we'll always jump over.
+	vw.push(mCodeWithScope)
+	vw.buf = bsoncore.AppendString(vw.buf, code)
+	vw.push(mSpacer)
+	vw.push(mDocument)
+
+	return vw, nil
+}
+
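+// Illustrative sketch (not in the upstream source): after WriteCodeWithScope
+// the stack looks like
+//
+//	... -> mCodeWithScope -> mSpacer -> mDocument
+//
+// so when the scope document is closed, WriteDocumentEnd's double pop jumps
+// from mDocument over mSpacer to mCodeWithScope, where the outer length is
+// back-patched before the final pop.
+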
+func (vw *valueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
+	if err := vw.writeElementHeader(bsontype.DBPointer, mode(0), "WriteDBPointer"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDBPointer(vw.buf, ns, oid)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDateTime(dt int64) error {
+	if err := vw.writeElementHeader(bsontype.DateTime, mode(0), "WriteDateTime"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDateTime(vw.buf, dt)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDecimal128(d128 primitive.Decimal128) error {
+	if err := vw.writeElementHeader(bsontype.Decimal128, mode(0), "WriteDecimal128"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDecimal128(vw.buf, d128)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDouble(f float64) error {
+	if err := vw.writeElementHeader(bsontype.Double, mode(0), "WriteDouble"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDouble(vw.buf, f)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteInt32(i32 int32) error {
+	if err := vw.writeElementHeader(bsontype.Int32, mode(0), "WriteInt32"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendInt32(vw.buf, i32)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteInt64(i64 int64) error {
+	if err := vw.writeElementHeader(bsontype.Int64, mode(0), "WriteInt64"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendInt64(vw.buf, i64)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteJavascript(code string) error {
+	if err := vw.writeElementHeader(bsontype.JavaScript, mode(0), "WriteJavascript"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendJavaScript(vw.buf, code)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteMaxKey() error {
+	if err := vw.writeElementHeader(bsontype.MaxKey, mode(0), "WriteMaxKey"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteMinKey() error {
+	if err := vw.writeElementHeader(bsontype.MinKey, mode(0), "WriteMinKey"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteNull() error {
+	if err := vw.writeElementHeader(bsontype.Null, mode(0), "WriteNull"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteObjectID(oid primitive.ObjectID) error {
+	if err := vw.writeElementHeader(bsontype.ObjectID, mode(0), "WriteObjectID"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendObjectID(vw.buf, oid)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteRegex(pattern string, options string) error {
+	if !isValidCString(pattern) || !isValidCString(options) {
+		return errors.New("BSON regex values cannot contain null bytes")
+	}
+	if err := vw.writeElementHeader(bsontype.Regex, mode(0), "WriteRegex"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendRegex(vw.buf, pattern, sortStringAlphebeticAscending(options))
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteString(s string) error {
+	if err := vw.writeElementHeader(bsontype.String, mode(0), "WriteString"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendString(vw.buf, s)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDocument() (DocumentWriter, error) {
+	if vw.stack[vw.frame].mode == mTopLevel {
+		vw.reserveLength()
+		return vw, nil
+	}
+	if err := vw.writeElementHeader(bsontype.EmbeddedDocument, mDocument, "WriteDocument", mTopLevel); err != nil {
+		return nil, err
+	}
+
+	vw.push(mDocument)
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteSymbol(symbol string) error {
+	if err := vw.writeElementHeader(bsontype.Symbol, mode(0), "WriteSymbol"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendSymbol(vw.buf, symbol)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteTimestamp(t uint32, i uint32) error {
+	if err := vw.writeElementHeader(bsontype.Timestamp, mode(0), "WriteTimestamp"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendTimestamp(vw.buf, t, i)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteUndefined() error {
+	if err := vw.writeElementHeader(bsontype.Undefined, mode(0), "WriteUndefined"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
+	switch vw.stack[vw.frame].mode {
+	case mTopLevel, mDocument:
+	default:
+		return nil, vw.invalidTransitionError(mElement, "WriteDocumentElement", []mode{mTopLevel, mDocument})
+	}
+
+	vw.push(mElement)
+	vw.stack[vw.frame].key = key
+
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteDocumentEnd() error {
+	switch vw.stack[vw.frame].mode {
+	case mTopLevel, mDocument:
+	default:
+		return fmt.Errorf("incorrect mode to end document: %s", vw.stack[vw.frame].mode)
+	}
+
+	vw.buf = append(vw.buf, 0x00)
+
+	err := vw.writeLength()
+	if err != nil {
+		return err
+	}
+
+	if vw.stack[vw.frame].mode == mTopLevel {
+		if err = vw.Flush(); err != nil {
+			return err
+		}
+	}
+
+	vw.pop()
+
+	if vw.stack[vw.frame].mode == mCodeWithScope {
+		// We ignore the error here because of the guarantee of writeLength.
+		// See the docs for writeLength for more info.
+		_ = vw.writeLength()
+		vw.pop()
+	}
+	return nil
+}
+
+func (vw *valueWriter) Flush() error {
+	if vw.w == nil {
+		return nil
+	}
+
+	if _, err := vw.w.Write(vw.buf); err != nil {
+		return err
+	}
+	// reset buffer
+	vw.buf = vw.buf[:0]
+	return nil
+}
+
+func (vw *valueWriter) WriteArrayElement() (ValueWriter, error) {
+	if vw.stack[vw.frame].mode != mArray {
+		return nil, vw.invalidTransitionError(mValue, "WriteArrayElement", []mode{mArray})
+	}
+
+	arrkey := vw.stack[vw.frame].arrkey
+	vw.stack[vw.frame].arrkey++
+
+	vw.push(mValue)
+	vw.stack[vw.frame].arrkey = arrkey
+
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteArrayEnd() error {
+	if vw.stack[vw.frame].mode != mArray {
+		return fmt.Errorf("incorrect mode to end array: %s", vw.stack[vw.frame].mode)
+	}
+
+	vw.buf = append(vw.buf, 0x00)
+
+	err := vw.writeLength()
+	if err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+// NOTE: We assume that if writeLength is called more than once within the
+// same function, without altering vw.buf in between, it will not return an
+// error. If this changes, ensure that the following methods are updated:
+// updated:
+//
+// - WriteDocumentEnd
+func (vw *valueWriter) writeLength() error {
+	length := len(vw.buf)
+	if length > maxSize {
+		return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))}
+	}
+	frame := &vw.stack[vw.frame]
+	length -= int(frame.start)
+	start := frame.start
+
+	_ = vw.buf[start+3] // BCE
+	vw.buf[start+0] = byte(length)
+	vw.buf[start+1] = byte(length >> 8)
+	vw.buf[start+2] = byte(length >> 16)
+	vw.buf[start+3] = byte(length >> 24)
+	return nil
+}
+
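+// Illustrative sketch (not in the upstream source): once the empty document
+// {} has been finished, the buffer holds the four bytes reserved by
+// reserveLength plus the terminating null, and writeLength back-patches the
+// little-endian length:
+//
+//	00 00 00 00 00  ->  05 00 00 00 00
+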
+func isValidCString(cs string) bool {
+	// Disallow the zero byte in a cstring because the zero byte is used as the
+	// terminating character.
+	//
+	// It's safe to check bytes instead of runes because all multibyte UTF-8
+	// code points start with (binary) 11xxxxxx or 10xxxxxx, so 00000000 (i.e.
+	// 0) will never be part of a multibyte UTF-8 code point. This logic is the
+	// same as the "r < utf8.RuneSelf" case in strings.IndexRune but can be
+	// inlined.
+	//
+	// https://cs.opensource.google/go/go/+/refs/tags/go1.21.1:src/strings/strings.go;l=127
+	return strings.IndexByte(cs, 0) == -1
+}
+
+// appendHeader is the same as bsoncore.AppendHeader but does not check if the
+// key is a valid C string since the caller has already checked for that.
+//
+// The caller of this function must check if key is a valid C string.
+func (vw *valueWriter) appendHeader(t bsontype.Type, key string) {
+	vw.buf = bsoncore.AppendType(vw.buf, t)
+	vw.buf = append(vw.buf, key...)
+	vw.buf = append(vw.buf, 0x00)
+}
+
+func (vw *valueWriter) appendIntHeader(t bsontype.Type, key int) {
+	vw.buf = bsoncore.AppendType(vw.buf, t)
+	vw.buf = strconv.AppendInt(vw.buf, int64(key), 10)
+	vw.buf = append(vw.buf, 0x00)
+}
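+
+// Illustrative note (not in the upstream source): BSON arrays are encoded as
+// documents whose keys are decimal string indexes, so appendIntHeader with
+// arrkey 2 emits the same header bytes as appendHeader(t, "2").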
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..628f452932da9096b3aa4f662afd3ba531144ffe
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ArrayWriter is the interface used to create a BSON or BSON adjacent array.
+// Callers must ensure they call WriteArrayEnd when they have finished creating
+// the array.
+type ArrayWriter interface {
+	WriteArrayElement() (ValueWriter, error)
+	WriteArrayEnd() error
+}
+
+// DocumentWriter is the interface used to create a BSON or BSON adjacent
+// document. Callers must ensure they call WriteDocumentEnd when they have
+// finished creating the document.
+type DocumentWriter interface {
+	WriteDocumentElement(string) (ValueWriter, error)
+	WriteDocumentEnd() error
+}
+
+// ValueWriter is the interface used to write BSON values. Implementations of
+// this interface handle creating BSON or BSON adjacent representations of the
+// values.
+type ValueWriter interface {
+	WriteArray() (ArrayWriter, error)
+	WriteBinary(b []byte) error
+	WriteBinaryWithSubtype(b []byte, btype byte) error
+	WriteBoolean(bool) error
+	WriteCodeWithScope(code string) (DocumentWriter, error)
+	WriteDBPointer(ns string, oid primitive.ObjectID) error
+	WriteDateTime(dt int64) error
+	WriteDecimal128(primitive.Decimal128) error
+	WriteDouble(float64) error
+	WriteInt32(int32) error
+	WriteInt64(int64) error
+	WriteJavascript(code string) error
+	WriteMaxKey() error
+	WriteMinKey() error
+	WriteNull() error
+	WriteObjectID(primitive.ObjectID) error
+	WriteRegex(pattern, options string) error
+	WriteString(string) error
+	WriteDocument() (DocumentWriter, error)
+	WriteSymbol(symbol string) error
+	WriteTimestamp(t, i uint32) error
+	WriteUndefined() error
+}
+
+// ValueWriterFlusher is a superset of ValueWriter that exposes functionality to flush to the underlying buffer.
+//
+// Deprecated: ValueWriterFlusher will not be supported in Go Driver 2.0.
+type ValueWriterFlusher interface {
+	ValueWriter
+	Flush() error
+}
+
+// BytesWriter is the interface used to write BSON bytes to a ValueWriter.
+// This interface is meant to be a superset of ValueWriter, so that types that
+// implement ValueWriter may also implement this interface.
+//
+// Deprecated: BytesWriter will not be supported in Go Driver 2.0.
+type BytesWriter interface {
+	WriteValueBytes(t bsontype.Type, b []byte) error
+}
+
+// SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer.
+//
+// Deprecated: SliceWriter will not be supported in Go Driver 2.0.
+type SliceWriter []byte
+
+// Write writes the bytes to the underlying slice.
+//
+// Deprecated: SliceWriter will not be supported in Go Driver 2.0.
+func (sw *SliceWriter) Write(p []byte) (int, error) {
+	written := len(p)
+	*sw = append(*sw, p...)
+	return written, nil
+}
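+
+// Example usage (an illustrative sketch, not part of the upstream API): a
+// SliceWriter can serve as the io.Writer destination for a ValueWriter:
+//
+//	var sw SliceWriter
+//	vw, err := NewBSONValueWriter(&sw)
+//	// ... build a document with vw; when the top-level document is
+//	// finished, the writer flushes and sw holds the encoded bytes.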
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
new file mode 100644
index 0000000000000000000000000000000000000000..255d9909e3d2c6c713d35c36b22f7dcc3b486347
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
@@ -0,0 +1,116 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsontype is a utility package that contains types for each BSON type and
+// a stringifier for the Type to enable easier debugging when working with BSON.
+package bsontype // import "go.mongodb.org/mongo-driver/bson/bsontype"
+
+// BSON element types as described in https://bsonspec.org/spec.html.
+//
+// Deprecated: Use bson.Type* constants instead.
+const (
+	Double           Type = 0x01
+	String           Type = 0x02
+	EmbeddedDocument Type = 0x03
+	Array            Type = 0x04
+	Binary           Type = 0x05
+	Undefined        Type = 0x06
+	ObjectID         Type = 0x07
+	Boolean          Type = 0x08
+	DateTime         Type = 0x09
+	Null             Type = 0x0A
+	Regex            Type = 0x0B
+	DBPointer        Type = 0x0C
+	JavaScript       Type = 0x0D
+	Symbol           Type = 0x0E
+	CodeWithScope    Type = 0x0F
+	Int32            Type = 0x10
+	Timestamp        Type = 0x11
+	Int64            Type = 0x12
+	Decimal128       Type = 0x13
+	MinKey           Type = 0xFF
+	MaxKey           Type = 0x7F
+)
+
+// BSON binary element subtypes as described in https://bsonspec.org/spec.html.
+//
+// Deprecated: Use the bson.TypeBinary* constants instead.
+const (
+	BinaryGeneric     byte = 0x00
+	BinaryFunction    byte = 0x01
+	BinaryBinaryOld   byte = 0x02
+	BinaryUUIDOld     byte = 0x03
+	BinaryUUID        byte = 0x04
+	BinaryMD5         byte = 0x05
+	BinaryEncrypted   byte = 0x06
+	BinaryColumn      byte = 0x07
+	BinarySensitive   byte = 0x08
+	BinaryUserDefined byte = 0x80
+)
+
+// Type represents a BSON type.
+type Type byte
+
+// String returns the string representation of the BSON type's name.
+func (bt Type) String() string {
+	switch bt {
+	case '\x01':
+		return "double"
+	case '\x02':
+		return "string"
+	case '\x03':
+		return "embedded document"
+	case '\x04':
+		return "array"
+	case '\x05':
+		return "binary"
+	case '\x06':
+		return "undefined"
+	case '\x07':
+		return "objectID"
+	case '\x08':
+		return "boolean"
+	case '\x09':
+		return "UTC datetime"
+	case '\x0A':
+		return "null"
+	case '\x0B':
+		return "regex"
+	case '\x0C':
+		return "dbPointer"
+	case '\x0D':
+		return "javascript"
+	case '\x0E':
+		return "symbol"
+	case '\x0F':
+		return "code with scope"
+	case '\x10':
+		return "32-bit integer"
+	case '\x11':
+		return "timestamp"
+	case '\x12':
+		return "64-bit integer"
+	case '\x13':
+		return "128-bit decimal"
+	case '\xFF':
+		return "min key"
+	case '\x7F':
+		return "max key"
+	default:
+		return "invalid"
+	}
+}
+
+// IsValid will return true if the Type is valid.
+func (bt Type) IsValid() bool {
+	switch bt {
+	case Double, String, EmbeddedDocument, Array, Binary, Undefined, ObjectID, Boolean, DateTime, Null, Regex,
+		DBPointer, JavaScript, Symbol, CodeWithScope, Int32, Timestamp, Int64, Decimal128, MinKey, MaxKey:
+		return true
+	default:
+		return false
+	}
+}
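+
+// Example (illustrative):
+//
+//	bsontype.Array.String()       // "array"
+//	bsontype.Type(0x42).IsValid() // false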
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..eac74cd39990ff51270750db5c859ef1cd4d68a1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go
@@ -0,0 +1,208 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+)
+
+// ErrDecodeToNil is the error returned when trying to decode to a nil value
+var ErrDecodeToNil = errors.New("cannot Decode to nil value")
+
+// This pool is used to keep the allocations of Decoders down. This is only used for the Unmarshal*
+// methods and is not consumable from outside of this package. The Decoders retrieved from this pool
+// must have both Reset and SetRegistry called on them.
+var decPool = sync.Pool{
+	New: func() interface{} {
+		return new(Decoder)
+	},
+}
+
+// A Decoder reads and decodes BSON documents from a stream. It reads from a bsonrw.ValueReader as
+// the source of BSON data.
+type Decoder struct {
+	dc bsoncodec.DecodeContext
+	vr bsonrw.ValueReader
+
+	// We persist defaultDocumentM and defaultDocumentD on the Decoder to prevent overwriting from
+	// (*Decoder).SetContext.
+	defaultDocumentM bool
+	defaultDocumentD bool
+
+	binaryAsSlice     bool
+	useJSONStructTags bool
+	useLocalTimeZone  bool
+	zeroMaps          bool
+	zeroStructs       bool
+}
+
+// NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr.
+func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) {
+	if vr == nil {
+		return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
+	}
+
+	return &Decoder{
+		dc: bsoncodec.DecodeContext{Registry: DefaultRegistry},
+		vr: vr,
+	}, nil
+}
+
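+// Example usage (an illustrative sketch):
+//
+//	dec, err := NewDecoder(bsonrw.NewBSONDocumentReader(data))
+//	if err != nil {
+//		return err
+//	}
+//	var doc D
+//	err = dec.Decode(&doc)
+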
+// NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr.
+//
+// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal
+// behavior instead.
+func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) {
+	if dc.Registry == nil {
+		dc.Registry = DefaultRegistry
+	}
+	if vr == nil {
+		return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
+	}
+
+	return &Decoder{
+		dc: dc,
+		vr: vr,
+	}, nil
+}
+
+// Decode reads the next BSON document from the stream and decodes it into the
+// value pointed to by val.
+//
+// See [Unmarshal] for details about BSON unmarshaling behavior.
+func (d *Decoder) Decode(val interface{}) error {
+	if unmarshaler, ok := val.(Unmarshaler); ok {
+		// TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method.
+		buf, err := bsonrw.Copier{}.CopyDocumentToBytes(d.vr)
+		if err != nil {
+			return err
+		}
+		return unmarshaler.UnmarshalBSON(buf)
+	}
+
+	rval := reflect.ValueOf(val)
+	switch rval.Kind() {
+	case reflect.Ptr:
+		if rval.IsNil() {
+			return ErrDecodeToNil
+		}
+		rval = rval.Elem()
+	case reflect.Map:
+		if rval.IsNil() {
+			return ErrDecodeToNil
+		}
+	default:
+		return fmt.Errorf("argument to Decode must be a pointer or a map, but got %v", rval)
+	}
+	decoder, err := d.dc.LookupDecoder(rval.Type())
+	if err != nil {
+		return err
+	}
+
+	if d.defaultDocumentM {
+		d.dc.DefaultDocumentM()
+	}
+	if d.defaultDocumentD {
+		d.dc.DefaultDocumentD()
+	}
+	if d.binaryAsSlice {
+		d.dc.BinaryAsSlice()
+	}
+	if d.useJSONStructTags {
+		d.dc.UseJSONStructTags()
+	}
+	if d.useLocalTimeZone {
+		d.dc.UseLocalTimeZone()
+	}
+	if d.zeroMaps {
+		d.dc.ZeroMaps()
+	}
+	if d.zeroStructs {
+		d.dc.ZeroStructs()
+	}
+
+	return decoder.DecodeValue(d.dc, d.vr, rval)
+}
+
+// Reset will reset the state of the decoder, using the same DecodeContext used in
+// the original construction but reading from vr.
+func (d *Decoder) Reset(vr bsonrw.ValueReader) error {
+	// TODO:(GODRIVER-2719): Remove error return value.
+	d.vr = vr
+	return nil
+}
+
+// SetRegistry replaces the current registry of the decoder with r.
+func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error {
+	// TODO:(GODRIVER-2719): Remove error return value.
+	d.dc.Registry = r
+	return nil
+}
+
+// SetContext replaces the current DecodeContext of the decoder with dc.
+//
+// Deprecated: Use the Decoder configuration methods to set the desired unmarshal behavior instead.
+func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error {
+	// TODO:(GODRIVER-2719): Remove error return value.
+	d.dc = dc
+	return nil
+}
+
+// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+func (d *Decoder) DefaultDocumentM() {
+	d.defaultDocumentM = true
+}
+
+// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+func (d *Decoder) DefaultDocumentD() {
+	d.defaultDocumentD = true
+}
+
+// AllowTruncatingDoubles causes the Decoder to truncate the fractional part of BSON "double" values
+// when attempting to unmarshal them into a Go integer (int, int8, int16, int32, or int64) struct
+// field. The truncation logic does not apply to BSON "decimal128" values.
+func (d *Decoder) AllowTruncatingDoubles() {
+	d.dc.Truncate = true
+}
+
+// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
+// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+func (d *Decoder) BinaryAsSlice() {
+	d.binaryAsSlice = true
+}
+
+// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+func (d *Decoder) UseJSONStructTags() {
+	d.useJSONStructTags = true
+}
+
+// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
+// of the UTC timezone.
+func (d *Decoder) UseLocalTimeZone() {
+	d.useLocalTimeZone = true
+}
+
+// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
+// passed to Decode before unmarshaling BSON documents into them.
+func (d *Decoder) ZeroMaps() {
+	d.zeroMaps = true
+}
+
+// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
+// value passed to Decode before unmarshaling BSON documents into them.
+func (d *Decoder) ZeroStructs() {
+	d.zeroStructs = true
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb075b47892ff4374cc3fb584c61c732a12807c3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go
@@ -0,0 +1,142 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to
+// store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org.
+// The BSON library handles marshaling and unmarshaling of values through a configurable codec system. For a description
+// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information
+// and usage examples, check out the [Work with BSON] page in the Go Driver docs site.
+//
+// # Raw BSON
+//
+// The Raw family of types is used to validate and retrieve elements from a slice of bytes. This
+// type is most useful when you want to do lookups on BSON bytes without unmarshaling them into another
+// type.
+//
+// Example:
+//
+//	var raw bson.Raw = ... // bytes from somewhere
+//	err := raw.Validate()
+//	if err != nil { return err }
+//	val := raw.Lookup("foo")
+//	i32, ok := val.Int32OK()
+//	// do something with i32...
+//
+// # Native Go Types
+//
+// The D and M types defined in this package can be used to build representations of BSON using native Go types. D is a
+// slice and M is a map. For more information about the use cases for these types, see the documentation on the type
+// definitions.
+//
+// Note that a D should not be constructed with duplicate key names, as that can cause undefined server behavior.
+//
+// Example:
+//
+//	bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//	bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+//
+// When decoding BSON to a D or M, the following type mappings apply when unmarshaling:
+//
+//  1. BSON int32 unmarshals to an int32.
+//  2. BSON int64 unmarshals to an int64.
+//  3. BSON double unmarshals to a float64.
+//  4. BSON string unmarshals to a string.
+//  5. BSON boolean unmarshals to a bool.
+//  6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M).
+//  7. BSON array unmarshals to a bson.A.
+//  8. BSON ObjectId unmarshals to a primitive.ObjectID.
+//  9. BSON datetime unmarshals to a primitive.DateTime.
+//  10. BSON binary unmarshals to a primitive.Binary.
+//  11. BSON regular expression unmarshals to a primitive.Regex.
+//  12. BSON JavaScript unmarshals to a primitive.JavaScript.
+//  13. BSON code with scope unmarshals to a primitive.CodeWithScope.
+//  14. BSON timestamp unmarshals to a primitive.Timestamp.
+//  15. BSON 128-bit decimal unmarshals to a primitive.Decimal128.
+//  16. BSON min key unmarshals to a primitive.MinKey.
+//  17. BSON max key unmarshals to a primitive.MaxKey.
+//  18. BSON undefined unmarshals to a primitive.Undefined.
+//  19. BSON null unmarshals to nil.
+//  20. BSON DBPointer unmarshals to a primitive.DBPointer.
+//  21. BSON symbol unmarshals to a primitive.Symbol.
+//
+// The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are:
+//
+//  1. time.Time marshals to a BSON datetime.
+//  2. int8, int16, and int32 marshal to a BSON int32.
+//  3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64
+//     otherwise.
+//  4. int64 marshals to BSON int64 (unless [Encoder.IntMinSize] is set).
+//  5. uint8 and uint16 marshal to a BSON int32.
+//  6. uint, uint32, and uint64 marshal to a BSON int64 (unless [Encoder.IntMinSize] is set).
+//  7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshaling a BSON null or
+//     undefined value into a string will yield the empty string).
+//
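+// For example, an illustrative struct exercising these marshaling mappings:
+//
+//	type Stats struct {
+//		Hits    int32     // marshals to a BSON int32
+//		Total   int64     // marshals to a BSON int64
+//		Started time.Time // marshals to a BSON datetime
+//	}
+//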
+// # Structs
+//
+// Structs can be marshaled/unmarshaled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended
+// JSON, the following rules apply:
+//
+//  1. Only exported fields in structs will be marshaled or unmarshaled.
+//
+//  2. When marshaling a struct, each field will be lowercased to generate the key for the corresponding BSON element.
+//     For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g.
+//     `bson:"fooField"` to generate key "fooField" instead).
+//
+//  3. An embedded struct field is marshaled as a subdocument. The key will be the lowercased name of the field's type.
+//
+//  4. A pointer field is marshaled as the underlying type if the pointer is non-nil. If the pointer is nil, it is
+//     marshaled as a BSON null value.
+//
+//  5. When unmarshaling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents
+//     unmarshaled into an interface{} field will be unmarshaled as a D.
+//
+// The encoding of each struct field can be customized by the "bson" struct tag.
+//
+// This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new
+// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON
+// tags are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below:
+//
+// Example:
+//
+//	structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser)
+//
+// The bson tag gives the name of the field, possibly followed by a comma-separated list of options.
+// The name may be empty in order to specify options without overriding the default field name. The following options can
+// be used to configure behavior:
+//
+//  1. omitempty: If the "omitempty" struct tag is specified on a field, the field will not be marshaled if it is set to
+//     an "empty" value. Numbers, booleans, and strings are considered empty if their value is equal to the zero value for
+//     the type (i.e. 0 for numbers, false for booleans, and "" for strings). Slices, maps, and arrays are considered
+//     empty if they are of length zero. Interfaces and pointers are considered empty if their value is nil. By default,
+//     structs are only considered empty if the struct type implements [bsoncodec.Zeroer] and the IsZero
+//     method returns true. Struct types that do not implement [bsoncodec.Zeroer] are never considered empty and will be
+//     marshaled as embedded documents. NOTE: It is recommended that this tag be used for all slice and map fields.
+//
+//  2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of
+//     the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For
+//     other types, this tag is ignored.
+//
+//  3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles
+//     unmarshaled into that field will be truncated at the decimal point. For example, if 3.14 is unmarshaled into a
+//     field of type int, it will be unmarshaled as 3. If this tag is not specified, the decoder will throw an error if
+//     the value cannot be decoded without losing precision. For float64 or non-numeric types, this tag is ignored.
+//
+//  4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when
+//     marshaling and "un-flattened" when unmarshaling. This means that all of the fields in that struct/map will be
+//     pulled up one level and will become top-level fields rather than being fields in a nested document. For example,
+//     if a map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will
+//     be {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If
+//     there are duplicated fields in the resulting document when an inlined struct is marshaled, the inlined field will
+//     be overwritten. If there are duplicated fields in the resulting document when an inlined map is marshaled, an
+//     error will be returned. This tag can be used with fields that are pointers to structs. If an inlined pointer field
+//     is nil, it will not be marshaled. For fields that are not maps or structs, this tag is ignored.
+//
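+// For example, an illustrative struct combining these options:
+//
+//	type User struct {
+//		ID      primitive.ObjectID `bson:"_id,omitempty"`
+//		Name    string             `bson:"name"`
+//		Balance int64              `bson:"balance,minsize"`
+//		Extra   map[string]string  `bson:",inline"`
+//	}
+//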
+// # Marshaling and Unmarshaling
+//
+// Manually marshaling and unmarshaling can be done with the Marshal and Unmarshal family of functions.
+//
+// [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/
+package bson
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..0be2a97fbcd01bfabc1107df357f57abcf48dc29
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go
@@ -0,0 +1,199 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+)
+
+// This pool is used to keep the allocations of Encoders down. This is only used for the Marshal*
+// methods and is not consumable from outside of this package. The Encoders retrieved from this pool
+// must have both Reset and SetRegistry called on them.
+var encPool = sync.Pool{
+	New: func() interface{} {
+		return new(Encoder)
+	},
+}
+
+// An Encoder writes a serialization format to an output stream. It writes to a bsonrw.ValueWriter
+// as the destination of BSON data.
+type Encoder struct {
+	ec bsoncodec.EncodeContext
+	vw bsonrw.ValueWriter
+
+	errorOnInlineDuplicates bool
+	intMinSize              bool
+	stringifyMapKeysWithFmt bool
+	nilMapAsEmpty           bool
+	nilSliceAsEmpty         bool
+	nilByteSliceAsEmpty     bool
+	omitZeroStruct          bool
+	useJSONStructTags       bool
+}
+
+// NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw.
+func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) {
+	// TODO:(GODRIVER-2719): Remove error return value.
+	if vw == nil {
+		return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
+	}
+
+	return &Encoder{
+		ec: bsoncodec.EncodeContext{Registry: DefaultRegistry},
+		vw: vw,
+	}, nil
+}
+
+// NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw.
+//
+// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal
+// behavior instead.
+func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) {
+	if ec.Registry == nil {
+		ec = bsoncodec.EncodeContext{Registry: DefaultRegistry}
+	}
+	if vw == nil {
+		return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
+	}
+
+	return &Encoder{
+		ec: ec,
+		vw: vw,
+	}, nil
+}
+
+// Encode writes the BSON encoding of val to the stream.
+//
+// See [Marshal] for details about BSON marshaling behavior.
+func (e *Encoder) Encode(val interface{}) error {
+	if marshaler, ok := val.(Marshaler); ok {
+		// TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse?
+		buf, err := marshaler.MarshalBSON()
+		if err != nil {
+			return err
+		}
+		return bsonrw.Copier{}.CopyDocumentFromBytes(e.vw, buf)
+	}
+
+	encoder, err := e.ec.LookupEncoder(reflect.TypeOf(val))
+	if err != nil {
+		return err
+	}
+
+	// Copy the configurations applied to the Encoder over to the EncodeContext, which actually
+	// communicates those configurations to the default ValueEncoders.
+	if e.errorOnInlineDuplicates {
+		e.ec.ErrorOnInlineDuplicates()
+	}
+	if e.intMinSize {
+		e.ec.MinSize = true
+	}
+	if e.stringifyMapKeysWithFmt {
+		e.ec.StringifyMapKeysWithFmt()
+	}
+	if e.nilMapAsEmpty {
+		e.ec.NilMapAsEmpty()
+	}
+	if e.nilSliceAsEmpty {
+		e.ec.NilSliceAsEmpty()
+	}
+	if e.nilByteSliceAsEmpty {
+		e.ec.NilByteSliceAsEmpty()
+	}
+	if e.omitZeroStruct {
+		e.ec.OmitZeroStruct()
+	}
+	if e.useJSONStructTags {
+		e.ec.UseJSONStructTags()
+	}
+
+	return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val))
+}
+
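+// Example usage (an illustrative sketch):
+//
+//	buf := new(bytes.Buffer)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		return err
+//	}
+//	enc, err := NewEncoder(vw)
+//	if err != nil {
+//		return err
+//	}
+//	enc.IntMinSize()
+//	err = enc.Encode(M{"count": int64(1)}) // stored as an int32, since it fits
+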
+// Reset will reset the state of the Encoder, using the same EncodeContext used in
+// the original construction but writing to vw.
+func (e *Encoder) Reset(vw bsonrw.ValueWriter) error {
+	// TODO:(GODRIVER-2719): Remove error return value.
+	e.vw = vw
+	return nil
+}
+
+// SetRegistry replaces the current registry of the Encoder with r.
+func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error {
+	// TODO:(GODRIVER-2719): Remove error return value.
+	e.ec.Registry = r
+	return nil
+}
+
+// SetContext replaces the current EncodeContext of the encoder with ec.
+//
+// Deprecated: Use the Encoder configuration methods to set the desired marshal behavior instead.
+func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error {
+	// TODO:(GODRIVER-2719): Remove error return value.
+	e.ec = ec
+	return nil
+}
+
+// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in
+// the marshaled BSON when the "inline" struct tag option is set.
+func (e *Encoder) ErrorOnInlineDuplicates() {
+	e.errorOnInlineDuplicates = true
+}
+
+// IntMinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, uint,
+// uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) that can
+// represent the integer value.
+func (e *Encoder) IntMinSize() {
+	e.intMinSize = true
+}
+
+// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name
+// strings using fmt.Sprint instead of the default string conversion logic.
+func (e *Encoder) StringifyMapKeysWithFmt() {
+	e.stringifyMapKeysWithFmt = true
+}
+
+// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON
+// null.
+func (e *Encoder) NilMapAsEmpty() {
+	e.nilMapAsEmpty = true
+}
+
+// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON
+// null.
+func (e *Encoder) NilSliceAsEmpty() {
+	e.nilSliceAsEmpty = true
+}
+
+// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values
+// instead of BSON null.
+func (e *Encoder) NilByteSliceAsEmpty() {
+	e.nilByteSliceAsEmpty = true
+}
+
+// TODO(GODRIVER-2820): Update the description to remove the note about only examining exported
+// TODO struct fields once the logic is updated to also inspect private struct fields.
+
+// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{})
+// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set.
+//
+// Note that the Encoder only examines exported struct fields when determining if a struct is the
+// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty.
+func (e *Encoder) OmitZeroStruct() {
+	e.omitZeroStruct = true
+}
+
+// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+func (e *Encoder) UseJSONStructTags() {
+	e.useJSONStructTags = true
+}
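+
+// As an illustrative sketch (not part of the driver API), the configuration
+// methods above are typically applied before the first call to Encode; the
+// buffer and document below are hypothetical:
+//
+//	buf := new(bytes.Buffer)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.IntMinSize()
+//	enc.NilMapAsEmpty()
+//	if err := enc.Encode(bson.M{"count": 1}); err != nil {
+//		panic(err)
+//	}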
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
new file mode 100644
index 0000000000000000000000000000000000000000..17ce6697e04fd4308293e97a2d19ab1da59ddfe7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
@@ -0,0 +1,453 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"bytes"
+	"encoding/json"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+const defaultDstCap = 256
+
+var bvwPool = bsonrw.NewBSONValueWriterPool()
+var extjPool = bsonrw.NewExtJSONValueWriterPool()
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into a valid BSON document.
+//
+// Implementations of Marshaler must return a full BSON document. To create
+// custom BSON marshaling behavior for individual values in a BSON document,
+// implement the ValueMarshaler interface instead.
+type Marshaler interface {
+	MarshalBSON() ([]byte, error)
+}
+
+// ValueMarshaler is the interface implemented by types that can marshal
+// themselves into a valid BSON value. The format of the returned bytes must
+// match the returned type.
+//
+// Implementations of ValueMarshaler must return an individual BSON value. To
+// create custom BSON marshaling behavior for an entire BSON document, implement
+// the Marshaler interface instead.
+type ValueMarshaler interface {
+	MarshalBSONValue() (bsontype.Type, []byte, error)
+}
+
+// Marshal returns the BSON encoding of val as a BSON document. If val is not a type that can be transformed into a
+// document, MarshalValue should be used instead.
+//
+// Marshal will use the default registry created by NewRegistry to recursively
+// marshal val into a []byte. Marshal will inspect struct tags and alter the
+// marshaling process accordingly.
+func Marshal(val interface{}) ([]byte, error) {
+	return MarshalWithRegistry(DefaultRegistry, val)
+}
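+
+// As a brief illustrative sketch (the struct and values below are
+// hypothetical), Marshal is typically called with a tagged struct:
+//
+//	type person struct {
+//		Name string `bson:"name"`
+//		Age  int    `bson:"age,omitempty"`
+//	}
+//
+//	doc, err := bson.Marshal(person{Name: "Ada", Age: 36})
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = doc // raw BSON document bytes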
+
+// MarshalAppend will encode val as a BSON document and append the bytes to dst. If dst is not large enough to hold the
+// bytes, it will be grown. If val is not a type that can be transformed into a document, MarshalValueAppend should be
+// used instead.
+//
+// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into
+// [bsonrw.NewBSONValueWriter]:
+//
+//	buf := bytes.NewBuffer(dst)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//
+// See [Encoder] for more examples.
+func MarshalAppend(dst []byte, val interface{}) ([]byte, error) {
+	return MarshalAppendWithRegistry(DefaultRegistry, dst, val)
+}
+
+// MarshalWithRegistry returns the BSON encoding of val as a BSON document. If val is not a type that can be transformed
+// into a document, MarshalValueWithRegistry should be used instead.
+//
+// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead:
+//
+//	buf := new(bytes.Buffer)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.SetRegistry(reg)
+//
+// See [Encoder] for more examples.
+func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) {
+	dst := make([]byte, 0)
+	return MarshalAppendWithRegistry(r, dst, val)
+}
+
+// MarshalWithContext returns the BSON encoding of val as a BSON document using EncodeContext ec. If val is not a type
+// that can be transformed into a document, MarshalValueWithContext should be used instead.
+//
+// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal
+// behavior instead:
+//
+//	buf := bytes.NewBuffer(dst)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.IntMinSize()
+//
+// See [Encoder] for more examples.
+func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) {
+	dst := make([]byte, 0)
+	return MarshalAppendWithContext(ec, dst, val)
+}
+
+// MarshalAppendWithRegistry will encode val as a BSON document using Registry r and append the bytes to dst. If dst is
+// not large enough to hold the bytes, it will be grown. If val is not a type that can be transformed into a document,
+// MarshalValueAppendWithRegistry should be used instead.
+//
+// Deprecated: Use [NewEncoder], and pass the dst byte slice (wrapped by a bytes.Buffer) into
+// [bsonrw.NewBSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] instead:
+//
+//	buf := bytes.NewBuffer(dst)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.SetRegistry(reg)
+//
+// See [Encoder] for more examples.
+func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) {
+	return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val)
+}
+
+// Pool of buffers for marshaling BSON.
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return new(bytes.Buffer)
+	},
+}
+
+// MarshalAppendWithContext will encode val as a BSON document using the Registry and settings in
+// EncodeContext ec and append the bytes to dst. If dst is not large enough to hold the bytes, it
+// will be grown. If val is not a type that can be transformed into a document,
+// MarshalValueAppendWithContext should be used instead.
+//
+// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into
+// [bsonrw.NewBSONValueWriter], and use the Encoder configuration methods to set the desired marshal
+// behavior instead:
+//
+//	buf := bytes.NewBuffer(dst)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.IntMinSize()
+//
+// See [Encoder] for more examples.
+func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) {
+	sw := bufPool.Get().(*bytes.Buffer)
+	defer func() {
+		// Proper usage of a sync.Pool requires each entry to have approximately
+		// the same memory cost. To obtain this property when the stored type
+		// contains a variably-sized buffer, we add a hard limit on the maximum
+		// buffer to place back in the pool. We limit the size to 16MiB because
+		// that's the maximum wire message size supported by any current MongoDB
+		// server.
+		//
+		// Comment based on
+		// https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147
+		//
+		// Recycle byte slices that are smaller than 16MiB and at least half
+		// occupied.
+		if sw.Cap() < 16*1024*1024 && sw.Cap()/2 < sw.Len() {
+			bufPool.Put(sw)
+		}
+	}()
+
+	sw.Reset()
+	vw := bvwPool.Get(sw)
+	defer bvwPool.Put(vw)
+
+	enc := encPool.Get().(*Encoder)
+	defer encPool.Put(enc)
+
+	err := enc.Reset(vw)
+	if err != nil {
+		return nil, err
+	}
+	err = enc.SetContext(ec)
+	if err != nil {
+		return nil, err
+	}
+
+	err = enc.Encode(val)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(dst, sw.Bytes()...), nil
+}
+
+// MarshalValue returns the BSON encoding of val.
+//
+// MarshalValue will use bson.DefaultRegistry to transform val into a BSON value. If val is a struct, this function will
+// inspect struct tags and alter the marshaling process accordingly.
+func MarshalValue(val interface{}) (bsontype.Type, []byte, error) {
+	return MarshalValueWithRegistry(DefaultRegistry, val)
+}
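+
+// As an illustrative sketch (the value is hypothetical), MarshalValue returns
+// the BSON type alongside the raw value bytes:
+//
+//	t, raw, err := bson.MarshalValue("hello")
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = t   // bsontype.String
+//	_ = raw // raw BSON string value bytes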
+
+// MarshalValueAppend will append the BSON encoding of val to dst. If dst is not large enough to hold the BSON encoding
+// of val, dst will be grown.
+//
+// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
+// Driver 2.0.
+func MarshalValueAppend(dst []byte, val interface{}) (bsontype.Type, []byte, error) {
+	return MarshalValueAppendWithRegistry(DefaultRegistry, dst, val)
+}
+
+// MarshalValueWithRegistry returns the BSON encoding of val using Registry r.
+//
+// Deprecated: Using a custom registry to marshal individual BSON values will not be supported in Go
+// Driver 2.0.
+func MarshalValueWithRegistry(r *bsoncodec.Registry, val interface{}) (bsontype.Type, []byte, error) {
+	dst := make([]byte, 0)
+	return MarshalValueAppendWithRegistry(r, dst, val)
+}
+
+// MarshalValueWithContext returns the BSON encoding of val using EncodeContext ec.
+//
+// Deprecated: Using a custom EncodeContext to marshal individual BSON elements will not be
+// supported in Go Driver 2.0.
+func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsontype.Type, []byte, error) {
+	dst := make([]byte, 0)
+	return MarshalValueAppendWithContext(ec, dst, val)
+}
+
+// MarshalValueAppendWithRegistry will append the BSON encoding of val to dst using Registry r. If dst is not large
+// enough to hold the BSON encoding of val, dst will be grown.
+//
+// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
+// Driver 2.0.
+func MarshalValueAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) (bsontype.Type, []byte, error) {
+	return MarshalValueAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val)
+}
+
+// MarshalValueAppendWithContext will append the BSON encoding of val to dst using EncodeContext ec. If dst is not large
+// enough to hold the BSON encoding of val, dst will be grown.
+//
+// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
+// Driver 2.0.
+func MarshalValueAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) (bsontype.Type, []byte, error) {
+	// get a ValueWriter configured to write to dst
+	sw := new(bsonrw.SliceWriter)
+	*sw = dst
+	vwFlusher := bvwPool.GetAtModeElement(sw)
+
+	// get an Encoder and encode the value
+	enc := encPool.Get().(*Encoder)
+	defer encPool.Put(enc)
+	if err := enc.Reset(vwFlusher); err != nil {
+		return 0, nil, err
+	}
+	if err := enc.SetContext(ec); err != nil {
+		return 0, nil, err
+	}
+	if err := enc.Encode(val); err != nil {
+		return 0, nil, err
+	}
+
+	// Flush the bytes written because we cannot guarantee that a full document has been written.
+	// After the flush, *sw will be in the format:
+	// [value type, 0 (null byte to indicate end of empty element name), value bytes...]
+	if err := vwFlusher.Flush(); err != nil {
+		return 0, nil, err
+	}
+	buffer := *sw
+	return bsontype.Type(buffer[0]), buffer[2:], nil
+}
+
+// MarshalExtJSON returns the extended JSON encoding of val.
+func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	return MarshalExtJSONWithRegistry(DefaultRegistry, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONAppend will append the extended JSON encoding of val to dst.
+// If dst is not large enough to hold the extended JSON encoding of val, dst
+// will be grown.
+//
+// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into
+// [bsonrw.NewExtJSONValueWriter] instead:
+//
+//	buf := bytes.NewBuffer(dst)
+//	vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//
+// See [Encoder] for more examples.
+func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r.
+//
+// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead:
+//
+//	buf := new(bytes.Buffer)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.SetRegistry(reg)
+//
+// See [Encoder] for more examples.
+func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	dst := make([]byte, 0, defaultDstCap)
+	return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONWithContext returns the extended JSON encoding of val using EncodeContext ec.
+//
+// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal
+// behavior instead:
+//
+//	buf := new(bytes.Buffer)
+//	vw, err := bsonrw.NewBSONValueWriter(buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.IntMinSize()
+//
+// See [Encoder] for more examples.
+func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	dst := make([]byte, 0, defaultDstCap)
+	return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of
+// val to dst using Registry r. If dst is not large enough to hold the extended
+// JSON encoding of val, dst will be grown.
+//
+// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into
+// [bsonrw.NewExtJSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry]
+// instead:
+//
+//	buf := bytes.NewBuffer(dst)
+//	vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//
+// See [Encoder] for more examples.
+func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONAppendWithContext will append the extended JSON encoding of
+// val to dst using EncodeContext ec. If dst is not large enough to hold the
+// extended JSON encoding of val, dst will be grown.
+//
+// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into
+// [bsonrw.NewExtJSONValueWriter], and use the Encoder configuration methods to set the desired marshal
+// behavior instead:
+//
+//	buf := bytes.NewBuffer(dst)
+//	vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc, err := bson.NewEncoder(vw)
+//	if err != nil {
+//		panic(err)
+//	}
+//	enc.IntMinSize()
+//
+// See [Encoder] for more examples.
+func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	sw := new(bsonrw.SliceWriter)
+	*sw = dst
+	ejvw := extjPool.Get(sw, canonical, escapeHTML)
+	defer extjPool.Put(ejvw)
+
+	enc := encPool.Get().(*Encoder)
+	defer encPool.Put(enc)
+
+	err := enc.Reset(ejvw)
+	if err != nil {
+		return nil, err
+	}
+	err = enc.SetContext(ec)
+	if err != nil {
+		return nil, err
+	}
+
+	err = enc.Encode(val)
+	if err != nil {
+		return nil, err
+	}
+
+	return *sw, nil
+}
+
+// IndentExtJSON will prefix and indent the provided extended JSON src and append it to dst.
+func IndentExtJSON(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+	return json.Indent(dst, src, prefix, indent)
+}
+
+// MarshalExtJSONIndent returns the extended JSON encoding of val with each
+// line prefixed and indented.
+func MarshalExtJSONIndent(val interface{}, canonical, escapeHTML bool, prefix, indent string) ([]byte, error) {
+	marshaled, err := MarshalExtJSON(val, canonical, escapeHTML)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	err = IndentExtJSON(&buf, marshaled, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
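+
+// As a brief illustrative sketch (the document contents are hypothetical),
+// canonical extended JSON can be produced and pretty-printed in one call:
+//
+//	j, err := bson.MarshalExtJSONIndent(bson.M{"pi": 3.14159}, true, false, "", "  ")
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(string(j))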
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
new file mode 100644
index 0000000000000000000000000000000000000000..db8be74d8498eb96685ca7441b2ac33e390a9898
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
@@ -0,0 +1,432 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package primitive
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/big"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// These constants are the maximum and minimum values for the exponent field in a decimal128 value.
+const (
+	MaxDecimal128Exp = 6111
+	MinDecimal128Exp = -6176
+)
+
+// These errors are returned when an invalid value is parsed as a big.Int.
+var (
+	ErrParseNaN    = errors.New("cannot parse NaN as a *big.Int")
+	ErrParseInf    = errors.New("cannot parse Infinity as a *big.Int")
+	ErrParseNegInf = errors.New("cannot parse -Infinity as a *big.Int")
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+	h, l uint64
+}
+
+// NewDecimal128 creates a Decimal128 using the provided high and low uint64s.
+func NewDecimal128(h, l uint64) Decimal128 {
+	return Decimal128{h: h, l: l}
+}
+
+// GetBytes returns the underlying bytes of the BSON decimal value as two uint64 values. The first
+// contains the most significant 8 bytes of the value and the second contains the least significant 8 bytes.
+func (d Decimal128) GetBytes() (uint64, uint64) {
+	return d.h, d.l
+}
+
+// String returns a string representation of the decimal value.
+func (d Decimal128) String() string {
+	var posSign int      // positive sign
+	var exp int          // exponent
+	var high, low uint64 // significand high/low
+
+	if d.h>>63&1 == 0 {
+		posSign = 1
+	}
+
+	switch d.h >> 58 & (1<<5 - 1) {
+	case 0x1F:
+		return "NaN"
+	case 0x1E:
+		return "-Infinity"[posSign:]
+	}
+
+	low = d.l
+	if d.h>>61&3 == 3 {
+		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
+		// Implicit 0b100 prefix in significand.
+		exp = int(d.h >> 47 & (1<<14 - 1))
+		// Spec says all of these values are out of range.
+		high, low = 0, 0
+	} else {
+		// Bits: 1*sign 14*exponent 113*significand
+		exp = int(d.h >> 49 & (1<<14 - 1))
+		high = d.h & (1<<49 - 1)
+	}
+	exp += MinDecimal128Exp
+
+	// This case would be handled by the logic below, but it is trivial and common, so handle it here.
+	if high == 0 && low == 0 && exp == 0 {
+		return "-0"[posSign:]
+	}
+
+	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+	var last = len(repr)
+	var i = len(repr)
+	var dot = len(repr) + exp
+	var rem uint32
+Loop:
+	for d9 := 0; d9 < 5; d9++ {
+		high, low, rem = divmod(high, low, 1e9)
+		for d1 := 0; d1 < 9; d1++ {
+			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+			if i < len(repr) && (dot == i || low == 0 && high == 0 && rem > 0 && rem < 10 && (dot < i-6 || exp > 0)) {
+				exp += len(repr) - i
+				i--
+				repr[i] = '.'
+				last = i - 1
+				dot = len(repr) // Unmark.
+			}
+			c := '0' + byte(rem%10)
+			rem /= 10
+			i--
+			repr[i] = c
+			// Handle "0E+3", "1E+3", etc.
+			if low == 0 && high == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || exp > 0) {
+				last = i
+				break Loop
+			}
+			if c != '0' {
+				last = i
+			}
+			// Break early. The loop would finish correctly without this, but there is no work left once all remaining digits are zero.
+			if dot > i && low == 0 && high == 0 && rem == 0 {
+				break Loop
+			}
+		}
+	}
+	repr[last-1] = '-'
+	last--
+
+	if exp > 0 {
+		return string(repr[last+posSign:]) + "E+" + strconv.Itoa(exp)
+	}
+	if exp < 0 {
+		return string(repr[last+posSign:]) + "E" + strconv.Itoa(exp)
+	}
+	return string(repr[last+posSign:])
+}
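+
+// As an illustrative sketch, String renders the stored significand and
+// exponent, so plain decimal forms such as those named in the comments above
+// typically round-trip:
+//
+//	d, err := ParseDecimal128("0.00123400")
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = d.String() // e.g. "0.00123400"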
+
+// BigInt returns the significand as a *big.Int and the exponent exp, such that
+// the decimal value equals bi * 10^exp.
+func (d Decimal128) BigInt() (*big.Int, int, error) {
+	high, low := d.GetBytes()
+	posSign := high>>63&1 == 0 // positive sign
+
+	switch high >> 58 & (1<<5 - 1) {
+	case 0x1F:
+		return nil, 0, ErrParseNaN
+	case 0x1E:
+		if posSign {
+			return nil, 0, ErrParseInf
+		}
+		return nil, 0, ErrParseNegInf
+	}
+
+	var exp int
+	if high>>61&3 == 3 {
+		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
+		// Implicit 0b100 prefix in significand.
+		exp = int(high >> 47 & (1<<14 - 1))
+		// Spec says all of these values are out of range.
+		high, low = 0, 0
+	} else {
+		// Bits: 1*sign 14*exponent 113*significand
+		exp = int(high >> 49 & (1<<14 - 1))
+		high &= (1<<49 - 1)
+	}
+	exp += MinDecimal128Exp
+
+	// This case would be handled by the logic below, but it is trivial and common, so handle it here.
+	if high == 0 && low == 0 && exp == 0 {
+		return new(big.Int), 0, nil
+	}
+
+	bi := big.NewInt(0)
+	const host32bit = ^uint(0)>>32 == 0
+	if host32bit {
+		bi.SetBits([]big.Word{big.Word(low), big.Word(low >> 32), big.Word(high), big.Word(high >> 32)})
+	} else {
+		bi.SetBits([]big.Word{big.Word(low), big.Word(high)})
+	}
+
+	if !posSign {
+		return bi.Neg(bi), exp, nil
+	}
+	return bi, exp, nil
+}
+
+// IsNaN returns whether d is NaN.
+func (d Decimal128) IsNaN() bool {
+	return d.h>>58&(1<<5-1) == 0x1F
+}
+
+// IsInf returns:
+//
+//	+1 d == Infinity
+//	 0 other case
+//	-1 d == -Infinity
+func (d Decimal128) IsInf() int {
+	if d.h>>58&(1<<5-1) != 0x1E {
+		return 0
+	}
+
+	if d.h>>63&1 == 0 {
+		return 1
+	}
+	return -1
+}
+
+// IsZero returns true if d is the empty Decimal128.
+func (d Decimal128) IsZero() bool {
+	return d.h == 0 && d.l == 0
+}
+
+// MarshalJSON returns Decimal128 as a string.
+func (d Decimal128) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.String())
+}
+
+// UnmarshalJSON creates a primitive.Decimal128 from a JSON string, an extended JSON $numberDecimal value, or the string
+// "null". If b is a JSON string or extended JSON value, d will have the value of that string, and if b is "null", d will
+// be unchanged.
+func (d *Decimal128) UnmarshalJSON(b []byte) error {
+	// Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer Decimal128 field
+	// will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not
+	// enter the UnmarshalJSON hook.
+	if string(b) == "null" {
+		return nil
+	}
+
+	var res interface{}
+	err := json.Unmarshal(b, &res)
+	if err != nil {
+		return err
+	}
+	str, ok := res.(string)
+
+	// Extended JSON
+	if !ok {
+		m, ok := res.(map[string]interface{})
+		if !ok {
+			return errors.New("not an extended JSON Decimal128: expected document")
+		}
+		d128, ok := m["$numberDecimal"]
+		if !ok {
+			return errors.New("not an extended JSON Decimal128: expected key $numberDecimal")
+		}
+		str, ok = d128.(string)
+		if !ok {
+			return errors.New("not an extended JSON Decimal128: expected decimal to be string")
+		}
+	}
+
+	*d, err = ParseDecimal128(str)
+	return err
+}
+
+// divmod divides the 128-bit unsigned integer formed by h (high 64 bits) and
+// l (low 64 bits) by div, returning the 128-bit quotient in qh and ql and the
+// remainder in rem.
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+	div64 := uint64(div)
+	a := h >> 32
+	aq := a / div64
+	ar := a % div64
+	b := ar<<32 + h&(1<<32-1)
+	bq := b / div64
+	br := b % div64
+	c := br<<32 + l>>32
+	cq := c / div64
+	cr := c % div64
+	d := cr<<32 + l&(1<<32-1)
+	dq := d / div64
+	dr := d % div64
+	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
+// normalNumber matches a decimal number with optional scientific notation, e.g. -10.15e-18.
+var normalNumber = regexp.MustCompile(`^(?P<int>[-+]?\d*)?(?:\.(?P<dec>\d*))?(?:[Ee](?P<exp>[-+]?\d+))?$`)
+
+// ParseDecimal128 takes the given string and attempts to parse it into a valid
+// Decimal128 value.
+func ParseDecimal128(s string) (Decimal128, error) {
+	if s == "" {
+		return dErr(s)
+	}
+
+	matches := normalNumber.FindStringSubmatch(s)
+	if len(matches) == 0 {
+		orig := s
+		neg := s[0] == '-'
+		if neg || s[0] == '+' {
+			s = s[1:]
+		}
+
+		// strings.EqualFold already covers the exact-case spellings, so a
+		// single case-insensitive comparison per keyword suffices.
+		if strings.EqualFold(s, "nan") {
+			return dNaN, nil
+		}
+		if strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+			if neg {
+				return dNegInf, nil
+			}
+			return dPosInf, nil
+		}
+		return dErr(orig)
+	}
+
+	intPart := matches[1]
+	decPart := matches[2]
+	expPart := matches[3]
+
+	var err error
+	exp := 0
+	if expPart != "" {
+		exp, err = strconv.Atoi(expPart)
+		if err != nil {
+			return dErr(s)
+		}
+	}
+	if decPart != "" {
+		exp -= len(decPart)
+	}
+
+	if len(strings.Trim(intPart+decPart, "-0")) > 35 {
+		return dErr(s)
+	}
+
+	// Parse the significand (i.e. the non-exponent part) as a big.Int.
+	bi, ok := new(big.Int).SetString(intPart+decPart, 10)
+	if !ok {
+		return dErr(s)
+	}
+
+	d, ok := ParseDecimal128FromBigInt(bi, exp)
+	if !ok {
+		return dErr(s)
+	}
+
+	if bi.Sign() == 0 && s[0] == '-' {
+		d.h |= 1 << 63
+	}
+
+	return d, nil
+}
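+
+// As an illustrative sketch, using the scientific-notation example from the
+// regular expression comment above:
+//
+//	d, err := ParseDecimal128("-10.15e-18")
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = d.String()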
+
+var (
+	ten  = big.NewInt(10)
+	zero = new(big.Int)
+
+	maxS, _ = new(big.Int).SetString("9999999999999999999999999999999999", 10)
+)
+
+// ParseDecimal128FromBigInt attempts to parse the given significand and exponent into a valid Decimal128 value.
+func ParseDecimal128FromBigInt(bi *big.Int, exp int) (Decimal128, bool) {
+	// copy
+	bi = new(big.Int).Set(bi)
+
+	q := new(big.Int)
+	r := new(big.Int)
+
+	// If the significand is zero, the logical value will always be zero, independent of the
+	// exponent. However, the loops for handling out-of-range exponent values below may be extremely
+	// slow for zero values because the significand never changes. Limit the exponent value to the
+	// supported range here to prevent entering the loops below.
+	if bi.Cmp(zero) == 0 {
+		if exp > MaxDecimal128Exp {
+			exp = MaxDecimal128Exp
+		}
+		if exp < MinDecimal128Exp {
+			exp = MinDecimal128Exp
+		}
+	}
+
+	for bigIntCmpAbs(bi, maxS) == 1 {
+		bi, _ = q.QuoRem(bi, ten, r)
+		if r.Cmp(zero) != 0 {
+			return Decimal128{}, false
+		}
+		exp++
+		if exp > MaxDecimal128Exp {
+			return Decimal128{}, false
+		}
+	}
+
+	for exp < MinDecimal128Exp {
+		// Subnormal.
+		bi, _ = q.QuoRem(bi, ten, r)
+		if r.Cmp(zero) != 0 {
+			return Decimal128{}, false
+		}
+		exp++
+	}
+	for exp > MaxDecimal128Exp {
+		// Clamped.
+		bi.Mul(bi, ten)
+		if bigIntCmpAbs(bi, maxS) == 1 {
+			return Decimal128{}, false
+		}
+		exp--
+	}
+
+	b := bi.Bytes()
+	var h, l uint64
+	for i := 0; i < len(b); i++ {
+		if i < len(b)-8 {
+			h = h<<8 | uint64(b[i])
+			continue
+		}
+		l = l<<8 | uint64(b[i])
+	}
+
+	h |= uint64(exp-MinDecimal128Exp) & uint64(1<<14-1) << 49
+	if bi.Sign() == -1 {
+		h |= 1 << 63
+	}
+
+	return Decimal128{h: h, l: l}, true
+}
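+
+// As an illustrative sketch (the significand and exponent are hypothetical),
+// a Decimal128 for 3.14159 can be built from a significand of 314159 and an
+// exponent of -5:
+//
+//	d, ok := ParseDecimal128FromBigInt(big.NewInt(314159), -5)
+//	if !ok {
+//		panic("value out of range")
+//	}
+//	_ = d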
+
+// bigIntCmpAbs computes big.Int.Cmp(absoluteValue(x), absoluteValue(y)).
+func bigIntCmpAbs(x, y *big.Int) int {
+	xAbs := bigIntAbsValue(x)
+	yAbs := bigIntAbsValue(y)
+	return xAbs.Cmp(yAbs)
+}
+
+// bigIntAbsValue returns a big.Int containing the absolute value of b.
+// If b is already a non-negative number, it is returned without any changes or copies.
+func bigIntAbsValue(b *big.Int) *big.Int {
+	if b.Sign() >= 0 {
+		return b // already positive
+	}
+	return new(big.Int).Abs(b)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go
new file mode 100644
index 0000000000000000000000000000000000000000..c130e3ff195a06a1687a9960f27b06ae0e7332bb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go
@@ -0,0 +1,206 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package primitive
+
+import (
+	"crypto/rand"
+	"encoding"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+	"time"
+)
+
+// ErrInvalidHex indicates that a hex string cannot be converted to an ObjectID.
+var ErrInvalidHex = errors.New("the provided hex string is not a valid ObjectID")
+
+// ObjectID is the BSON ObjectID type.
+type ObjectID [12]byte
+
+// NilObjectID is the zero value for ObjectID.
+var NilObjectID ObjectID
+
+var objectIDCounter = readRandomUint32()
+var processUnique = processUniqueBytes()
+
+var _ encoding.TextMarshaler = ObjectID{}
+var _ encoding.TextUnmarshaler = &ObjectID{}
+
+// NewObjectID generates a new ObjectID.
+func NewObjectID() ObjectID {
+	return NewObjectIDFromTimestamp(time.Now())
+}
+
+// NewObjectIDFromTimestamp generates a new ObjectID based on the given time.
+func NewObjectIDFromTimestamp(timestamp time.Time) ObjectID {
+	var b [12]byte
+
+	binary.BigEndian.PutUint32(b[0:4], uint32(timestamp.Unix()))
+	copy(b[4:9], processUnique[:])
+	putUint24(b[9:12], atomic.AddUint32(&objectIDCounter, 1))
+
+	return b
+}
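+
+// The layout written above follows the ObjectID specification: bytes 0-3 hold
+// the big-endian Unix timestamp, bytes 4-8 hold the per-process random value,
+// and bytes 9-11 hold a big-endian incrementing counter. As an illustrative
+// sketch:
+//
+//	id := NewObjectIDFromTimestamp(time.Unix(0, 0))
+//	_ = id.Timestamp() // 1970-01-01T00:00:00Z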
+
+// Timestamp extracts the time part of the ObjectID.
+func (id ObjectID) Timestamp() time.Time {
+	unixSecs := binary.BigEndian.Uint32(id[0:4])
+	return time.Unix(int64(unixSecs), 0).UTC()
+}
+
+// Hex returns the hex encoding of the ObjectID as a string.
+func (id ObjectID) Hex() string {
+	var buf [24]byte
+	hex.Encode(buf[:], id[:])
+	return string(buf[:])
+}
+
+func (id ObjectID) String() string {
+	return fmt.Sprintf("ObjectID(%q)", id.Hex())
+}
+
+// IsZero returns true if id is the empty ObjectID.
+func (id ObjectID) IsZero() bool {
+	return id == NilObjectID
+}
+
+// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
+// valid ObjectID.
+func ObjectIDFromHex(s string) (ObjectID, error) {
+	if len(s) != 24 {
+		return NilObjectID, ErrInvalidHex
+	}
+
+	var oid [12]byte
+	_, err := hex.Decode(oid[:], []byte(s))
+	if err != nil {
+		return NilObjectID, err
+	}
+
+	return oid, nil
+}
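+
+// As an illustrative sketch (the hex string below is hypothetical):
+//
+//	id, err := ObjectIDFromHex("5f1f7f9f8f8f8f8f8f8f8f8f")
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = id.Hex() // round-trips the input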
+
+// IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not.
+//
+// Deprecated: Use ObjectIDFromHex and check the error instead.
+func IsValidObjectID(s string) bool {
+	_, err := ObjectIDFromHex(s)
+	return err == nil
+}
+
+// MarshalText returns the ObjectID as UTF-8-encoded text. Implementing this allows us to use ObjectID
+// as a map key when marshalling JSON. See https://pkg.go.dev/encoding#TextMarshaler
+func (id ObjectID) MarshalText() ([]byte, error) {
+	return []byte(id.Hex()), nil
+}
+
+// UnmarshalText populates the byte slice with the ObjectID. Implementing this allows us to use ObjectID
+// as a map key when unmarshalling JSON. See https://pkg.go.dev/encoding#TextUnmarshaler
+func (id *ObjectID) UnmarshalText(b []byte) error {
+	oid, err := ObjectIDFromHex(string(b))
+	if err != nil {
+		return err
+	}
+	*id = oid
+	return nil
+}
+
+// MarshalJSON returns the ObjectID as a string
+func (id ObjectID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(id.Hex())
+}
+
+// UnmarshalJSON populates the ObjectID from JSON input. If the input is 24 bytes long, it is
+// interpreted as the hex representation of an ObjectID. If the input is twelve bytes long, it is
+// interpreted as the raw BSON representation of the ObjectID. This method also accepts empty
+// strings and decodes them as NilObjectID. For any other inputs, an error will be returned.
+func (id *ObjectID) UnmarshalJSON(b []byte) error {
+	// Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer ObjectID field
+	// will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not
+	// enter the UnmarshalJSON hook.
+	if string(b) == "null" {
+		return nil
+	}
+
+	var err error
+	switch len(b) {
+	case 12:
+		copy(id[:], b)
+	default:
+		// Extended JSON
+		var res interface{}
+		err := json.Unmarshal(b, &res)
+		if err != nil {
+			return err
+		}
+		str, ok := res.(string)
+		if !ok {
+			m, ok := res.(map[string]interface{})
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			oid, ok := m["$oid"]
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			str, ok = oid.(string)
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+		}
+
+		// An empty string is not a valid ObjectID, but we treat it as a special value that decodes as NilObjectID.
+		if len(str) == 0 {
+			copy(id[:], NilObjectID[:])
+			return nil
+		}
+
+		if len(str) != 24 {
+			return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str))
+		}
+
+		_, err = hex.Decode(id[:], []byte(str))
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+func processUniqueBytes() [5]byte {
+	var b [5]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err))
+	}
+
+	return b
+}
+
+func readRandomUint32() uint32 {
+	var b [4]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err))
+	}
+
+	return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+}
+
+func putUint24(b []byte, v uint32) {
+	b[0] = byte(v >> 16)
+	b[1] = byte(v >> 8)
+	b[2] = byte(v)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go
new file mode 100644
index 0000000000000000000000000000000000000000..65f4fbb94929637e325fbc0f553fef38c35e5652
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go
@@ -0,0 +1,231 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package primitive contains types similar to Go primitives for BSON types that do not have direct
+// Go primitive representations.
+package primitive // import "go.mongodb.org/mongo-driver/bson/primitive"
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// Binary represents a BSON binary value.
+type Binary struct {
+	Subtype byte
+	Data    []byte
+}
+
+// Equal compares bp to bp2 and returns true if they are equal.
+func (bp Binary) Equal(bp2 Binary) bool {
+	if bp.Subtype != bp2.Subtype {
+		return false
+	}
+	return bytes.Equal(bp.Data, bp2.Data)
+}
+
+// IsZero reports whether bp is the empty Binary.
+func (bp Binary) IsZero() bool {
+	return bp.Subtype == 0 && len(bp.Data) == 0
+}
+
+// Undefined represents the BSON undefined value type.
+type Undefined struct{}
+
+// DateTime represents the BSON datetime value.
+type DateTime int64
+
+var _ json.Marshaler = DateTime(0)
+var _ json.Unmarshaler = (*DateTime)(nil)
+
+// MarshalJSON marshals the DateTime to a JSON time value in UTC.
+func (d DateTime) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.Time().UTC())
+}
+
+// UnmarshalJSON creates a primitive.DateTime from a JSON string.
+func (d *DateTime) UnmarshalJSON(data []byte) error {
+	// Ignore "null" to keep parity with the time.Time type and the standard library. Decoding "null" into a non-pointer
+	// DateTime field will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil
+	// and will not enter the UnmarshalJSON hook.
+	if string(data) == "null" {
+		return nil
+	}
+
+	var tempTime time.Time
+	if err := json.Unmarshal(data, &tempTime); err != nil {
+		return err
+	}
+
+	*d = NewDateTimeFromTime(tempTime)
+	return nil
+}
+
+// Time returns the date as a time type.
+func (d DateTime) Time() time.Time {
+	return time.Unix(int64(d)/1000, int64(d)%1000*1000000)
+}
+
+// NewDateTimeFromTime creates a new DateTime from a Time.
+func NewDateTimeFromTime(t time.Time) DateTime {
+	return DateTime(t.Unix()*1e3 + int64(t.Nanosecond())/1e6)
+}
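+
+// As an illustrative sketch, DateTime stores milliseconds since the Unix
+// epoch, so a round trip through NewDateTimeFromTime and Time truncates to
+// millisecond precision:
+//
+//	dt := NewDateTimeFromTime(time.Now())
+//	_ = dt.Time() // same instant, millisecond precision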
+
+// Null represents the BSON null value.
+type Null struct{}
+
+// Regex represents a BSON regex value.
+type Regex struct {
+	Pattern string
+	Options string
+}
+
+func (rp Regex) String() string {
+	return fmt.Sprintf(`{"pattern": "%s", "options": "%s"}`, rp.Pattern, rp.Options)
+}
+
+// Equal compares rp to rp2 and returns true if they are equal.
+func (rp Regex) Equal(rp2 Regex) bool {
+	return rp.Pattern == rp2.Pattern && rp.Options == rp2.Options
+}
+
+// IsZero reports whether rp is the empty Regex.
+func (rp Regex) IsZero() bool {
+	return rp.Pattern == "" && rp.Options == ""
+}
+
+// DBPointer represents a BSON dbpointer value.
+type DBPointer struct {
+	DB      string
+	Pointer ObjectID
+}
+
+func (d DBPointer) String() string {
+	return fmt.Sprintf(`{"db": "%s", "pointer": "%s"}`, d.DB, d.Pointer)
+}
+
+// Equal compares d to d2 and returns true if they are equal.
+func (d DBPointer) Equal(d2 DBPointer) bool {
+	return d == d2
+}
+
+// IsZero reports whether d is the empty DBPointer.
+func (d DBPointer) IsZero() bool {
+	return d.DB == "" && d.Pointer.IsZero()
+}
+
+// JavaScript represents a BSON JavaScript code value.
+type JavaScript string
+
+// Symbol represents a BSON symbol value.
+type Symbol string
+
+// CodeWithScope represents a BSON JavaScript code with scope value.
+type CodeWithScope struct {
+	Code  JavaScript
+	Scope interface{}
+}
+
+func (cws CodeWithScope) String() string {
+	return fmt.Sprintf(`{"code": "%s", "scope": %v}`, cws.Code, cws.Scope)
+}
+
+// Timestamp represents a BSON timestamp value.
+type Timestamp struct {
+	T uint32
+	I uint32
+}
+
+// After reports whether the time instant tp is after tp2.
+func (tp Timestamp) After(tp2 Timestamp) bool {
+	return tp.T > tp2.T || (tp.T == tp2.T && tp.I > tp2.I)
+}
+
+// Before reports whether the time instant tp is before tp2.
+func (tp Timestamp) Before(tp2 Timestamp) bool {
+	return tp.T < tp2.T || (tp.T == tp2.T && tp.I < tp2.I)
+}
+
+// Equal compares tp to tp2 and returns true if they are equal.
+func (tp Timestamp) Equal(tp2 Timestamp) bool {
+	return tp.T == tp2.T && tp.I == tp2.I
+}
+
+// IsZero reports whether tp is the zero Timestamp.
+func (tp Timestamp) IsZero() bool {
+	return tp.T == 0 && tp.I == 0
+}
+
+// Compare compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after
+// tp2, it returns +1; if they're the same, it returns 0.
+func (tp Timestamp) Compare(tp2 Timestamp) int {
+	switch {
+	case tp.Equal(tp2):
+		return 0
+	case tp.Before(tp2):
+		return -1
+	default:
+		return +1
+	}
+}
+
+// CompareTimestamp compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after
+// tp2, it returns +1; if they're the same, it returns 0.
+//
+// Deprecated: Use Timestamp.Compare instead.
+func CompareTimestamp(tp, tp2 Timestamp) int {
+	return tp.Compare(tp2)
+}
+
+// MinKey represents the BSON minkey value.
+type MinKey struct{}
+
+// MaxKey represents the BSON maxkey value.
+type MaxKey struct{}
+
+// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters,
+// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead.
+//
+// Example usage:
+//
+//	bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+type D []E
+
+// Map creates a map from the elements of the D.
+//
+// Deprecated: Converting directly from a D to an M will not be supported in Go Driver 2.0. Instead,
+// users should marshal the D to BSON using bson.Marshal and unmarshal it to M using bson.Unmarshal.
+func (d D) Map() M {
+	m := make(M, len(d))
+	for _, e := range d {
+		m[e.Key] = e.Value
+	}
+	return m
+}
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E struct {
+	Key   string
+	Value interface{}
+}
+
+// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not
+// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be
+// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead.
+//
+// Example usage:
+//
+//	bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+type M map[string]interface{}
+
+// An A is an ordered representation of a BSON array.
+//
+// Example usage:
+//
+//	bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}}
+type A []interface{}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go
new file mode 100644
index 0000000000000000000000000000000000000000..ff32a87a7955522757c159b91f044361d212bf1b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go
@@ -0,0 +1,122 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+)
+
+var tRawValue = reflect.TypeOf(RawValue{})
+var tRaw = reflect.TypeOf(Raw(nil))
+
+var primitiveCodecs PrimitiveCodecs
+
+// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types
+// defined in this package.
+//
+// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
+// registered.
+type PrimitiveCodecs struct{}
+
+// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs
+// with the provided RegistryBuilder. It panics if rb is nil.
+//
+// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
+// registered.
+func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil"))
+	}
+
+	rb.
+		RegisterTypeEncoder(tRawValue, bsoncodec.ValueEncoderFunc(pc.RawValueEncodeValue)).
+		RegisterTypeEncoder(tRaw, bsoncodec.ValueEncoderFunc(pc.RawEncodeValue)).
+		RegisterTypeDecoder(tRawValue, bsoncodec.ValueDecoderFunc(pc.RawValueDecodeValue)).
+		RegisterTypeDecoder(tRaw, bsoncodec.ValueDecoderFunc(pc.RawDecodeValue))
+}
+
+// RawValueEncodeValue is the ValueEncoderFunc for RawValue.
+//
+// If the RawValue's Type is "invalid" and the RawValue's Value is not empty or
+// nil, then this method will return an error.
+//
+// Deprecated: Use bson.NewRegistry to get a registry with all primitive
+// encoders and decoders registered.
+func (PrimitiveCodecs) RawValueEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRawValue {
+		return bsoncodec.ValueEncoderError{
+			Name:     "RawValueEncodeValue",
+			Types:    []reflect.Type{tRawValue},
+			Received: val,
+		}
+	}
+
+	rawvalue := val.Interface().(RawValue)
+
+	if !rawvalue.Type.IsValid() {
+		return fmt.Errorf("the RawValue Type specifies an invalid BSON type: %#x", byte(rawvalue.Type))
+	}
+
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value)
+}
+
+// RawValueDecodeValue is the ValueDecoderFunc for RawValue.
+//
+// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
+// registered.
+func (PrimitiveCodecs) RawValueDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRawValue {
+		return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val}
+	}
+
+	t, value, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(RawValue{Type: t, Value: value}))
+	return nil
+}
+
+// RawEncodeValue is the ValueEncoderFunc for Reader.
+//
+// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
+// registered.
+func (PrimitiveCodecs) RawEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRaw {
+		return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val}
+	}
+
+	rdr := val.Interface().(Raw)
+
+	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, rdr)
+}
+
+// RawDecodeValue is the ValueDecoderFunc for Reader.
+//
+// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
+// registered.
+func (PrimitiveCodecs) RawDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRaw {
+		return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0)
+
+	rdr, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(Raw), vr)
+	val.Set(reflect.ValueOf(rdr))
+	return err
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go
new file mode 100644
index 0000000000000000000000000000000000000000..130da61ba05486a7a2ed14096d1d03008d875c98
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go
@@ -0,0 +1,101 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"io"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilReader indicates that an operation was attempted on a nil bson.Reader.
+var ErrNilReader = errors.New("nil reader")
+
+// Raw is a raw encoded BSON document. It can be used to delay BSON document decoding or precompute
+// a BSON encoded document.
+//
+// A Raw must be a full BSON document. Use the RawValue type for individual BSON values.
+type Raw []byte
+
+// ReadDocument reads a BSON document from the io.Reader and returns it as a bson.Raw. If the
+// reader contains multiple BSON documents, only the first document is read.
+func ReadDocument(r io.Reader) (Raw, error) {
+	doc, err := bsoncore.NewDocumentFromReader(r)
+	return Raw(doc), err
+}
+
+// NewFromIOReader reads a BSON document from the io.Reader and returns it as a bson.Raw. If the
+// reader contains multiple BSON documents, only the first document is read.
+//
+// Deprecated: Use ReadDocument instead.
+func NewFromIOReader(r io.Reader) (Raw, error) {
+	return ReadDocument(r)
+}
+
+// Validate validates the document. This method only validates the first document in
+// the slice; to validate other documents, the slice must be resliced.
+func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() }
+
+// Lookup searches the document, potentially recursively, for the given key. If
+// there are multiple keys provided, this method will recurse down, as long as
+// the top and intermediate nodes are either documents or arrays. If an error
+// occurs or if the value doesn't exist, an empty RawValue is returned.
+func (r Raw) Lookup(key ...string) RawValue {
+	return convertFromCoreValue(bsoncore.Document(r).Lookup(key...))
+}
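+
+// As an illustrative sketch (the document contents are hypothetical), nested
+// values can be reached by passing one key per level:
+//
+//	doc, _ := bson.Marshal(bson.M{"outer": bson.M{"inner": "value"}})
+//	v := bson.Raw(doc).Lookup("outer", "inner")
+//	_ = v.StringValue() // "value"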
+
+// LookupErr searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (r Raw) LookupErr(key ...string) (RawValue, error) {
+	val, err := bsoncore.Document(r).LookupErr(key...)
+	return convertFromCoreValue(val), err
+}
+
+// Elements returns this document as a slice of elements. The returned slice will contain valid
+// elements. If the document is not valid, the elements up to the invalid point will be returned
+// along with an error.
+func (r Raw) Elements() ([]RawElement, error) {
+	doc := bsoncore.Document(r)
+	if len(doc) == 0 {
+		return nil, nil
+	}
+	elems, err := doc.Elements()
+	if err != nil {
+		return nil, err
+	}
+	relems := make([]RawElement, 0, len(elems))
+	for _, elem := range elems {
+		relems = append(relems, RawElement(elem))
+	}
+	return relems, nil
+}
+
+// Values returns this document as a slice of values. The returned slice will contain valid values.
+// If the document is not valid, the values up to the invalid point will be returned along with an
+// error.
+func (r Raw) Values() ([]RawValue, error) {
+	vals, err := bsoncore.Document(r).Values()
+	rvals := make([]RawValue, 0, len(vals))
+	for _, val := range vals {
+		rvals = append(rvals, convertFromCoreValue(val))
+	}
+	return rvals, err
+}
+
+// Index searches for and retrieves the element at the given index. This method will panic if
+// the document is invalid or if the index is out of bounds.
+func (r Raw) Index(index uint) RawElement { return RawElement(bsoncore.Document(r).Index(index)) }
+
+// IndexErr searches for and retrieves the element at the given index.
+func (r Raw) IndexErr(index uint) (RawElement, error) {
+	elem, err := bsoncore.Document(r).IndexErr(index)
+	return RawElement(elem), err
+}
+
+// String returns the BSON document encoded as Extended JSON.
+func (r Raw) String() string { return bsoncore.Document(r).String() }
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ce13c2cc7334f55d0a36f9f1ca38720c653987e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go
@@ -0,0 +1,48 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// RawElement is a raw encoded BSON document or array element.
+type RawElement []byte
+
+// Key returns the key for this element. If the element is not valid, this method returns an empty
+// string. If it is important to know whether the element is valid, use KeyErr.
+func (re RawElement) Key() string { return bsoncore.Element(re).Key() }
+
+// KeyErr returns the key for this element, returning an error if the element is not valid.
+func (re RawElement) KeyErr() (string, error) { return bsoncore.Element(re).KeyErr() }
+
+// Value returns the value of this element. If the element is not valid, this method returns an
+// empty Value. If it is important to know whether the element is valid, use ValueErr.
+func (re RawElement) Value() RawValue { return convertFromCoreValue(bsoncore.Element(re).Value()) }
+
+// ValueErr returns the value for this element, returning an error if the element is not valid.
+func (re RawElement) ValueErr() (RawValue, error) {
+	val, err := bsoncore.Element(re).ValueErr()
+	return convertFromCoreValue(val), err
+}
+
+// Validate ensures re is a valid BSON element.
+func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() }
+
+// String returns the BSON element encoded as Extended JSON.
+func (re RawElement) String() string {
+	doc := bsoncore.BuildDocument(nil, re)
+	j, err := MarshalExtJSON(Raw(doc), true, false)
+	if err != nil {
+		return "<malformed>"
+	}
+	return string(j)
+}
+
+// DebugString outputs a human readable version of RawElement. It will attempt to stringify the
+// valid components of the element even if the entire element is not valid.
+func (re RawElement) DebugString() string { return bsoncore.Element(re).DebugString() }
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go
new file mode 100644
index 0000000000000000000000000000000000000000..a8088e1e30b7350b9e5e9108f6957c782238abec
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go
@@ -0,0 +1,324 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilContext is returned when the provided DecodeContext is nil.
+var ErrNilContext = errors.New("DecodeContext cannot be nil")
+
+// ErrNilRegistry is returned when the provided registry is nil.
+var ErrNilRegistry = errors.New("Registry cannot be nil")
+
+// RawValue is a raw encoded BSON value. It can be used to delay BSON value decoding or precompute
+// BSON encoded value. Type is the BSON type of the value and Value is the raw encoded BSON value.
+//
+// A RawValue must be an individual BSON value. Use the Raw type for full BSON documents.
+type RawValue struct {
+	Type  bsontype.Type
+	Value []byte
+
+	r *bsoncodec.Registry
+}
+
+// IsZero reports whether the RawValue is zero, i.e. no data is present on
+// the RawValue. It returns true if Type is 0 and Value is empty or nil.
+func (rv RawValue) IsZero() bool {
+	return rv.Type == 0x00 && len(rv.Value) == 0
+}
+
+// Unmarshal deserializes BSON into the provided val. If the RawValue cannot be unmarshaled into
+// val, an error is returned. If the RawValue was created from partial BSON processing, this method
+// uses the registry that created it; otherwise it falls back to the default registry. Users wishing
+// to specify the registry should use UnmarshalWithRegistry.
+func (rv RawValue) Unmarshal(val interface{}) error {
+	reg := rv.r
+	if reg == nil {
+		reg = DefaultRegistry
+	}
+	return rv.UnmarshalWithRegistry(reg, val)
+}
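+
+// As an illustrative sketch (rv is a hypothetical RawValue holding a BSON
+// string):
+//
+//	var s string
+//	if err := rv.Unmarshal(&s); err != nil {
+//		panic(err)
+//	}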
+
+// Equal compares rv and rv2 and returns true if they are equal.
+func (rv RawValue) Equal(rv2 RawValue) bool {
+	if rv.Type != rv2.Type {
+		return false
+	}
+
+	if !bytes.Equal(rv.Value, rv2.Value) {
+		return false
+	}
+
+	return true
+}
+
+// UnmarshalWithRegistry performs the same unmarshalling as Unmarshal but uses the provided registry
+// instead of the one attached or the default registry.
+func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) error {
+	if r == nil {
+		return ErrNilRegistry
+	}
+
+	vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
+	rval := reflect.ValueOf(val)
+	if rval.Kind() != reflect.Ptr {
+		return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
+	}
+	rval = rval.Elem()
+	dec, err := r.LookupDecoder(rval.Type())
+	if err != nil {
+		return err
+	}
+	return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval)
+}
+
+// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses
+// the provided DecodeContext instead of the one attached or the default
+// registry.
+//
+// Deprecated: Use [RawValue.UnmarshalWithRegistry] with a custom registry to customize
+// unmarshal behavior instead.
+func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error {
+	if dc == nil {
+		return ErrNilContext
+	}
+
+	vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
+	rval := reflect.ValueOf(val)
+	if rval.Kind() != reflect.Ptr {
+		return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
+	}
+	rval = rval.Elem()
+	dec, err := dc.LookupDecoder(rval.Type())
+	if err != nil {
+		return err
+	}
+	return dec.DecodeValue(*dc, vr, rval)
+}
+
+func convertFromCoreValue(v bsoncore.Value) RawValue { return RawValue{Type: v.Type, Value: v.Data} }
+func convertToCoreValue(v RawValue) bsoncore.Value {
+	return bsoncore.Value{Type: v.Type, Data: v.Value}
+}
+
+// Validate ensures the value is a valid BSON value.
+func (rv RawValue) Validate() error { return convertToCoreValue(rv).Validate() }
+
+// IsNumber returns true if the type of rv is a numeric BSON type.
+func (rv RawValue) IsNumber() bool { return convertToCoreValue(rv).IsNumber() }
+
+// String implements the fmt.Stringer interface. This method will return values in extended JSON
+// format. If the value is not valid, this returns an empty string.
+func (rv RawValue) String() string { return convertToCoreValue(rv).String() }
+
+// DebugString outputs a human readable version of the value. It will attempt to stringify the
+// valid components of the value even if the entire value is not valid.
+func (rv RawValue) DebugString() string { return convertToCoreValue(rv).DebugString() }
+
+// Double returns the float64 value for this element.
+// It panics if rv's BSON type is not bsontype.Double.
+func (rv RawValue) Double() float64 { return convertToCoreValue(rv).Double() }
+
+// DoubleOK is the same as Double, but returns a boolean instead of panicking.
+func (rv RawValue) DoubleOK() (float64, bool) { return convertToCoreValue(rv).DoubleOK() }
+
+// StringValue returns the string value for this element.
+// It panics if rv's BSON type is not bsontype.String.
+//
+// NOTE: This method is called StringValue to avoid a collision with the String method which
+// implements the fmt.Stringer interface.
+func (rv RawValue) StringValue() string { return convertToCoreValue(rv).StringValue() }
+
+// StringValueOK is the same as StringValue, but returns a boolean instead of
+// panicking.
+func (rv RawValue) StringValueOK() (string, bool) { return convertToCoreValue(rv).StringValueOK() }
+
+// Document returns the BSON document the Value represents as a Document. It panics if the
+// value is a BSON type other than document.
+func (rv RawValue) Document() Raw { return Raw(convertToCoreValue(rv).Document()) }
+
+// DocumentOK is the same as Document, except it returns a boolean
+// instead of panicking.
+func (rv RawValue) DocumentOK() (Raw, bool) {
+	doc, ok := convertToCoreValue(rv).DocumentOK()
+	return Raw(doc), ok
+}
+
+// Array returns the BSON array the Value represents as an Array. It panics if the
+// value is a BSON type other than array.
+func (rv RawValue) Array() Raw { return Raw(convertToCoreValue(rv).Array()) }
+
+// ArrayOK is the same as Array, except it returns a boolean instead
+// of panicking.
+func (rv RawValue) ArrayOK() (Raw, bool) {
+	doc, ok := convertToCoreValue(rv).ArrayOK()
+	return Raw(doc), ok
+}
+
+// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
+// other than binary.
+func (rv RawValue) Binary() (subtype byte, data []byte) { return convertToCoreValue(rv).Binary() }
+
+// BinaryOK is the same as Binary, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) BinaryOK() (subtype byte, data []byte, ok bool) {
+	return convertToCoreValue(rv).BinaryOK()
+}
+
+// ObjectID returns the BSON objectid value the Value represents. It panics if the value is a BSON
+// type other than objectid.
+func (rv RawValue) ObjectID() primitive.ObjectID { return convertToCoreValue(rv).ObjectID() }
+
+// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) {
+	return convertToCoreValue(rv).ObjectIDOK()
+}
+
+// Boolean returns the boolean value the Value represents. It panics if the
+// value is a BSON type other than boolean.
+func (rv RawValue) Boolean() bool { return convertToCoreValue(rv).Boolean() }
+
+// BooleanOK is the same as Boolean, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) BooleanOK() (bool, bool) { return convertToCoreValue(rv).BooleanOK() }
+
+// DateTime returns the BSON datetime value the Value represents as a
+// unix timestamp. It panics if the value is a BSON type other than datetime.
+func (rv RawValue) DateTime() int64 { return convertToCoreValue(rv).DateTime() }
+
+// DateTimeOK is the same as DateTime, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) DateTimeOK() (int64, bool) { return convertToCoreValue(rv).DateTimeOK() }
+
+// Time returns the BSON datetime value the Value represents. It panics if the value is a BSON
+// type other than datetime.
+func (rv RawValue) Time() time.Time { return convertToCoreValue(rv).Time() }
+
+// TimeOK is the same as Time, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) TimeOK() (time.Time, bool) { return convertToCoreValue(rv).TimeOK() }
+
+// Regex returns the BSON regex value the Value represents. It panics if the value is a BSON
+// type other than regex.
+func (rv RawValue) Regex() (pattern, options string) { return convertToCoreValue(rv).Regex() }
+
+// RegexOK is the same as Regex, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) RegexOK() (pattern, options string, ok bool) {
+	return convertToCoreValue(rv).RegexOK()
+}
+
+// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
+// type other than DBPointer.
+func (rv RawValue) DBPointer() (string, primitive.ObjectID) {
+	return convertToCoreValue(rv).DBPointer()
+}
+
+// DBPointerOK is the same as DBPointer, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) DBPointerOK() (string, primitive.ObjectID, bool) {
+	return convertToCoreValue(rv).DBPointerOK()
+}
+
+// JavaScript returns the BSON JavaScript code value the Value represents. It panics if the value is
+// a BSON type other than JavaScript code.
+func (rv RawValue) JavaScript() string { return convertToCoreValue(rv).JavaScript() }
+
+// JavaScriptOK is the same as JavaScript, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) JavaScriptOK() (string, bool) { return convertToCoreValue(rv).JavaScriptOK() }
+
+// Symbol returns the BSON symbol value the Value represents. It panics if the value is a BSON
+// type other than symbol.
+func (rv RawValue) Symbol() string { return convertToCoreValue(rv).Symbol() }
+
+// SymbolOK is the same as Symbol, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) SymbolOK() (string, bool) { return convertToCoreValue(rv).SymbolOK() }
+
+// CodeWithScope returns the BSON JavaScript code with scope the Value represents.
+// It panics if the value is a BSON type other than JavaScript code with scope.
+func (rv RawValue) CodeWithScope() (string, Raw) {
+	code, scope := convertToCoreValue(rv).CodeWithScope()
+	return code, Raw(scope)
+}
+
+// CodeWithScopeOK is the same as CodeWithScope, except that it returns a boolean instead of
+// panicking.
+func (rv RawValue) CodeWithScopeOK() (string, Raw, bool) {
+	code, scope, ok := convertToCoreValue(rv).CodeWithScopeOK()
+	return code, Raw(scope), ok
+}
+
+// Int32 returns the int32 the Value represents. It panics if the value is a BSON type other than
+// int32.
+func (rv RawValue) Int32() int32 { return convertToCoreValue(rv).Int32() }
+
+// Int32OK is the same as Int32, except that it returns a boolean instead of
+// panicking.
+func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32OK() }
+
+// AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
+// will panic.
+//
+// Deprecated: Use AsInt64 instead. If an int32 is required, convert the returned value to an int32
+// and perform any required overflow/underflow checking.
+func (rv RawValue) AsInt32() int32 { return convertToCoreValue(rv).AsInt32() }
+
+// AsInt32OK is the same as AsInt32, except that it returns a boolean instead of
+// panicking.
+//
+// Deprecated: Use AsInt64OK instead. If an int32 is required, convert the returned value to an
+// int32 and perform any required overflow/underflow checking.
+func (rv RawValue) AsInt32OK() (int32, bool) { return convertToCoreValue(rv).AsInt32OK() }
+
+// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
+// BSON type other than timestamp.
+func (rv RawValue) Timestamp() (t, i uint32) { return convertToCoreValue(rv).Timestamp() }
+
+// TimestampOK is the same as Timestamp, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) TimestampOK() (t, i uint32, ok bool) { return convertToCoreValue(rv).TimestampOK() }
+
+// Int64 returns the int64 the Value represents. It panics if the value is a BSON type other than
+// int64.
+func (rv RawValue) Int64() int64 { return convertToCoreValue(rv).Int64() }
+
+// Int64OK is the same as Int64, except that it returns a boolean instead of
+// panicking.
+func (rv RawValue) Int64OK() (int64, bool) { return convertToCoreValue(rv).Int64OK() }
+
+// AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method
+// will panic.
+func (rv RawValue) AsInt64() int64 { return convertToCoreValue(rv).AsInt64() }
+
+// AsInt64OK is the same as AsInt64, except that it returns a boolean instead of
+// panicking.
+func (rv RawValue) AsInt64OK() (int64, bool) { return convertToCoreValue(rv).AsInt64OK() }
+
+// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
+// decimal.
+func (rv RawValue) Decimal128() primitive.Decimal128 { return convertToCoreValue(rv).Decimal128() }
+
+// Decimal128OK is the same as Decimal128, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) Decimal128OK() (primitive.Decimal128, bool) {
+	return convertToCoreValue(rv).Decimal128OK()
+}
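
The accessors above all convert through bsoncore.Value, which makes RawValue useful for lazy decoding: parse a document once into a bson.Raw, look up a single field, and unmarshal only that value. A minimal sketch of that pattern, assuming a document with a "name" string field (the field names here are illustrative):

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		data, err := bson.Marshal(bson.M{"name": "harmony", "port": int32(8080)})
		if err != nil {
			panic(err)
		}

		// LookupErr returns a bson.RawValue without decoding the other fields.
		rv, err := bson.Raw(data).LookupErr("name")
		if err != nil {
			panic(err)
		}

		var name string
		if err := rv.Unmarshal(&name); err != nil { // decodes just this one value
			panic(err)
		}
		fmt.Println(name, rv.Type) // harmony string
	}
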
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6afb2850e2efd44251c134dc22d9e820a2d3f2a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go
@@ -0,0 +1,47 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+)
+
+// DefaultRegistry is the default bsoncodec.Registry. It contains the default
+// codecs and the primitive codecs.
+//
+// Deprecated: Use [NewRegistry] to construct a new default registry. To use a
+// custom registry when marshaling or unmarshaling, use the "SetRegistry" method
+// on an [Encoder] or [Decoder] instead:
+//
+//	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
+//	if err != nil {
+//	    panic(err)
+//	}
+//	dec.SetRegistry(reg)
+//
+// See [Encoder] and [Decoder] for more examples.
+var DefaultRegistry = NewRegistry()
+
+// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
+// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
+// PrimitiveCodecs type in this package.
+//
+// Deprecated: Use [NewRegistry] instead.
+func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
+	rb := bsoncodec.NewRegistryBuilder()
+	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+	primitiveCodecs.RegisterPrimitiveCodecs(rb)
+	return rb
+}
+
+// NewRegistry creates a new Registry configured with the default encoders and decoders from the
+// bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the PrimitiveCodecs
+// type in this package.
+func NewRegistry() *bsoncodec.Registry {
+	return NewRegistryBuilder().Build()
+}
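
NewRegistry builds the same registry that DefaultRegistry exposes, and the returned *bsoncodec.Registry can be extended before it is handed to an Encoder or Decoder. A small sketch, assuming driver v1.12+ where *bsoncodec.Registry has RegisterTypeEncoder; the duration-as-string encoding is purely illustrative:

	package main

	import (
		"bytes"
		"fmt"
		"reflect"
		"time"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsoncodec"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		// Extend the default codecs so time.Duration marshals as a string.
		reg := bson.NewRegistry()
		reg.RegisterTypeEncoder(reflect.TypeOf(time.Duration(0)),
			bsoncodec.ValueEncoderFunc(func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, v reflect.Value) error {
				return vw.WriteString(time.Duration(v.Int()).String())
			}))

		buf := new(bytes.Buffer)
		vw, err := bsonrw.NewBSONValueWriter(buf)
		if err != nil {
			panic(err)
		}
		enc, err := bson.NewEncoder(vw)
		if err != nil {
			panic(err)
		}
		if err := enc.SetRegistry(reg); err != nil {
			panic(err)
		}
		if err := enc.Encode(bson.M{"timeout": 30 * time.Second}); err != nil {
			panic(err)
		}
		fmt.Println(bson.Raw(buf.Bytes())) // {"timeout": "30s"}
	}
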
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef398124672e5ec5dfc9019953150e1eea96b189
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go
@@ -0,0 +1,50 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// BSON element types as described in https://bsonspec.org/spec.html.
+const (
+	TypeDouble           = bsontype.Double
+	TypeString           = bsontype.String
+	TypeEmbeddedDocument = bsontype.EmbeddedDocument
+	TypeArray            = bsontype.Array
+	TypeBinary           = bsontype.Binary
+	TypeUndefined        = bsontype.Undefined
+	TypeObjectID         = bsontype.ObjectID
+	TypeBoolean          = bsontype.Boolean
+	TypeDateTime         = bsontype.DateTime
+	TypeNull             = bsontype.Null
+	TypeRegex            = bsontype.Regex
+	TypeDBPointer        = bsontype.DBPointer
+	TypeJavaScript       = bsontype.JavaScript
+	TypeSymbol           = bsontype.Symbol
+	TypeCodeWithScope    = bsontype.CodeWithScope
+	TypeInt32            = bsontype.Int32
+	TypeTimestamp        = bsontype.Timestamp
+	TypeInt64            = bsontype.Int64
+	TypeDecimal128       = bsontype.Decimal128
+	TypeMinKey           = bsontype.MinKey
+	TypeMaxKey           = bsontype.MaxKey
+)
+
+// BSON binary element subtypes as described in https://bsonspec.org/spec.html.
+const (
+	TypeBinaryGeneric     = bsontype.BinaryGeneric
+	TypeBinaryFunction    = bsontype.BinaryFunction
+	TypeBinaryBinaryOld   = bsontype.BinaryBinaryOld
+	TypeBinaryUUIDOld     = bsontype.BinaryUUIDOld
+	TypeBinaryUUID        = bsontype.BinaryUUID
+	TypeBinaryMD5         = bsontype.BinaryMD5
+	TypeBinaryEncrypted   = bsontype.BinaryEncrypted
+	TypeBinaryColumn      = bsontype.BinaryColumn
+	TypeBinarySensitive   = bsontype.BinarySensitive
+	TypeBinaryUserDefined = bsontype.BinaryUserDefined
+)
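
These aliases let application code switch on a value's BSON type without importing bsontype directly. A short sketch, assuming a bson.Raw with a numeric "port" field and fmt/bson imports; the helper name is invented here:

	// printPort is a hypothetical helper: it inspects the BSON type of one
	// field using the re-exported constants above.
	func printPort(doc bson.Raw) {
		rv := doc.Lookup("port")
		switch rv.Type {
		case bson.TypeInt32:
			fmt.Println("int32 port:", rv.Int32())
		case bson.TypeInt64:
			fmt.Println("int64 port:", rv.Int64())
		case bson.TypeDouble:
			fmt.Println("double port:", rv.Double())
		default:
			fmt.Println("unexpected type:", rv.Type)
		}
	}
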
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
new file mode 100644
index 0000000000000000000000000000000000000000..66da17ee01722f31b433c2aa67f7e553b211f115
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
@@ -0,0 +1,177 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"bytes"
+
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// Unmarshaler is the interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The input can be assumed to be a valid
+// encoding of a BSON document. UnmarshalBSON must copy the BSON data if it
+// wishes to retain the data after returning.
+//
+// Unmarshaler is only used to unmarshal full BSON documents. To create custom
+// BSON unmarshaling behavior for individual values in a BSON document,
+// implement the ValueUnmarshaler interface instead.
+type Unmarshaler interface {
+	UnmarshalBSON([]byte) error
+}
+
+// ValueUnmarshaler is the interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The input can be assumed to be a
+// valid encoding of a BSON value. UnmarshalBSONValue must copy the BSON value
+// bytes if it wishes to retain the data after returning.
+//
+// ValueUnmarshaler is only used to unmarshal individual values in a BSON
+// document. To create custom BSON unmarshaling behavior for an entire BSON
+// document, implement the Unmarshaler interface instead.
+type ValueUnmarshaler interface {
+	UnmarshalBSONValue(bsontype.Type, []byte) error
+}
+
+// Unmarshal parses the BSON-encoded data and stores the result in the value
+// pointed to by val. If val is nil or not a pointer, Unmarshal returns
+// InvalidUnmarshalError.
+func Unmarshal(data []byte, val interface{}) error {
+	return UnmarshalWithRegistry(DefaultRegistry, data, val)
+}
+
+// UnmarshalWithRegistry parses the BSON-encoded data using Registry r and
+// stores the result in the value pointed to by val. If val is nil or not
+// a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
+//
+// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead:
+//
+//	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec.SetRegistry(reg)
+//
+// See [Decoder] for more examples.
+func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error {
+	vr := bsonrw.NewBSONDocumentReader(data)
+	return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val)
+}
+
+// UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and
+// stores the result in the value pointed to by val. If val is nil or not
+// a pointer, UnmarshalWithContext returns InvalidUnmarshalError.
+//
+// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal
+// behavior instead:
+//
+//	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec.DefaultDocumentM()
+//
+// See [Decoder] for more examples.
+func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error {
+	vr := bsonrw.NewBSONDocumentReader(data)
+	return unmarshalFromReader(dc, vr, val)
+}
+
+// UnmarshalValue parses the BSON value of type t with bson.DefaultRegistry and
+// stores the result in the value pointed to by val. If val is nil or not a pointer,
+// UnmarshalValue returns an error.
+func UnmarshalValue(t bsontype.Type, data []byte, val interface{}) error {
+	return UnmarshalValueWithRegistry(DefaultRegistry, t, data, val)
+}
+
+// UnmarshalValueWithRegistry parses the BSON value of type t with registry r and
+// stores the result in the value pointed to by val. If val is nil or not a pointer,
+// UnmarshalValue returns an error.
+//
+// Deprecated: Using a custom registry to unmarshal individual BSON values will not be supported in
+// Go Driver 2.0.
+func UnmarshalValueWithRegistry(r *bsoncodec.Registry, t bsontype.Type, data []byte, val interface{}) error {
+	vr := bsonrw.NewBSONValueReader(t, data)
+	return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val)
+}
+
+// UnmarshalExtJSON parses the extended JSON-encoded data and stores the result
+// in the value pointed to by val. If val is nil or not a pointer,
+// UnmarshalExtJSON returns InvalidUnmarshalError.
+func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error {
+	return UnmarshalExtJSONWithRegistry(DefaultRegistry, data, canonical, val)
+}
+
+// UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using
+// Registry r and stores the result in the value pointed to by val. If val is
+// nil or not a pointer, UnmarshalExtJSONWithRegistry returns InvalidUnmarshalError.
+//
+// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead:
+//
+//	vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec, err := bson.NewDecoder(vr)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec.SetRegistry(reg)
+//
+// See [Decoder] for more examples.
+func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error {
+	ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
+	if err != nil {
+		return err
+	}
+
+	return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, ejvr, val)
+}
+
+// UnmarshalExtJSONWithContext parses the extended JSON-encoded data using
+// DecodeContext dc and stores the result in the value pointed to by val. If val is
+// nil or not a pointer, UnmarshalExtJSONWithContext returns InvalidUnmarshalError.
+//
+// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal
+// behavior instead:
+//
+//	vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec, err := bson.NewDecoder(vr)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec.DefaultDocumentM()
+//
+// See [Decoder] for more examples.
+func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error {
+	ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
+	if err != nil {
+		return err
+	}
+
+	return unmarshalFromReader(dc, ejvr, val)
+}
+
+func unmarshalFromReader(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val interface{}) error {
+	dec := decPool.Get().(*Decoder)
+	defer decPool.Put(dec)
+
+	err := dec.Reset(vr)
+	if err != nil {
+		return err
+	}
+	err = dec.SetContext(dc)
+	if err != nil {
+		return err
+	}
+
+	return dec.Decode(val)
+}
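
The two entry points above cover both wire-format BSON and extended JSON. A minimal sketch of each; the struct and field names are illustrative:

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	type server struct {
		Host string `bson:"host"`
		Port int32  `bson:"port"`
	}

	func main() {
		data, err := bson.Marshal(server{Host: "localhost", Port: 8080})
		if err != nil {
			panic(err)
		}

		var s server
		if err := bson.Unmarshal(data, &s); err != nil { // val must be a non-nil pointer
			panic(err)
		}

		// Canonical extended JSON uses type wrappers such as $numberInt.
		ext := []byte(`{"host": "localhost", "port": {"$numberInt": "8080"}}`)
		var t server
		if err := bson.UnmarshalExtJSON(ext, true, &t); err != nil {
			panic(err)
		}
		fmt.Println(s, t)
	}
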
diff --git a/vendor/go.mongodb.org/mongo-driver/event/doc.go b/vendor/go.mongodb.org/mongo-driver/event/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..da1da4d47fa14d0f1344bc15f4b0ecab77fe5ddf
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/event/doc.go
@@ -0,0 +1,56 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package event is a library for monitoring events from the MongoDB Go
+// driver. Monitors can be set for commands sent to the MongoDB cluster,
+// connection pool changes, or changes on the MongoDB cluster.
+//
+// Monitoring commands requires specifying a CommandMonitor when constructing
+// a mongo.Client. A CommandMonitor can be set to monitor started, succeeded,
+// and/or failed events. A CommandStartedEvent can be correlated to its matching
+// CommandSucceededEvent or CommandFailedEvent through the RequestID field. For
+// example, the following code collects the names of started events:
+//
+//	var commandStarted []string
+//	cmdMonitor := &event.CommandMonitor{
+//	  Started: func(_ context.Context, evt *event.CommandStartedEvent) {
+//	    commandStarted = append(commandStarted, evt.CommandName)
+//	  },
+//	}
+//	clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetMonitor(cmdMonitor)
+//	client, err := mongo.Connect(context.Background(), clientOpts)
+//
+// Monitoring the connection pool requires specifying a PoolMonitor when constructing
+// a mongo.Client. The following code tracks the number of checked out connections:
+//
+//	var connsCheckedOut int
+//	poolMonitor := &event.PoolMonitor{
+//	  Event: func(evt *event.PoolEvent) {
+//	    switch evt.Type {
+//	    case event.GetSucceeded:
+//	      connsCheckedOut++
+//	    case event.ConnectionReturned:
+//	      connsCheckedOut--
+//	    }
+//	  },
+//	}
+//	clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetPoolMonitor(poolMonitor)
+//	client, err := mongo.Connect(context.Background(), clientOpts)
+//
+// Monitoring server changes requires specifying a ServerMonitor object when constructing
+// a mongo.Client. Different functions can be set on the ServerMonitor to
+// monitor different kinds of events. See ServerMonitor for more details.
+// The following code appends ServerHeartbeatStartedEvents to a slice:
+//
+//	var heartbeatStarted []*event.ServerHeartbeatStartedEvent
+//	svrMonitor := &event.ServerMonitor{
+//	  ServerHeartbeatStarted: func(e *event.ServerHeartbeatStartedEvent) {
+//	    heartbeatStarted = append(heartbeatStarted, e)
+//	  },
+//	}
+//	clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetServerMonitor(svrMonitor)
+//	client, err := mongo.Connect(context.Background(), clientOpts)
+package event
diff --git a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go
new file mode 100644
index 0000000000000000000000000000000000000000..ddc7abacf7b56add99c29447088037756d3fa50e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go
@@ -0,0 +1,213 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package event // import "go.mongodb.org/mongo-driver/event"
+
+import (
+	"context"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+)
+
+// CommandStartedEvent represents an event generated when a command is sent to a server.
+type CommandStartedEvent struct {
+	Command      bson.Raw
+	DatabaseName string
+	CommandName  string
+	RequestID    int64
+	ConnectionID string
+	// ServerConnectionID contains the connection ID from the server of the operation. If the server does not return
+	// this value (e.g. on MDB < 4.2), it is unset. If the server connection ID would cause an int32 overflow,
+	// then this field will be nil.
+	//
+	// Deprecated: Use ServerConnectionID64.
+	ServerConnectionID *int32
+	// ServerConnectionID64 contains the connection ID from the server of the operation. If the server does not
+	// return this value (e.g. on MDB < 4.2), it is unset.
+	ServerConnectionID64 *int64
+	// ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer.
+	// Otherwise, it is unset.
+	ServiceID *primitive.ObjectID
+}
+
+// CommandFinishedEvent represents a generic command finishing.
+type CommandFinishedEvent struct {
+	// Deprecated: Use Duration instead.
+	DurationNanos int64
+	Duration      time.Duration
+	CommandName   string
+	DatabaseName  string
+	RequestID     int64
+	ConnectionID  string
+	// ServerConnectionID contains the connection ID from the server of the operation. If the server does not return
+	// this value (e.g. on MDB < 4.2), it is unset. If the server connection ID would cause an int32 overflow, then
+	// this field will be nil.
+	//
+	// Deprecated: Use ServerConnectionID64.
+	ServerConnectionID *int32
+	// ServerConnectionID64 contains the connection ID from the server of the operation. If the server does not
+	// return this value (e.g. on MDB < 4.2), it is unset.
+	ServerConnectionID64 *int64
+	// ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer.
+	// Otherwise, it is unset.
+	ServiceID *primitive.ObjectID
+}
+
+// CommandSucceededEvent represents an event generated when a command's execution succeeds.
+type CommandSucceededEvent struct {
+	CommandFinishedEvent
+	Reply bson.Raw
+}
+
+// CommandFailedEvent represents an event generated when a command's execution fails.
+type CommandFailedEvent struct {
+	CommandFinishedEvent
+	Failure string
+}
+
+// CommandMonitor represents a monitor that is triggered for different events.
+type CommandMonitor struct {
+	Started   func(context.Context, *CommandStartedEvent)
+	Succeeded func(context.Context, *CommandSucceededEvent)
+	Failed    func(context.Context, *CommandFailedEvent)
+}
+
+// strings for pool command monitoring reasons
+const (
+	ReasonIdle              = "idle"
+	ReasonPoolClosed        = "poolClosed"
+	ReasonStale             = "stale"
+	ReasonConnectionErrored = "connectionError"
+	ReasonTimedOut          = "timeout"
+	ReasonError             = "error"
+)
+
+// strings for pool command monitoring types
+const (
+	PoolCreated        = "ConnectionPoolCreated"
+	PoolReady          = "ConnectionPoolReady"
+	PoolCleared        = "ConnectionPoolCleared"
+	PoolClosedEvent    = "ConnectionPoolClosed"
+	ConnectionCreated  = "ConnectionCreated"
+	ConnectionReady    = "ConnectionReady"
+	ConnectionClosed   = "ConnectionClosed"
+	GetStarted         = "ConnectionCheckOutStarted"
+	GetFailed          = "ConnectionCheckOutFailed"
+	GetSucceeded       = "ConnectionCheckedOut"
+	ConnectionReturned = "ConnectionCheckedIn"
+)
+
+// MonitorPoolOptions contains pool options as formatted in pool events
+type MonitorPoolOptions struct {
+	MaxPoolSize        uint64 `json:"maxPoolSize"`
+	MinPoolSize        uint64 `json:"minPoolSize"`
+	WaitQueueTimeoutMS uint64 `json:"maxIdleTimeMS"`
+}
+
+// PoolEvent contains all information summarizing a pool event
+type PoolEvent struct {
+	Type         string              `json:"type"`
+	Address      string              `json:"address"`
+	ConnectionID uint64              `json:"connectionId"`
+	PoolOptions  *MonitorPoolOptions `json:"options"`
+	Duration     time.Duration       `json:"duration"`
+	Reason       string              `json:"reason"`
+	// ServiceID is only set if the Type is PoolCleared and the server is deployed behind a load balancer. This field
+	// can be used to distinguish between individual servers in a load balanced deployment.
+	ServiceID    *primitive.ObjectID `json:"serviceId"`
+	Interruption bool                `json:"interruptInUseConnections"`
+	Error        error               `json:"error"`
+}
+
+// PoolMonitor is a monitor that allows the user to gain access to events occurring in the pool
+type PoolMonitor struct {
+	Event func(*PoolEvent)
+}
+
+// ServerDescriptionChangedEvent represents a server description change.
+type ServerDescriptionChangedEvent struct {
+	Address             address.Address
+	TopologyID          primitive.ObjectID // A unique identifier for the topology this server is a part of
+	PreviousDescription description.Server
+	NewDescription      description.Server
+}
+
+// ServerOpeningEvent is an event generated when the server is initialized.
+type ServerOpeningEvent struct {
+	Address    address.Address
+	TopologyID primitive.ObjectID // A unique identifier for the topology this server is a part of
+}
+
+// ServerClosedEvent is an event generated when the server is closed.
+type ServerClosedEvent struct {
+	Address    address.Address
+	TopologyID primitive.ObjectID // A unique identifier for the topology this server is a part of
+}
+
+// TopologyDescriptionChangedEvent represents a topology description change.
+type TopologyDescriptionChangedEvent struct {
+	TopologyID          primitive.ObjectID // A unique identifier for the topology this server is a part of
+	PreviousDescription description.Topology
+	NewDescription      description.Topology
+}
+
+// TopologyOpeningEvent is an event generated when the topology is initialized.
+type TopologyOpeningEvent struct {
+	TopologyID primitive.ObjectID // A unique identifier for the topology this server is a part of
+}
+
+// TopologyClosedEvent is an event generated when the topology is closed.
+type TopologyClosedEvent struct {
+	TopologyID primitive.ObjectID // A unique identifier for the topology this server is a part of
+}
+
+// ServerHeartbeatStartedEvent is an event generated when the heartbeat is started.
+type ServerHeartbeatStartedEvent struct {
+	ConnectionID string // The address this heartbeat was sent to with a unique identifier
+	Awaited      bool   // If this heartbeat was awaitable
+}
+
+// ServerHeartbeatSucceededEvent is an event generated when the heartbeat succeeds.
+type ServerHeartbeatSucceededEvent struct {
+	// Deprecated: Use Duration instead.
+	DurationNanos int64
+	Duration      time.Duration
+	Reply         description.Server
+	ConnectionID  string // The address this heartbeat was sent to with a unique identifier
+	Awaited       bool   // If this heartbeat was awaitable
+}
+
+// ServerHeartbeatFailedEvent is an event generated when the heartbeat fails.
+type ServerHeartbeatFailedEvent struct {
+	// Deprecated: Use Duration instead.
+	DurationNanos int64
+	Duration      time.Duration
+	Failure       error
+	ConnectionID  string // The address this heartbeat was sent to with a unique identifier
+	Awaited       bool   // If this heartbeat was awaitable
+}
+
+// ServerMonitor represents a monitor that is triggered for different server events. The client
+// will monitor changes on the MongoDB deployment it is connected to, and this monitor reports
+// the changes in the client's representation of the deployment. The topology represents the
+// overall deployment, and heartbeats are sent to individual servers to check their current status.
+type ServerMonitor struct {
+	ServerDescriptionChanged func(*ServerDescriptionChangedEvent)
+	ServerOpening            func(*ServerOpeningEvent)
+	ServerClosed             func(*ServerClosedEvent)
+	// TopologyDescriptionChanged is called while the topology is locked, so the callback should
+	// not attempt any operation that requires server selection on the same client.
+	TopologyDescriptionChanged func(*TopologyDescriptionChangedEvent)
+	TopologyOpening            func(*TopologyOpeningEvent)
+	TopologyClosed             func(*TopologyClosedEvent)
+	ServerHeartbeatStarted     func(*ServerHeartbeatStartedEvent)
+	ServerHeartbeatSucceeded   func(*ServerHeartbeatSucceededEvent)
+	ServerHeartbeatFailed      func(*ServerHeartbeatFailedEvent)
+}
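
As the package documentation notes, a CommandStartedEvent is correlated with its CommandSucceededEvent or CommandFailedEvent through RequestID. A sketch of that bookkeeping, assuming imports of context, log, sync, and this event package; the map and log format are illustrative, not part of the driver:

	// newTimingMonitor is a hypothetical constructor that logs how long each
	// command took, keyed by RequestID.
	func newTimingMonitor() *event.CommandMonitor {
		var mu sync.Mutex
		started := make(map[int64]*event.CommandStartedEvent)
		return &event.CommandMonitor{
			Started: func(_ context.Context, e *event.CommandStartedEvent) {
				mu.Lock()
				started[e.RequestID] = e
				mu.Unlock()
			},
			Succeeded: func(_ context.Context, e *event.CommandSucceededEvent) {
				mu.Lock()
				s := started[e.RequestID]
				delete(started, e.RequestID)
				mu.Unlock()
				if s != nil {
					log.Printf("%s on %s took %s", s.CommandName, s.DatabaseName, e.Duration)
				}
			},
		}
	}

The returned monitor is then passed to options.Client().SetMonitor, as in the doc.go example above.
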
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/error.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..63d06a1769c89effdb407ab882edba125574a777
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/error.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/awserr/error.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set.  Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original error if one was set.  Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
+	}
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns an BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
+}
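
A short sketch of composing and inspecting one of these errors. The code and message strings mirror the credentials package below; note the package lives under internal/ and is importable only from within the driver module, and the example assumes context and fmt imports:

	func example() {
		err := awserr.New("RequestCanceled", "request context canceled", context.Canceled)
		fmt.Println(err.Code())                        // RequestCanceled
		fmt.Println(err.Message())                     // request context canceled
		fmt.Println(err.OrigErr() == context.Canceled) // true
		fmt.Println(err.Error())
		// RequestCanceled: request context canceled
		// caused by: context canceled
	}
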
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/types.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..18cb4cda284e9ec53f41d07f936de19b07a68aa3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/types.go
@@ -0,0 +1,144 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/awserr/types.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package awserr
+
+import (
+	"fmt"
+)
+
+// SprintError returns a formatted error string built from the code and message.
+//
+// Both extra and origErr are optional. If either is provided, its line is
+// appended; if it is omitted, that line is left out.
+func SprintError(code, message, extra string, origErr error) string {
+	msg := fmt.Sprintf("%s: %s", code, message)
+	if extra != "" {
+		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+	}
+	if origErr != nil {
+		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+	}
+	return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error interface. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+	// Classification of error
+	code string
+
+	// Detailed information about error
+	message string
+
+	// Optional original error this error is based off of. Allows building
+	// chained errors.
+	errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the
+// error.
+//
+// origErrs is the list of error objects which will be nested under the new
+// error to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+	b := &baseError{
+		code:    code,
+		message: message,
+		errs:    origErrs,
+	}
+
+	return b
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+	size := len(b.errs)
+	if size > 0 {
+		return SprintError(b.code, b.message, "", errorList(b.errs))
+	}
+
+	return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+	return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+	return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+	return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occurred", b.errs)
+	}
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
+}
+
+// errorList is an error list that satisfies the error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// If the slice is empty, msg remains the empty string.
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += e[i].Error()
+			// We check the next index to see if it is within the slice.
+			// If it is, then we append a newline. We do this because unit tests
+			// could be broken by an additional '\n'.
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}
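
For reference, the formatting SprintError produces when both optional parts are supplied; the values are illustrative and the snippet assumes errors and fmt imports:

	func exampleSprint() {
		msg := awserr.SprintError("NoCredentialProviders", "no valid providers in chain",
			"retry disabled", errors.New("env: not found"))
		fmt.Println(msg)
		// NoCredentialProviders: no valid providers in chain
		//	retry disabled
		// caused by: env: not found
	}
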
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/chain_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/chain_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..6843927153d31e550e95d8a5408c817822f4cebc
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/chain_provider.go
@@ -0,0 +1,72 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/credentials/chain_provider.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package credentials
+
+import (
+	"go.mongodb.org/mongo-driver/internal/aws/awserr"
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns a valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+type ChainProvider struct {
+	Providers []Provider
+	curr      Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs = make([]error, 0, len(c.Providers))
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one.  If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
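
A sketch of wiring the chain, using a hypothetical staticProvider invented here for illustration; it is not part of this package, and the snippet assumes an errors import:

	type staticProvider struct{ v credentials.Value }

	func (p staticProvider) Retrieve() (credentials.Value, error) {
		if !p.v.HasKeys() {
			return credentials.Value{}, errors.New("static: missing keys")
		}
		return p.v, nil
	}

	func (p staticProvider) IsExpired() bool { return false }

	func newChain() *credentials.Credentials {
		return credentials.NewChainCredentials([]credentials.Provider{
			staticProvider{}, // no keys: Retrieve fails and the chain moves on
			staticProvider{v: credentials.Value{AccessKeyID: "AK", SecretAccessKey: "SK"}},
		})
	}
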
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/credentials.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/credentials.go
new file mode 100644
index 0000000000000000000000000000000000000000..53181aa163d3c887f2d8ef462bcdb6a5e38476df
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/credentials.go
@@ -0,0 +1,197 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/credentials/credentials.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package credentials
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws/awserr"
+	"golang.org/x/sync/singleflight"
+)
+
+// A Value is the AWS credentials value for individual credential fields.
+//
+// A Value is also used to represent Azure credentials.
+// Azure credentials only consist of an access token, which is stored in the `SessionToken` field.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// HasKeys returns whether the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
+func (v Value) HasKeys() bool {
+	return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state and what it
+// means to be expired.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable or is empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns whether the credentials are no longer valid and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// ProviderWithContext is a Provider that can retrieve credentials with a Context
+type ProviderWithContext interface {
+	Provider
+
+	RetrieveWithContext(context.Context) (Value, error)
+}
+
+// A Credentials provides concurrency-safe retrieval of AWS credentials Value.
+//
+// A Credentials is also used to fetch Azure credentials Value.
+//
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+	sf singleflight.Group
+
+	m        sync.RWMutex
+	creds    Value
+	provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+	c := &Credentials{
+		provider: provider,
+	}
+	return c
+}
+
+// GetWithContext returns the credentials value, or error if the credentials
+// Value failed to be retrieved. Will return early if the passed in context is
+// canceled.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) GetWithContext(ctx context.Context) (Value, error) {
+	// Check if credentials are cached, and not expired.
+	select {
+	case curCreds, ok := <-c.asyncIsExpired():
+		// ok will only be true if the credentials were not expired. ok will
+		// be false and have no value if the credentials are expired.
+		if ok {
+			return curCreds, nil
+		}
+	case <-ctx.Done():
+		return Value{}, awserr.New("RequestCanceled",
+			"request context canceled", ctx.Err())
+	}
+
+	// Cannot pass context down to the actual retrieve, because the first
+	// context would cancel the whole group when there is no direct
+	// association of items in the group.
+	resCh := c.sf.DoChan("", func() (interface{}, error) {
+		return c.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Value), res.Err
+	case <-ctx.Done():
+		return Value{}, awserr.New("RequestCanceled",
+			"request context canceled", ctx.Err())
+	}
+}
+
+func (c *Credentials) singleRetrieve(ctx context.Context) (interface{}, error) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+		return curCreds, nil
+	}
+
+	var creds Value
+	var err error
+	if p, ok := c.provider.(ProviderWithContext); ok {
+		creds, err = p.RetrieveWithContext(ctx)
+	} else {
+		creds, err = c.provider.Retrieve()
+	}
+	if err == nil {
+		c.creds = creds
+	}
+
+	return creds, err
+}
+
+// asyncIsExpired returns a channel of credentials Value. The cached Value is
+// sent before the channel is closed only if the credentials are not expired.
+func (c *Credentials) asyncIsExpired() <-chan Value {
+	ch := make(chan Value, 1)
+	go func() {
+		c.m.RLock()
+		defer c.m.RUnlock()
+
+		if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+			ch <- curCreds
+		}
+
+		close(ch)
+	}()
+
+	return ch
+}
+
+// isExpiredLocked helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpiredLocked(creds interface{}) bool {
+	return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
+}
+
+type suppressedContext struct {
+	context.Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+	return nil
+}
+
+func (s *suppressedContext) Err() error {
+	return nil
+}
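
Reusing the hypothetical newChain helper from the previous sketch, retrieval looks like this; concurrent callers share a single Retrieve via the singleflight group, and the snippet assumes context, fmt, log, and time imports:

	func example(ctx context.Context) {
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()

		v, err := newChain().GetWithContext(ctx) // cached until the provider reports expiry
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(v.ProviderName, v.HasKeys())
	}
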
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/header_rules.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/header_rules.go
new file mode 100644
index 0000000000000000000000000000000000000000..a3726467f3a36845be21112f6c232698e3cfe0c4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/header_rules.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/signer/v4/header_rules.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package v4
+
+// rules houses a set of rules needed for validation of a
+// string value.
+type rules []rule
+
+// rule interface allows for more flexible rules; it simply
+// checks whether or not a value adheres to the rule.
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and reports whether any rule
+// applies to the value; nested rules are supported.
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule reports whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// excludeList is a generic rule for exclude listing
+type excludeList struct {
+	rule
+}
+
+// IsValid for the exclude list returns true only if the value is not matched by the wrapped rule
+func (b excludeList) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
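
Because these types are unexported, they are only usable inside this package; the signer below builds exactly this shape as its ignoredHeaders set. An in-package sketch of the semantics, assuming a fmt import:

	func exampleRules() {
		signable := rules{excludeList{mapRule{
			"Authorization":   struct{}{},
			"X-Amzn-Trace-Id": struct{}{},
		}}}
		fmt.Println(signable.IsValid("Content-Type"))  // true: not excluded, so the header is signed
		fmt.Println(signable.IsValid("Authorization")) // false: excluded from signing
	}
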
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/request.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/request.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a43bb303b487415c234c1f59e698ed0c4c2b563
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/request.go
@@ -0,0 +1,80 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/request/request.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// getHost returns the host from the request.
+func getHost(r *http.Request) string {
+	if r.Host != "" {
+		return r.Host
+	}
+
+	if r.URL == nil {
+		return ""
+	}
+
+	return r.URL.Host
+}
+
+// stripPort returns hostport without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
+
+// portOnly returns the port part of hostport, without the leading colon.
+// If hostport doesn't contain a port, portOnly returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return ""
+	}
+	if i := strings.Index(hostport, "]:"); i != -1 {
+		return hostport[i+len("]:"):]
+	}
+	if strings.Contains(hostport, "]") {
+		return ""
+	}
+	return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+	if port == "" {
+		return true
+	}
+
+	lowerCaseScheme := strings.ToLower(scheme)
+	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+		return true
+	}
+
+	return false
+}
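
These helpers are unexported, so the following sketch only illustrates their behavior from inside the package; the host values are illustrative and a fmt import is assumed:

	func exampleHostHelpers() {
		fmt.Println(stripPort("db.example.com:27017")) // db.example.com
		fmt.Println(stripPort("[::1]:27017"))          // ::1 (brackets removed)
		fmt.Println(portOnly("db.example.com:27017"))  // 27017
		fmt.Println(portOnly("[::1]"))                 // "" (no port)
		fmt.Println(isDefaultPort("https", "443"))     // true: default ports are omitted when signing
	}
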
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/uri_path.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/uri_path.go
new file mode 100644
index 0000000000000000000000000000000000000000..69b6005eb589a3fd395cf001cca922f0a7634845
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/uri_path.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/signer/v4/uri_path.go
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/private/protocol/rest/build.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package v4
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"strings"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+func getURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+	var buf bytes.Buffer
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if noEscape[c] || (c == '/' && !encodeSep) {
+			buf.WriteByte(c)
+		} else {
+			fmt.Fprintf(&buf, "%%%02X", c)
+		}
+	}
+	return buf.String()
+}
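
EscapePath is exported, so it can be exercised directly, though the package itself is internal to the driver module. A short sketch of its behavior, assuming a fmt import:

	func exampleEscapePath() {
		fmt.Println(EscapePath("/my key/_doc~1", false)) // /my%20key/_doc~1 (slashes kept)
		fmt.Println(EscapePath("a/b", true))             // a%2Fb (separator escaped too)
	}
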
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/v4.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/v4.go
new file mode 100644
index 0000000000000000000000000000000000000000..6cf4586bb99b1dc7ec0838ca0f8f4c959a93e3c8
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/v4.go
@@ -0,0 +1,421 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/signer/v4/v4.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws"
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+)
+
+const (
+	authorizationHeader     = "Authorization"
+	authHeaderSignatureElem = "Signature="
+
+	authHeaderPrefix = "AWS4-HMAC-SHA256"
+	timeFormat       = "20060102T150405Z"
+	shortTimeFormat  = "20060102"
+	awsV4Request     = "aws4_request"
+
+	// emptyStringSHA256 is a SHA256 of an empty string
+	emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+	excludeList{
+		mapRule{
+			authorizationHeader: struct{}{},
+			"User-Agent":        struct{}{},
+			"X-Amzn-Trace-Id":   struct{}{},
+		},
+	},
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+	// The authentication credentials the request will be signed against.
+	// This value must be set to sign requests.
+	Credentials *credentials.Credentials
+}
+
+// NewSigner returns a Signer pointer configured with the credentials provided.
+func NewSigner(credentials *credentials.Credentials) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	SignedHeaderVals http.Header
+
+	credValues credentials.Value
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	ctx := &signingCtx{
+		Request:     r,
+		Body:        body,
+		Query:       r.URL.Query(),
+		Time:        signTime,
+		ServiceName: service,
+		Region:      region,
+	}
+
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	if ctx.isRequestSigned() {
+		ctx.Time = time.Now()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.GetWithContext(r.Context())
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(); err != nil {
+		return nil, err
+	}
+
+	var reader io.ReadCloser
+	if body != nil {
+		var ok bool
+		if reader, ok = body.(io.ReadCloser); !ok {
+			reader = ioutil.NopCloser(body)
+		}
+	}
+	r.Body = reader
+
+	return ctx.SignedHeaderVals, nil
+}
+
+// sanitizeHostForHeader removes default port from host and updates request.Host
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	r := ctx.Request
+	host := getHost(r)
+	port := portOnly(host)
+	if port != "" && isDefaultPort(r.URL.Scheme, port) {
+		r.Host = stripPort(host)
+	}
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+func (ctx *signingCtx) build() error {
+	ctx.buildTime()             // no depends
+	ctx.buildCredentialString() // no depends
+
+	if err := ctx.buildBodyDigest(); err != nil {
+		return err
+	}
+
+	unsignedHeaders := ctx.Request.Header
+
+	ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+	ctx.buildCanonicalString() // depends on canon headers / signed headers
+	ctx.buildStringToSign()    // depends on canon string
+	ctx.buildSignature()       // depends on string to sign
+
+	parts := []string{
+		authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+		"SignedHeaders=" + ctx.signedHeaders,
+		authHeaderSignatureElem + ctx.signature,
+	}
+	ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
+
+	return nil
+}
+
+func (ctx *signingCtx) buildTime() {
+	ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+	ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)
+}
+
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+	headers := make([]string, 0, len(header)+1)
+	headers = append(headers, "host")
+	for k, v := range header {
+		if !r.IsValid(k) {
+			continue // ignored header
+		}
+		if ctx.SignedHeaderVals == nil {
+			ctx.SignedHeaderVals = make(http.Header)
+		}
+
+		lowerCaseKey := strings.ToLower(k)
+		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+			// include additional values
+			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+			continue
+		}
+
+		headers = append(headers, lowerCaseKey)
+		ctx.SignedHeaderVals[lowerCaseKey] = v
+	}
+	sort.Strings(headers)
+
+	ctx.signedHeaders = strings.Join(headers, ";")
+
+	headerItems := make([]string, len(headers))
+	for i, k := range headers {
+		if k == "host" {
+			if ctx.Request.Host != "" {
+				headerItems[i] = "host:" + ctx.Request.Host
+			} else {
+				headerItems[i] = "host:" + ctx.Request.URL.Host
+			}
+		} else {
+			headerValues := make([]string, len(ctx.SignedHeaderVals[k]))
+			for i, v := range ctx.SignedHeaderVals[k] {
+				headerValues[i] = strings.TrimSpace(v)
+			}
+			headerItems[i] = k + ":" +
+				strings.Join(headerValues, ",")
+		}
+	}
+	stripExcessSpaces(headerItems)
+	ctx.canonicalHeaders = strings.Join(headerItems, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+	uri := getURIPath(ctx.Request.URL)
+
+	uri = EscapePath(uri, false)
+
+	ctx.canonicalString = strings.Join([]string{
+		ctx.Request.Method,
+		uri,
+		ctx.Request.URL.RawQuery,
+		ctx.canonicalHeaders + "\n",
+		ctx.signedHeaders,
+		ctx.bodyDigest,
+	}, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+	ctx.stringToSign = strings.Join([]string{
+		authHeaderPrefix,
+		formatTime(ctx.Time),
+		ctx.credentialString,
+		hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
+	}, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+	creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
+	signature := hmacSHA256(creds, []byte(ctx.stringToSign))
+	ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+	if hash == "" {
+		if ctx.Body == nil {
+			hash = emptyStringSHA256
+		} else {
+			if !aws.IsReaderSeekable(ctx.Body) {
+				return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+			}
+			hashBytes, err := makeSha256Reader(ctx.Body)
+			if err != nil {
+				return err
+			}
+			hash = hex.EncodeToString(hashBytes)
+		}
+	}
+	ctx.bodyDigest = hash
+
+	return nil
+}
+
+// isRequestSigned returns whether the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+	return ctx.Request.Header.Get("Authorization") != ""
+}
+
+func hmacSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func hashSHA256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
+	hash := sha256.New()
+	start, err := reader.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Ensure an error is returned if we are unable to seek back to the start of the payload.
+		_, err = reader.Seek(start, io.SeekStart)
+	}()
+
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		_, _ = io.Copy(hash, reader)
+	} else {
+		_, _ = io.CopyN(hash, reader, size)
+	}
+
+	return hash.Sum(nil), nil
+}
+
+const doubleSpace = "  "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+	var j, k, l, m, spaces int
+	for i, str := range vals {
+		// revive:disable:empty-block
+
+		// Trim trailing spaces
+		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+		}
+
+		// Trim leading spaces
+		for k = 0; k < j && str[k] == ' '; k++ {
+		}
+
+		// revive:enable:empty-block
+
+		str = str[k : j+1]
+
+		// Strip multiple spaces.
+		j = strings.Index(str, doubleSpace)
+		if j < 0 {
+			vals[i] = str
+			continue
+		}
+
+		buf := []byte(str)
+		for k, m, l = j, j, len(buf); k < l; k++ {
+			if buf[k] == ' ' {
+				if spaces == 0 {
+					// First space.
+					buf[m] = buf[k]
+					m++
+				}
+				spaces++
+			} else {
+				// End of multiple spaces.
+				spaces = 0
+				buf[m] = buf[k]
+				m++
+			}
+		}
+
+		vals[i] = string(buf[:m])
+	}
+}
+
+func buildSigningScope(region, service string, dt time.Time) string {
+	return strings.Join([]string{
+		formatShortTime(dt),
+		region,
+		service,
+		awsV4Request,
+	}, "/")
+}
+
+func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
+	keyDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
+	keyRegion := hmacSHA256(keyDate, []byte(region))
+	keyService := hmacSHA256(keyRegion, []byte(service))
+	signingKey := hmacSHA256(keyService, []byte(awsV4Request))
+	return signingKey
+}
+
+func formatShortTime(dt time.Time) string {
+	return dt.UTC().Format(shortTimeFormat)
+}
+
+func formatTime(dt time.Time) string {
+	return dt.UTC().Format(timeFormat)
+}
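The following is a minimal sketch of the full signing flow. It is illustrative only: internal driver packages are not importable from application code, the keys are placeholders, and it assumes the vendored credentials package keeps aws-sdk-go's NewCredentials constructor (that file is not part of this hunk). The StaticProvider used here appears later in this diff.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"go.mongodb.org/mongo-driver/internal/aws/credentials"
	v4 "go.mongodb.org/mongo-driver/internal/aws/signer/v4"
	"go.mongodb.org/mongo-driver/internal/credproviders"
)

func main() {
	// Placeholder keys; NewCredentials is assumed to mirror aws-sdk-go.
	creds := credentials.NewCredentials(&credproviders.StaticProvider{
		Value: credentials.Value{AccessKeyID: "AKIDEXAMPLE", SecretAccessKey: "secretValue"},
	})

	body := bytes.NewReader([]byte(`Action=GetCallerIdentity&Version=2011-06-15`))
	req, _ := http.NewRequest(http.MethodPost, "https://sts.amazonaws.com/", body)

	// Sign computes the body digest, canonical request, and string to sign,
	// then sets the Authorization header on req.
	if _, err := v4.NewSigner(creds).Sign(req, body, "sts", "us-east-1", time.Now()); err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization")) // AWS4-HMAC-SHA256 Credential=.../..., SignedHeaders=..., Signature=...
}
```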
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/types.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..52aecda76b66ac9b014c39afdcaf2a27bc30600d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/types.go
@@ -0,0 +1,153 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from:
+// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/types.go
+// See THIRD-PARTY-NOTICES for original license terms
+
+package aws
+
+import (
+	"io"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation from being retried in the
+// case of network errors, and will cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3
+// Upload manager (s3manager.Uploader) provides support for streaming with the
+// ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader can be seeked. An
+// io.Reader might not actually be seekable even if wrapped in the
+// ReaderSeekerCloser type.
+func IsReaderSeekable(r io.Reader) bool {
+	switch v := r.(type) {
+	case ReaderSeekerCloser:
+		return v.IsSeeker()
+	case *ReaderSeekerCloser:
+		return v.IsSeeker()
+	case io.ReadSeeker:
+		return true
+	default:
+		return false
+	}
+}
+
+// Read reads from the reader, up to len(p) bytes. It returns the number of
+// bytes read and any error that occurred.
+//
+// If the underlying value is not an io.Reader, zero bytes read and a nil
+// error are returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the underlying value is not an io.Seeker, nothing is done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// IsSeeker returns whether the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return seekerLen(s)
+	}
+
+	return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. It returns the number of bytes remaining or an error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+	switch v := s.(type) {
+	case ReaderSeekerCloser:
+		return v.GetLen()
+	case *ReaderSeekerCloser:
+		return v.GetLen()
+	}
+
+	return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+	curOffset, err := s.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := s.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = s.Seek(curOffset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return endOffset - curOffset, nil
+}
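A sketch of the seekability semantics, again assuming in-package access (the package is internal): a strings.Reader is seekable and has a Len method, so both the seekability check and the remaining-length query succeed, while a bufio.Reader is not seekable even when wrapped.

```go
package aws

import (
	"bufio"
	"fmt"
	"strings"
)

// demoSeekable is a hypothetical in-package helper illustrating the checks above.
func demoSeekable() {
	seekable := ReadSeekCloser(strings.NewReader("hello world"))
	fmt.Println(IsReaderSeekable(seekable)) // true: strings.Reader implements io.Seeker

	n, _ := SeekerLen(seekable)
	fmt.Println(n) // 11: bytes remaining from the current offset

	buffered := ReadSeekCloser(bufio.NewReader(strings.NewReader("x")))
	fmt.Println(IsReaderSeekable(buffered)) // false: bufio.Reader cannot seek
}
```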
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/bsonutil/bsonutil.go b/vendor/go.mongodb.org/mongo-driver/internal/bsonutil/bsonutil.go
new file mode 100644
index 0000000000000000000000000000000000000000..eebb328906fc1072eabc6c95621cd303218cbf45
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/bsonutil/bsonutil.go
@@ -0,0 +1,62 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonutil
+
+import (
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+// StringSliceFromRawValue decodes the provided BSON value into a []string. This function returns an error if the value
+// is not an array or any of the elements in the array are not strings. The name parameter is used to add context to
+// error messages.
+func StringSliceFromRawValue(name string, val bson.RawValue) ([]string, error) {
+	arr, ok := val.ArrayOK()
+	if !ok {
+		return nil, fmt.Errorf("expected '%s' to be an array but it's a BSON %s", name, val.Type)
+	}
+
+	arrayValues, err := arr.Values()
+	if err != nil {
+		return nil, err
+	}
+
+	strs := make([]string, 0, len(arrayValues))
+	for _, arrayVal := range arrayValues {
+		str, ok := arrayVal.StringValueOK()
+		if !ok {
+			return nil, fmt.Errorf("expected '%s' to be an array of strings, but found a BSON %s", name, arrayVal.Type)
+		}
+		strs = append(strs, str)
+	}
+	return strs, nil
+}
+
+// RawToDocuments converts a bson.Raw that is internally an array of documents to []bson.Raw.
+func RawToDocuments(doc bson.Raw) []bson.Raw {
+	values, err := doc.Values()
+	if err != nil {
+		panic(fmt.Sprintf("error converting BSON document to values: %v", err))
+	}
+
+	out := make([]bson.Raw, len(values))
+	for i := range values {
+		out[i] = values[i].Document()
+	}
+
+	return out
+}
+
+// RawToInterfaces takes one or many bson.Raw documents and returns them as a []interface{}.
+func RawToInterfaces(docs ...bson.Raw) []interface{} {
+	out := make([]interface{}, len(docs))
+	for i := range docs {
+		out[i] = docs[i]
+	}
+	return out
+}
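A short sketch of StringSliceFromRawValue and its error path (illustrative only, since the package is internal to the driver module):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/internal/bsonutil"
)

func main() {
	raw, _ := bson.Marshal(bson.D{{Key: "tags", Value: bson.A{"alpha", "beta"}}})
	tags, err := bsonutil.StringSliceFromRawValue("tags", bson.Raw(raw).Lookup("tags"))
	fmt.Println(tags, err) // [alpha beta] <nil>

	// A non-string element produces a descriptive error instead.
	raw, _ = bson.Marshal(bson.D{{Key: "tags", Value: bson.A{"alpha", int32(1)}}})
	_, err = bsonutil.StringSliceFromRawValue("tags", bson.Raw(raw).Lookup("tags"))
	fmt.Println(err) // expected 'tags' to be an array of strings, but found a BSON 32-bit integer
}
```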
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/codecutil/encoding.go b/vendor/go.mongodb.org/mongo-driver/internal/codecutil/encoding.go
new file mode 100644
index 0000000000000000000000000000000000000000..2aaf8f27196eacc564a6a95185d49158451dae73
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/codecutil/encoding.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package codecutil
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var ErrNilValue = errors.New("value is nil")
+
+// MarshalError is returned when attempting to transform a value into a document
+// results in an error.
+type MarshalError struct {
+	Value interface{}
+	Err   error
+}
+
+// Error implements the error interface.
+func (e MarshalError) Error() string {
+	return fmt.Sprintf("cannot transform type %s to a BSON Document: %v",
+		reflect.TypeOf(e.Value), e.Err)
+}
+
+// EncoderFn is used to functionally construct an encoder for marshaling values.
+type EncoderFn func(io.Writer) (*bson.Encoder, error)
+
+// MarshalValue will attempt to encode the value with the encoder returned by
+// the encoder function.
+func MarshalValue(val interface{}, encFn EncoderFn) (bsoncore.Value, error) {
+	// If the val is already a bsoncore.Value, then do nothing.
+	if bval, ok := val.(bsoncore.Value); ok {
+		return bval, nil
+	}
+
+	if val == nil {
+		return bsoncore.Value{}, ErrNilValue
+	}
+
+	buf := new(bytes.Buffer)
+
+	enc, err := encFn(buf)
+	if err != nil {
+		return bsoncore.Value{}, err
+	}
+
+	// Encode the value in a single-element document with an empty key. Use
+	// bsoncore to extract the first element and return the BSON value.
+	err = enc.Encode(bson.D{{Key: "", Value: val}})
+	if err != nil {
+		return bsoncore.Value{}, MarshalError{Value: val, Err: err}
+	}
+
+	return bsoncore.Document(buf.Bytes()).Index(0).Value(), nil
+}
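A sketch of how an EncoderFn can be built from the public bson package and fed to MarshalValue (illustrative only; the codecutil package is internal):

```go
package main

import (
	"fmt"
	"io"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
	"go.mongodb.org/mongo-driver/internal/codecutil"
)

func main() {
	// MarshalValue wraps the value in a single-element document with an empty
	// key and extracts the first element's BSON value.
	encFn := func(w io.Writer) (*bson.Encoder, error) {
		vw, err := bsonrw.NewBSONValueWriter(w)
		if err != nil {
			return nil, err
		}
		return bson.NewEncoder(vw)
	}

	val, err := codecutil.MarshalValue(map[string]string{"hello": "world"}, encFn)
	fmt.Println(val, err) // prints the encoded document value, e.g. {"hello": "world"} <nil>
}
```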
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/assume_role_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/assume_role_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..3a95cf401d39692e97ff5471592a55a29a133ecf
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/assume_role_provider.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package credproviders
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+	"go.mongodb.org/mongo-driver/internal/uuid"
+)
+
+const (
+	// assumeRoleProviderName is the name of the assume role provider
+	assumeRoleProviderName = "AssumeRoleProvider"
+
+	stsURI = `https://sts.amazonaws.com/?Action=AssumeRoleWithWebIdentity&RoleSessionName=%s&RoleArn=%s&WebIdentityToken=%s&Version=2011-06-15`
+)
+
+// An AssumeRoleProvider retrieves credentials for assume role with web identity.
+type AssumeRoleProvider struct {
+	AwsRoleArnEnv              EnvVar
+	AwsWebIdentityTokenFileEnv EnvVar
+	AwsRoleSessionNameEnv      EnvVar
+
+	httpClient *http.Client
+	expiration time.Time
+
+	// expiryWindow allows the credentials to trigger refreshing prior to the credentials actually expiring.
+	// This is beneficial so that expiring credentials do not cause requests to fail unexpectedly.
+	//
+	// So an expiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	expiryWindow time.Duration
+}
+
+// NewAssumeRoleProvider returns a pointer to an assume role provider.
+func NewAssumeRoleProvider(httpClient *http.Client, expiryWindow time.Duration) *AssumeRoleProvider {
+	return &AssumeRoleProvider{
+		// AwsRoleArnEnv is the environment variable for AWS_ROLE_ARN
+		AwsRoleArnEnv: EnvVar("AWS_ROLE_ARN"),
+		// AwsWebIdentityTokenFileEnv is the environment variable for AWS_WEB_IDENTITY_TOKEN_FILE
+		AwsWebIdentityTokenFileEnv: EnvVar("AWS_WEB_IDENTITY_TOKEN_FILE"),
+		// AwsRoleSessionNameEnv is the environment variable for AWS_ROLE_SESSION_NAME
+		AwsRoleSessionNameEnv: EnvVar("AWS_ROLE_SESSION_NAME"),
+		httpClient:            httpClient,
+		expiryWindow:          expiryWindow,
+	}
+}
+
+// RetrieveWithContext retrieves the keys from the AWS service.
+func (a *AssumeRoleProvider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) {
+	const defaultHTTPTimeout = 10 * time.Second
+
+	v := credentials.Value{ProviderName: assumeRoleProviderName}
+
+	roleArn := a.AwsRoleArnEnv.Get()
+	tokenFile := a.AwsWebIdentityTokenFileEnv.Get()
+	if tokenFile == "" && roleArn == "" {
+		return v, errors.New("AWS_WEB_IDENTITY_TOKEN_FILE and AWS_ROLE_ARN are missing")
+	}
+	if tokenFile != "" && roleArn == "" {
+		return v, errors.New("AWS_WEB_IDENTITY_TOKEN_FILE is set, but AWS_ROLE_ARN is missing")
+	}
+	if tokenFile == "" && roleArn != "" {
+		return v, errors.New("AWS_ROLE_ARN is set, but AWS_WEB_IDENTITY_TOKEN_FILE is missing")
+	}
+	token, err := ioutil.ReadFile(tokenFile)
+	if err != nil {
+		return v, err
+	}
+
+	sessionName := a.AwsRoleSessionNameEnv.Get()
+	if sessionName == "" {
+		// Use a UUID if the RoleSessionName is not given.
+		id, err := uuid.New()
+		if err != nil {
+			return v, err
+		}
+		sessionName = id.String()
+	}
+
+	fullURI := fmt.Sprintf(stsURI, sessionName, roleArn, string(token))
+
+	req, err := http.NewRequest(http.MethodPost, fullURI, nil)
+	if err != nil {
+		return v, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout)
+	defer cancel()
+	resp, err := a.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return v, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return v, fmt.Errorf("response failure: %s", resp.Status)
+	}
+
+	var stsResp struct {
+		Response struct {
+			Result struct {
+				Credentials struct {
+					AccessKeyID     string  `json:"AccessKeyId"`
+					SecretAccessKey string  `json:"SecretAccessKey"`
+					Token           string  `json:"SessionToken"`
+					Expiration      float64 `json:"Expiration"`
+				} `json:"Credentials"`
+			} `json:"AssumeRoleWithWebIdentityResult"`
+		} `json:"AssumeRoleWithWebIdentityResponse"`
+	}
+
+	err = json.NewDecoder(resp.Body).Decode(&stsResp)
+	if err != nil {
+		return v, err
+	}
+	v.AccessKeyID = stsResp.Response.Result.Credentials.AccessKeyID
+	v.SecretAccessKey = stsResp.Response.Result.Credentials.SecretAccessKey
+	v.SessionToken = stsResp.Response.Result.Credentials.Token
+	if !v.HasKeys() {
+		return v, errors.New("failed to retrieve web identity keys")
+	}
+	sec := int64(stsResp.Response.Result.Credentials.Expiration)
+	a.expiration = time.Unix(sec, 0).Add(-a.expiryWindow)
+
+	return v, nil
+}
+
+// Retrieve retrieves the keys from the AWS service.
+func (a *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+	return a.RetrieveWithContext(context.Background())
+}
+
+// IsExpired returns true if the credentials are expired.
+func (a *AssumeRoleProvider) IsExpired() bool {
+	return a.expiration.Before(time.Now())
+}
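A minimal usage sketch (illustrative; the package is internal and the call only succeeds when the listed environment variables point at a real web-identity token):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"go.mongodb.org/mongo-driver/internal/credproviders"
)

func main() {
	// Requires AWS_ROLE_ARN and AWS_WEB_IDENTITY_TOKEN_FILE to be set;
	// AWS_ROLE_SESSION_NAME is optional (a UUID is generated otherwise).
	p := credproviders.NewAssumeRoleProvider(http.DefaultClient, 5*time.Minute)

	v, err := p.RetrieveWithContext(context.Background())
	if err != nil {
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println(v.ProviderName, v.SessionToken != "") // AssumeRoleProvider true
}
```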
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ec2_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ec2_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..771bfca1346d0485364a63d0f7b6770a3e647c47
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ec2_provider.go
@@ -0,0 +1,183 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package credproviders
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+)
+
+const (
+	// ec2ProviderName is the name of the EC2 provider
+	ec2ProviderName = "EC2Provider"
+
+	awsEC2URI       = "http://169.254.169.254/"
+	awsEC2RolePath  = "latest/meta-data/iam/security-credentials/"
+	awsEC2TokenPath = "latest/api/token"
+
+	defaultHTTPTimeout = 10 * time.Second
+)
+
+// An EC2Provider retrieves credentials from EC2 metadata.
+type EC2Provider struct {
+	httpClient *http.Client
+	expiration time.Time
+
+	// expiryWindow allows the credentials to trigger refreshing prior to the credentials actually expiring.
+	// This is beneficial so that expiring credentials do not cause requests to fail unexpectedly.
+	//
+	// So an expiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	expiryWindow time.Duration
+}
+
+// NewEC2Provider returns a pointer to an EC2 credential provider.
+func NewEC2Provider(httpClient *http.Client, expiryWindow time.Duration) *EC2Provider {
+	return &EC2Provider{
+		httpClient:   httpClient,
+		expiryWindow: expiryWindow,
+	}
+}
+
+func (e *EC2Provider) getToken(ctx context.Context) (string, error) {
+	req, err := http.NewRequest(http.MethodPut, awsEC2URI+awsEC2TokenPath, nil)
+	if err != nil {
+		return "", err
+	}
+	const defaultEC2TTLSeconds = "30"
+	req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", defaultEC2TTLSeconds)
+
+	ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout)
+	defer cancel()
+	resp, err := e.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("%s %s failed: %s", req.Method, req.URL.String(), resp.Status)
+	}
+
+	token, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	if len(token) == 0 {
+		return "", errors.New("unable to retrieve token from EC2 metadata")
+	}
+	return string(token), nil
+}
+
+func (e *EC2Provider) getRoleName(ctx context.Context, token string) (string, error) {
+	req, err := http.NewRequest(http.MethodGet, awsEC2URI+awsEC2RolePath, nil)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Set("X-aws-ec2-metadata-token", token)
+
+	ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout)
+	defer cancel()
+	resp, err := e.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("%s %s failed: %s", req.Method, req.URL.String(), resp.Status)
+	}
+
+	role, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	if len(role) == 0 {
+		return "", errors.New("unable to retrieve role_name from EC2 metadata")
+	}
+	return string(role), nil
+}
+
+func (e *EC2Provider) getCredentials(ctx context.Context, token string, role string) (credentials.Value, time.Time, error) {
+	v := credentials.Value{ProviderName: ec2ProviderName}
+
+	pathWithRole := awsEC2URI + awsEC2RolePath + role
+	req, err := http.NewRequest(http.MethodGet, pathWithRole, nil)
+	if err != nil {
+		return v, time.Time{}, err
+	}
+	req.Header.Set("X-aws-ec2-metadata-token", token)
+	ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout)
+	defer cancel()
+	resp, err := e.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return v, time.Time{}, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return v, time.Time{}, fmt.Errorf("%s %s failed: %s", req.Method, req.URL.String(), resp.Status)
+	}
+
+	var ec2Resp struct {
+		AccessKeyID     string    `json:"AccessKeyId"`
+		SecretAccessKey string    `json:"SecretAccessKey"`
+		Token           string    `json:"Token"`
+		Expiration      time.Time `json:"Expiration"`
+	}
+
+	err = json.NewDecoder(resp.Body).Decode(&ec2Resp)
+	if err != nil {
+		return v, time.Time{}, err
+	}
+
+	v.AccessKeyID = ec2Resp.AccessKeyID
+	v.SecretAccessKey = ec2Resp.SecretAccessKey
+	v.SessionToken = ec2Resp.Token
+
+	return v, ec2Resp.Expiration, nil
+}
+
+// RetrieveWithContext retrieves the keys from the AWS service.
+func (e *EC2Provider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) {
+	v := credentials.Value{ProviderName: ec2ProviderName}
+
+	token, err := e.getToken(ctx)
+	if err != nil {
+		return v, err
+	}
+
+	role, err := e.getRoleName(ctx, token)
+	if err != nil {
+		return v, err
+	}
+
+	v, exp, err := e.getCredentials(ctx, token, role)
+	if err != nil {
+		return v, err
+	}
+	if !v.HasKeys() {
+		return v, errors.New("failed to retrieve EC2 keys")
+	}
+	e.expiration = exp.Add(-e.expiryWindow)
+
+	return v, nil
+}
+
+// Retrieve retrieves the keys from the AWS service.
+func (e *EC2Provider) Retrieve() (credentials.Value, error) {
+	return e.RetrieveWithContext(context.Background())
+}
+
+// IsExpired returns true if the credentials are expired.
+func (e *EC2Provider) IsExpired() bool {
+	return e.expiration.Before(time.Now())
+}
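Usage mirrors the assume-role provider. A sketch, only meaningful on an EC2 instance where the 169.254.169.254 metadata endpoint is reachable:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"go.mongodb.org/mongo-driver/internal/credproviders"
)

func main() {
	p := credproviders.NewEC2Provider(http.DefaultClient, time.Minute)

	// Internally: PUT the IMDSv2 token, GET the role name, GET the credentials.
	v, err := p.RetrieveWithContext(context.Background())
	fmt.Println(v.ProviderName, v.HasKeys(), err)
}
```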
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ecs_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ecs_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c3a27e626078d401df0cacd30476281aaa0012a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ecs_provider.go
@@ -0,0 +1,112 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package credproviders
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+)
+
+const (
+	// ecsProviderName is the name of the ECS provider
+	ecsProviderName = "ECSProvider"
+
+	awsRelativeURI = "http://169.254.170.2/"
+)
+
+// An ECSProvider retrieves credentials from ECS metadata.
+type ECSProvider struct {
+	AwsContainerCredentialsRelativeURIEnv EnvVar
+
+	httpClient *http.Client
+	expiration time.Time
+
+	// expiryWindow allows the credentials to trigger refreshing prior to the credentials actually expiring.
+	// This is beneficial so that expiring credentials do not cause requests to fail unexpectedly.
+	//
+	// So an expiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	expiryWindow time.Duration
+}
+
+// NewECSProvider returns a pointer to an ECS credential provider.
+func NewECSProvider(httpClient *http.Client, expiryWindow time.Duration) *ECSProvider {
+	return &ECSProvider{
+		// AwsContainerCredentialsRelativeURIEnv is the environment variable for AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
+		AwsContainerCredentialsRelativeURIEnv: EnvVar("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"),
+		httpClient:                            httpClient,
+		expiryWindow:                          expiryWindow,
+	}
+}
+
+// RetrieveWithContext retrieves the keys from the AWS service.
+func (e *ECSProvider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) {
+	const defaultHTTPTimeout = 10 * time.Second
+
+	v := credentials.Value{ProviderName: ecsProviderName}
+
+	relativeEcsURI := e.AwsContainerCredentialsRelativeURIEnv.Get()
+	if len(relativeEcsURI) == 0 {
+		return v, errors.New("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is missing")
+	}
+	fullURI := awsRelativeURI + relativeEcsURI
+
+	req, err := http.NewRequest(http.MethodGet, fullURI, nil)
+	if err != nil {
+		return v, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout)
+	defer cancel()
+	resp, err := e.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return v, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return v, fmt.Errorf("response failure: %s", resp.Status)
+	}
+
+	var ecsResp struct {
+		AccessKeyID     string    `json:"AccessKeyId"`
+		SecretAccessKey string    `json:"SecretAccessKey"`
+		Token           string    `json:"Token"`
+		Expiration      time.Time `json:"Expiration"`
+	}
+
+	err = json.NewDecoder(resp.Body).Decode(&ecsResp)
+	if err != nil {
+		return v, err
+	}
+
+	v.AccessKeyID = ecsResp.AccessKeyID
+	v.SecretAccessKey = ecsResp.SecretAccessKey
+	v.SessionToken = ecsResp.Token
+	if !v.HasKeys() {
+		return v, errors.New("failed to retrieve ECS keys")
+	}
+	e.expiration = ecsResp.Expiration.Add(-e.expiryWindow)
+
+	return v, nil
+}
+
+// Retrieve retrieves the keys from the AWS service.
+func (e *ECSProvider) Retrieve() (credentials.Value, error) {
+	return e.RetrieveWithContext(context.Background())
+}
+
+// IsExpired returns true if the credentials are expired.
+func (e *ECSProvider) IsExpired() bool {
+	return e.expiration.Before(time.Now())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/env_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/env_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..59ca633635d4aa519f3caa777e08c5636791cf0f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/env_provider.go
@@ -0,0 +1,69 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package credproviders
+
+import (
+	"os"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+)
+
+// envProviderName is the name of the Env provider
+const envProviderName = "EnvProvider"
+
+// EnvVar is an environment variable
+type EnvVar string
+
+// Get retrieves the environment variable
+func (ev EnvVar) Get() string {
+	return os.Getenv(string(ev))
+}
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+type EnvProvider struct {
+	AwsAccessKeyIDEnv     EnvVar
+	AwsSecretAccessKeyEnv EnvVar
+	AwsSessionTokenEnv    EnvVar
+
+	retrieved bool
+}
+
+// NewEnvProvider returns a pointer to an environment-variable credential provider.
+func NewEnvProvider() *EnvProvider {
+	return &EnvProvider{
+		// AwsAccessKeyIDEnv is the environment variable for AWS_ACCESS_KEY_ID
+		AwsAccessKeyIDEnv: EnvVar("AWS_ACCESS_KEY_ID"),
+		// AwsSecretAccessKeyEnv is the environment variable for AWS_SECRET_ACCESS_KEY
+		AwsSecretAccessKeyEnv: EnvVar("AWS_SECRET_ACCESS_KEY"),
+		// AwsSessionTokenEnv is the environment variable for AWS_SESSION_TOKEN
+		AwsSessionTokenEnv: EnvVar("AWS_SESSION_TOKEN"),
+	}
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (credentials.Value, error) {
+	e.retrieved = false
+
+	v := credentials.Value{
+		AccessKeyID:     e.AwsAccessKeyIDEnv.Get(),
+		SecretAccessKey: e.AwsSecretAccessKeyEnv.Get(),
+		SessionToken:    e.AwsSessionTokenEnv.Get(),
+		ProviderName:    envProviderName,
+	}
+	err := verify(v)
+	if err == nil {
+		e.retrieved = true
+	}
+
+	return v, err
+}
+
+// IsExpired returns true if the credentials have not been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+	return !e.retrieved
+}
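A sketch of the environment provider with placeholder keys (illustrative only; the package is internal):

```go
package main

import (
	"fmt"
	"os"

	"go.mongodb.org/mongo-driver/internal/credproviders"
)

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "AKIDEXAMPLE")     // placeholder
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secretValue") // placeholder

	p := credproviders.NewEnvProvider()
	v, err := p.Retrieve()
	fmt.Println(v.ProviderName, err, p.IsExpired()) // EnvProvider <nil> false
}
```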
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/imds_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/imds_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..96dad1a829e83749b9993dbd81b5e814e8732236
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/imds_provider.go
@@ -0,0 +1,103 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package credproviders
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+)
+
+const (
+	// AzureProviderName is the name of the Azure provider
+	AzureProviderName = "AzureProvider"
+
+	azureURI = "http://169.254.169.254/metadata/identity/oauth2/token"
+)
+
+// An AzureProvider retrieves credentials from Azure IMDS.
+type AzureProvider struct {
+	httpClient   *http.Client
+	expiration   time.Time
+	expiryWindow time.Duration
+}
+
+// NewAzureProvider returns a pointer to an Azure credential provider.
+func NewAzureProvider(httpClient *http.Client, expiryWindow time.Duration) *AzureProvider {
+	return &AzureProvider{
+		httpClient:   httpClient,
+		expiration:   time.Time{},
+		expiryWindow: expiryWindow,
+	}
+}
+
+// RetrieveWithContext retrieves the keys from the Azure service.
+func (a *AzureProvider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) {
+	v := credentials.Value{ProviderName: AzureProviderName}
+	req, err := http.NewRequest(http.MethodGet, azureURI, nil)
+	if err != nil {
+		return v, fmt.Errorf("unable to retrieve Azure credentials: %w", err)
+	}
+	q := make(url.Values)
+	q.Set("api-version", "2018-02-01")
+	q.Set("resource", "https://vault.azure.net")
+	req.URL.RawQuery = q.Encode()
+	req.Header.Set("Metadata", "true")
+	req.Header.Set("Accept", "application/json")
+
+	resp, err := a.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return v, fmt.Errorf("unable to retrieve Azure credentials: %w", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return v, fmt.Errorf("unable to retrieve Azure credentials: error reading response body: %w", err)
+	}
+	if resp.StatusCode != http.StatusOK {
+		return v, fmt.Errorf("unable to retrieve Azure credentials: expected StatusCode 200, got StatusCode: %v. Response body: %s", resp.StatusCode, body)
+	}
+	var tokenResponse struct {
+		AccessToken string `json:"access_token"`
+		ExpiresIn   string `json:"expires_in"`
+	}
+	// Attempt to read body as JSON
+	err = json.Unmarshal(body, &tokenResponse)
+	if err != nil {
+		return v, fmt.Errorf("unable to retrieve Azure credentials: error reading body JSON: %w (response body: %s)", err, body)
+	}
+	if tokenResponse.AccessToken == "" {
+		return v, fmt.Errorf("unable to retrieve Azure credentials: got unexpected empty accessToken from Azure Metadata Server. Response body: %s", body)
+	}
+	v.SessionToken = tokenResponse.AccessToken
+
+	expiresIn, err := time.ParseDuration(tokenResponse.ExpiresIn + "s")
+	if err != nil {
+		return v, err
+	}
+	if expiration := expiresIn - a.expiryWindow; expiration > 0 {
+		a.expiration = time.Now().Add(expiration)
+	}
+
+	return v, err
+}
+
+// Retrieve retrieves the keys from the Azure service.
+func (a *AzureProvider) Retrieve() (credentials.Value, error) {
+	return a.RetrieveWithContext(context.Background())
+}
+
+// IsExpired returns true if the credentials are expired.
+func (a *AzureProvider) IsExpired() bool {
+	return a.expiration.Before(time.Now())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/static_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/static_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b49613941c17abcdd7523e402b6f2cfa1330027
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/static_provider.go
@@ -0,0 +1,59 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package credproviders
+
+import (
+	"errors"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+)
+
+// staticProviderName is the name of the Static provider
+const staticProviderName = "StaticProvider"
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	credentials.Value
+
+	verified bool
+	err      error
+}
+
+func verify(v credentials.Value) error {
+	if !v.HasKeys() {
+		return errors.New("failed to retrieve ACCESS_KEY_ID and SECRET_ACCESS_KEY")
+	}
+	if v.AccessKeyID != "" && v.SecretAccessKey == "" {
+		return errors.New("ACCESS_KEY_ID is set, but SECRET_ACCESS_KEY is missing")
+	}
+	if v.AccessKeyID == "" && v.SecretAccessKey != "" {
+		return errors.New("SECRET_ACCESS_KEY is set, but ACCESS_KEY_ID is missing")
+	}
+	if v.AccessKeyID == "" && v.SecretAccessKey == "" && v.SessionToken != "" {
+		return errors.New("AWS_SESSION_TOKEN is set, but ACCESS_KEY_ID and SECRET_ACCESS_KEY are missing")
+	}
+	return nil
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (credentials.Value, error) {
+	if !s.verified {
+		s.err = verify(s.Value)
+		s.Value.ProviderName = staticProviderName
+		s.verified = true
+	}
+	return s.Value, s.err
+}
+
+// IsExpired returns whether the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
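A sketch of the static provider: since the credentials are set programmatically and verified once, Retrieve is cheap and IsExpired is always false (placeholder keys; internal package):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/internal/aws/credentials"
	"go.mongodb.org/mongo-driver/internal/credproviders"
)

func main() {
	p := &credproviders.StaticProvider{Value: credentials.Value{
		AccessKeyID:     "AKIDEXAMPLE", // placeholders
		SecretAccessKey: "secretValue",
	}}

	v, err := p.Retrieve()
	fmt.Println(v.ProviderName, err, p.IsExpired()) // StaticProvider <nil> false
}
```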
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go b/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go
new file mode 100644
index 0000000000000000000000000000000000000000..20a6d43a0d64bd495c0af62855add40a78418506
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package csfle
+
+import (
+	"errors"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+const (
+	EncryptedCacheCollection      = "ecc"
+	EncryptedStateCollection      = "esc"
+	EncryptedCompactionCollection = "ecoc"
+)
+
+// GetEncryptedStateCollectionName returns the encrypted state collection name associated with dataCollectionName.
+func GetEncryptedStateCollectionName(efBSON bsoncore.Document, dataCollectionName string, stateCollection string) (string, error) {
+	fieldName := stateCollection + "Collection"
+	val, err := efBSON.LookupErr(fieldName)
+	if err != nil {
+		if !errors.Is(err, bsoncore.ErrElementNotFound) {
+			return "", err
+		}
+		// Return default name.
+		defaultName := "enxcol_." + dataCollectionName + "." + stateCollection
+		return defaultName, nil
+	}
+
+	stateCollectionName, ok := val.StringValueOK()
+	if !ok {
+		return "", fmt.Errorf("expected string for '%v', got: %v", fieldName, val.Type)
+	}
+	return stateCollectionName, nil
+}
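A sketch of the lookup and its fallback (illustrative only; the package is internal to the driver):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/internal/csfle"
	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// encryptedFields document overriding the ESC collection name.
	ef := bsoncore.NewDocumentBuilder().
		AppendString("escCollection", "enxcol_.users.customEsc").
		Build()

	name, _ := csfle.GetEncryptedStateCollectionName(ef, "users", csfle.EncryptedStateCollection)
	fmt.Println(name) // enxcol_.users.customEsc

	// Without the field, the default "enxcol_.<data>.<state>" name is used.
	empty := bsoncore.NewDocumentBuilder().Build()
	name, _ = csfle.GetEncryptedStateCollectionName(empty, "users", csfle.EncryptedStateCollection)
	fmt.Println(name) // enxcol_.users.esc
}
```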
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go b/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go
new file mode 100644
index 0000000000000000000000000000000000000000..43801a5d4f1fdf0a461cab941dba518ba8987217
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package csot
+
+import (
+	"context"
+	"time"
+)
+
+type timeoutKey struct{}
+
+// MakeTimeoutContext returns a new context with Client-Side Operation Timeout (CSOT) feature-gated behavior
+// and a Timeout set to the passed-in Duration. Setting a Timeout on a single
+// operation is not supported in the public API.
+//
+// TODO(GODRIVER-2348): We may be able to remove this function once CSOT
+// feature-gated behavior becomes the default behavior.
+func MakeTimeoutContext(ctx context.Context, to time.Duration) (context.Context, context.CancelFunc) {
+	// Only use the passed in Duration as a timeout on the Context if it
+	// is non-zero and if the Context doesn't already have a timeout.
+	cancelFunc := func() {}
+	if _, deadlineSet := ctx.Deadline(); to != 0 && !deadlineSet {
+		ctx, cancelFunc = context.WithTimeout(ctx, to)
+	}
+
+	// Add timeoutKey either way to indicate CSOT is enabled.
+	return context.WithValue(ctx, timeoutKey{}, true), cancelFunc
+}
+
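+// IsTimeoutContext checks whether ctx was created with CSOT enabled via
+// MakeTimeoutContext.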
+func IsTimeoutContext(ctx context.Context) bool {
+	return ctx.Value(timeoutKey{}) != nil
+}
+
+// ZeroRTTMonitor implements the RTTMonitor interface and is used internally for testing. It returns 0 for all
+// RTT calculations and an empty string for RTT statistics.
+type ZeroRTTMonitor struct{}
+
+// EWMA implements the RTT monitor interface.
+func (zrm *ZeroRTTMonitor) EWMA() time.Duration {
+	return 0
+}
+
+// Min implements the RTT monitor interface.
+func (zrm *ZeroRTTMonitor) Min() time.Duration {
+	return 0
+}
+
+// P90 implements the RTT monitor interface.
+func (zrm *ZeroRTTMonitor) P90() time.Duration {
+	return 0
+}
+
+// Stats implements the RTT monitor interface.
+func (zrm *ZeroRTTMonitor) Stats() string {
+	return ""
+}
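A sketch of the CSOT context helpers (illustrative only; the package is internal):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/internal/csot"
)

func main() {
	ctx, cancel := csot.MakeTimeoutContext(context.Background(), 5*time.Second)
	defer cancel()

	fmt.Println(csot.IsTimeoutContext(ctx)) // true: the CSOT marker was added

	_, ok := ctx.Deadline()
	fmt.Println(ok) // true: no deadline existed before, so a 5s one was applied
}
```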
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/driverutil/hello.go b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/hello.go
new file mode 100644
index 0000000000000000000000000000000000000000..18a70f0cadfc31e1c48b152c8436d7f37c166f7a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/hello.go
@@ -0,0 +1,128 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driverutil
+
+import (
+	"os"
+	"strings"
+)
+
+const AwsLambdaPrefix = "AWS_Lambda_"
+
+const (
+	// FaaS environment variable names
+
+	// EnvVarAWSExecutionEnv is the AWS Execution environment variable.
+	EnvVarAWSExecutionEnv = "AWS_EXECUTION_ENV"
+	// EnvVarAWSLambdaRuntimeAPI is the AWS Lambda runtime API variable.
+	EnvVarAWSLambdaRuntimeAPI = "AWS_LAMBDA_RUNTIME_API"
+	// EnvVarFunctionsWorkerRuntime is the functions worker runtime variable.
+	EnvVarFunctionsWorkerRuntime = "FUNCTIONS_WORKER_RUNTIME"
+	// EnvVarKService is the K Service variable.
+	EnvVarKService = "K_SERVICE"
+	// EnvVarFunctionName is the function name variable.
+	EnvVarFunctionName = "FUNCTION_NAME"
+	// EnvVarVercel is the Vercel variable.
+	EnvVarVercel = "VERCEL"
+	// EnvVarK8s is the K8s variable.
+	EnvVarK8s = "KUBERNETES_SERVICE_HOST"
+)
+
+const (
+	// FaaS environment variable names
+
+	// EnvVarAWSRegion is the AWS region variable.
+	EnvVarAWSRegion = "AWS_REGION"
+	// EnvVarAWSLambdaFunctionMemorySize is the AWS Lambda function memory size variable.
+	EnvVarAWSLambdaFunctionMemorySize = "AWS_LAMBDA_FUNCTION_MEMORY_SIZE"
+	// EnvVarFunctionMemoryMB is the function memory in megabytes variable.
+	EnvVarFunctionMemoryMB = "FUNCTION_MEMORY_MB"
+	// EnvVarFunctionTimeoutSec is the function timeout in seconds variable.
+	EnvVarFunctionTimeoutSec = "FUNCTION_TIMEOUT_SEC"
+	// EnvVarFunctionRegion is the function region variable.
+	EnvVarFunctionRegion = "FUNCTION_REGION"
+	// EnvVarVercelRegion is the Vercel region variable.
+	EnvVarVercelRegion = "VERCEL_REGION"
+)
+
+const (
+	// FaaS environment names used by the client
+
+	// EnvNameAWSLambda is the AWS Lambda environment name.
+	EnvNameAWSLambda = "aws.lambda"
+	// EnvNameAzureFunc is the Azure Function environment name.
+	EnvNameAzureFunc = "azure.func"
+	// EnvNameGCPFunc is the Google Cloud Function environment name.
+	EnvNameGCPFunc = "gcp.func"
+	// EnvNameVercel is the Vercel environment name.
+	EnvNameVercel = "vercel"
+)
+
+// GetFaasEnvName parses the FaaS environment variables and returns the
+// corresponding name used by the client. If none of the variables are
+// populated, or if variables for multiple names are populated, the client.env
+// value MUST be entirely omitted. The one exception: when variables for
+// multiple "client.env.name" values are present, "vercel" takes precedence
+// over "aws.lambda"; any other combination MUST cause "client.env" to be
+// entirely omitted.
+func GetFaasEnvName() string {
+	envVars := []string{
+		EnvVarAWSExecutionEnv,
+		EnvVarAWSLambdaRuntimeAPI,
+		EnvVarFunctionsWorkerRuntime,
+		EnvVarKService,
+		EnvVarFunctionName,
+		EnvVarVercel,
+	}
+
+	// If none of the variables are populated the client.env value MUST be
+	// entirely omitted.
+	names := make(map[string]struct{})
+
+	for _, envVar := range envVars {
+		val := os.Getenv(envVar)
+		if val == "" {
+			continue
+		}
+
+		var name string
+
+		switch envVar {
+		case EnvVarAWSExecutionEnv:
+			if !strings.HasPrefix(val, AwsLambdaPrefix) {
+				continue
+			}
+
+			name = EnvNameAWSLambda
+		case EnvVarAWSLambdaRuntimeAPI:
+			name = EnvNameAWSLambda
+		case EnvVarFunctionsWorkerRuntime:
+			name = EnvNameAzureFunc
+		case EnvVarKService, EnvVarFunctionName:
+			name = EnvNameGCPFunc
+		case EnvVarVercel:
+			// "vercel" takes precedence over "aws.lambda".
+			delete(names, EnvNameAWSLambda)
+
+			name = EnvNameVercel
+		}
+
+		names[name] = struct{}{}
+		if len(names) > 1 {
+			// If multiple names are populated the client.env value
+			// MUST be entirely omitted.
+			names = nil
+
+			break
+		}
+	}
+
+	for name := range names {
+		return name
+	}
+
+	return ""
+}
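The precedence rules are easiest to see with the variables set incrementally. A sketch (internal package, so illustrative only):

```go
package main

import (
	"fmt"
	"os"

	"go.mongodb.org/mongo-driver/internal/driverutil"
)

func main() {
	os.Setenv("AWS_LAMBDA_RUNTIME_API", "127.0.0.1:9001")
	fmt.Println(driverutil.GetFaasEnvName()) // aws.lambda

	os.Setenv("VERCEL", "1")
	fmt.Println(driverutil.GetFaasEnvName()) // vercel ("vercel" wins over "aws.lambda")

	os.Setenv("FUNCTIONS_WORKER_RUNTIME", "go")
	fmt.Println(driverutil.GetFaasEnvName()) // "" (conflicting names, so client.env is omitted)
}
```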
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/driverutil/operation.go b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/operation.go
new file mode 100644
index 0000000000000000000000000000000000000000..32704312ff5fe9818f1b5bfbd2628726cf6e198f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/operation.go
@@ -0,0 +1,31 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driverutil
+
+// Operation Names should be sourced from the command reference documentation:
+// https://www.mongodb.com/docs/manual/reference/command/
+const (
+	AbortTransactionOp  = "abortTransaction"  // AbortTransactionOp is the name for aborting a transaction
+	AggregateOp         = "aggregate"         // AggregateOp is the name for aggregating
+	CommitTransactionOp = "commitTransaction" // CommitTransactionOp is the name for committing a transaction
+	CountOp             = "count"             // CountOp is the name for counting
+	CreateOp            = "create"            // CreateOp is the name for creating
+	CreateIndexesOp     = "createIndexes"     // CreateIndexesOp is the name for creating indexes
+	DeleteOp            = "delete"            // DeleteOp is the name for deleting
+	DistinctOp          = "distinct"          // DistinctOp is the name for distinct
+	DropOp              = "drop"              // DropOp is the name for dropping
+	DropDatabaseOp      = "dropDatabase"      // DropDatabaseOp is the name for dropping a database
+	DropIndexesOp       = "dropIndexes"       // DropIndexesOp is the name for dropping indexes
+	EndSessionsOp       = "endSessions"       // EndSessionsOp is the name for ending sessions
+	FindAndModifyOp     = "findAndModify"     // FindAndModifyOp is the name for finding and modifying
+	FindOp              = "find"              // FindOp is the name for finding
+	InsertOp            = "insert"            // InsertOp is the name for inserting
+	ListCollectionsOp   = "listCollections"   // ListCollectionsOp is the name for listing collections
+	ListIndexesOp       = "listIndexes"       // ListIndexesOp is the name for listing indexes
+	ListDatabasesOp     = "listDatabases"     // ListDatabasesOp is the name for listing databases
+	UpdateOp            = "update"            // UpdateOp is the name for updating
+)
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/handshake/handshake.go b/vendor/go.mongodb.org/mongo-driver/internal/handshake/handshake.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9537d3ef8a3dfa42b55df55f55897918f55a5fc
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/handshake/handshake.go
@@ -0,0 +1,13 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package handshake
+
+// LegacyHello is the legacy version of the hello command.
+var LegacyHello = "isMaster"
+
+// LegacyHelloLowercase is the lowercase, legacy version of the hello command.
+var LegacyHelloLowercase = "ismaster"
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/httputil/httputil.go b/vendor/go.mongodb.org/mongo-driver/internal/httputil/httputil.go
new file mode 100644
index 0000000000000000000000000000000000000000..db0dd5f1279527247b300289b90f2555581ec128
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/httputil/httputil.go
@@ -0,0 +1,30 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package httputil
+
+import (
+	"net/http"
+)
+
+// DefaultHTTPClient is the default HTTP client used across the driver.
+var DefaultHTTPClient = &http.Client{
+	Transport: http.DefaultTransport.(*http.Transport).Clone(),
+}
+
+// CloseIdleHTTPConnections closes any connections that were opened by
+// previous requests but are now sitting idle in a "keep-alive" state. It does
+// not interrupt any connections currently in use.
+//
+// Borrowed from the Go standard library.
+func CloseIdleHTTPConnections(client *http.Client) {
+	type closeIdler interface {
+		CloseIdleConnections()
+	}
+	if tr, ok := client.Transport.(closeIdler); ok {
+		tr.CloseIdleConnections()
+	}
+}
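
Cloning http.DefaultTransport gives each client its own connection pool without mutating the shared default transport, and *http.Transport already satisfies the closeIdler interface above. A self-contained sketch of the same lifecycle using only standard-library calls:

    package main
    
    import "net/http"
    
    func main() {
    	// Same pattern as DefaultHTTPClient: clone the default transport so
    	// per-client settings never leak into http.DefaultTransport.
    	client := &http.Client{
    		Transport: http.DefaultTransport.(*http.Transport).Clone(),
    	}
    
    	if resp, err := client.Get("https://example.com"); err == nil {
    		resp.Body.Close()
    	}
    
    	// Equivalent to CloseIdleHTTPConnections(client): releases idle
    	// keep-alive connections without touching in-flight requests.
    	client.CloseIdleConnections()
    }
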
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/component.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/component.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a3d553208f78a9e1fbdea239c252bf4fc4c322a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/component.go
@@ -0,0 +1,314 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package logger
+
+import (
+	"os"
+	"strconv"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+const (
+	CommandFailed                    = "Command failed"
+	CommandStarted                   = "Command started"
+	CommandSucceeded                 = "Command succeeded"
+	ConnectionPoolCreated            = "Connection pool created"
+	ConnectionPoolReady              = "Connection pool ready"
+	ConnectionPoolCleared            = "Connection pool cleared"
+	ConnectionPoolClosed             = "Connection pool closed"
+	ConnectionCreated                = "Connection created"
+	ConnectionReady                  = "Connection ready"
+	ConnectionClosed                 = "Connection closed"
+	ConnectionCheckoutStarted        = "Connection checkout started"
+	ConnectionCheckoutFailed         = "Connection checkout failed"
+	ConnectionCheckedOut             = "Connection checked out"
+	ConnectionCheckedIn              = "Connection checked in"
+	ServerSelectionFailed            = "Server selection failed"
+	ServerSelectionStarted           = "Server selection started"
+	ServerSelectionSucceeded         = "Server selection succeeded"
+	ServerSelectionWaiting           = "Waiting for suitable server to become available"
+	TopologyClosed                   = "Stopped topology monitoring"
+	TopologyDescriptionChanged       = "Topology description changed"
+	TopologyOpening                  = "Starting topology monitoring"
+	TopologyServerClosed             = "Stopped server monitoring"
+	TopologyServerHeartbeatFailed    = "Server heartbeat failed"
+	TopologyServerHeartbeatStarted   = "Server heartbeat started"
+	TopologyServerHeartbeatSucceeded = "Server heartbeat succeeded"
+	TopologyServerOpening            = "Starting server monitoring"
+)
+
+const (
+	KeyAwaited             = "awaited"
+	KeyCommand             = "command"
+	KeyCommandName         = "commandName"
+	KeyDatabaseName        = "databaseName"
+	KeyDriverConnectionID  = "driverConnectionId"
+	KeyDurationMS          = "durationMS"
+	KeyError               = "error"
+	KeyFailure             = "failure"
+	KeyMaxConnecting       = "maxConnecting"
+	KeyMaxIdleTimeMS       = "maxIdleTimeMS"
+	KeyMaxPoolSize         = "maxPoolSize"
+	KeyMessage             = "message"
+	KeyMinPoolSize         = "minPoolSize"
+	KeyNewDescription      = "newDescription"
+	KeyOperation           = "operation"
+	KeyOperationID         = "operationId"
+	KeyPreviousDescription = "previousDescription"
+	KeyRemainingTimeMS     = "remainingTimeMS"
+	KeyReason              = "reason"
+	KeyReply               = "reply"
+	KeyRequestID           = "requestId"
+	KeySelector            = "selector"
+	KeyServerConnectionID  = "serverConnectionId"
+	KeyServerHost          = "serverHost"
+	KeyServerPort          = "serverPort"
+	KeyServiceID           = "serviceId"
+	KeyTimestamp           = "timestamp"
+	KeyTopologyDescription = "topologyDescription"
+	KeyTopologyID          = "topologyId"
+)
+
+// KeyValues is a list of key-value pairs.
+type KeyValues []interface{}
+
+// Add adds a key-value pair to an instance of a KeyValues list.
+func (kvs *KeyValues) Add(key string, value interface{}) {
+	*kvs = append(*kvs, key, value)
+}
+
+const (
+	ReasonConnClosedStale              = "Connection became stale because the pool was cleared"
+	ReasonConnClosedIdle               = "Connection has been available but unused for longer than the configured max idle time"
+	ReasonConnClosedError              = "An error occurred while using the connection"
+	ReasonConnClosedPoolClosed         = "Connection pool was closed"
+	ReasonConnCheckoutFailedTimout     = "Wait queue timeout elapsed without a connection becoming available"
+	ReasonConnCheckoutFailedError      = "An error occurred while trying to establish a new connection"
+	ReasonConnCheckoutFailedPoolClosed = "Connection pool was closed"
+)
+
+// Component is an enumeration representing the "components" which can be
+// logged against. A LogLevel can be configured on a per-component basis.
+type Component int
+
+const (
+	// ComponentAll enables logging for all components.
+	ComponentAll Component = iota
+
+	// ComponentCommand enables command monitor logging.
+	ComponentCommand
+
+	// ComponentTopology enables topology logging.
+	ComponentTopology
+
+	// ComponentServerSelection enables server selection logging.
+	ComponentServerSelection
+
+	// ComponentConnection enables connection services logging.
+	ComponentConnection
+)
+
+const (
+	mongoDBLogAllEnvVar             = "MONGODB_LOG_ALL"
+	mongoDBLogCommandEnvVar         = "MONGODB_LOG_COMMAND"
+	mongoDBLogTopologyEnvVar        = "MONGODB_LOG_TOPOLOGY"
+	mongoDBLogServerSelectionEnvVar = "MONGODB_LOG_SERVER_SELECTION"
+	mongoDBLogConnectionEnvVar      = "MONGODB_LOG_CONNECTION"
+)
+
+var componentEnvVarMap = map[string]Component{
+	mongoDBLogAllEnvVar:             ComponentAll,
+	mongoDBLogCommandEnvVar:         ComponentCommand,
+	mongoDBLogTopologyEnvVar:        ComponentTopology,
+	mongoDBLogServerSelectionEnvVar: ComponentServerSelection,
+	mongoDBLogConnectionEnvVar:      ComponentConnection,
+}
+
+// EnvHasComponentVariables returns true if the environment contains any of the
+// component environment variables.
+func EnvHasComponentVariables() bool {
+	for envVar := range componentEnvVarMap {
+		if os.Getenv(envVar) != "" {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Command is a struct defining common fields that must be included in all
+// commands.
+type Command struct {
+	// TODO(GODRIVER-2824): change the DriverConnectionID type to int64.
+	DriverConnectionID uint64              // Driver's ID for the connection
+	Name               string              // Command name
+	DatabaseName       string              // Database name
+	Message            string              // Message associated with the command
+	OperationID        int32               // Driver-generated operation ID
+	RequestID          int64               // Driver-generated request ID
+	ServerConnectionID *int64              // Server's ID for the connection used for the command
+	ServerHost         string              // Hostname or IP address for the server
+	ServerPort         string              // Port for the server
+	ServiceID          *primitive.ObjectID // ID for the command in load balancer mode
+}
+
+// SerializeCommand takes a command and a variable number of key-value pairs and
+// returns a slice of interface{} that can be passed to the logger for
+// structured logging.
+func SerializeCommand(cmd Command, extraKeysAndValues ...interface{}) KeyValues {
+	// Initialize the boilerplate keys and values.
+	keysAndValues := KeyValues{
+		KeyCommandName, cmd.Name,
+		KeyDatabaseName, cmd.DatabaseName,
+		KeyDriverConnectionID, cmd.DriverConnectionID,
+		KeyMessage, cmd.Message,
+		KeyOperationID, cmd.OperationID,
+		KeyRequestID, cmd.RequestID,
+		KeyServerHost, cmd.ServerHost,
+	}
+
+	// Add the extra keys and values.
+	for i := 0; i < len(extraKeysAndValues); i += 2 {
+		keysAndValues.Add(extraKeysAndValues[i].(string), extraKeysAndValues[i+1])
+	}
+
+	port, err := strconv.ParseInt(cmd.ServerPort, 10, 32)
+	if err == nil {
+		keysAndValues.Add(KeyServerPort, port)
+	}
+
+	// Add the "serverConnectionId" if it is not nil.
+	if cmd.ServerConnectionID != nil {
+		keysAndValues.Add(KeyServerConnectionID, *cmd.ServerConnectionID)
+	}
+
+	// Add the "serviceId" if it is not nil.
+	if cmd.ServiceID != nil {
+		keysAndValues.Add(KeyServiceID, cmd.ServiceID.Hex())
+	}
+
+	return keysAndValues
+}
+
+// Connection contains data that all connection log messages MUST contain.
+type Connection struct {
+	Message    string // Message associated with the connection
+	ServerHost string // Hostname or IP address for the server
+	ServerPort string // Port for the server
+}
+
+// SerializeConnection serializes a Connection message into a slice of keys and
+// values that can be passed to a logger.
+func SerializeConnection(conn Connection, extraKeysAndValues ...interface{}) KeyValues {
+	// Initialize the boilerplate keys and values.
+	keysAndValues := KeyValues{
+		KeyMessage, conn.Message,
+		KeyServerHost, conn.ServerHost,
+	}
+
+	// Add the optional keys and values.
+	for i := 0; i < len(extraKeysAndValues); i += 2 {
+		keysAndValues.Add(extraKeysAndValues[i].(string), extraKeysAndValues[i+1])
+	}
+
+	port, err := strconv.ParseInt(conn.ServerPort, 10, 32)
+	if err == nil {
+		keysAndValues.Add(KeyServerPort, port)
+	}
+
+	return keysAndValues
+}
+
+// Server contains data that all server messages MAY contain.
+type Server struct {
+	DriverConnectionID uint64             // Driver's ID for the connection
+	TopologyID         primitive.ObjectID // Driver's unique ID for this topology
+	Message            string             // Message associated with the topology
+	ServerConnectionID *int64             // Server's ID for the connection
+	ServerHost         string             // Hostname or IP address for the server
+	ServerPort         string             // Port for the server
+}
+
+// SerializeServer serializes a Server message into a slice of keys and
+// values that can be passed to a logger.
+func SerializeServer(srv Server, extraKV ...interface{}) KeyValues {
+	// Initialize the boilerplate keys and values.
+	keysAndValues := KeyValues{
+		KeyDriverConnectionID, srv.DriverConnectionID,
+		KeyMessage, srv.Message,
+		KeyServerHost, srv.ServerHost,
+		KeyTopologyID, srv.TopologyID.Hex(),
+	}
+
+	if connID := srv.ServerConnectionID; connID != nil {
+		keysAndValues.Add(KeyServerConnectionID, *connID)
+	}
+
+	port, err := strconv.ParseInt(srv.ServerPort, 10, 32)
+	if err == nil {
+		keysAndValues.Add(KeyServerPort, port)
+	}
+
+	// Add the optional keys and values.
+	for i := 0; i < len(extraKV); i += 2 {
+		keysAndValues.Add(extraKV[i].(string), extraKV[i+1])
+	}
+
+	return keysAndValues
+}
+
+// ServerSelection contains data that all server selection messages MUST
+// contain.
+type ServerSelection struct {
+	Selector            string
+	OperationID         *int32
+	Operation           string
+	TopologyDescription string
+}
+
+// SerializeServerSelection serializes a ServerSelection message into a slice
+// of keys and values that can be passed to a logger.
+func SerializeServerSelection(srvSelection ServerSelection, extraKV ...interface{}) KeyValues {
+	keysAndValues := KeyValues{
+		KeySelector, srvSelection.Selector,
+		KeyOperation, srvSelection.Operation,
+		KeyTopologyDescription, srvSelection.TopologyDescription,
+	}
+
+	if srvSelection.OperationID != nil {
+		keysAndValues.Add(KeyOperationID, *srvSelection.OperationID)
+	}
+
+	// Add the optional keys and values.
+	for i := 0; i < len(extraKV); i += 2 {
+		keysAndValues.Add(extraKV[i].(string), extraKV[i+1])
+	}
+
+	return keysAndValues
+}
+
+// Topology contains data that all topology messages MAY contain.
+type Topology struct {
+	ID      primitive.ObjectID // Driver's unique ID for this topology
+	Message string             // Message associated with the topology
+}
+
+// SerializeTopology serializes a Topology message into a slice of keys and
+// values that can be passed to a logger.
+func SerializeTopology(topo Topology, extraKV ...interface{}) KeyValues {
+	keysAndValues := KeyValues{
+		KeyTopologyID, topo.ID.Hex(),
+	}
+
+	// Add the optional keys and values.
+	for i := 0; i < len(extraKV); i += 2 {
+		keysAndValues.Add(extraKV[i].(string), extraKV[i+1])
+	}
+
+	return keysAndValues
+}
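
Each Serialize* helper emits the same flat shape: boilerplate keys first, then the caller-supplied pairs, then the conditionally parsed or optional fields. A sketch of serializing a command-started message from inside the driver (all values are placeholders):

    kv := logger.SerializeCommand(logger.Command{
    	Name:               "find",
    	DatabaseName:       "test",
    	DriverConnectionID: 1,
    	Message:            logger.CommandStarted,
    	OperationID:        1,
    	RequestID:          42,
    	ServerHost:         "localhost",
    	ServerPort:         "27017",
    }, logger.KeyCommand, `{"find": "coll", "$db": "test"}`)
    
    // kv alternates keys and values:
    // ["commandName", "find", "databaseName", "test", ..., "serverPort", 27017]
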
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/context.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..785f141c41409d9fc6fe926a2c67a424b5ca6f01
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/context.go
@@ -0,0 +1,48 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package logger
+
+import "context"
+
+// contextKey is a custom type used to prevent key collisions when using the
+// context package.
+type contextKey string
+
+const (
+	contextKeyOperation   contextKey = "operation"
+	contextKeyOperationID contextKey = "operationID"
+)
+
+// WithOperationName adds the operation name to the context.
+func WithOperationName(ctx context.Context, operation string) context.Context {
+	return context.WithValue(ctx, contextKeyOperation, operation)
+}
+
+// WithOperationID adds the operation ID to the context.
+func WithOperationID(ctx context.Context, operationID int32) context.Context {
+	return context.WithValue(ctx, contextKeyOperationID, operationID)
+}
+
+// OperationName returns the operation name from the context.
+func OperationName(ctx context.Context) (string, bool) {
+	operationName := ctx.Value(contextKeyOperation)
+	if operationName == nil {
+		return "", false
+	}
+
+	return operationName.(string), true
+}
+
+// OperationID returns the operation ID from the context.
+func OperationID(ctx context.Context) (int32, bool) {
+	operationID := ctx.Value(contextKeyOperationID)
+	if operationID == nil {
+		return 0, false
+	}
+
+	return operationID.(int32), true
+}
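
The two getters mirror the two setters, so an operation's name and ID survive a round trip through a request context. A minimal sketch (context and fmt imports assumed):

    ctx := logger.WithOperationName(context.Background(), "insert")
    ctx = logger.WithOperationID(ctx, 7)
    
    if op, ok := logger.OperationName(ctx); ok {
    	fmt.Println(op) // insert
    }
    if id, ok := logger.OperationID(ctx); ok {
    	fmt.Println(id) // 7
    }
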
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a6c1bdcabf7681dac30896be66869f911a0899a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package logger
+
+import (
+	"encoding/json"
+	"io"
+	"math"
+	"sync"
+	"time"
+)
+
+// IOSink writes a JSON-encoded message to the io.Writer.
+type IOSink struct {
+	enc *json.Encoder
+
+	// encMu protects the encoder from concurrent writes. While the logger
+	// itself does not concurrently write to the sink, the sink may be used
+	// concurrently within the driver.
+	encMu sync.Mutex
+}
+
+// Compile-time check to ensure IOSink implements the LogSink interface.
+var _ LogSink = &IOSink{}
+
+// NewIOSink will create an IOSink object that writes JSON messages to the
+// provided io.Writer.
+func NewIOSink(out io.Writer) *IOSink {
+	return &IOSink{
+		enc: json.NewEncoder(out),
+	}
+}
+
+// Info will write a JSON-encoded message to the io.Writer.
+func (sink *IOSink) Info(_ int, msg string, keysAndValues ...interface{}) {
+	// Size the map for the key/value pairs plus the timestamp and message
+	// entries, guarding the +2 against integer overflow.
+	mapSize := len(keysAndValues) / 2
+	if math.MaxInt-mapSize >= 2 {
+		mapSize += 2
+	}
+	kvMap := make(map[string]interface{}, mapSize)
+
+	kvMap[KeyTimestamp] = time.Now().UnixNano()
+	kvMap[KeyMessage] = msg
+
+	for i := 0; i < len(keysAndValues); i += 2 {
+		kvMap[keysAndValues[i].(string)] = keysAndValues[i+1]
+	}
+
+	sink.encMu.Lock()
+	defer sink.encMu.Unlock()
+
+	_ = sink.enc.Encode(kvMap)
+}
+
+// Error will write a JSON-encoded error message to the io.Writer.
+func (sink *IOSink) Error(err error, msg string, kv ...interface{}) {
+	kv = append(kv, KeyError, err.Error())
+	sink.Info(0, msg, kv...)
+}
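
Every Info call produces exactly one JSON document, with the timestamp and message injected before the caller's pairs. A sketch of feeding the sink directly (encoding/json sorts map keys, so the output order is alphabetical):

    sink := logger.NewIOSink(os.Stdout)
    sink.Info(0, logger.CommandStarted,
    	logger.KeyCommandName, "find",
    	logger.KeyDatabaseName, "test",
    )
    // {"commandName":"find","databaseName":"test","message":"Command started","timestamp":...}
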
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/level.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/level.go
new file mode 100644
index 0000000000000000000000000000000000000000..07f85b35d7610556f6693f0b5ecf6524d6f9b95b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/level.go
@@ -0,0 +1,74 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package logger
+
+import "strings"
+
+// DiffToInfo is the number of levels in the Go Driver that come before the
+// "Info" level. This should ensure that "Info" is the 0th level passed to the
+// sink.
+const DiffToInfo = 1
+
+// Level is an enumeration representing the log severity levels supported by
+// the driver. The order of the logging levels is important. The driver expects
+// that a user will likely use the "logr" package to create a LogSink, which
+// defaults InfoLevel to 0. Any additions to the Level enumeration before
+// InfoLevel will need to also update the "DiffToInfo" constant.
+type Level int
+
+const (
+	// LevelOff suppresses logging.
+	LevelOff Level = iota
+
+	// LevelInfo enables logging of informational messages. These logs are
+	// high-level information about normal driver behavior.
+	LevelInfo
+
+	// LevelDebug enables logging of debug messages. These logs can be
+	// voluminous and are intended for detailed information that may be
+	// helpful when debugging an application.
+	LevelDebug
+)
+
+const (
+	levelLiteralOff       = "off"
+	levelLiteralEmergency = "emergency"
+	levelLiteralAlert     = "alert"
+	levelLiteralCritical  = "critical"
+	levelLiteralError     = "error"
+	levelLiteralWarning   = "warning"
+	levelLiteralNotice    = "notice"
+	levelLiteralInfo      = "info"
+	levelLiteralDebug     = "debug"
+	levelLiteralTrace     = "trace"
+)
+
+// LevelLiteralMap maps environment-variable level literals to the driver's
+// internal Level values.
+var LevelLiteralMap = map[string]Level{
+	levelLiteralOff:       LevelOff,
+	levelLiteralEmergency: LevelInfo,
+	levelLiteralAlert:     LevelInfo,
+	levelLiteralCritical:  LevelInfo,
+	levelLiteralError:     LevelInfo,
+	levelLiteralWarning:   LevelInfo,
+	levelLiteralNotice:    LevelInfo,
+	levelLiteralInfo:      LevelInfo,
+	levelLiteralDebug:     LevelDebug,
+	levelLiteralTrace:     LevelDebug,
+}
+
+// ParseLevel will check if the given string is a valid environment variable
+// for a logging severity level. If it is, then it will return the associated
+// driver's Level. The default Level is "LevelOff".
+func ParseLevel(str string) Level {
+	for literal, level := range LevelLiteralMap {
+		if strings.EqualFold(literal, str) {
+			return level
+		}
+	}
+
+	return LevelOff
+}
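
Because matching is case-insensitive and every syslog-style literal from emergency through info maps to LevelInfo, the parser collapses unfamiliar granularity rather than rejecting it. A few illustrative calls:

    logger.ParseLevel("debug")   // LevelDebug
    logger.ParseLevel("TRACE")   // LevelDebug (matching is case-insensitive)
    logger.ParseLevel("warning") // LevelInfo  (syslog levels collapse to Info)
    logger.ParseLevel("nope")    // LevelOff   (unknown input disables logging)
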
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..2250286e4a0b42e91e8a18122f84e38cfba91386
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go
@@ -0,0 +1,275 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package logger provides the internal logging solution for the MongoDB Go
+// Driver.
+package logger
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// DefaultMaxDocumentLength is the default maximum number of bytes that can be
+// logged for a stringified BSON document.
+const DefaultMaxDocumentLength = 1000
+
+// TruncationSuffix is the trailing ellipsis "..." appended to a message to
+// indicate to the user that truncation occurred. This suffix does not count
+// toward the max document length.
+const TruncationSuffix = "..."
+
+const logSinkPathEnvVar = "MONGODB_LOG_PATH"
+const maxDocumentLengthEnvVar = "MONGODB_LOG_MAX_DOCUMENT_LENGTH"
+
+// LogSink represents a logging implementation; this interface should be 1-1
+// with the exported "LogSink" interface in the mongo/options package.
+type LogSink interface {
+	// Info logs a non-error message with the given key/value pairs. The
+	// level argument is provided for optional logging.
+	Info(level int, msg string, keysAndValues ...interface{})
+
+	// Error logs an error, with the given message and key/value pairs.
+	Error(err error, msg string, keysAndValues ...interface{})
+}
+
+// Logger represents the configuration for the internal logger.
+type Logger struct {
+	ComponentLevels   map[Component]Level // Log levels for each component.
+	Sink              LogSink             // LogSink for log printing.
+	MaxDocumentLength uint                // Command truncation width.
+	logFile           *os.File            // File to write logs to.
+}
+
+// New will construct a new logger. If any of the given options are the
+// zero-value of the argument type, then the constructor will attempt to
+// source the data from the environment. If the environment has not been set,
+// then the constructor will fall back to the respective default values.
+func New(sink LogSink, maxDocLen uint, compLevels map[Component]Level) (*Logger, error) {
+	logger := &Logger{
+		ComponentLevels:   selectComponentLevels(compLevels),
+		MaxDocumentLength: selectMaxDocumentLength(maxDocLen),
+	}
+
+	sink, logFile, err := selectLogSink(sink)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Sink = sink
+	logger.logFile = logFile
+
+	return logger, nil
+}
+
+// Close will close the logger's log file, if it exists.
+func (logger *Logger) Close() error {
+	if logger.logFile != nil {
+		return logger.logFile.Close()
+	}
+
+	return nil
+}
+
+// LevelComponentEnabled will return true if the given LogLevel is enabled for
+// the given LogComponent. If the ComponentLevels on the logger are enabled for
+// "ComponentAll", then this function will return true for any level bound by
+// the level assigned to "ComponentAll".
+//
+// If the level is not enabled (i.e. LevelOff), then false is returned. This
+// avoids false positives: without this check, a component missing from
+// ComponentLevels (and therefore at the zero value, LevelOff) would compare
+// as "enabled" whenever the requested level is also LevelOff.
+func (logger *Logger) LevelComponentEnabled(level Level, component Component) bool {
+	if level == LevelOff {
+		return false
+	}
+
+	if logger.ComponentLevels == nil {
+		return false
+	}
+
+	return logger.ComponentLevels[component] >= level ||
+		logger.ComponentLevels[ComponentAll] >= level
+}
+
+// Print will synchronously print the given message to the configured LogSink.
+// If the LogSink is nil, then this method will do nothing. Future work could
+// be done to make this method asynchronous; see buffer management in
+// libraries such as log4j.
+//
+// It's worth noting that many structured logs defined by DBX-wide
+// specifications include a "message" field, which is often shared with the
+// message arguments passed to this print function. The "Info" method used by
+// this function is implemented based on the go-logr/logr LogSink interface,
+// which is why "Print" has a message parameter. Any duplication in code is
+// intentional to adhere to the logr pattern.
+func (logger *Logger) Print(level Level, component Component, msg string, keysAndValues ...interface{}) {
+	// If the level is not enabled for the component, then
+	// skip the message.
+	if !logger.LevelComponentEnabled(level, component) {
+		return
+	}
+
+	// If the sink is nil, then skip the message.
+	if logger.Sink == nil {
+		return
+	}
+
+	logger.Sink.Info(int(level)-DiffToInfo, msg, keysAndValues...)
+}
+
+// Error logs an error, with the given message and key/value pairs.
+// It functions similarly to Print, but may have unique behavior, and should be
+// preferred for logging errors.
+func (logger *Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+	if logger.Sink == nil {
+		return
+	}
+
+	logger.Sink.Error(err, msg, keysAndValues...)
+}
+
+// selectMaxDocumentLength will return the first non-zero max document length,
+// with the user-defined value taking priority over the environment variable.
+// For the environment, the function will attempt to get the value of
+// "MONGODB_LOG_MAX_DOCUMENT_LENGTH" and parse it as an unsigned integer. If the
+// environment variable is not set or is not an unsigned integer, then this
+// function will return the default max document length.
+func selectMaxDocumentLength(maxDocLen uint) uint {
+	if maxDocLen != 0 {
+		return maxDocLen
+	}
+
+	maxDocLenEnv := os.Getenv(maxDocumentLengthEnvVar)
+	if maxDocLenEnv != "" {
+		maxDocLenEnvInt, err := strconv.ParseUint(maxDocLenEnv, 10, 32)
+		if err == nil {
+			return uint(maxDocLenEnvInt)
+		}
+	}
+
+	return DefaultMaxDocumentLength
+}
+
+const (
+	logSinkPathStdout = "stdout"
+	logSinkPathStderr = "stderr"
+)
+
+// selectLogSink will return the first non-nil LogSink, with the user-defined
+// LogSink taking precedence over the environment-defined LogSink. If no LogSink
+// is defined, then this function will return a LogSink that writes to stderr.
+func selectLogSink(sink LogSink) (LogSink, *os.File, error) {
+	if sink != nil {
+		return sink, nil, nil
+	}
+
+	path := os.Getenv(logSinkPathEnvVar)
+	lowerPath := strings.ToLower(path)
+
+	if lowerPath == logSinkPathStderr {
+		return NewIOSink(os.Stderr), nil, nil
+	}
+
+	if lowerPath == logSinkPathStdout {
+		return NewIOSink(os.Stdout), nil, nil
+	}
+
+	if path != "" {
+		logFile, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
+		if err != nil {
+			return nil, nil, fmt.Errorf("unable to open log file: %w", err)
+		}
+
+		return NewIOSink(logFile), logFile, nil
+	}
+
+	return NewIOSink(os.Stderr), nil, nil
+}
+
+// selectComponentLevels returns a new map of LogComponents to LogLevels that is
+// the result of merging the user-defined data with the environment, with the
+// user-defined data taking priority.
+func selectComponentLevels(componentLevels map[Component]Level) map[Component]Level {
+	selected := make(map[Component]Level)
+
+	// Determine if the "MONGODB_LOG_ALL" environment variable is set.
+	var globalEnvLevel *Level
+	if all := os.Getenv(mongoDBLogAllEnvVar); all != "" {
+		level := ParseLevel(all)
+		globalEnvLevel = &level
+	}
+
+	for envVar, component := range componentEnvVarMap {
+		// If the user already supplied a level for the component, use it
+		// and skip the environment lookup.
+		if _, ok := componentLevels[component]; ok {
+			selected[component] = componentLevels[component]
+
+			continue
+		}
+
+		// If the "MONGODB_LOG_ALL" environment variable is set, then
+		// set the level for the component to the value of the
+		// environment variable.
+		if globalEnvLevel != nil {
+			selected[component] = *globalEnvLevel
+
+			continue
+		}
+
+		// Otherwise, set the level for the component to the value of
+		// the environment variable.
+		selected[component] = ParseLevel(os.Getenv(envVar))
+	}
+
+	return selected
+}
+
+// truncate will truncate a string to the given width, appending "..." to the
+// end of the string if it is truncated. This routine is safe for multi-byte
+// characters.
+func truncate(str string, width uint) string {
+	if width == 0 {
+		return ""
+	}
+
+	if len(str) <= int(width) {
+		return str
+	}
+
+	// Truncate the byte slice of the string to the given width.
+	newStr := str[:width]
+
+	// Check if the last byte is at the beginning of a multi-byte character.
+	// If it is, then remove the last byte.
+	if newStr[len(newStr)-1]&0xC0 == 0xC0 {
+		return newStr[:len(newStr)-1] + TruncationSuffix
+	}
+
+	// Check if the last byte is in the middle of a multi-byte character. If
+	// it is, then step back until we find the beginning of the character.
+	if newStr[len(newStr)-1]&0xC0 == 0x80 {
+		for i := len(newStr) - 1; i >= 0; i-- {
+			if newStr[i]&0xC0 == 0xC0 {
+				return newStr[:i] + TruncationSuffix
+			}
+		}
+	}
+
+	return newStr + TruncationSuffix
+}
+
+// FormatMessage formats a BSON document for logging. The document is truncated
+// to the given width.
+func FormatMessage(msg string, width uint) string {
+	if len(msg) == 0 {
+		return "{}"
+	}
+
+	return truncate(msg, width)
+}
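
Putting the pieces together: a sketch of constructing a logger with one component enabled and printing through it (assuming no MONGODB_LOG_* environment variables are set, so unlisted components default to LevelOff; the panic is only for brevity):

    log, err := logger.New(logger.NewIOSink(os.Stderr), 0,
    	map[logger.Component]logger.Level{
    		logger.ComponentCommand: logger.LevelDebug,
    	})
    if err != nil {
    	panic(err)
    }
    defer log.Close()
    
    // Printed: the command component is enabled at debug.
    log.Print(logger.LevelDebug, logger.ComponentCommand,
    	logger.CommandStarted, logger.KeyCommandName, "find")
    
    // Dropped: topology logging was never enabled.
    log.Print(logger.LevelDebug, logger.ComponentTopology, logger.TopologyOpening)
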
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/ptrutil/int64.go b/vendor/go.mongodb.org/mongo-driver/internal/ptrutil/int64.go
new file mode 100644
index 0000000000000000000000000000000000000000..1c3ab57efa6a27b869c537cd5c986b44a316a62e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/ptrutil/int64.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package ptrutil
+
+// CompareInt64 is a piecewise function with the following return conditions:
+//
+// (1)  2, ptr1 != nil AND ptr2 == nil
+// (2)  1, *ptr1 > *ptr2
+// (3)  0, ptr1 == ptr2 or *ptr1 == *ptr2
+// (4) -1, *ptr1 < *ptr2
+// (5) -2, ptr1 == nil AND ptr2 != nil
+func CompareInt64(ptr1, ptr2 *int64) int {
+	if ptr1 == ptr2 {
+		// This will catch the double nil or same-pointer cases.
+		return 0
+	}
+
+	if ptr1 == nil && ptr2 != nil {
+		return -2
+	}
+
+	if ptr1 != nil && ptr2 == nil {
+		return 2
+	}
+
+	if *ptr1 > *ptr2 {
+		return 1
+	}
+
+	if *ptr1 < *ptr2 {
+		return -1
+	}
+
+	return 0
+}
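
The magnitude of the return value encodes how the comparison was decided: ±1 for value order, ±2 for nil-ness. Worked examples:

    a, b := int64(3), int64(5)
    
    ptrutil.CompareInt64(&a, &b)  // -1: *ptr1 < *ptr2
    ptrutil.CompareInt64(&b, &a)  //  1: *ptr1 > *ptr2
    ptrutil.CompareInt64(&a, &a)  //  0: same pointer
    ptrutil.CompareInt64(&a, nil) //  2: only ptr2 is nil
    ptrutil.CompareInt64(nil, &b) // -2: only ptr1 is nil
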
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/rand/bits.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/bits.go
new file mode 100644
index 0000000000000000000000000000000000000000..44790091447acbfb93760d0575faf14c688759a3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/rand/bits.go
@@ -0,0 +1,38 @@
+// Copied from https://cs.opensource.google/go/go/+/946b4baaf6521d521928500b2b57429c149854e7:src/math/bits.go
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry.
+// The carry input must be 0 or 1; otherwise the behavior is undefined.
+// The carryOut output is guaranteed to be 0 or 1.
+func Add64(x, y, carry uint64) (sum, carryOut uint64) {
+	yc := y + carry
+	sum = x + yc
+	if sum < x || yc < y {
+		carryOut = 1
+	}
+	return
+}
+
+// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y
+// with the product bits' upper half returned in hi and the lower
+// half returned in lo.
+func Mul64(x, y uint64) (hi, lo uint64) {
+	const mask32 = 1<<32 - 1
+	x0 := x & mask32
+	x1 := x >> 32
+	y0 := y & mask32
+	y1 := y >> 32
+	w0 := x0 * y0
+	t := x1*y0 + w0>>32
+	w1 := t & mask32
+	w2 := t >> 32
+	w1 += x0 * y1
+	hi = x1*y1 + w2 + w1>>32
+	lo = x * y
+	return
+}
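
These implement the same algorithms as math/bits.Mul64 and math/bits.Add64 (available since Go 1.12), so the standard-library versions can serve as a cross-check. Two worked cases (fmt and math imports assumed):

    hi, lo := rand.Mul64(1<<63, 4) // (1<<63) * 4 == 1<<65
    fmt.Println(hi, lo)            // 2 0
    
    sum, carry := rand.Add64(math.MaxUint64, 1, 0) // 64-bit sum wraps
    fmt.Println(sum, carry)                        // 0 1
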
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/rand/exp.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/exp.go
new file mode 100644
index 0000000000000000000000000000000000000000..859e4e0e4273efc073f3ae84a4f1e51fd8ff1fd5
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/rand/exp.go
@@ -0,0 +1,223 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/exp.go
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"math"
+)
+
+/*
+ * Exponential distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * http://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+	re = 7.69711747013104972
+)
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1).
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+//	sample = ExpFloat64() / desiredRateParameter
+func (r *Rand) ExpFloat64() float64 {
+	for {
+		j := r.Uint32()
+		i := j & 0xFF
+		x := float64(j) * float64(we[i])
+		if j < ke[i] {
+			return x
+		}
+		if i == 0 {
+			return re - math.Log(r.Float64())
+		}
+		if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) {
+			return x
+		}
+	}
+}
+
+var ke = [256]uint32{
+	0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
+	0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
+	0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78,
+	0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651,
+	0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca,
+	0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8,
+	0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea,
+	0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba,
+	0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed,
+	0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662,
+	0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3,
+	0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace,
+	0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6,
+	0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7,
+	0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415,
+	0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4,
+	0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36,
+	0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46,
+	0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac,
+	0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245,
+	0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52,
+	0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06,
+	0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0,
+	0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9,
+	0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76,
+	0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516,
+	0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289,
+	0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed,
+	0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb,
+	0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e,
+	0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a,
+	0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1,
+	0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b,
+	0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621,
+	0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d,
+	0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3,
+	0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73,
+	0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88,
+	0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a,
+	0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb,
+	0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176,
+	0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be,
+	0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192,
+	0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed,
+	0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936,
+	0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b,
+	0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4,
+	0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1,
+	0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482,
+	0xfa839276, 0xfa263b32, 0xf9b72d1c, 0xf930a1a2, 0xf889f023,
+	0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
+	0xe6da6ecf,
+}
+var we = [256]float32{
+	2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
+	3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
+	5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11,
+	7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11,
+	9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10,
+	1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10,
+	1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10,
+	1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10,
+	1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10,
+	1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10,
+	1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10,
+	1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10,
+	1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10,
+	1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10,
+	2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10,
+	2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10,
+	2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10,
+	2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10,
+	2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10,
+	2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10,
+	2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10,
+	2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10,
+	2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10,
+	2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10,
+	3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10,
+	3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10,
+	3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10,
+	3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10,
+	3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10,
+	3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10,
+	3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10,
+	3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10,
+	3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10,
+	4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10,
+	4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10,
+	4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10,
+	4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10,
+	4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10,
+	4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10,
+	4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10,
+	4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10,
+	5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10,
+	5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10,
+	5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10,
+	5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10,
+	5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10,
+	5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10,
+	6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10,
+	6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10,
+	6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10,
+	6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10,
+	6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10,
+	7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10,
+	7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10,
+	7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10,
+	8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10,
+	8.401528e-10, 8.496445e-10, 8.594247e-10, 8.6951274e-10,
+	8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10,
+	9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10,
+	9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09,
+	1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09,
+	1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09,
+	1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
+	1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
+}
+var fe = [256]float32{
+	1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
+	0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
+	0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665,
+	0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967,
+	0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896,
+	0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092,
+	0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386,
+	0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495,
+	0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752,
+	0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325,
+	0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955,
+	0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694,
+	0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218,
+	0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763,
+	0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044,
+	0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796,
+	0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408,
+	0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928,
+	0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393,
+	0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625,
+	0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107,
+	0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878,
+	0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438,
+	0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682,
+	0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852,
+	0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479,
+	0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354,
+	0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494,
+	0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119,
+	0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624,
+	0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574,
+	0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672,
+	0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763,
+	0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816,
+	0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919,
+	0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274,
+	0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195,
+	0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106,
+	0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434,
+	0.062193416, 0.060783047, 0.059384305, 0.057997175,
+	0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236,
+	0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623,
+	0.043502413, 0.042254124, 0.041017443, 0.039792392,
+	0.038578995, 0.037377283, 0.036187284, 0.035009038,
+	0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566,
+	0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421,
+	0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867,
+	0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392,
+	0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414,
+	0.008780315, 0.007963077, 0.0071633533, 0.006381906,
+	0.0056196423, 0.0048776558, 0.004157295, 0.0034602648,
+	0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693,
+	0.00045413437,
+}
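
The table-driven ziggurat path returns in one comparison most of the time; the rate parameter is applied by the caller, as the doc comment above describes. A usage sketch from inside the driver (rand here is this internal package, not math/rand):

    r := rand.New(rand.NewSource(1))
    
    lambda := 0.5 // desired rate parameter
    sample := r.ExpFloat64() / lambda
    fmt.Println(sample) // exponentially distributed with mean 1/lambda == 2
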
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/rand/normal.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/normal.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c74a358deb1ea97038072f009079a862c78266a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/rand/normal.go
@@ -0,0 +1,158 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/normal.go
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"math"
+)
+
+/*
+ * Normal distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * http://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+	rn = 3.442619855899
+)
+
+func absInt32(i int32) uint32 {
+	if i < 0 {
+		return uint32(-i)
+	}
+	return uint32(i)
+}
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1).
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+//	sample = NormFloat64() * desiredStdDev + desiredMean
+func (r *Rand) NormFloat64() float64 {
+	for {
+		j := int32(r.Uint32()) // Possibly negative
+		i := j & 0x7F
+		x := float64(j) * float64(wn[i])
+		if absInt32(j) < kn[i] {
+			// This case should be hit better than 99% of the time.
+			return x
+		}
+
+		if i == 0 {
+			// This extra work is only required for the base strip.
+			for {
+				x = -math.Log(r.Float64()) * (1.0 / rn)
+				y := -math.Log(r.Float64())
+				if y+y >= x*x {
+					break
+				}
+			}
+			if j > 0 {
+				return rn + x
+			}
+			return -rn - x
+		}
+		if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) {
+			return x
+		}
+	}
+}
+
+var kn = [128]uint32{
+	0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
+	0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
+	0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7,
+	0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883,
+	0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30,
+	0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa,
+	0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d,
+	0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18,
+	0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924,
+	0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a,
+	0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4,
+	0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62,
+	0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e,
+	0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473,
+	0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd,
+	0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568,
+	0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08,
+	0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc,
+	0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94,
+	0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb,
+	0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075,
+	0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba,
+	0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded,
+	0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72,
+	0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
+	0x7ba90bdc, 0x7a722176, 0x77d664e5,
+}
+var wn = [128]float32{
+	1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
+	2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
+	2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10,
+	3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10,
+	3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10,
+	4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10,
+	4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10,
+	4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10,
+	5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10,
+	5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10,
+	5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10,
+	5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10,
+	6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10,
+	6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10,
+	6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10,
+	6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10,
+	7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10,
+	7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10,
+	7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10,
+	7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10,
+	8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10,
+	8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10,
+	8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10,
+	9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10,
+	9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10,
+	9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09,
+	1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09,
+	1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09,
+	1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09,
+	1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09,
+	1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
+	1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
+}
+var fn = [128]float32{
+	1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303,
+	0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177,
+	0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569,
+	0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277,
+	0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434,
+	0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903,
+	0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055,
+	0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766,
+	0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872,
+	0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642,
+	0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446,
+	0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685,
+	0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484,
+	0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807,
+	0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239,
+	0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877,
+	0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499,
+	0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037,
+	0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265,
+	0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602,
+	0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467,
+	0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058,
+	0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863,
+	0.040742867, 0.03688439, 0.033087887, 0.029356318,
+	0.025693292, 0.022103304, 0.018592102, 0.015167298,
+	0.011839478, 0.008624485, 0.005548995, 0.0026696292,
+}
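
Scaling follows the doc comment above: multiply by the desired standard deviation, then shift by the mean. A usage sketch mirroring the exponential example (again using this internal rand package):

    r := rand.New(rand.NewSource(1))
    
    mean, stddev := 100.0, 15.0
    sample := r.NormFloat64()*stddev + mean
    fmt.Println(sample) // normally distributed around 100 with stddev 15
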
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/rand/rand.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/rand.go
new file mode 100644
index 0000000000000000000000000000000000000000..4c3d3e6ee2b707fa0bc92a58a069928a6b8cfde6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/rand/rand.go
@@ -0,0 +1,374 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rand.go
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rand implements pseudo-random number generators.
+//
+// Random numbers are generated by a Source. Top-level functions, such as
+// Float64 and Int, use a default shared Source that produces a deterministic
+// sequence of values each time a program is run. Use the Seed function to
+// initialize the default Source if different behavior is required for each run.
+// The default Source, a LockedSource, is safe for concurrent use by multiple
+// goroutines, but Sources created by NewSource are not. However, Sources are small
+// and it is reasonable to have a separate Source for each goroutine, seeded
+// differently, to avoid locking.
+//
+// For random numbers suitable for security-sensitive work, see the crypto/rand
+// package.
+package rand
+
+import "sync"
+
+// A Source represents a source of uniformly-distributed
+// pseudo-random uint64 values in the range [0, 1<<64).
+type Source interface {
+	Uint64() uint64
+	Seed(seed uint64)
+}
+
+// NewSource returns a new pseudo-random Source seeded with the given value.
+func NewSource(seed uint64) Source {
+	var rng PCGSource
+	rng.Seed(seed)
+	return &rng
+}
+
+// A Rand is a source of random numbers.
+type Rand struct {
+	src Source
+
+	// readVal contains remainder of 64-bit integer used for bytes
+	// generation during most recent Read call.
+	// It is saved so next Read call can start where the previous
+	// one finished.
+	readVal uint64
+	// readPos indicates the number of low-order bytes of readVal
+	// that are still valid.
+	readPos int8
+}
+
+// New returns a new Rand that uses random values from src
+// to generate other random values.
+func New(src Source) *Rand {
+	return &Rand{src: src}
+}
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+// Seed should not be called concurrently with any other Rand method.
+func (r *Rand) Seed(seed uint64) {
+	if lk, ok := r.src.(*LockedSource); ok {
+		lk.seedPos(seed, &r.readPos)
+		return
+	}
+
+	r.src.Seed(seed)
+	r.readPos = 0
+}
+
+// Uint64 returns a pseudo-random 64-bit integer as a uint64.
+func (r *Rand) Uint64() uint64 { return r.src.Uint64() }
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *Rand) Int63() int64 { return int64(r.src.Uint64() &^ (1 << 63)) }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32.
+func (r *Rand) Uint32() uint32 { return uint32(r.Uint64() >> 32) }
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
+func (r *Rand) Int31() int32 { return int32(r.Uint64() >> 33) }
+
+// Int returns a non-negative pseudo-random int.
+func (r *Rand) Int() int {
+	u := uint(r.Uint64())
+	return int(u << 1 >> 1) // clear sign bit.
+}
+
+const maxUint64 = (1 << 64) - 1
+
+// Uint64n returns, as a uint64, a pseudo-random number in [0,n).
+// It is guaranteed more uniform than taking a Source value mod n
+// for any n that is not a power of 2.
+func (r *Rand) Uint64n(n uint64) uint64 {
+	if n&(n-1) == 0 { // n is power of two, can mask
+		if n == 0 {
+			panic("invalid argument to Uint64n")
+		}
+		return r.Uint64() & (n - 1)
+	}
+	// If n does not divide v, to avoid bias we must not use
+	// a v that is within maxUint64%n of the top of the range.
+	v := r.Uint64()
+	if v > maxUint64-n { // Fast check.
+		ceiling := maxUint64 - maxUint64%n
+		for v >= ceiling {
+			v = r.Uint64()
+		}
+	}
+
+	return v % n
+}
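+
+// Worked example: with n == 10, maxUint64%10 == 5, so the rejection ceiling
+// is maxUint64 - 5 (a multiple of 10); raw values at or above the ceiling
+// are redrawn before the final v % 10, keeping all ten residues equally
+// likely.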
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int63n(n int64) int64 {
+	if n <= 0 {
+		panic("invalid argument to Int63n")
+	}
+	return int64(r.Uint64n(uint64(n)))
+}
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int31n(n int32) int32 {
+	if n <= 0 {
+		panic("invalid argument to Int31n")
+	}
+	// TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines.
+	return int32(r.Uint64n(uint64(n)))
+}
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Intn(n int) int {
+	if n <= 0 {
+		panic("invalid argument to Intn")
+	}
+	// TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines.
+	return int(r.Uint64n(uint64(n)))
+}
+
+// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float64() float64 {
+	// The historical math/rand implementation divided r.Int63() by 1<<63,
+	// and that division could round up to exactly 1.0 even though the
+	// result is documented to be less than 1.0.
+	//
+	// Mapping 1.0 back to 0.0 was tried, but since float64 values near 0
+	// are much denser than near 1, mapping 1 to 0 caused a theoretically
+	// significant overshoot in the probability of returning 0. Instead,
+	// if the value rounds up to 1, just try again. With Uint64n(1<<53)
+	// the numerator is an integer below 2^53, so the division is exact
+	// and the resample branch below is purely defensive.
+again:
+	f := float64(r.Uint64n(1<<53)) / (1 << 53)
+	if f == 1.0 {
+		goto again // resample; this branch is taken O(never)
+	}
+	return f
+}
+
+// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float32() float32 {
+	// We do not want to return 1.0.
+	// This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64).
+again:
+	f := float32(r.Float64())
+	if f == 1 {
+		goto again // resample; this branch is taken O(very rarely)
+	}
+	return f
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func (r *Rand) Perm(n int) []int {
+	m := make([]int, n)
+	// In the following loop, the iteration when i=0 always swaps m[0] with m[0].
+	// That useless iteration could be removed by assigning 1 to i in the init
+	// statement, but Perm also advances r, so the change would alter the final
+	// state of r and can't be made, for Go 1 compatibility reasons.
+	for i := 0; i < n; i++ {
+		j := r.Intn(i + 1)
+		m[i] = m[j]
+		m[j] = i
+	}
+	return m
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func (r *Rand) Shuffle(n int, swap func(i, j int)) {
+	if n < 0 {
+		panic("invalid argument to Shuffle")
+	}
+
+	// Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+	// Shuffle really ought not be called with n that doesn't fit in 32 bits.
+	// Not only will it take a very long time, but with 2³¹! possible permutations,
+	// there's no way that any PRNG can have a big enough internal state to
+	// generate even a minuscule percentage of the possible permutations.
+	// Nevertheless, the right API signature accepts an int n, so handle it as best we can.
+	i := n - 1
+	for ; i > 1<<31-1-1; i-- {
+		j := int(r.Int63n(int64(i + 1)))
+		swap(i, j)
+	}
+	for ; i > 0; i-- {
+		j := int(r.Int31n(int32(i + 1)))
+		swap(i, j)
+	}
+}
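+
+// A minimal sketch of the swap-callback API, shuffling a slice in place:
+//
+//	xs := []int{1, 2, 3, 4, 5}
+//	r.Shuffle(len(xs), func(i, j int) { xs[i], xs[j] = xs[j], xs[i] })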
+
+// Read generates len(p) random bytes and writes them into p. It
+// always returns len(p) and a nil error.
+// Read should not be called concurrently with any other Rand method unless
+// the underlying source is a LockedSource.
+func (r *Rand) Read(p []byte) (n int, err error) {
+	if lk, ok := r.src.(*LockedSource); ok {
+		return lk.Read(p, &r.readVal, &r.readPos)
+	}
+	return read(p, r.src, &r.readVal, &r.readPos)
+}
+
+func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err error) {
+	pos := *readPos
+	val := *readVal
+	rng, _ := src.(*PCGSource)
+	for n = 0; n < len(p); n++ {
+		if pos == 0 {
+			if rng != nil {
+				val = rng.Uint64()
+			} else {
+				val = src.Uint64()
+			}
+			pos = 8
+		}
+		p[n] = byte(val)
+		val >>= 8
+		pos--
+	}
+	*readPos = pos
+	*readVal = val
+	return
+}
+
+/*
+ * Top-level convenience functions
+ */
+
+var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)})
+
+// Type assert that globalRand's source is a LockedSource whose src is a PCGSource.
+var _ PCGSource = globalRand.src.(*LockedSource).src
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as
+// if seeded by Seed(1).
+// Seed, unlike the Rand.Seed method, is safe for concurrent use.
+func Seed(seed uint64) { globalRand.Seed(seed) }
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64
+// from the default Source.
+func Int63() int64 { return globalRand.Int63() }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32
+// from the default Source.
+func Uint32() uint32 { return globalRand.Uint32() }
+
+// Uint64 returns a pseudo-random 64-bit value as a uint64
+// from the default Source.
+func Uint64() uint64 { return globalRand.Uint64() }
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32
+// from the default Source.
+func Int31() int32 { return globalRand.Int31() }
+
+// Int returns a non-negative pseudo-random int from the default Source.
+func Int() int { return globalRand.Int() }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return globalRand.Int63n(n) }
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Int31n(n int32) int32 { return globalRand.Int31n(n) }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Intn(n int) int { return globalRand.Intn(n) }
+
+// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0)
+// from the default Source.
+func Float64() float64 { return globalRand.Float64() }
+
+// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0)
+// from the default Source.
+func Float32() float32 { return globalRand.Float32() }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)
+// from the default Source.
+func Perm(n int) []int { return globalRand.Perm(n) }
+
+// Shuffle pseudo-randomizes the order of elements using the default Source.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) }
+
+// Read generates len(p) random bytes from the default Source and
+// writes them into p. It always returns len(p) and a nil error.
+// Read, unlike the Rand.Read method, is safe for concurrent use.
+func Read(p []byte) (n int, err error) { return globalRand.Read(p) }
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1)
+// from the default Source.
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+//	sample = NormFloat64() * desiredStdDev + desiredMean
+func NormFloat64() float64 { return globalRand.NormFloat64() }
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source.
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+//	sample = ExpFloat64() / desiredRateParameter
+func ExpFloat64() float64 { return globalRand.ExpFloat64() }
+
+// LockedSource is an implementation of Source that is concurrency-safe.
+// A Rand using a LockedSource is safe for concurrent use.
+//
+// The zero value of LockedSource is valid, but should be seeded before use.
+type LockedSource struct {
+	lk  sync.Mutex
+	src PCGSource
+}
+
+func (s *LockedSource) Uint64() (n uint64) {
+	s.lk.Lock()
+	n = s.src.Uint64()
+	s.lk.Unlock()
+	return
+}
+
+func (s *LockedSource) Seed(seed uint64) {
+	s.lk.Lock()
+	s.src.Seed(seed)
+	s.lk.Unlock()
+}
+
+// seedPos implements Seed for a LockedSource without a race condition.
+func (s *LockedSource) seedPos(seed uint64, readPos *int8) {
+	s.lk.Lock()
+	s.src.Seed(seed)
+	*readPos = 0
+	s.lk.Unlock()
+}
+
+// Read implements Read for a LockedSource.
+func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) {
+	s.lk.Lock()
+	n, err = read(p, &s.src, readVal, readPos)
+	s.lk.Unlock()
+	return
+}
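+
+// A minimal construction sketch, mirroring the package-level globalRand above:
+// a zero LockedSource, once seeded, yields a Rand that is safe for concurrent use.
+//
+//	src := new(LockedSource)
+//	src.Seed(42)
+//	r := New(src)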
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/rand/rng.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/rng.go
new file mode 100644
index 0000000000000000000000000000000000000000..f04f9879891cd0314cef8800bcc4b1c88ba9c983
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/rand/rng.go
@@ -0,0 +1,93 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rng.go
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"encoding/binary"
+	"io"
+	"math/bits"
+)
+
+// PCGSource is an implementation of a 64-bit permuted congruential
+// generator as defined in
+//
+//	PCG: A Family of Simple Fast Space-Efficient Statistically Good
+//	Algorithms for Random Number Generation
+//	Melissa E. O’Neill, Harvey Mudd College
+//	http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf
+//
+// The generator here is the congruential generator PCG XSL RR 128/64 (LCG)
+// as found in the software available at http://www.pcg-random.org/.
+// It has period 2^128 with 128 bits of state, producing 64-bit values.
+// Its state is represented by two uint64 words.
+type PCGSource struct {
+	low  uint64
+	high uint64
+}
+
+const (
+	maxUint32 = (1 << 32) - 1
+
+	multiplier = 47026247687942121848144207491837523525
+	mulHigh    = multiplier >> 64
+	mulLow     = multiplier & maxUint64
+
+	increment = 117397592171526113268558934119004209487
+	incHigh   = increment >> 64
+	incLow    = increment & maxUint64
+
+	// TODO: Use these?
+	initializer = 245720598905631564143578724636268694099
+	initHigh    = initializer >> 64
+	initLow     = initializer & maxUint64
+)
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+func (pcg *PCGSource) Seed(seed uint64) {
+	pcg.low = seed
+	pcg.high = seed // TODO: What is right?
+}
+
+// Uint64 returns a pseudo-random 64-bit unsigned integer as a uint64.
+func (pcg *PCGSource) Uint64() uint64 {
+	pcg.multiply()
+	pcg.add()
+	// XOR high and low 64 bits together and rotate right by high 6 bits of state.
+	return bits.RotateLeft64(pcg.high^pcg.low, -int(pcg.high>>58))
+}
+
+func (pcg *PCGSource) add() {
+	var carry uint64
+	pcg.low, carry = bits.Add64(pcg.low, incLow, 0)
+	pcg.high, _ = bits.Add64(pcg.high, incHigh, carry)
+}
+
+func (pcg *PCGSource) multiply() {
+	hi, lo := bits.Mul64(pcg.low, mulLow)
+	hi += pcg.high * mulLow
+	hi += pcg.low * mulHigh
+	pcg.low = lo
+	pcg.high = hi
+}
+
+// MarshalBinary returns the binary representation of the current state of the generator.
+func (pcg *PCGSource) MarshalBinary() ([]byte, error) {
+	var buf [16]byte
+	binary.BigEndian.PutUint64(buf[:8], pcg.high)
+	binary.BigEndian.PutUint64(buf[8:], pcg.low)
+	return buf[:], nil
+}
+
+// UnmarshalBinary sets the state of the generator to the state represented in data.
+func (pcg *PCGSource) UnmarshalBinary(data []byte) error {
+	if len(data) < 16 {
+		return io.ErrUnexpectedEOF
+	}
+	pcg.low = binary.BigEndian.Uint64(data[8:])
+	pcg.high = binary.BigEndian.Uint64(data[:8])
+	return nil
+}
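+
+// A round-trip sketch: the two methods above can checkpoint and restore the
+// 128-bit generator state.
+//
+//	var pcg PCGSource
+//	pcg.Seed(1)
+//	state, _ := pcg.MarshalBinary() // 16 bytes: high word, then low word
+//	var restored PCGSource
+//	_ = restored.UnmarshalBinary(state) // restored now produces the same stream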
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd8c6d6f410fa3231913adf47cc81059376e76e1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package randutil provides common random number utilities.
+package randutil
+
+import (
+	crand "crypto/rand"
+	"fmt"
+	"io"
+
+	xrand "go.mongodb.org/mongo-driver/internal/rand"
+)
+
+// NewLockedRand returns a new "x/exp/rand" pseudo-random number generator seeded with a
+// cryptographically-secure random number.
+// It is safe to use from multiple goroutines.
+func NewLockedRand() *xrand.Rand {
+	var randSrc = new(xrand.LockedSource)
+	randSrc.Seed(cryptoSeed())
+	return xrand.New(randSrc)
+}
+
+// cryptoSeed returns a random uint64 read from the "crypto/rand" random number generator. It is
+// intended to be used to seed pseudorandom number generators at package initialization. It panics
+// if it encounters any errors.
+func cryptoSeed() uint64 {
+	var b [8]byte
+	_, err := io.ReadFull(crand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("failed to read 8 bytes from a \"crypto/rand\".Reader: %v", err))
+	}
+
+	return (uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) | (uint64(b[5]) << 40) | (uint64(b[6]) << 48) | (uint64(b[7]) << 56)
+}
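+
+// The shift-and-OR expression above assembles the bytes little-endian; an
+// equivalent formulation (assuming an "encoding/binary" import) would be:
+//
+//	seed := binary.LittleEndian.Uint64(b[:])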
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
new file mode 100644
index 0000000000000000000000000000000000000000..86c2a33a73f34b1113dd73321cc2d0d4e4da92aa
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
@@ -0,0 +1,68 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package uuid
+
+import (
+	"encoding/hex"
+	"io"
+
+	"go.mongodb.org/mongo-driver/internal/randutil"
+)
+
+// UUID represents a UUID.
+type UUID [16]byte
+
+// A source is a UUID generator that reads random values from an io.Reader.
+// It should be safe to use from multiple goroutines.
+type source struct {
+	random io.Reader
+}
+
+// new returns a random UUIDv4 with bytes read from the source's random number generator.
+func (s *source) new() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(s.random, uuid[:])
+	if err != nil {
+		return UUID{}, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
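+
+// To make the masking above concrete: the two writes force uuid[6] into 0x40..0x4f
+// (version 4) and uuid[8] into 0x80..0xbf (the RFC 4122 variant), so a formatted
+// value always matches xxxxxxxx-xxxx-4xxx-Nxxx-xxxxxxxxxxxx with N in 8..b.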
+
+// newSource returns a source that uses a pseudo-random number generator from the randutil package.
+// It is intended to be used to initialize the package-global UUID generator.
+func newSource() *source {
+	return &source{
+		random: randutil.NewLockedRand(),
+	}
+}
+
+// globalSource is a package-global pseudo-random UUID generator.
+var globalSource = newSource()
+
+// New returns a random UUIDv4. It uses the package-global pseudo-random number generator
+// from randutil, which is seeded at package initialization.
+//
+// New should not be used to generate cryptographically-secure random UUIDs.
+func New() (UUID, error) {
+	return globalSource.new()
+}
+
+func (uuid UUID) String() string {
+	var str [36]byte
+	hex.Encode(str[:], uuid[:4])
+	str[8] = '-'
+	hex.Encode(str[9:13], uuid[4:6])
+	str[13] = '-'
+	hex.Encode(str[14:18], uuid[6:8])
+	str[18] = '-'
+	hex.Encode(str[19:23], uuid[8:10])
+	str[23] = '-'
+	hex.Encode(str[24:], uuid[10:])
+	return string(str[:])
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go b/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb6abbcd7ca6c229912c33aaa1feaedc45590c94
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go
@@ -0,0 +1,50 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package address provides structured representations of network addresses.
+package address // import "go.mongodb.org/mongo-driver/mongo/address"
+
+import (
+	"net"
+	"strings"
+)
+
+const defaultPort = "27017"
+
+// Address is a network address. It can either be an IP address or a DNS name.
+type Address string
+
+// Network is the network protocol for this address. In most cases this will be
+// "tcp" or "unix".
+func (a Address) Network() string {
+	if strings.HasSuffix(string(a), "sock") {
+		return "unix"
+	}
+	return "tcp"
+}
+
+// String is the canonical version of this address, e.g. localhost:27017,
+// 1.2.3.4:27017, example.com:27017.
+func (a Address) String() string {
+	// TODO: unicode case folding?
+	s := strings.ToLower(string(a))
+	if len(s) == 0 {
+		return ""
+	}
+	if a.Network() != "unix" {
+		_, _, err := net.SplitHostPort(s)
+		if err != nil && strings.Contains(err.Error(), "missing port in address") {
+			s += ":" + defaultPort
+		}
+	}
+
+	return s
+}
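+
+// For illustration, with hypothetical inputs:
+//
+//	Address("Example.COM").String()             // "example.com:27017" (default port added)
+//	Address("localhost:28017").String()         // "localhost:28017" (port preserved)
+//	Address("/tmp/mongodb-27017.sock").String() // unchanged: unix sockets get no port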
+
+// Canonicalize creates a canonicalized address.
+func (a Address) Canonicalize() Address {
+	return Address(a.String())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/background_context.go b/vendor/go.mongodb.org/mongo-driver/mongo/background_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..e4146e8b7ca8144bda5d19de16c8faa55340debb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/background_context.go
@@ -0,0 +1,34 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import "context"
+
+// backgroundContext is an implementation of the context.Context interface that wraps a child Context. Value requests
+// are forwarded to the child Context but the Done and Err functions are overridden to ensure the new context does not
+// time out or get cancelled.
+type backgroundContext struct {
+	context.Context
+	childValuesCtx context.Context
+}
+
+// newBackgroundContext creates a new Context whose behavior matches that of context.Background(), but Value calls are
+// forwarded to the provided ctx parameter. If ctx is nil, context.Background() is returned.
+func newBackgroundContext(ctx context.Context) context.Context {
+	if ctx == nil {
+		return context.Background()
+	}
+
+	return &backgroundContext{
+		Context:        context.Background(),
+		childValuesCtx: ctx,
+	}
+}
+
+func (b *backgroundContext) Value(key interface{}) interface{} {
+	return b.childValuesCtx.Value(key)
+}
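+
+// A usage sketch (reqCtx and sessionKey are hypothetical): the wrapper keeps
+// reqCtx's values visible while discarding its deadline and cancellation, which
+// suits cleanup work that must outlive the originating request.
+//
+//	bgCtx := newBackgroundContext(reqCtx)
+//	// bgCtx.Done() and bgCtx.Err() behave like context.Background()'s,
+//	// but bgCtx.Value(sessionKey) still resolves against reqCtx.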
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
new file mode 100644
index 0000000000000000000000000000000000000000..51d59d0ffad9c4f2be37bc549da1934dd1a7c368
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"time"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+// batchCursor is the interface implemented by types that can provide batches of document results.
+// The Cursor type is built on top of this type.
+type batchCursor interface {
+	// ID returns the ID of the cursor.
+	ID() int64
+
+	// Next returns true if there is a batch available.
+	Next(context.Context) bool
+
+	// Batch will return a DocumentSequence for the current batch of documents. The returned
+	// DocumentSequence is only valid until the next call to Next or Close.
+	Batch() *bsoncore.DocumentSequence
+
+	// Server returns a pointer to the cursor's server.
+	Server() driver.Server
+
+	// Err returns the last error encountered.
+	Err() error
+
+	// Close closes the cursor.
+	Close(context.Context) error
+
+	// SetBatchSize is a modifier function used to adjust the batch size of
+	// the cursor that implements it.
+	SetBatchSize(int32)
+
+	// SetMaxTime will set the maximum amount of time the server will allow
+	// the operations to execute. The server will error if this field is set
+	// but the cursor is not configured with awaitData=true.
+	//
+	// The time.Duration value passed by this setter will be converted and
+	// rounded down to the nearest millisecond.
+	SetMaxTime(time.Duration)
+
+	// SetComment will set a user-configurable comment that can be used to
+	// identify the operation in server logs.
+	SetComment(interface{})
+}
+
+// changeStreamCursor is the interface implemented by batch cursors that can also retrieve a
+// postBatchResumeToken from commands and that allow the cursor to be killed rather than closed.
+type changeStreamCursor interface {
+	batchCursor
+	// PostBatchResumeToken returns the latest seen post batch resume token.
+	PostBatchResumeToken() bsoncore.Document
+
+	// KillCursor kills cursor on server without closing batch cursor
+	KillCursor(context.Context) error
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
new file mode 100644
index 0000000000000000000000000000000000000000..40f1181e0e4d9a8560f7cad84dabf66eb45cf8af
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
@@ -0,0 +1,598 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+type bulkWriteBatch struct {
+	models   []WriteModel
+	canRetry bool
+	indexes  []int
+}
+
+// bulkWrite performs a bulk write operation.
+type bulkWrite struct {
+	comment                  interface{}
+	ordered                  *bool
+	bypassDocumentValidation *bool
+	models                   []WriteModel
+	session                  *session.Client
+	collection               *Collection
+	selector                 description.ServerSelector
+	writeConcern             *writeconcern.WriteConcern
+	result                   BulkWriteResult
+	let                      interface{}
+}
+
+func (bw *bulkWrite) execute(ctx context.Context) error {
+	ordered := true
+	if bw.ordered != nil {
+		ordered = *bw.ordered
+	}
+
+	batches := createBatches(bw.models, ordered)
+	bw.result = BulkWriteResult{
+		UpsertedIDs: make(map[int64]interface{}),
+	}
+
+	bwErr := BulkWriteException{
+		WriteErrors: make([]BulkWriteError, 0),
+	}
+
+	var lastErr error
+	continueOnError := !ordered
+	for _, batch := range batches {
+		if len(batch.models) == 0 {
+			continue
+		}
+
+		batchRes, batchErr, err := bw.runBatch(ctx, batch)
+
+		bw.mergeResults(batchRes)
+
+		bwErr.WriteConcernError = batchErr.WriteConcernError
+		bwErr.Labels = append(bwErr.Labels, batchErr.Labels...)
+
+		bwErr.WriteErrors = append(bwErr.WriteErrors, batchErr.WriteErrors...)
+
+		commandErrorOccurred := err != nil && !errors.Is(err, driver.ErrUnacknowledgedWrite)
+		writeErrorOccurred := len(batchErr.WriteErrors) > 0 || batchErr.WriteConcernError != nil
+		if !continueOnError && (commandErrorOccurred || writeErrorOccurred) {
+			if err != nil {
+				return err
+			}
+
+			return bwErr
+		}
+
+		if err != nil {
+			lastErr = err
+		}
+	}
+
+	bw.result.MatchedCount -= bw.result.UpsertedCount
+	if lastErr != nil {
+		_, lastErr = processWriteError(lastErr)
+		return lastErr
+	}
+	if len(bwErr.WriteErrors) > 0 || bwErr.WriteConcernError != nil {
+		return bwErr
+	}
+	return nil
+}
+
+func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWriteResult, BulkWriteException, error) {
+	batchRes := BulkWriteResult{
+		UpsertedIDs: make(map[int64]interface{}),
+	}
+	batchErr := BulkWriteException{}
+
+	var writeErrors []driver.WriteError
+	switch batch.models[0].(type) {
+	case *InsertOneModel:
+		res, err := bw.runInsert(ctx, batch)
+		if err != nil {
+			var writeErr driver.WriteCommandError
+			if !errors.As(err, &writeErr) {
+				return BulkWriteResult{}, batchErr, err
+			}
+			writeErrors = writeErr.WriteErrors
+			batchErr.Labels = writeErr.Labels
+			batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
+		}
+		batchRes.InsertedCount = res.N
+	case *DeleteOneModel, *DeleteManyModel:
+		res, err := bw.runDelete(ctx, batch)
+		if err != nil {
+			var writeErr driver.WriteCommandError
+			if !errors.As(err, &writeErr) {
+				return BulkWriteResult{}, batchErr, err
+			}
+			writeErrors = writeErr.WriteErrors
+			batchErr.Labels = writeErr.Labels
+			batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
+		}
+		batchRes.DeletedCount = res.N
+	case *ReplaceOneModel, *UpdateOneModel, *UpdateManyModel:
+		res, err := bw.runUpdate(ctx, batch)
+		if err != nil {
+			var writeErr driver.WriteCommandError
+			if !errors.As(err, &writeErr) {
+				return BulkWriteResult{}, batchErr, err
+			}
+			writeErrors = writeErr.WriteErrors
+			batchErr.Labels = writeErr.Labels
+			batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
+		}
+		batchRes.MatchedCount = res.N
+		batchRes.ModifiedCount = res.NModified
+		batchRes.UpsertedCount = int64(len(res.Upserted))
+		for _, upsert := range res.Upserted {
+			batchRes.UpsertedIDs[int64(batch.indexes[upsert.Index])] = upsert.ID
+		}
+	}
+
+	batchErr.WriteErrors = make([]BulkWriteError, 0, len(writeErrors))
+	convWriteErrors := writeErrorsFromDriverWriteErrors(writeErrors)
+	for _, we := range convWriteErrors {
+		request := batch.models[we.Index]
+		we.Index = batch.indexes[we.Index]
+		batchErr.WriteErrors = append(batchErr.WriteErrors, BulkWriteError{
+			WriteError: we,
+			Request:    request,
+		})
+	}
+	return batchRes, batchErr, nil
+}
+
+func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (operation.InsertResult, error) {
+	docs := make([]bsoncore.Document, len(batch.models))
+	var i int
+	for _, model := range batch.models {
+		converted := model.(*InsertOneModel)
+		doc, err := marshal(converted.Document, bw.collection.bsonOpts, bw.collection.registry)
+		if err != nil {
+			return operation.InsertResult{}, err
+		}
+		doc, _, err = ensureID(doc, primitive.NilObjectID, bw.collection.bsonOpts, bw.collection.registry)
+		if err != nil {
+			return operation.InsertResult{}, err
+		}
+
+		docs[i] = doc
+		i++
+	}
+
+	op := operation.NewInsert(docs...).
+		Session(bw.session).WriteConcern(bw.writeConcern).CommandMonitor(bw.collection.client.monitor).
+		ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock).
+		Database(bw.collection.db.name).Collection(bw.collection.name).
+		Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).
+		ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout).
+		Logger(bw.collection.client.logger).Authenticator(bw.collection.client.authenticator)
+	if bw.comment != nil {
+		comment, err := marshalValue(bw.comment, bw.collection.bsonOpts, bw.collection.registry)
+		if err != nil {
+			return op.Result(), err
+		}
+		op.Comment(comment)
+	}
+	if bw.bypassDocumentValidation != nil && *bw.bypassDocumentValidation {
+		op = op.BypassDocumentValidation(*bw.bypassDocumentValidation)
+	}
+	if bw.ordered != nil {
+		op = op.Ordered(*bw.ordered)
+	}
+
+	retry := driver.RetryNone
+	if bw.collection.client.retryWrites && batch.canRetry {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err := op.Execute(ctx)
+
+	return op.Result(), err
+}
+
+func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (operation.DeleteResult, error) {
+	docs := make([]bsoncore.Document, len(batch.models))
+	var i int
+	var hasHint bool
+
+	for _, model := range batch.models {
+		var doc bsoncore.Document
+		var err error
+
+		switch converted := model.(type) {
+		case *DeleteOneModel:
+			doc, err = createDeleteDoc(
+				converted.Filter,
+				converted.Collation,
+				converted.Hint,
+				true,
+				bw.collection.bsonOpts,
+				bw.collection.registry)
+			hasHint = hasHint || (converted.Hint != nil)
+		case *DeleteManyModel:
+			doc, err = createDeleteDoc(
+				converted.Filter,
+				converted.Collation,
+				converted.Hint,
+				false,
+				bw.collection.bsonOpts,
+				bw.collection.registry)
+			hasHint = hasHint || (converted.Hint != nil)
+		}
+
+		if err != nil {
+			return operation.DeleteResult{}, err
+		}
+
+		docs[i] = doc
+		i++
+	}
+
+	op := operation.NewDelete(docs...).
+		Session(bw.session).WriteConcern(bw.writeConcern).CommandMonitor(bw.collection.client.monitor).
+		ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock).
+		Database(bw.collection.db.name).Collection(bw.collection.name).
+		Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint).
+		ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout).
+		Logger(bw.collection.client.logger).Authenticator(bw.collection.client.authenticator)
+	if bw.comment != nil {
+		comment, err := marshalValue(bw.comment, bw.collection.bsonOpts, bw.collection.registry)
+		if err != nil {
+			return op.Result(), err
+		}
+		op.Comment(comment)
+	}
+	if bw.let != nil {
+		let, err := marshal(bw.let, bw.collection.bsonOpts, bw.collection.registry)
+		if err != nil {
+			return operation.DeleteResult{}, err
+		}
+		op = op.Let(let)
+	}
+	if bw.ordered != nil {
+		op = op.Ordered(*bw.ordered)
+	}
+	retry := driver.RetryNone
+	if bw.collection.client.retryWrites && batch.canRetry {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err := op.Execute(ctx)
+
+	return op.Result(), err
+}
+
+func createDeleteDoc(
+	filter interface{},
+	collation *options.Collation,
+	hint interface{},
+	deleteOne bool,
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+) (bsoncore.Document, error) {
+	f, err := marshal(filter, bsonOpts, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	var limit int32
+	if deleteOne {
+		limit = 1
+	}
+	didx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendDocumentElement(doc, "q", f)
+	doc = bsoncore.AppendInt32Element(doc, "limit", limit)
+	if collation != nil {
+		doc = bsoncore.AppendDocumentElement(doc, "collation", collation.ToDocument())
+	}
+	if hint != nil {
+		if isUnorderedMap(hint) {
+			return nil, ErrMapForOrderedArgument{"hint"}
+		}
+		hintVal, err := marshalValue(hint, bsonOpts, registry)
+		if err != nil {
+			return nil, err
+		}
+		doc = bsoncore.AppendValueElement(doc, "hint", hintVal)
+	}
+	doc, _ = bsoncore.AppendDocumentEnd(doc, didx)
+
+	return doc, nil
+}
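+
+// For reference, the document assembled above is one entry of a delete command's
+// "deletes" array; optional fields appear only when set:
+//
+//	{"q": <filter>, "limit": 0 or 1, "collation": <doc>, "hint": <string or doc>}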
+
+func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (operation.UpdateResult, error) {
+	docs := make([]bsoncore.Document, len(batch.models))
+	var hasHint bool
+	var hasArrayFilters bool
+	for i, model := range batch.models {
+		var doc bsoncore.Document
+		var err error
+
+		switch converted := model.(type) {
+		case *ReplaceOneModel:
+			doc, err = createUpdateDoc(
+				converted.Filter,
+				converted.Replacement,
+				converted.Hint,
+				nil,
+				converted.Collation,
+				converted.Upsert,
+				false,
+				false,
+				bw.collection.bsonOpts,
+				bw.collection.registry)
+			hasHint = hasHint || (converted.Hint != nil)
+		case *UpdateOneModel:
+			doc, err = createUpdateDoc(
+				converted.Filter,
+				converted.Update,
+				converted.Hint,
+				converted.ArrayFilters,
+				converted.Collation,
+				converted.Upsert,
+				false,
+				true,
+				bw.collection.bsonOpts,
+				bw.collection.registry)
+			hasHint = hasHint || (converted.Hint != nil)
+			hasArrayFilters = hasArrayFilters || (converted.ArrayFilters != nil)
+		case *UpdateManyModel:
+			doc, err = createUpdateDoc(
+				converted.Filter,
+				converted.Update,
+				converted.Hint,
+				converted.ArrayFilters,
+				converted.Collation,
+				converted.Upsert,
+				true,
+				true,
+				bw.collection.bsonOpts,
+				bw.collection.registry)
+			hasHint = hasHint || (converted.Hint != nil)
+			hasArrayFilters = hasArrayFilters || (converted.ArrayFilters != nil)
+		}
+		if err != nil {
+			return operation.UpdateResult{}, err
+		}
+
+		docs[i] = doc
+	}
+
+	op := operation.NewUpdate(docs...).
+		Session(bw.session).WriteConcern(bw.writeConcern).CommandMonitor(bw.collection.client.monitor).
+		ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock).
+		Database(bw.collection.db.name).Collection(bw.collection.name).
+		Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint).
+		ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI).
+		Timeout(bw.collection.client.timeout).Logger(bw.collection.client.logger).
+		Authenticator(bw.collection.client.authenticator)
+	if bw.comment != nil {
+		comment, err := marshalValue(bw.comment, bw.collection.bsonOpts, bw.collection.registry)
+		if err != nil {
+			return op.Result(), err
+		}
+		op.Comment(comment)
+	}
+	if bw.let != nil {
+		let, err := marshal(bw.let, bw.collection.bsonOpts, bw.collection.registry)
+		if err != nil {
+			return operation.UpdateResult{}, err
+		}
+		op = op.Let(let)
+	}
+	if bw.ordered != nil {
+		op = op.Ordered(*bw.ordered)
+	}
+	if bw.bypassDocumentValidation != nil && *bw.bypassDocumentValidation {
+		op = op.BypassDocumentValidation(*bw.bypassDocumentValidation)
+	}
+	retry := driver.RetryNone
+	if bw.collection.client.retryWrites && batch.canRetry {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err := op.Execute(ctx)
+
+	return op.Result(), err
+}
+
+func createUpdateDoc(
+	filter interface{},
+	update interface{},
+	hint interface{},
+	arrayFilters *options.ArrayFilters,
+	collation *options.Collation,
+	upsert *bool,
+	multi bool,
+	checkDollarKey bool,
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+) (bsoncore.Document, error) {
+	f, err := marshal(filter, bsonOpts, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	uidx, updateDoc := bsoncore.AppendDocumentStart(nil)
+	updateDoc = bsoncore.AppendDocumentElement(updateDoc, "q", f)
+
+	u, err := marshalUpdateValue(update, bsonOpts, registry, checkDollarKey)
+	if err != nil {
+		return nil, err
+	}
+
+	updateDoc = bsoncore.AppendValueElement(updateDoc, "u", u)
+
+	if multi {
+		updateDoc = bsoncore.AppendBooleanElement(updateDoc, "multi", multi)
+	}
+
+	if arrayFilters != nil {
+		reg := registry
+		if arrayFilters.Registry != nil {
+			reg = arrayFilters.Registry
+		}
+		arr, err := marshalValue(arrayFilters.Filters, bsonOpts, reg)
+		if err != nil {
+			return nil, err
+		}
+		updateDoc = bsoncore.AppendArrayElement(updateDoc, "arrayFilters", arr.Data)
+	}
+
+	if collation != nil {
+		updateDoc = bsoncore.AppendDocumentElement(updateDoc, "collation", bsoncore.Document(collation.ToDocument()))
+	}
+
+	if upsert != nil {
+		updateDoc = bsoncore.AppendBooleanElement(updateDoc, "upsert", *upsert)
+	}
+
+	if hint != nil {
+		if isUnorderedMap(hint) {
+			return nil, ErrMapForOrderedArgument{"hint"}
+		}
+		hintVal, err := marshalValue(hint, bsonOpts, registry)
+		if err != nil {
+			return nil, err
+		}
+		updateDoc = bsoncore.AppendValueElement(updateDoc, "hint", hintVal)
+	}
+
+	updateDoc, _ = bsoncore.AppendDocumentEnd(updateDoc, uidx)
+	return updateDoc, nil
+}
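+
+// Likewise, the document assembled above is one entry of an update command's
+// "updates" array; optional fields appear only when set:
+//
+//	{"q": <filter>, "u": <update>, "multi": <bool>, "arrayFilters": <array>,
+//	 "collation": <doc>, "upsert": <bool>, "hint": <string or doc>}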
+
+func createBatches(models []WriteModel, ordered bool) []bulkWriteBatch {
+	if ordered {
+		return createOrderedBatches(models)
+	}
+
+	batches := make([]bulkWriteBatch, 5)
+	batches[insertCommand].canRetry = true
+	batches[deleteOneCommand].canRetry = true
+	batches[updateOneCommand].canRetry = true
+
+	// TODO(GODRIVER-1157): fix batching once operation retryability is fixed
+	for i, model := range models {
+		switch model.(type) {
+		case *InsertOneModel:
+			batches[insertCommand].models = append(batches[insertCommand].models, model)
+			batches[insertCommand].indexes = append(batches[insertCommand].indexes, i)
+		case *DeleteOneModel:
+			batches[deleteOneCommand].models = append(batches[deleteOneCommand].models, model)
+			batches[deleteOneCommand].indexes = append(batches[deleteOneCommand].indexes, i)
+		case *DeleteManyModel:
+			batches[deleteManyCommand].models = append(batches[deleteManyCommand].models, model)
+			batches[deleteManyCommand].indexes = append(batches[deleteManyCommand].indexes, i)
+		case *ReplaceOneModel, *UpdateOneModel:
+			batches[updateOneCommand].models = append(batches[updateOneCommand].models, model)
+			batches[updateOneCommand].indexes = append(batches[updateOneCommand].indexes, i)
+		case *UpdateManyModel:
+			batches[updateManyCommand].models = append(batches[updateManyCommand].models, model)
+			batches[updateManyCommand].indexes = append(batches[updateManyCommand].indexes, i)
+		}
+	}
+
+	return batches
+}
+
+func createOrderedBatches(models []WriteModel) []bulkWriteBatch {
+	var batches []bulkWriteBatch
+	var prevKind writeCommandKind = -1
+	i := -1 // batch index
+
+	for ind, model := range models {
+		var createNewBatch bool
+		var canRetry bool
+		var newKind writeCommandKind
+
+		// TODO(GODRIVER-1157): fix batching once operation retryability is fixed
+		switch model.(type) {
+		case *InsertOneModel:
+			createNewBatch = prevKind != insertCommand
+			canRetry = true
+			newKind = insertCommand
+		case *DeleteOneModel:
+			createNewBatch = prevKind != deleteOneCommand
+			canRetry = true
+			newKind = deleteOneCommand
+		case *DeleteManyModel:
+			createNewBatch = prevKind != deleteManyCommand
+			newKind = deleteManyCommand
+		case *ReplaceOneModel, *UpdateOneModel:
+			createNewBatch = prevKind != updateOneCommand
+			canRetry = true
+			newKind = updateOneCommand
+		case *UpdateManyModel:
+			createNewBatch = prevKind != updateManyCommand
+			newKind = updateManyCommand
+		}
+
+		if createNewBatch {
+			batches = append(batches, bulkWriteBatch{
+				models:   []WriteModel{model},
+				canRetry: canRetry,
+				indexes:  []int{ind},
+			})
+			i++
+		} else {
+			batches[i].models = append(batches[i].models, model)
+			if !canRetry {
+				batches[i].canRetry = false // don't make it true if it was already false
+			}
+			batches[i].indexes = append(batches[i].indexes, ind)
+		}
+
+		prevKind = newKind
+	}
+
+	return batches
+}
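+
+// A worked example of the two batching modes, for the hypothetical model
+// sequence [insert, insert, updateOne, insert]:
+//
+//   - ordered: three batches, [insert, insert], [updateOne], [insert]; execution
+//     order is preserved and a new batch starts at every change of kind;
+//   - unordered: five fixed slots keyed by writeCommandKind; here the insert
+//     slot holds indexes 0, 1, 3 and the updateOne slot holds index 2, so like
+//     operations coalesce into one command regardless of original position.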
+
+func (bw *bulkWrite) mergeResults(newResult BulkWriteResult) {
+	bw.result.InsertedCount += newResult.InsertedCount
+	bw.result.MatchedCount += newResult.MatchedCount
+	bw.result.ModifiedCount += newResult.ModifiedCount
+	bw.result.DeletedCount += newResult.DeletedCount
+	bw.result.UpsertedCount += newResult.UpsertedCount
+
+	for index, upsertID := range newResult.UpsertedIDs {
+		bw.result.UpsertedIDs[index] = upsertID
+	}
+}
+
+// writeCommandKind is the kind of command represented by a write operation.
+type writeCommandKind int8
+
+// These constants represent the valid types of write commands.
+const (
+	insertCommand writeCommandKind = iota
+	updateOneCommand
+	updateManyCommand
+	deleteOneCommand
+	deleteManyCommand
+)
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go
new file mode 100644
index 0000000000000000000000000000000000000000..64f45891895671ccef913367c536fbb340fe59ef
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go
@@ -0,0 +1,305 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// WriteModel is an interface implemented by models that can be used in a BulkWrite operation. Each WriteModel
+// represents a write.
+//
+// This interface is implemented by InsertOneModel, DeleteOneModel, DeleteManyModel, ReplaceOneModel, UpdateOneModel,
+// and UpdateManyModel. Custom implementations of this interface must not be used.
+type WriteModel interface {
+	writeModel()
+}
+
+// InsertOneModel is used to insert a single document in a BulkWrite operation.
+type InsertOneModel struct {
+	Document interface{}
+}
+
+// NewInsertOneModel creates a new InsertOneModel.
+func NewInsertOneModel() *InsertOneModel {
+	return &InsertOneModel{}
+}
+
+// SetDocument specifies the document to be inserted. The document cannot be nil. If it does not have an _id field when
+// transformed into BSON, one will be added automatically to the marshalled document. The original document will not be
+// modified.
+func (iom *InsertOneModel) SetDocument(doc interface{}) *InsertOneModel {
+	iom.Document = doc
+	return iom
+}
+
+func (*InsertOneModel) writeModel() {}
+
+// DeleteOneModel is used to delete at most one document in a BulkWrite operation.
+type DeleteOneModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+	Hint      interface{}
+}
+
+// NewDeleteOneModel creates a new DeleteOneModel.
+func NewDeleteOneModel() *DeleteOneModel {
+	return &DeleteOneModel{}
+}
+
+// SetFilter specifies a filter to use to select the document to delete. The filter must be a document containing query
+// operators. It cannot be nil. If the filter matches multiple documents, one will be selected from the matching
+// documents.
+func (dom *DeleteOneModel) SetFilter(filter interface{}) *DeleteOneModel {
+	dom.Filter = filter
+	return dom
+}
+
+// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be
+// used.
+func (dom *DeleteOneModel) SetCollation(collation *options.Collation) *DeleteOneModel {
+	dom.Collation = collation
+	return dom
+}
+
+// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index
+// specification as a document. This option is only valid for MongoDB versions >= 4.4. Server versions from 3.4 up to
+// (but not including) 4.4 will return an error if this option is specified. For server versions < 3.4, the driver will
+// return a client-side error if this option is specified. The driver will return an error if this option is specified
+// during an unacknowledged write operation, or if the hint parameter is a multi-key map. The default value is nil,
+// which means that no hint will be sent.
+func (dom *DeleteOneModel) SetHint(hint interface{}) *DeleteOneModel {
+	dom.Hint = hint
+	return dom
+}
+
+func (*DeleteOneModel) writeModel() {}
+
+// DeleteManyModel is used to delete multiple documents in a BulkWrite operation.
+type DeleteManyModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+	Hint      interface{}
+}
+
+// NewDeleteManyModel creates a new DeleteManyModel.
+func NewDeleteManyModel() *DeleteManyModel {
+	return &DeleteManyModel{}
+}
+
+// SetFilter specifies a filter to use to select documents to delete. The filter must be a document containing query
+// operators. It cannot be nil.
+func (dmm *DeleteManyModel) SetFilter(filter interface{}) *DeleteManyModel {
+	dmm.Filter = filter
+	return dmm
+}
+
+// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be
+// used.
+func (dmm *DeleteManyModel) SetCollation(collation *options.Collation) *DeleteManyModel {
+	dmm.Collation = collation
+	return dmm
+}
+
+// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index
+// specification as a document. This option is only valid for MongoDB versions >= 4.4. Server versions from 3.4 up to
+// (but not including) 4.4 will return an error if this option is specified. For server versions < 3.4, the driver will
+// return a client-side error if this option is specified. The driver will return an error if this option is specified
+// during an unacknowledged write operation, or if the hint parameter is a multi-key map. The default value is nil,
+// which means that no hint will be sent.
+func (dmm *DeleteManyModel) SetHint(hint interface{}) *DeleteManyModel {
+	dmm.Hint = hint
+	return dmm
+}
+
+func (*DeleteManyModel) writeModel() {}
+
+// ReplaceOneModel is used to replace at most one document in a BulkWrite operation.
+type ReplaceOneModel struct {
+	Collation   *options.Collation
+	Upsert      *bool
+	Filter      interface{}
+	Replacement interface{}
+	Hint        interface{}
+}
+
+// NewReplaceOneModel creates a new ReplaceOneModel.
+func NewReplaceOneModel() *ReplaceOneModel {
+	return &ReplaceOneModel{}
+}
+
+// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index
+// specification as a document. This option is only valid for MongoDB versions >= 4.2. Server versions from 3.4 up to
+// (but not including) 4.2 will return an error if this option is specified. For server versions < 3.4, the driver will
+// return a client-side error if this option is specified. The driver will return an error if this option is specified
+// during an unacknowledged write operation, or if the hint parameter is a multi-key map. The default value is nil,
+// which means that no hint will be sent.
+func (rom *ReplaceOneModel) SetHint(hint interface{}) *ReplaceOneModel {
+	rom.Hint = hint
+	return rom
+}
+
+// SetFilter specifies a filter to use to select the document to replace. The filter must be a document containing query
+// operators. It cannot be nil. If the filter matches multiple documents, one will be selected from the matching
+// documents.
+func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel {
+	rom.Filter = filter
+	return rom
+}
+
+// SetReplacement specifies a document that will be used to replace the selected document. It cannot be nil and cannot
+// contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/).
+func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel {
+	rom.Replacement = rep
+	return rom
+}
+
+// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be
+// used.
+func (rom *ReplaceOneModel) SetCollation(collation *options.Collation) *ReplaceOneModel {
+	rom.Collation = collation
+	return rom
+}
+
+// SetUpsert specifies whether or not the replacement document should be inserted if no document matching the filter is
+// found. If an upsert is performed, the _id of the upserted document can be retrieved from the UpsertedIDs field of the
+// BulkWriteResult.
+func (rom *ReplaceOneModel) SetUpsert(upsert bool) *ReplaceOneModel {
+	rom.Upsert = &upsert
+	return rom
+}
+
+func (*ReplaceOneModel) writeModel() {}
+
+// UpdateOneModel is used to update at most one document in a BulkWrite operation.
+type UpdateOneModel struct {
+	Collation    *options.Collation
+	Upsert       *bool
+	Filter       interface{}
+	Update       interface{}
+	ArrayFilters *options.ArrayFilters
+	Hint         interface{}
+}
+
+// NewUpdateOneModel creates a new UpdateOneModel.
+func NewUpdateOneModel() *UpdateOneModel {
+	return &UpdateOneModel{}
+}
+
+// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index
+// specification as a document. This option is only valid for MongoDB versions >= 4.2. Server versions from 3.4 up to
+// (but not including) 4.2 will return an error if this option is specified. For server versions < 3.4, the driver will
+// return a client-side error if this option is specified. The driver will return an error if this option is specified
+// during an unacknowledged write operation, or if the hint parameter is a multi-key map. The default value is nil,
+// which means that no hint will be sent.
+func (uom *UpdateOneModel) SetHint(hint interface{}) *UpdateOneModel {
+	uom.Hint = hint
+	return uom
+}
+
+// SetFilter specifies a filter to use to select the document to update. The filter must be a document containing query
+// operators. It cannot be nil. If the filter matches multiple documents, one will be selected from the matching
+// documents.
+func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel {
+	uom.Filter = filter
+	return uom
+}
+
+// SetUpdate specifies the modifications to be made to the selected document. The value must be a document containing
+// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty.
+func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel {
+	uom.Update = update
+	return uom
+}
+
+// SetArrayFilters specifies a set of filters to determine which elements should be modified when updating an array
+// field.
+func (uom *UpdateOneModel) SetArrayFilters(filters options.ArrayFilters) *UpdateOneModel {
+	uom.ArrayFilters = &filters
+	return uom
+}
+
+// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be
+// used.
+func (uom *UpdateOneModel) SetCollation(collation *options.Collation) *UpdateOneModel {
+	uom.Collation = collation
+	return uom
+}
+
+// SetUpsert specifies whether or not a new document should be inserted if no document matching the filter is found. If
+// an upsert is performed, the _id of the upserted document can be retrieved from the UpsertedIDs field of the
+// BulkWriteResult.
+func (uom *UpdateOneModel) SetUpsert(upsert bool) *UpdateOneModel {
+	uom.Upsert = &upsert
+	return uom
+}
+
+func (*UpdateOneModel) writeModel() {}
+
+// UpdateManyModel is used to update multiple documents in a BulkWrite operation.
+type UpdateManyModel struct {
+	Collation    *options.Collation
+	Upsert       *bool
+	Filter       interface{}
+	Update       interface{}
+	ArrayFilters *options.ArrayFilters
+	Hint         interface{}
+}
+
+// NewUpdateManyModel creates a new UpdateManyModel.
+func NewUpdateManyModel() *UpdateManyModel {
+	return &UpdateManyModel{}
+}
+
+// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index
+// specification as a document. This option is only valid for MongoDB versions >= 4.2. Server versions from 3.4 up to
+// (but not including) 4.2 will return an error if this option is specified. For server versions < 3.4, the driver will
+// return a client-side error if this option is specified. The driver will return an error if this option is specified
+// during an unacknowledged write operation, or if the hint parameter is a multi-key map. The default value is nil,
+// which means that no hint will be sent.
+func (umm *UpdateManyModel) SetHint(hint interface{}) *UpdateManyModel {
+	umm.Hint = hint
+	return umm
+}
+
+// SetFilter specifies a filter to use to select documents to update. The filter must be a document containing query
+// operators. It cannot be nil.
+func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel {
+	umm.Filter = filter
+	return umm
+}
+
+// SetUpdate specifies the modifications to be made to the selected documents. The value must be a document containing
+// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty.
+func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel {
+	umm.Update = update
+	return umm
+}
+
+// SetArrayFilters specifies a set of filters to determine which elements should be modified when updating an array
+// field.
+func (umm *UpdateManyModel) SetArrayFilters(filters options.ArrayFilters) *UpdateManyModel {
+	umm.ArrayFilters = &filters
+	return umm
+}
+
+// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be
+// used.
+func (umm *UpdateManyModel) SetCollation(collation *options.Collation) *UpdateManyModel {
+	umm.Collation = collation
+	return umm
+}
+
+// SetUpsert specifies whether or not a new document should be inserted if no document matching the filter is found. If
+// an upsert is performed, the _id of the upserted document can be retrieved from the UpsertedIDs field of the
+// BulkWriteResult.
+func (umm *UpdateManyModel) SetUpsert(upsert bool) *UpdateManyModel {
+	umm.Upsert = &upsert
+	return umm
+}
+
+func (*UpdateManyModel) writeModel() {}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ea8baf1f2100a85a829b5f58a32b80277969590
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go
@@ -0,0 +1,733 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/internal/csot"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+var (
+	// ErrMissingResumeToken indicates that a change stream notification from the server did not contain a resume token.
+	ErrMissingResumeToken = errors.New("cannot provide resume functionality when the resume token is missing")
+	// ErrNilCursor indicates that the underlying cursor for the change stream is nil.
+	ErrNilCursor = errors.New("cursor is nil")
+
+	minResumableLabelWireVersion int32 = 9 // Wire version at which the server includes the resumable error label
+	networkErrorLabel                  = "NetworkError"
+	resumableErrorLabel                = "ResumableChangeStreamError"
+	errorCursorNotFound          int32 = 43 // CursorNotFound error code
+
+	// Allowlist of error codes that are considered resumable.
+	resumableChangeStreamErrors = map[int32]struct{}{
+		6:     {}, // HostUnreachable
+		7:     {}, // HostNotFound
+		89:    {}, // NetworkTimeout
+		91:    {}, // ShutdownInProgress
+		189:   {}, // PrimarySteppedDown
+		262:   {}, // ExceededTimeLimit
+		9001:  {}, // SocketException
+		10107: {}, // NotPrimary
+		11600: {}, // InterruptedAtShutdown
+		11602: {}, // InterruptedDueToReplStateChange
+		13435: {}, // NotPrimaryNoSecondaryOK
+		13436: {}, // NotPrimaryOrSecondary
+		63:    {}, // StaleShardVersion
+		150:   {}, // StaleEpoch
+		13388: {}, // StaleConfig
+		234:   {}, // RetryChangeStream
+		133:   {}, // FailedToSatisfyReadPreference
+	}
+)
+
+// ChangeStream is used to iterate over a stream of events. Each event can be decoded into a Go type via the Decode
+// method or accessed as raw BSON via the Current field. This type is not goroutine safe and must not be used
+// concurrently by multiple goroutines. For more information about change streams, see
+// https://www.mongodb.com/docs/manual/changeStreams/.
+type ChangeStream struct {
+	// Current is the BSON bytes of the current event. This property is only valid until the next call to Next or
+	// TryNext. If continued access is required, a copy must be made.
+	Current bson.Raw
+
+	aggregate       *operation.Aggregate
+	pipelineSlice   []bsoncore.Document
+	pipelineOptions map[string]bsoncore.Value
+	cursor          changeStreamCursor
+	cursorOptions   driver.CursorOptions
+	batch           []bsoncore.Document
+	resumeToken     bson.Raw
+	err             error
+	sess            *session.Client
+	client          *Client
+	bsonOpts        *options.BSONOptions
+	registry        *bsoncodec.Registry
+	streamType      StreamType
+	options         *options.ChangeStreamOptions
+	selector        description.ServerSelector
+	operationTime   *primitive.Timestamp
+	wireVersion     *description.VersionRange
+}
+
+type changeStreamConfig struct {
+	readConcern    *readconcern.ReadConcern
+	readPreference *readpref.ReadPref
+	client         *Client
+	bsonOpts       *options.BSONOptions
+	registry       *bsoncodec.Registry
+	streamType     StreamType
+	collectionName string
+	databaseName   string
+	crypt          driver.Crypt
+}
+
+func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	cursorOpts := config.client.createBaseCursorOptions()
+
+	cursorOpts.MarshalValueEncoderFn = newEncoderFn(config.bsonOpts, config.registry)
+
+	cs := &ChangeStream{
+		client:     config.client,
+		bsonOpts:   config.bsonOpts,
+		registry:   config.registry,
+		streamType: config.streamType,
+		options:    options.MergeChangeStreamOptions(opts...),
+		selector: description.CompositeSelector([]description.ServerSelector{
+			description.ReadPrefSelector(config.readPreference),
+			description.LatencySelector(config.client.localThreshold),
+		}),
+		cursorOptions: cursorOpts,
+	}
+
+	cs.sess = sessionFromContext(ctx)
+	if cs.sess == nil && cs.client.sessionPool != nil {
+		cs.sess = session.NewImplicitClientSession(cs.client.sessionPool, cs.client.id)
+	}
+	if cs.err = cs.client.validSession(cs.sess); cs.err != nil {
+		closeImplicitSession(cs.sess)
+		return nil, cs.Err()
+	}
+
+	cs.aggregate = operation.NewAggregate(nil).
+		ReadPreference(config.readPreference).ReadConcern(config.readConcern).
+		Deployment(cs.client.deployment).ClusterClock(cs.client.clock).
+		CommandMonitor(cs.client.monitor).Session(cs.sess).ServerSelector(cs.selector).Retry(driver.RetryNone).
+		ServerAPI(cs.client.serverAPI).Crypt(config.crypt).Timeout(cs.client.timeout).
+		Authenticator(cs.client.authenticator)
+
+	if cs.options.Collation != nil {
+		cs.aggregate.Collation(bsoncore.Document(cs.options.Collation.ToDocument()))
+	}
+	if comment := cs.options.Comment; comment != nil {
+		cs.aggregate.Comment(*comment)
+
+		commentVal, err := marshalValue(comment, cs.bsonOpts, cs.registry)
+		if err != nil {
+			return nil, err
+		}
+		cs.cursorOptions.Comment = commentVal
+	}
+	if cs.options.BatchSize != nil {
+		cs.aggregate.BatchSize(*cs.options.BatchSize)
+		cs.cursorOptions.BatchSize = *cs.options.BatchSize
+	}
+	if cs.options.MaxAwaitTime != nil {
+		cs.cursorOptions.MaxTimeMS = int64(*cs.options.MaxAwaitTime / time.Millisecond)
+	}
+	if cs.options.Custom != nil {
+		// Marshal all custom options before passing them to the initial aggregate.
+		// Return any errors from marshaling.
+		customOptions := make(map[string]bsoncore.Value)
+		for optionName, optionValue := range cs.options.Custom {
+			bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue)
+			if err != nil {
+				cs.err = err
+				closeImplicitSession(cs.sess)
+				return nil, cs.Err()
+			}
+			optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData}
+			customOptions[optionName] = optionValueBSON
+		}
+		cs.aggregate.CustomOptions(customOptions)
+	}
+	if cs.options.CustomPipeline != nil {
+		// Marshal all custom pipeline options before building the pipeline slice.
+		// Return any errors from marshaling.
+		cs.pipelineOptions = make(map[string]bsoncore.Value)
+		for optionName, optionValue := range cs.options.CustomPipeline {
+			bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue)
+			if err != nil {
+				cs.err = err
+				closeImplicitSession(cs.sess)
+				return nil, cs.Err()
+			}
+			optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData}
+			cs.pipelineOptions[optionName] = optionValueBSON
+		}
+	}
+
+	switch cs.streamType {
+	case ClientStream:
+		cs.aggregate.Database("admin")
+	case DatabaseStream:
+		cs.aggregate.Database(config.databaseName)
+	case CollectionStream:
+		cs.aggregate.Collection(config.collectionName).Database(config.databaseName)
+	default:
+		closeImplicitSession(cs.sess)
+		return nil, fmt.Errorf("must supply a valid StreamType in config, instead of %v", cs.streamType)
+	}
+
+	// When starting a change stream, cache startAfter as the first resume token if it is set. If not, cache
+	// resumeAfter. If neither is set, do not cache a resume token.
+	resumeToken := cs.options.StartAfter
+	if resumeToken == nil {
+		resumeToken = cs.options.ResumeAfter
+	}
+	var marshaledToken bson.Raw
+	if resumeToken != nil {
+		if marshaledToken, cs.err = bson.Marshal(resumeToken); cs.err != nil {
+			closeImplicitSession(cs.sess)
+			return nil, cs.Err()
+		}
+	}
+	cs.resumeToken = marshaledToken
+
+	if cs.err = cs.buildPipelineSlice(pipeline); cs.err != nil {
+		closeImplicitSession(cs.sess)
+		return nil, cs.Err()
+	}
+	var pipelineArr bsoncore.Document
+	pipelineArr, cs.err = cs.pipelineToBSON()
+	cs.aggregate.Pipeline(pipelineArr)
+
+	if cs.err = cs.executeOperation(ctx, false); cs.err != nil {
+		closeImplicitSession(cs.sess)
+		return nil, cs.Err()
+	}
+
+	return cs, cs.Err()
+}
+
+func (cs *ChangeStream) createOperationDeployment(server driver.Server, connection driver.Connection) driver.Deployment {
+	return &changeStreamDeployment{
+		topologyKind: cs.client.deployment.Kind(),
+		server:       server,
+		conn:         connection,
+	}
+}
+
+func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) error {
+	var server driver.Server
+	var conn driver.Connection
+
+	if server, cs.err = cs.client.deployment.SelectServer(ctx, cs.selector); cs.err != nil {
+		return cs.Err()
+	}
+	if conn, cs.err = server.Connection(ctx); cs.err != nil {
+		return cs.Err()
+	}
+	defer conn.Close()
+	cs.wireVersion = conn.Description().WireVersion
+
+	cs.aggregate.Deployment(cs.createOperationDeployment(server, conn))
+
+	if resuming {
+		cs.replaceOptions(cs.wireVersion)
+
+		csOptDoc, err := cs.createPipelineOptionsDoc()
+		if err != nil {
+			return err
+		}
+		pipIdx, pipDoc := bsoncore.AppendDocumentStart(nil)
+		pipDoc = bsoncore.AppendDocumentElement(pipDoc, "$changeStream", csOptDoc)
+		if pipDoc, cs.err = bsoncore.AppendDocumentEnd(pipDoc, pipIdx); cs.err != nil {
+			return cs.Err()
+		}
+		cs.pipelineSlice[0] = pipDoc
+
+		var plArr bsoncore.Document
+		if plArr, cs.err = cs.pipelineToBSON(); cs.err != nil {
+			return cs.Err()
+		}
+		cs.aggregate.Pipeline(plArr)
+	}
+
+	// If cs.client.timeout is set and context is not already a Timeout context,
+	// honor cs.client.timeout in new Timeout context for change stream
+	// operation execution and potential retry.
+	if cs.client.timeout != nil && !csot.IsTimeoutContext(ctx) {
+		newCtx, cancelFunc := csot.MakeTimeoutContext(ctx, *cs.client.timeout)
+		// Redefine ctx to be the new timeout-derived context.
+		ctx = newCtx
+		// Cancel the timeout-derived context at the end of executeOperation to avoid a context leak.
+		defer cancelFunc()
+	}
+
+	// Execute the aggregate, retrying on retryable errors once (1) if retryable reads are enabled and
+	// infinitely (-1) if context is a Timeout context.
+	var retries int
+	if cs.client.retryReads {
+		retries = 1
+	}
+	if csot.IsTimeoutContext(ctx) {
+		retries = -1
+	}
+
+	var err error
+AggregateExecuteLoop:
+	for {
+		err = cs.aggregate.Execute(ctx)
+		// If no error or no retries remain, do not retry.
+		if err == nil || retries == 0 {
+			break AggregateExecuteLoop
+		}
+
+		switch tt := err.(type) {
+		case driver.Error:
+			// If error is not retryable, do not retry.
+			if !tt.RetryableRead() {
+				break AggregateExecuteLoop
+			}
+
+			// If error is retryable: subtract 1 from retries, redo server selection, checkout
+			// a connection, and restart loop.
+			retries--
+			server, err = cs.client.deployment.SelectServer(ctx, cs.selector)
+			if err != nil {
+				break AggregateExecuteLoop
+			}
+
+			conn.Close()
+			conn, err = server.Connection(ctx)
+			if err != nil {
+				break AggregateExecuteLoop
+			}
+			defer conn.Close()
+
+			// Update the wire version with data from the new connection.
+			cs.wireVersion = conn.Description().WireVersion
+
+			// Reset deployment.
+			cs.aggregate.Deployment(cs.createOperationDeployment(server, conn))
+		default:
+			// Do not retry if error is not a driver error.
+			break AggregateExecuteLoop
+		}
+	}
+	if err != nil {
+		cs.err = replaceErrors(err)
+		return cs.err
+	}
+
+	cr := cs.aggregate.ResultCursorResponse()
+	cr.Server = server
+
+	cs.cursor, cs.err = driver.NewBatchCursor(cr, cs.sess, cs.client.clock, cs.cursorOptions)
+	if cs.err = replaceErrors(cs.err); cs.err != nil {
+		return cs.Err()
+	}
+
+	cs.updatePbrtFromCommand()
+	if cs.options.StartAtOperationTime == nil && cs.options.ResumeAfter == nil &&
+		cs.options.StartAfter == nil && cs.wireVersion.Max >= 7 &&
+		cs.emptyBatch() && cs.resumeToken == nil {
+		cs.operationTime = cs.sess.OperationTime
+	}
+
+	return cs.Err()
+}
+
+// updatePbrtFromCommand updates the post batch resume token after a successful aggregate or getMore operation.
+func (cs *ChangeStream) updatePbrtFromCommand() {
+	// Only cache the pbrt if an empty batch was returned and a pbrt was included.
+	if pbrt := cs.cursor.PostBatchResumeToken(); cs.emptyBatch() && pbrt != nil {
+		cs.resumeToken = bson.Raw(pbrt)
+	}
+}
+
+func (cs *ChangeStream) storeResumeToken() error {
+	// If cs.Current is the last document in the batch and a pbrt is included, cache the pbrt.
+	// Otherwise, cache the _id of the document.
+	var tokenDoc bson.Raw
+	if len(cs.batch) == 0 {
+		if pbrt := cs.cursor.PostBatchResumeToken(); pbrt != nil {
+			tokenDoc = bson.Raw(pbrt)
+		}
+	}
+
+	if tokenDoc == nil {
+		var ok bool
+		tokenDoc, ok = cs.Current.Lookup("_id").DocumentOK()
+		if !ok {
+			_ = cs.Close(context.Background())
+			return ErrMissingResumeToken
+		}
+	}
+
+	cs.resumeToken = tokenDoc
+	return nil
+}
+
+func (cs *ChangeStream) buildPipelineSlice(pipeline interface{}) error {
+	val := reflect.ValueOf(pipeline)
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		cs.err = fmt.Errorf("can only marshal slices and arrays into aggregation pipelines, but got %v", val.Kind())
+		return cs.err
+	}
+
+	cs.pipelineSlice = make([]bsoncore.Document, 0, val.Len()+1)
+
+	csIdx, csDoc := bsoncore.AppendDocumentStart(nil)
+
+	csDocTemp, err := cs.createPipelineOptionsDoc()
+	if err != nil {
+		return err
+	}
+	csDoc = bsoncore.AppendDocumentElement(csDoc, "$changeStream", csDocTemp)
+	csDoc, cs.err = bsoncore.AppendDocumentEnd(csDoc, csIdx)
+	if cs.err != nil {
+		return cs.err
+	}
+	cs.pipelineSlice = append(cs.pipelineSlice, csDoc)
+
+	for i := 0; i < val.Len(); i++ {
+		var elem []byte
+		elem, cs.err = marshal(val.Index(i).Interface(), cs.bsonOpts, cs.registry)
+		if cs.err != nil {
+			return cs.err
+		}
+
+		cs.pipelineSlice = append(cs.pipelineSlice, elem)
+	}
+
+	return cs.err
+}
+
+func (cs *ChangeStream) createPipelineOptionsDoc() (bsoncore.Document, error) {
+	plDocIdx, plDoc := bsoncore.AppendDocumentStart(nil)
+
+	if cs.streamType == ClientStream {
+		plDoc = bsoncore.AppendBooleanElement(plDoc, "allChangesForCluster", true)
+	}
+
+	if cs.options.FullDocument != nil && *cs.options.FullDocument != options.Default {
+		plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument))
+	}
+
+	if cs.options.FullDocumentBeforeChange != nil {
+		plDoc = bsoncore.AppendStringElement(plDoc, "fullDocumentBeforeChange", string(*cs.options.FullDocumentBeforeChange))
+	}
+
+	if cs.options.ResumeAfter != nil {
+		var raDoc bsoncore.Document
+		raDoc, cs.err = marshal(cs.options.ResumeAfter, cs.bsonOpts, cs.registry)
+		if cs.err != nil {
+			return nil, cs.err
+		}
+
+		plDoc = bsoncore.AppendDocumentElement(plDoc, "resumeAfter", raDoc)
+	}
+
+	if cs.options.ShowExpandedEvents != nil {
+		plDoc = bsoncore.AppendBooleanElement(plDoc, "showExpandedEvents", *cs.options.ShowExpandedEvents)
+	}
+
+	if cs.options.StartAfter != nil {
+		var saDoc bsoncore.Document
+		saDoc, cs.err = marshal(cs.options.StartAfter, cs.bsonOpts, cs.registry)
+		if cs.err != nil {
+			return nil, cs.err
+		}
+
+		plDoc = bsoncore.AppendDocumentElement(plDoc, "startAfter", saDoc)
+	}
+
+	if cs.options.StartAtOperationTime != nil {
+		plDoc = bsoncore.AppendTimestampElement(plDoc, "startAtOperationTime", cs.options.StartAtOperationTime.T, cs.options.StartAtOperationTime.I)
+	}
+
+	// Append custom pipeline options.
+	for optionName, optionValue := range cs.pipelineOptions {
+		plDoc = bsoncore.AppendValueElement(plDoc, optionName, optionValue)
+	}
+
+	if plDoc, cs.err = bsoncore.AppendDocumentEnd(plDoc, plDocIdx); cs.err != nil {
+		return nil, cs.err
+	}
+
+	return plDoc, nil
+}
+
+func (cs *ChangeStream) pipelineToBSON() (bsoncore.Document, error) {
+	pipelineDocIdx, pipelineArr := bsoncore.AppendArrayStart(nil)
+	for i, doc := range cs.pipelineSlice {
+		pipelineArr = bsoncore.AppendDocumentElement(pipelineArr, strconv.Itoa(i), doc)
+	}
+	if pipelineArr, cs.err = bsoncore.AppendArrayEnd(pipelineArr, pipelineDocIdx); cs.err != nil {
+		return nil, cs.err
+	}
+	return pipelineArr, cs.err
+}
+
+func (cs *ChangeStream) replaceOptions(wireVersion *description.VersionRange) {
+	// Cached resume token: use the resume token as the resumeAfter option and set no other resume options
+	if cs.resumeToken != nil {
+		cs.options.SetResumeAfter(cs.resumeToken)
+		cs.options.SetStartAfter(nil)
+		cs.options.SetStartAtOperationTime(nil)
+		return
+	}
+
+	// No cached resume token but cached operation time: use the operation time as the startAtOperationTime option and
+	// set no other resume options
+	if (cs.sess.OperationTime != nil || cs.options.StartAtOperationTime != nil) && wireVersion.Max >= 7 {
+		opTime := cs.options.StartAtOperationTime
+		if cs.operationTime != nil {
+			opTime = cs.sess.OperationTime
+		}
+
+		cs.options.SetStartAtOperationTime(opTime)
+		cs.options.SetResumeAfter(nil)
+		cs.options.SetStartAfter(nil)
+		return
+	}
+
+	// No cached resume token or operation time: set none of the resume options
+	cs.options.SetResumeAfter(nil)
+	cs.options.SetStartAfter(nil)
+	cs.options.SetStartAtOperationTime(nil)
+}
+
+// ID returns the ID for this change stream, or 0 if the cursor has been closed or exhausted.
+func (cs *ChangeStream) ID() int64 {
+	if cs.cursor == nil {
+		return 0
+	}
+	return cs.cursor.ID()
+}
+
+// RemainingBatchLength returns the number of documents left in the current batch. If this returns zero, the subsequent
+// call to Next or TryNext will do a network request to fetch the next batch.
+func (cs *ChangeStream) RemainingBatchLength() int {
+	return len(cs.batch)
+}
+
+// SetBatchSize sets the number of documents to fetch from the database with
+// each iteration of the ChangeStream's "Next" or "TryNext" method. This setting
+// only affects subsequent document batches fetched from the database.
+func (cs *ChangeStream) SetBatchSize(size int32) {
+	// Also set the batch size on the cursor options so that any "resumed" change
+	// stream cursors pick up the latest batch size setting.
+	cs.cursorOptions.BatchSize = size
+	cs.cursor.SetBatchSize(size)
+}
+
+// Decode will unmarshal the current event document into val and return any errors from the unmarshalling process
+// without any modification. If val is nil or is a typed nil, an error will be returned.
+func (cs *ChangeStream) Decode(val interface{}) error {
+	if cs.cursor == nil {
+		return ErrNilCursor
+	}
+
+	dec, err := getDecoder(cs.Current, cs.bsonOpts, cs.registry)
+	if err != nil {
+		return fmt.Errorf("error configuring BSON decoder: %w", err)
+	}
+	return dec.Decode(val)
+}
+
+// Err returns the last error seen by the change stream, or nil if no error has occurred.
+func (cs *ChangeStream) Err() error {
+	if cs.err != nil {
+		return replaceErrors(cs.err)
+	}
+	if cs.cursor == nil {
+		return nil
+	}
+
+	return replaceErrors(cs.cursor.Err())
+}
+
+// Close closes this change stream and the underlying cursor. Next and TryNext must not be called after Close has been
+// called. Close is idempotent. After the first call, any subsequent calls will not change the state.
+func (cs *ChangeStream) Close(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	defer closeImplicitSession(cs.sess)
+
+	if cs.cursor == nil {
+		return nil // cursor is already closed
+	}
+
+	cs.err = replaceErrors(cs.cursor.Close(ctx))
+	cs.cursor = nil
+	return cs.Err()
+}
+
+// ResumeToken returns the last cached resume token for this change stream, or nil if a resume token has not been
+// stored.
+func (cs *ChangeStream) ResumeToken() bson.Raw {
+	return cs.resumeToken
+}
+
+// Next gets the next event for this change stream. It returns true if there were no errors and the next event document
+// is available.
+//
+// Next blocks until an event is available, an error occurs, or ctx expires. If ctx expires, the error
+// will be set to ctx.Err(). In an error case, Next will return false.
+//
+// If Next returns false, subsequent calls will also return false.
+func (cs *ChangeStream) Next(ctx context.Context) bool {
+	return cs.next(ctx, false)
+}
+
+// TryNext attempts to get the next event for this change stream. It returns true if there were no errors and the next
+// event document is available.
+//
+// TryNext returns false if the change stream is closed by the server, an error occurs when getting changes from the
+// server, the next change is not yet available, or ctx expires. If ctx expires, the error will be set to ctx.Err().
+//
+// If TryNext returns false and an error occurred or the change stream was closed
+// (i.e. cs.Err() != nil || cs.ID() == 0), subsequent attempts will also return false. Otherwise, it is safe to call
+// TryNext again until a change is available.
+//
+// This method requires driver version >= 1.2.0.
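+//
+// A non-blocking polling sketch (assumptions: cs is an open *ChangeStream and ctx is a
+// caller-supplied context):
+//
+//	for {
+//		if cs.TryNext(ctx) {
+//			// cs.Current holds the next event document.
+//			continue
+//		}
+//		if err := cs.Err(); err != nil {
+//			return err
+//		}
+//		if cs.ID() == 0 {
+//			break // the change stream was closed by the server
+//		}
+//		// No event is available yet; do other work before polling again.
+//	}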
+func (cs *ChangeStream) TryNext(ctx context.Context) bool {
+	return cs.next(ctx, true)
+}
+
+func (cs *ChangeStream) next(ctx context.Context, nonBlocking bool) bool {
+	// return false right away if the change stream has already errored or if cursor is closed.
+	if cs.err != nil {
+		return false
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if len(cs.batch) == 0 {
+		cs.loopNext(ctx, nonBlocking)
+		if cs.err != nil {
+			cs.err = replaceErrors(cs.err)
+			return false
+		}
+		if len(cs.batch) == 0 {
+			return false
+		}
+	}
+
+	// successfully got non-empty batch
+	cs.Current = bson.Raw(cs.batch[0])
+	cs.batch = cs.batch[1:]
+	if cs.err = cs.storeResumeToken(); cs.err != nil {
+		return false
+	}
+	return true
+}
+
+func (cs *ChangeStream) loopNext(ctx context.Context, nonBlocking bool) {
+	for {
+		if cs.cursor == nil {
+			return
+		}
+
+		if cs.cursor.Next(ctx) {
+			// non-empty batch returned
+			cs.batch, cs.err = cs.cursor.Batch().Documents()
+			return
+		}
+
+		cs.err = replaceErrors(cs.cursor.Err())
+		if cs.err == nil {
+			// Check if cursor is alive
+			if cs.ID() == 0 {
+				return
+			}
+
+			// If a getMore was done but the batch was empty, the batch cursor will return false with no error.
+			// Update the tracked resume token to catch the post batch resume token from the server response.
+			cs.updatePbrtFromCommand()
+			if nonBlocking {
+				// stop after a successful getMore, even though the batch was empty
+				return
+			}
+			continue // loop getMore until a non-empty batch is returned or an error occurs
+		}
+
+		if !cs.isResumableError() {
+			return
+		}
+
+		// Ignore any error from closing the cursor: whether the cursor was already deleted server-side
+		// or the close failed, the change stream is resumed with a new cursor to fetch the next batch.
+		_ = cs.cursor.Close(ctx)
+		if cs.err = cs.executeOperation(ctx, true); cs.err != nil {
+			return
+		}
+	}
+}
+
+func (cs *ChangeStream) isResumableError() bool {
+	var commandErr CommandError
+	if !errors.As(cs.err, &commandErr) || commandErr.HasErrorLabel(networkErrorLabel) {
+		// All non-server errors or network errors are resumable.
+		return true
+	}
+
+	if commandErr.Code == errorCursorNotFound {
+		return true
+	}
+
+	// For wire versions 9 and above, a server error is resumable if it has the ResumableChangeStreamError label.
+	if cs.wireVersion != nil && cs.wireVersion.Includes(minResumableLabelWireVersion) {
+		return commandErr.HasErrorLabel(resumableErrorLabel)
+	}
+
+	// For wire versions below 9, a server error is resumable if its code is on the allowlist.
+	_, resumable := resumableChangeStreamErrors[commandErr.Code]
+	return resumable
+}
+
+// emptyBatch returns true if the underlying cursor's batch is empty.
+func (cs *ChangeStream) emptyBatch() bool {
+	return cs.cursor.Batch().Empty()
+}
+
+// StreamType represents the cluster type against which a ChangeStream was created.
+type StreamType uint8
+
+// These constants represent valid change stream types. A change stream can be initialized over a collection, all
+// collections in a database, or over a cluster.
+const (
+	CollectionStream StreamType = iota
+	DatabaseStream
+	ClientStream
+)
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go
new file mode 100644
index 0000000000000000000000000000000000000000..4dca59f91c423bf013e21382f9f115e88dfd6685
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+type changeStreamDeployment struct {
+	topologyKind description.TopologyKind
+	server       driver.Server
+	conn         driver.Connection
+}
+
+var _ driver.Deployment = (*changeStreamDeployment)(nil)
+var _ driver.Server = (*changeStreamDeployment)(nil)
+var _ driver.ErrorProcessor = (*changeStreamDeployment)(nil)
+
+func (c *changeStreamDeployment) SelectServer(context.Context, description.ServerSelector) (driver.Server, error) {
+	return c, nil
+}
+
+func (c *changeStreamDeployment) Kind() description.TopologyKind {
+	return c.topologyKind
+}
+
+func (c *changeStreamDeployment) Connection(context.Context) (driver.Connection, error) {
+	return c.conn, nil
+}
+
+func (c *changeStreamDeployment) RTTMonitor() driver.RTTMonitor {
+	return c.server.RTTMonitor()
+}
+
+func (c *changeStreamDeployment) ProcessError(err error, conn driver.Connection) driver.ProcessErrorResult {
+	ep, ok := c.server.(driver.ErrorProcessor)
+	if !ok {
+		return driver.NoChange
+	}
+
+	return ep.ProcessError(err, conn)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ce6d2e24b3d3ae245293530fc56458a3ebd868c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go
@@ -0,0 +1,877 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/httputil"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/internal/uuid"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/auth"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt"
+	mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
+)
+
+const (
+	defaultLocalThreshold = 15 * time.Millisecond
+	defaultMaxPoolSize    = 100
+)
+
+var (
+	// keyVaultCollOpts specifies options used to communicate with the key vault collection
+	keyVaultCollOpts = options.Collection().SetReadConcern(readconcern.Majority()).
+				SetWriteConcern(writeconcern.New(writeconcern.WMajority()))
+
+	endSessionsBatchSize = 10000
+)
+
+// Client is a handle representing a pool of connections to a MongoDB deployment. It is safe for concurrent use by
+// multiple goroutines.
+//
+// The Client type opens and closes connections automatically and maintains a pool of idle connections. For
+// connection pool configuration options, see documentation for the ClientOptions type in the mongo/options package.
+type Client struct {
+	id             uuid.UUID
+	deployment     driver.Deployment
+	localThreshold time.Duration
+	retryWrites    bool
+	retryReads     bool
+	clock          *session.ClusterClock
+	readPreference *readpref.ReadPref
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	bsonOpts       *options.BSONOptions
+	registry       *bsoncodec.Registry
+	monitor        *event.CommandMonitor
+	serverAPI      *driver.ServerAPIOptions
+	serverMonitor  *event.ServerMonitor
+	sessionPool    *session.Pool
+	timeout        *time.Duration
+	httpClient     *http.Client
+	logger         *logger.Logger
+
+	// client-side encryption fields
+	keyVaultClientFLE  *Client
+	keyVaultCollFLE    *Collection
+	mongocryptdFLE     *mongocryptdClient
+	cryptFLE           driver.Crypt
+	metadataClientFLE  *Client
+	internalClientFLE  *Client
+	encryptedFieldsMap map[string]interface{}
+	authenticator      driver.Authenticator
+}
+
+// Connect creates a new Client and then initializes it using the Connect method. This is equivalent to calling
+// NewClient followed by Client.Connect.
+//
+// When creating an options.ClientOptions, the order the methods are called matters. Later Set*
+// methods will overwrite the values from previous Set* method invocations. This includes the
+// ApplyURI method. This allows callers to determine the order of precedence for option
+// application. For instance, if ApplyURI is called before SetAuth, the Credential from
+// SetAuth will overwrite the values from the connection string. If ApplyURI is called
+// after SetAuth, then its values will overwrite those from SetAuth.
+//
+// The opts parameter is processed using options.MergeClientOptions, which will overwrite entire
+// option fields of previous options; there is no partial overwriting. For example, if Username is
+// set in the Auth field for the first option, and Password is set for the second but with no
+// Username, after the merge the Username field will be empty.
+//
+// The NewClient function does not do any I/O and returns an error if the given options are invalid.
+// The Client.Connect method starts background goroutines to monitor the state of the deployment and does not do
+// any I/O in the main goroutine to prevent the main goroutine from blocking. Therefore, it will not error if the
+// deployment is down.
+//
+// The Client.Ping method can be used to verify that the deployment is successfully connected and the
+// Client was correctly configured.
+func Connect(ctx context.Context, opts ...*options.ClientOptions) (*Client, error) {
+	c, err := NewClient(opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = c.Connect(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
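+
+// A minimal connect-and-verify sketch (assumptions: the URI is illustrative and ctx is a
+// caller-supplied context):
+//
+//	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
+//	if err != nil {
+//		return err
+//	}
+//	defer func() { _ = client.Disconnect(ctx) }()
+//	if err := client.Ping(ctx, readpref.Primary()); err != nil {
+//		return err
+//	}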
+
+// NewClient creates a new client to connect to a deployment specified by the uri.
+//
+// When creating an options.ClientOptions, the order the methods are called matters. Later Set*
+// methods will overwrite the values from previous Set* method invocations. This includes the
+// ApplyURI method. This allows callers to determine the order of precedence for option
+// application. For instance, if ApplyURI is called before SetAuth, the Credential from
+// SetAuth will overwrite the values from the connection string. If ApplyURI is called
+// after SetAuth, then its values will overwrite those from SetAuth.
+//
+// The opts parameter is processed using options.MergeClientOptions, which will overwrite entire
+// option fields of previous options; there is no partial overwriting. For example, if Username is
+// set in the Auth field for the first option, and Password is set for the second but with no
+// Username, after the merge the Username field will be empty.
+//
+// Deprecated: Use [Connect] instead.
+func NewClient(opts ...*options.ClientOptions) (*Client, error) {
+	clientOpt := options.MergeClientOptions(opts...)
+
+	id, err := uuid.New()
+	if err != nil {
+		return nil, err
+	}
+	client := &Client{id: id}
+
+	// ClusterClock
+	client.clock = new(session.ClusterClock)
+
+	// LocalThreshold
+	client.localThreshold = defaultLocalThreshold
+	if clientOpt.LocalThreshold != nil {
+		client.localThreshold = *clientOpt.LocalThreshold
+	}
+	// Monitor
+	if clientOpt.Monitor != nil {
+		client.monitor = clientOpt.Monitor
+	}
+	// ServerMonitor
+	if clientOpt.ServerMonitor != nil {
+		client.serverMonitor = clientOpt.ServerMonitor
+	}
+	// ReadConcern
+	client.readConcern = readconcern.New()
+	if clientOpt.ReadConcern != nil {
+		client.readConcern = clientOpt.ReadConcern
+	}
+	// ReadPreference
+	client.readPreference = readpref.Primary()
+	if clientOpt.ReadPreference != nil {
+		client.readPreference = clientOpt.ReadPreference
+	}
+	// BSONOptions
+	if clientOpt.BSONOptions != nil {
+		client.bsonOpts = clientOpt.BSONOptions
+	}
+	// Registry
+	client.registry = bson.DefaultRegistry
+	if clientOpt.Registry != nil {
+		client.registry = clientOpt.Registry
+	}
+	// RetryWrites
+	client.retryWrites = true // retry writes on by default
+	if clientOpt.RetryWrites != nil {
+		client.retryWrites = *clientOpt.RetryWrites
+	}
+	client.retryReads = true
+	if clientOpt.RetryReads != nil {
+		client.retryReads = *clientOpt.RetryReads
+	}
+	// Timeout
+	client.timeout = clientOpt.Timeout
+	client.httpClient = clientOpt.HTTPClient
+	// WriteConcern
+	if clientOpt.WriteConcern != nil {
+		client.writeConcern = clientOpt.WriteConcern
+	}
+	// AutoEncryptionOptions
+	if clientOpt.AutoEncryptionOptions != nil {
+		if err := client.configureAutoEncryption(clientOpt); err != nil {
+			return nil, err
+		}
+	} else {
+		client.cryptFLE = clientOpt.Crypt
+	}
+
+	// Deployment
+	if clientOpt.Deployment != nil {
+		client.deployment = clientOpt.Deployment
+	}
+
+	// Set default options
+	if clientOpt.MaxPoolSize == nil {
+		clientOpt.SetMaxPoolSize(defaultMaxPoolSize)
+	}
+
+	if clientOpt.Auth != nil {
+		client.authenticator, err = auth.CreateAuthenticator(
+			clientOpt.Auth.AuthMechanism,
+			topology.ConvertCreds(clientOpt.Auth),
+			clientOpt.HTTPClient,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("error creating authenticator: %w", err)
+		}
+	}
+
+	cfg, err := topology.NewConfigWithAuthenticator(clientOpt, client.clock, client.authenticator)
+	if err != nil {
+		return nil, err
+	}
+
+	client.serverAPI = topology.ServerAPIFromServerOptions(cfg.ServerOpts)
+
+	if client.deployment == nil {
+		client.deployment, err = topology.New(cfg)
+		if err != nil {
+			return nil, replaceErrors(err)
+		}
+	}
+
+	// Create a logger for the client.
+	client.logger, err = newLogger(clientOpt.LoggerOptions)
+	if err != nil {
+		return nil, fmt.Errorf("invalid logger options: %w", err)
+	}
+
+	return client, nil
+}
+
+// Connect initializes the Client by starting background monitoring goroutines.
+// If the Client was created using the NewClient function, this method must be called before a Client can be used.
+//
+// Connect starts background goroutines to monitor the state of the deployment and does not do any I/O in the main
+// goroutine. The Client.Ping method can be used to verify that the connection was created successfully.
+//
+// Deprecated: Use [mongo.Connect] instead.
+func (c *Client) Connect(ctx context.Context) error {
+	if connector, ok := c.deployment.(driver.Connector); ok {
+		err := connector.Connect()
+		if err != nil {
+			return replaceErrors(err)
+		}
+	}
+
+	if c.mongocryptdFLE != nil {
+		if err := c.mongocryptdFLE.connect(ctx); err != nil {
+			return err
+		}
+	}
+
+	if c.internalClientFLE != nil {
+		if err := c.internalClientFLE.Connect(ctx); err != nil {
+			return err
+		}
+	}
+
+	if c.keyVaultClientFLE != nil && c.keyVaultClientFLE != c.internalClientFLE && c.keyVaultClientFLE != c {
+		if err := c.keyVaultClientFLE.Connect(ctx); err != nil {
+			return err
+		}
+	}
+
+	if c.metadataClientFLE != nil && c.metadataClientFLE != c.internalClientFLE && c.metadataClientFLE != c {
+		if err := c.metadataClientFLE.Connect(ctx); err != nil {
+			return err
+		}
+	}
+
+	var updateChan <-chan description.Topology
+	if subscriber, ok := c.deployment.(driver.Subscriber); ok {
+		sub, err := subscriber.Subscribe()
+		if err != nil {
+			return replaceErrors(err)
+		}
+		updateChan = sub.Updates
+	}
+	c.sessionPool = session.NewPool(updateChan)
+	return nil
+}
+
+// Disconnect closes sockets to the topology referenced by this Client. It will
+// shut down any monitoring goroutines, close the idle connection pool, and will
+// wait until all the in use connections have been returned to the connection
+// pool and closed before returning. If the context expires via cancellation,
+// deadline, or timeout before the in use connections have returned, the in use
+// connections will be closed, resulting in the failure of any in flight read
+// or write operations. If this method returns with no errors, all connections
+// associated with this Client have been closed.
+func (c *Client) Disconnect(ctx context.Context) error {
+	if c.logger != nil {
+		defer c.logger.Close()
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if c.httpClient == httputil.DefaultHTTPClient {
+		defer httputil.CloseIdleHTTPConnections(c.httpClient)
+	}
+
+	c.endSessions(ctx)
+	if c.mongocryptdFLE != nil {
+		if err := c.mongocryptdFLE.disconnect(ctx); err != nil {
+			return err
+		}
+	}
+
+	if c.internalClientFLE != nil {
+		if err := c.internalClientFLE.Disconnect(ctx); err != nil {
+			return err
+		}
+	}
+
+	if c.keyVaultClientFLE != nil && c.keyVaultClientFLE != c.internalClientFLE && c.keyVaultClientFLE != c {
+		if err := c.keyVaultClientFLE.Disconnect(ctx); err != nil {
+			return err
+		}
+	}
+	if c.metadataClientFLE != nil && c.metadataClientFLE != c.internalClientFLE && c.metadataClientFLE != c {
+		if err := c.metadataClientFLE.Disconnect(ctx); err != nil {
+			return err
+		}
+	}
+	if c.cryptFLE != nil {
+		c.cryptFLE.Close()
+	}
+
+	if disconnector, ok := c.deployment.(driver.Disconnector); ok {
+		return replaceErrors(disconnector.Disconnect(ctx))
+	}
+
+	return nil
+}
+
+// Ping sends a ping command to verify that the client can connect to the deployment.
+//
+// The rp parameter is used to determine which server is selected for the operation.
+// If it is nil, the client's read preference is used.
+//
+// If the server is down, Ping will try to select a server until the client's server selection timeout expires.
+// This can be configured through the ClientOptions.SetServerSelectionTimeout option when creating a new Client.
+// After the timeout expires, a server selection error is returned.
+//
+// Using Ping reduces application resilience because applications starting up will error if the server is temporarily
+// unavailable or is failing over (e.g. during autoscaling due to a load spike).
+func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if rp == nil {
+		rp = c.readPreference
+	}
+
+	db := c.Database("admin")
+	res := db.RunCommand(ctx, bson.D{
+		{"ping", 1},
+	}, options.RunCmd().SetReadPreference(rp))
+
+	return replaceErrors(res.Err())
+}
+
+// StartSession starts a new session configured with the given options.
+//
+// StartSession does not actually communicate with the server and will not error if the client is
+// disconnected.
+//
+// StartSession is safe to call from multiple goroutines concurrently. However, Sessions returned by StartSession are
+// not safe for concurrent use by multiple goroutines.
+//
+// If the DefaultReadConcern, DefaultWriteConcern, or DefaultReadPreference options are not set, the client's read
+// concern, write concern, or read preference will be used, respectively.
+func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) {
+	if c.sessionPool == nil {
+		return nil, ErrClientDisconnected
+	}
+
+	sopts := options.MergeSessionOptions(opts...)
+	coreOpts := &session.ClientOptions{
+		DefaultReadConcern:    c.readConcern,
+		DefaultReadPreference: c.readPreference,
+		DefaultWriteConcern:   c.writeConcern,
+	}
+	if sopts.CausalConsistency != nil {
+		coreOpts.CausalConsistency = sopts.CausalConsistency
+	}
+	if sopts.DefaultReadConcern != nil {
+		coreOpts.DefaultReadConcern = sopts.DefaultReadConcern
+	}
+	if sopts.DefaultWriteConcern != nil {
+		coreOpts.DefaultWriteConcern = sopts.DefaultWriteConcern
+	}
+	if sopts.DefaultReadPreference != nil {
+		coreOpts.DefaultReadPreference = sopts.DefaultReadPreference
+	}
+	if sopts.DefaultMaxCommitTime != nil {
+		coreOpts.DefaultMaxCommitTime = sopts.DefaultMaxCommitTime
+	}
+	if sopts.Snapshot != nil {
+		coreOpts.Snapshot = sopts.Snapshot
+	}
+
+	sess, err := session.NewClientSession(c.sessionPool, c.id, coreOpts)
+	if err != nil {
+		return nil, replaceErrors(err)
+	}
+
+	// Writes are not retryable on standalones, so let operation determine whether to retry
+	sess.RetryWrite = false
+	sess.RetryRead = c.retryReads
+
+	return &sessionImpl{
+		clientSession: sess,
+		client:        c,
+		deployment:    c.deployment,
+	}, nil
+}
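+
+// A transaction sketch using a session (assumptions: coll and the inserted document are
+// illustrative; WithTransaction aborts the transaction if the callback returns an error):
+//
+//	sess, err := client.StartSession()
+//	if err != nil {
+//		return err
+//	}
+//	defer sess.EndSession(ctx)
+//	_, err = sess.WithTransaction(ctx, func(sc mongo.SessionContext) (interface{}, error) {
+//		return coll.InsertOne(sc, bson.D{{"x", 1}})
+//	})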
+
+func (c *Client) endSessions(ctx context.Context) {
+	if c.sessionPool == nil {
+		return
+	}
+
+	sessionIDs := c.sessionPool.IDSlice()
+	op := operation.NewEndSessions(nil).ClusterClock(c.clock).Deployment(c.deployment).
+		ServerSelector(description.ReadPrefSelector(readpref.PrimaryPreferred())).CommandMonitor(c.monitor).
+		Database("admin").Crypt(c.cryptFLE).ServerAPI(c.serverAPI)
+
+	totalNumIDs := len(sessionIDs)
+	var currentBatch []bsoncore.Document
+	for i := 0; i < totalNumIDs; i++ {
+		currentBatch = append(currentBatch, sessionIDs[i])
+
+		// If we are at the end of a batch or the end of the overall IDs array, execute the operation.
+		if ((i+1)%endSessionsBatchSize) == 0 || i == totalNumIDs-1 {
+			// Ignore all errors when ending sessions.
+			_, marshalVal, err := bson.MarshalValue(currentBatch)
+			if err == nil {
+				_ = op.SessionIDs(marshalVal).Execute(ctx)
+			}
+
+			currentBatch = currentBatch[:0]
+		}
+	}
+}
+
+func (c *Client) configureAutoEncryption(clientOpts *options.ClientOptions) error {
+	c.encryptedFieldsMap = clientOpts.AutoEncryptionOptions.EncryptedFieldsMap
+	if err := c.configureKeyVaultClientFLE(clientOpts); err != nil {
+		return err
+	}
+	if err := c.configureMetadataClientFLE(clientOpts); err != nil {
+		return err
+	}
+
+	mc, err := c.newMongoCrypt(clientOpts.AutoEncryptionOptions)
+	if err != nil {
+		return err
+	}
+
+	// If the crypt_shared library was not loaded, try to spawn and connect to mongocryptd.
+	if mc.CryptSharedLibVersionString() == "" {
+		mongocryptdFLE, err := newMongocryptdClient(clientOpts.AutoEncryptionOptions)
+		if err != nil {
+			return err
+		}
+		c.mongocryptdFLE = mongocryptdFLE
+	}
+
+	c.configureCryptFLE(mc, clientOpts.AutoEncryptionOptions)
+	return nil
+}
+
+func (c *Client) getOrCreateInternalClient(clientOpts *options.ClientOptions) (*Client, error) {
+	if c.internalClientFLE != nil {
+		return c.internalClientFLE, nil
+	}
+
+	internalClientOpts := options.MergeClientOptions(clientOpts)
+	internalClientOpts.AutoEncryptionOptions = nil
+	internalClientOpts.SetMinPoolSize(0)
+	var err error
+	c.internalClientFLE, err = NewClient(internalClientOpts)
+	return c.internalClientFLE, err
+}
+
+func (c *Client) configureKeyVaultClientFLE(clientOpts *options.ClientOptions) error {
+	// parse key vault options and create new key vault client
+	var err error
+	aeOpts := clientOpts.AutoEncryptionOptions
+	switch {
+	case aeOpts.KeyVaultClientOptions != nil:
+		c.keyVaultClientFLE, err = NewClient(aeOpts.KeyVaultClientOptions)
+	case clientOpts.MaxPoolSize != nil && *clientOpts.MaxPoolSize == 0:
+		c.keyVaultClientFLE = c
+	default:
+		c.keyVaultClientFLE, err = c.getOrCreateInternalClient(clientOpts)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	dbName, collName := splitNamespace(aeOpts.KeyVaultNamespace)
+	c.keyVaultCollFLE = c.keyVaultClientFLE.Database(dbName).Collection(collName, keyVaultCollOpts)
+	return nil
+}
+
+func (c *Client) configureMetadataClientFLE(clientOpts *options.ClientOptions) error {
+	// create or reuse the metadata client used for listCollections calls, unless auto encryption is bypassed
+	aeOpts := clientOpts.AutoEncryptionOptions
+	if aeOpts.BypassAutoEncryption != nil && *aeOpts.BypassAutoEncryption {
+		// no need for a metadata client.
+		return nil
+	}
+	if clientOpts.MaxPoolSize != nil && *clientOpts.MaxPoolSize == 0 {
+		c.metadataClientFLE = c
+		return nil
+	}
+
+	var err error
+	c.metadataClientFLE, err = c.getOrCreateInternalClient(clientOpts)
+	return err
+}
+
+func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt.MongoCrypt, error) {
+	// convert schemas in SchemaMap to bsoncore documents
+	cryptSchemaMap := make(map[string]bsoncore.Document)
+	for k, v := range opts.SchemaMap {
+		schema, err := marshal(v, c.bsonOpts, c.registry)
+		if err != nil {
+			return nil, err
+		}
+		cryptSchemaMap[k] = schema
+	}
+
+	// convert schemas in EncryptedFieldsMap to bsoncore documents
+	cryptEncryptedFieldsMap := make(map[string]bsoncore.Document)
+	for k, v := range opts.EncryptedFieldsMap {
+		encryptedFields, err := marshal(v, c.bsonOpts, c.registry)
+		if err != nil {
+			return nil, err
+		}
+		cryptEncryptedFieldsMap[k] = encryptedFields
+	}
+
+	kmsProviders, err := marshal(opts.KmsProviders, c.bsonOpts, c.registry)
+	if err != nil {
+		return nil, fmt.Errorf("error creating KMS providers document: %w", err)
+	}
+
+	// Set the crypt_shared library override path from the "cryptSharedLibPath" extra option if one
+	// was set.
+	cryptSharedLibPath := ""
+	if val, ok := opts.ExtraOptions["cryptSharedLibPath"]; ok {
+		str, ok := val.(string)
+		if !ok {
+			return nil, fmt.Errorf(
+				`expected AutoEncryption extra option "cryptSharedLibPath" to be a string, but is a %T`, val)
+		}
+		cryptSharedLibPath = str
+	}
+
+	// Explicitly disable loading the crypt_shared library if requested. Note that this is ONLY
+	// intended for use from tests; there is no supported public API for explicitly disabling
+	// loading the crypt_shared library.
+	cryptSharedLibDisabled := false
+	if v, ok := opts.ExtraOptions["__cryptSharedLibDisabledForTestOnly"]; ok {
+		cryptSharedLibDisabled = v.(bool)
+	}
+
+	bypassAutoEncryption := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption
+	bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis
+
+	mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt().
+		SetKmsProviders(kmsProviders).
+		SetLocalSchemaMap(cryptSchemaMap).
+		SetBypassQueryAnalysis(bypassQueryAnalysis).
+		SetEncryptedFieldsMap(cryptEncryptedFieldsMap).
+		SetCryptSharedLibDisabled(cryptSharedLibDisabled || bypassAutoEncryption).
+		SetCryptSharedLibOverridePath(cryptSharedLibPath).
+		SetHTTPClient(opts.HTTPClient))
+	if err != nil {
+		return nil, err
+	}
+
+	var cryptSharedLibRequired bool
+	if val, ok := opts.ExtraOptions["cryptSharedLibRequired"]; ok {
+		b, ok := val.(bool)
+		if !ok {
+			return nil, fmt.Errorf(
+				`expected AutoEncryption extra option "cryptSharedLibRequired" to be a bool, but is a %T`, val)
+		}
+		cryptSharedLibRequired = b
+	}
+
+	// If the "cryptSharedLibRequired" extra option is set to true, check the MongoCrypt version
+	// string to confirm that the library was successfully loaded. If the version string is empty,
+	// return an error indicating that we couldn't load the crypt_shared library.
+	if cryptSharedLibRequired && mc.CryptSharedLibVersionString() == "" {
+		return nil, errors.New(
+			`AutoEncryption extra option "cryptSharedLibRequired" is true, but we failed to load the crypt_shared library`)
+	}
+
+	return mc, nil
+}
+
+//nolint:unused // the unused linter thinks that this function is unreachable because "c.newMongoCrypt" always panics without the "cse" build tag set.
+func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts *options.AutoEncryptionOptions) {
+	bypass := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption
+	kr := keyRetriever{coll: c.keyVaultCollFLE}
+	var cir collInfoRetriever
+	// If bypass is true, c.metadataClientFLE is nil and the collInfoRetriever
+	// will not be used. If bypass is false, the collInfoRetriever uses
+	// c.metadataClientFLE, which is either the parent client or the internal client.
+	if !bypass {
+		cir = collInfoRetriever{client: c.metadataClientFLE}
+	}
+
+	c.cryptFLE = driver.NewCrypt(&driver.CryptOptions{
+		MongoCrypt:           mc,
+		CollInfoFn:           cir.cryptCollInfo,
+		KeyFn:                kr.cryptKeys,
+		MarkFn:               c.mongocryptdFLE.markCommand,
+		TLSConfig:            opts.TLSConfig,
+		BypassAutoEncryption: bypass,
+	})
+}
+
+// validSession returns an error if the session doesn't belong to the client
+func (c *Client) validSession(sess *session.Client) error {
+	if sess != nil && sess.ClientID != c.id {
+		return ErrWrongClient
+	}
+	return nil
+}
+
+// Database returns a handle for a database with the given name configured with the given DatabaseOptions.
+func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Database {
+	return newDatabase(c, name, opts...)
+}
+
+// ListDatabases executes a listDatabases command and returns the result.
+//
+// The filter parameter must be a document containing query operators and can be used to select which
+// databases are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include
+// all databases.
+//
+// The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/.
+func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := c.validSession(sess)
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+	if sess == nil && c.sessionPool != nil {
+		sess = session.NewImplicitClientSession(c.sessionPool, c.id)
+		defer sess.EndSession()
+	}
+
+	err = c.validSession(sess)
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+
+	filterDoc, err := marshal(filter, c.bsonOpts, c.registry)
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+
+	selector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(c.localThreshold),
+	})
+	selector = makeReadPrefSelector(sess, selector, c.localThreshold)
+
+	ldo := options.MergeListDatabasesOptions(opts...)
+	op := operation.NewListDatabases(filterDoc).
+		Session(sess).ReadPreference(c.readPreference).CommandMonitor(c.monitor).
+		ServerSelector(selector).ClusterClock(c.clock).Database("admin").Deployment(c.deployment).Crypt(c.cryptFLE).
+		ServerAPI(c.serverAPI).Timeout(c.timeout).Authenticator(c.authenticator)
+
+	if ldo.NameOnly != nil {
+		op = op.NameOnly(*ldo.NameOnly)
+	}
+	if ldo.AuthorizedDatabases != nil {
+		op = op.AuthorizedDatabases(*ldo.AuthorizedDatabases)
+	}
+
+	retry := driver.RetryNone
+	if c.retryReads {
+		retry = driver.RetryOncePerCommand
+	}
+	op.Retry(retry)
+
+	err = op.Execute(ctx)
+	if err != nil {
+		return ListDatabasesResult{}, replaceErrors(err)
+	}
+
+	return newListDatabasesResultFromOperation(op.Result()), nil
+}
+
+// ListDatabaseNames executes a listDatabases command and returns a slice containing the names of all of the databases
+// on the server.
+//
+// The filter parameter must be a document containing query operators and can be used to select which databases
+// are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include all
+// databases.
+//
+// The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/.
+func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) {
+	opts = append(opts, options.ListDatabases().SetNameOnly(true))
+
+	res, err := c.ListDatabases(ctx, filter, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	names := make([]string, 0)
+	for _, spec := range res.Databases {
+		names = append(names, spec.Name)
+	}
+
+	return names, nil
+}
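+
+// A listing sketch (an empty bson.D filter matches all databases):
+//
+//	names, err := client.ListDatabaseNames(ctx, bson.D{})
+//	if err != nil {
+//		return err
+//	}
+//	for _, name := range names {
+//		log.Println(name)
+//	}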
+
+// WithSession creates a new SessionContext from the ctx and sess parameters and uses it to call the fn callback. The
+// SessionContext must be used as the Context parameter for any operations in the fn callback that should be executed
+// under the session.
+//
+// WithSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the
+// WithSession callback function is not safe for concurrent use by multiple goroutines.
+//
+// If the ctx parameter already contains a Session, that Session will be replaced with the one provided.
+//
+// Any error returned by the fn callback will be returned without any modifications.
+func WithSession(ctx context.Context, sess Session, fn func(SessionContext) error) error {
+	return fn(NewSessionContext(ctx, sess))
+}
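+
+// A callback sketch (assumptions: sess was returned by client.StartSession and coll is
+// illustrative):
+//
+//	err := mongo.WithSession(ctx, sess, func(sc mongo.SessionContext) error {
+//		_, err := coll.InsertOne(sc, bson.D{{"y", 2}})
+//		return err
+//	})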
+
+// UseSession creates a new Session and uses it to create a new SessionContext, which is used to call the fn callback.
+// The SessionContext parameter must be used as the Context parameter for any operations in the fn callback that should
+// be executed under a session. After the callback returns, the created Session is ended, meaning that any in-progress
+// transactions started by fn will be aborted even if fn returns an error.
+//
+// UseSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the
+// UseSession callback function is not safe for concurrent use by multiple goroutines.
+//
+// If the ctx parameter already contains a Session, that Session will be replaced with the newly created one.
+//
+// Any error returned by the fn callback will be returned without any modifications.
+func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) error {
+	return c.UseSessionWithOptions(ctx, options.Session(), fn)
+}
+
+// UseSessionWithOptions operates like UseSession but uses the given SessionOptions to create the Session.
+//
+// UseSessionWithOptions is safe to call from multiple goroutines concurrently. However, the SessionContext passed to
+// the UseSessionWithOptions callback function is not safe for concurrent use by multiple goroutines.
+func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error {
+	defaultSess, err := c.StartSession(opts)
+	if err != nil {
+		return err
+	}
+
+	defer defaultSess.EndSession(ctx)
+	return fn(NewSessionContext(ctx, defaultSess))
+}
+
+// Watch returns a change stream for all changes on the deployment. See
+// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams.
+//
+// The client must be configured with read concern majority or no read concern for a change stream to be created
+// successfully.
+//
+// The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be
+// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for a list
+// of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the mongo.Pipeline{}
+// type can be used.
+//
+// The opts parameter can be used to specify options for change stream creation (see the options.ChangeStreamOptions
+// documentation).
+func (c *Client) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+	if c.sessionPool == nil {
+		return nil, ErrClientDisconnected
+	}
+
+	csConfig := changeStreamConfig{
+		readConcern:    c.readConcern,
+		readPreference: c.readPreference,
+		client:         c,
+		bsonOpts:       c.bsonOpts,
+		registry:       c.registry,
+		streamType:     ClientStream,
+		crypt:          c.cryptFLE,
+	}
+
+	return newChangeStream(ctx, csConfig, pipeline, opts...)
+}
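+
+// A cluster-wide watch sketch (assumption: the $match filter is illustrative):
+//
+//	pipeline := mongo.Pipeline{
+//		bson.D{{"$match", bson.D{{"operationType", "insert"}}}},
+//	}
+//	cs, err := client.Watch(ctx, pipeline)
+//	if err != nil {
+//		return err
+//	}
+//	defer cs.Close(ctx)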
+
+// NumberSessionsInProgress returns the number of sessions that have been started for this client but have not been
+// closed (i.e. EndSession has not been called).
+func (c *Client) NumberSessionsInProgress() int {
+	// The underlying session pool uses an int64 for checkedOut to allow atomic
+	// access. We convert to an int here to maintain backward compatibility with
+	// older versions of the driver that did not atomically access checkedOut.
+	return int(c.sessionPool.CheckedOut())
+}
+
+// Timeout returns the timeout set for this client.
+func (c *Client) Timeout() *time.Duration {
+	return c.timeout
+}
+
+func (c *Client) createBaseCursorOptions() driver.CursorOptions {
+	return driver.CursorOptions{
+		CommandMonitor: c.monitor,
+		Crypt:          c.cryptFLE,
+		ServerAPI:      c.serverAPI,
+	}
+}
+
+// newLogger will use the LoggerOptions to create an internal logger and publish
+// messages using a LogSink.
+func newLogger(opts *options.LoggerOptions) (*logger.Logger, error) {
+	// If there are no logger options, then create a default logger.
+	if opts == nil {
+		opts = options.Logger()
+	}
+
+	// If there are no component-level options and the environment does not
+	// contain component variables, then do nothing.
+	if (opts.ComponentLevels == nil || len(opts.ComponentLevels) == 0) &&
+		!logger.EnvHasComponentVariables() {
+
+		return nil, nil
+	}
+
+	// Otherwise, collect the component-level options and create a logger.
+	componentLevels := make(map[logger.Component]logger.Level)
+	for component, level := range opts.ComponentLevels {
+		componentLevels[logger.Component(component)] = logger.Level(level)
+	}
+
+	return logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go
new file mode 100644
index 0000000000000000000000000000000000000000..352dac1f058da2e0b87028f7db72e3c8c24c67c3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go
@@ -0,0 +1,436 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt"
+	mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options"
+)
+
+// ClientEncryption is used to create data keys and explicitly encrypt and decrypt BSON values.
+type ClientEncryption struct {
+	crypt          driver.Crypt
+	keyVaultClient *Client
+	keyVaultColl   *Collection
+}
+
+// NewClientEncryption creates a new ClientEncryption instance configured with the given options.
+func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncryptionOptions) (*ClientEncryption, error) {
+	if keyVaultClient == nil {
+		return nil, errors.New("keyVaultClient must not be nil")
+	}
+
+	ce := &ClientEncryption{
+		keyVaultClient: keyVaultClient,
+	}
+	ceo := options.MergeClientEncryptionOptions(opts...)
+
+	// create keyVaultColl
+	db, coll := splitNamespace(ceo.KeyVaultNamespace)
+	ce.keyVaultColl = ce.keyVaultClient.Database(db).Collection(coll, keyVaultCollOpts)
+
+	kmsProviders, err := marshal(ceo.KmsProviders, nil, nil)
+	if err != nil {
+		return nil, fmt.Errorf("error creating KMS providers map: %w", err)
+	}
+
+	mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt().
+		SetKmsProviders(kmsProviders).
+		// Explicitly disable loading the crypt_shared library for the Crypt used for
+		// ClientEncryption because it's only needed for AutoEncryption and we don't expect users to
+		// have the crypt_shared library installed if they're using ClientEncryption.
+		SetCryptSharedLibDisabled(true).
+		SetHTTPClient(ceo.HTTPClient))
+	if err != nil {
+		return nil, err
+	}
+
+	// create Crypt
+	kr := keyRetriever{coll: ce.keyVaultColl}
+	cir := collInfoRetriever{client: ce.keyVaultClient}
+	ce.crypt = driver.NewCrypt(&driver.CryptOptions{
+		MongoCrypt: mc,
+		KeyFn:      kr.cryptKeys,
+		CollInfoFn: cir.cryptCollInfo,
+		TLSConfig:  ceo.TLSConfig,
+	})
+
+	return ce, nil
+}
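+
+// A construction sketch (assumptions: localMasterKey is a 96-byte local master key and
+// the key vault namespace is illustrative):
+//
+//	kmsProviders := map[string]map[string]interface{}{
+//		"local": {"key": localMasterKey},
+//	}
+//	ce, err := mongo.NewClientEncryption(keyVaultClient,
+//		options.ClientEncryption().
+//			SetKeyVaultNamespace("encryption.__keyVault").
+//			SetKmsProviders(kmsProviders))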
+
+// CreateEncryptedCollection creates a new collection for Queryable Encryption, automatically generating a new
+// encryption data key for each field whose keyId is null.
+// It returns the created collection and the encrypted fields document used to create it.
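+//
+// A minimal sketch, assuming ce, db, and ctx from earlier setup; the "users"
+// collection and "ssn" field are illustrative:
+//
+//	encryptedFields := bson.M{
+//		"fields": bson.A{
+//			bson.M{"path": "ssn", "bsonType": "string", "keyId": nil},
+//		},
+//	}
+//	createOpts := options.CreateCollection().SetEncryptedFields(encryptedFields)
+//	coll, ef, err := ce.CreateEncryptedCollection(ctx, db, "users", createOpts, "local", nil)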
+func (ce *ClientEncryption) CreateEncryptedCollection(ctx context.Context,
+	db *Database, coll string, createOpts *options.CreateCollectionOptions,
+	kmsProvider string, masterKey interface{}) (*Collection, bson.M, error) {
+	if createOpts == nil {
+		return nil, nil, errors.New("nil CreateCollectionOptions")
+	}
+	ef := createOpts.EncryptedFields
+	if ef == nil {
+		return nil, nil, errors.New("no EncryptedFields defined for the collection")
+	}
+
+	efBSON, err := marshal(ef, db.bsonOpts, db.registry)
+	if err != nil {
+		return nil, nil, err
+	}
+	r := bsonrw.NewBSONDocumentReader(efBSON)
+	dec, err := bson.NewDecoder(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	var m bson.M
+	err = dec.Decode(&m)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if v, ok := m["fields"]; ok {
+		if fields, ok := v.(bson.A); ok {
+			for _, field := range fields {
+				if f, ok := field.(bson.M); !ok {
+					continue
+				} else if v, ok := f["keyId"]; ok && v == nil {
+					dkOpts := options.DataKey()
+					if masterKey != nil {
+						dkOpts.SetMasterKey(masterKey)
+					}
+					keyid, err := ce.CreateDataKey(ctx, kmsProvider, dkOpts)
+					if err != nil {
+						createOpts.EncryptedFields = m
+						return nil, m, err
+					}
+					f["keyId"] = keyid
+				}
+			}
+			createOpts.EncryptedFields = m
+		}
+	}
+	err = db.CreateCollection(ctx, coll, createOpts)
+	if err != nil {
+		return nil, m, err
+	}
+	return db.Collection(coll), m, nil
+}
+
+// AddKeyAltName adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the
+// given UUID (BSON binary subtype 0x04). Returns the previous version of the key document.
+func (ce *ClientEncryption) AddKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult {
+	filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build()
+	keyAltNameDoc := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build()
+	update := bsoncore.NewDocumentBuilder().AppendDocument("$addToSet", keyAltNameDoc).Build()
+	return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update)
+}
+
+// CreateDataKey creates a new key document and inserts it into the key vault collection. Returns the _id of the
+// created document as a UUID (BSON binary subtype 0x04).
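+//
+// A minimal sketch, assuming ce and ctx from earlier setup:
+//
+//	keyID, err := ce.CreateDataKey(ctx, "local",
+//		options.DataKey().SetKeyAltNames([]string{"example"}))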
+func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string,
+	opts ...*options.DataKeyOptions) (primitive.Binary, error) {
+
+	// translate opts to mcopts.DataKeyOptions
+	dko := options.MergeDataKeyOptions(opts...)
+	co := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames)
+	if dko.MasterKey != nil {
+		keyDoc, err := marshal(
+			dko.MasterKey,
+			ce.keyVaultClient.bsonOpts,
+			ce.keyVaultClient.registry)
+		if err != nil {
+			return primitive.Binary{}, err
+		}
+		co.SetMasterKey(keyDoc)
+	}
+	if dko.KeyMaterial != nil {
+		co.SetKeyMaterial(dko.KeyMaterial)
+	}
+
+	// create data key document
+	dataKeyDoc, err := ce.crypt.CreateDataKey(ctx, kmsProvider, co)
+	if err != nil {
+		return primitive.Binary{}, err
+	}
+
+	// insert key into key vault
+	_, err = ce.keyVaultColl.InsertOne(ctx, dataKeyDoc)
+	if err != nil {
+		return primitive.Binary{}, err
+	}
+
+	subtype, data := bson.Raw(dataKeyDoc).Lookup("_id").Binary()
+	return primitive.Binary{Subtype: subtype, Data: data}, nil
+}
+
+// transformExplicitEncryptionOptions creates explicit encryption options to be passed to libmongocrypt.
+func transformExplicitEncryptionOptions(opts ...*options.EncryptOptions) *mcopts.ExplicitEncryptionOptions {
+	eo := options.MergeEncryptOptions(opts...)
+	transformed := mcopts.ExplicitEncryption()
+	if eo.KeyID != nil {
+		transformed.SetKeyID(*eo.KeyID)
+	}
+	if eo.KeyAltName != nil {
+		transformed.SetKeyAltName(*eo.KeyAltName)
+	}
+	transformed.SetAlgorithm(eo.Algorithm)
+	transformed.SetQueryType(eo.QueryType)
+
+	if eo.ContentionFactor != nil {
+		transformed.SetContentionFactor(*eo.ContentionFactor)
+	}
+
+	if eo.RangeOptions != nil {
+		var transformedRange mcopts.ExplicitRangeOptions
+		if eo.RangeOptions.Min != nil {
+			transformedRange.Min = &bsoncore.Value{Type: eo.RangeOptions.Min.Type, Data: eo.RangeOptions.Min.Value}
+		}
+		if eo.RangeOptions.Max != nil {
+			transformedRange.Max = &bsoncore.Value{Type: eo.RangeOptions.Max.Type, Data: eo.RangeOptions.Max.Value}
+		}
+		if eo.RangeOptions.Precision != nil {
+			transformedRange.Precision = eo.RangeOptions.Precision
+		}
+		if eo.RangeOptions.Sparsity != nil {
+			transformedRange.Sparsity = eo.RangeOptions.Sparsity
+		}
+		if eo.RangeOptions.TrimFactor != nil {
+			transformedRange.TrimFactor = eo.RangeOptions.TrimFactor
+		}
+		transformed.SetRangeOptions(transformedRange)
+	}
+	return transformed
+}
+
+// Encrypt encrypts a BSON value with the given key and algorithm. Returns an encrypted value (BSON binary of subtype 6).
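+//
+// A minimal encrypt/decrypt round trip, assuming keyID was returned by
+// CreateDataKey (the payment-card string is illustrative):
+//
+//	t, data, err := bson.MarshalValue("4111111111111111")
+//	rawVal := bson.RawValue{Type: t, Value: data}
+//	encrypted, err := ce.Encrypt(ctx, rawVal, options.Encrypt().
+//		SetAlgorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").
+//		SetKeyID(keyID))
+//	decrypted, err := ce.Decrypt(ctx, encrypted)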
+func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue,
+	opts ...*options.EncryptOptions) (primitive.Binary, error) {
+
+	transformed := transformExplicitEncryptionOptions(opts...)
+	subtype, data, err := ce.crypt.EncryptExplicit(ctx, bsoncore.Value{Type: val.Type, Data: val.Value}, transformed)
+	if err != nil {
+		return primitive.Binary{}, err
+	}
+	return primitive.Binary{Subtype: subtype, Data: data}, nil
+}
+
+// EncryptExpression encrypts an expression to query a range index.
+// On success, `result` is populated with the resulting BSON document.
+// `expr` is expected to be a BSON document of one of the following forms:
+// 1. A Match Expression of this form:
+// {$and: [{<field>: {$gt: <value1>}}, {<field>: {$lt: <value2> }}]}
+// 2. An Aggregate Expression of this form:
+// {$and: [{$gt: [<fieldpath>, <value1>]}, {$lt: [<fieldpath>, <value2>]}]}
+// $gt may also be $gte. $lt may also be $lte.
+// Only supported for queryType "range".
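+//
+// A minimal sketch using a Match Expression; keyID and any range options are
+// assumed to match the encrypted field's index configuration:
+//
+//	var result bson.Raw
+//	expr := bson.M{"$and": bson.A{
+//		bson.M{"age": bson.M{"$gte": 23}},
+//		bson.M{"age": bson.M{"$lte": 35}},
+//	}}
+//	err := ce.EncryptExpression(ctx, expr, &result, options.Encrypt().
+//		SetAlgorithm("Range").SetQueryType("range").SetKeyID(keyID).
+//		SetContentionFactor(0))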
+func (ce *ClientEncryption) EncryptExpression(ctx context.Context, expr interface{}, result interface{}, opts ...*options.EncryptOptions) error {
+	transformed := transformExplicitEncryptionOptions(opts...)
+
+	exprDoc, err := marshal(expr, nil, nil)
+	if err != nil {
+		return err
+	}
+
+	encryptedExprDoc, err := ce.crypt.EncryptExplicitExpression(ctx, exprDoc, transformed)
+	if err != nil {
+		return err
+	}
+	if raw, ok := result.(*bson.Raw); ok {
+		// Avoid the cost of Unmarshal.
+		*raw = bson.Raw(encryptedExprDoc)
+		return nil
+	}
+	err = bson.Unmarshal([]byte(encryptedExprDoc), result)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Decrypt decrypts an encrypted value (BSON binary of subtype 6) and returns the original BSON value.
+func (ce *ClientEncryption) Decrypt(ctx context.Context, val primitive.Binary) (bson.RawValue, error) {
+	decrypted, err := ce.crypt.DecryptExplicit(ctx, val.Subtype, val.Data)
+	if err != nil {
+		return bson.RawValue{}, err
+	}
+
+	return bson.RawValue{Type: decrypted.Type, Value: decrypted.Data}, nil
+}
+
+// Close cleans up any resources associated with the ClientEncryption instance. This includes disconnecting the
+// key-vault Client instance.
+func (ce *ClientEncryption) Close(ctx context.Context) error {
+	ce.crypt.Close()
+	return ce.keyVaultClient.Disconnect(ctx)
+}
+
+// DeleteKey removes the key document with the given UUID (BSON binary subtype 0x04) from the key vault collection.
+// Returns the result of the internal deleteOne() operation on the key vault collection.
+func (ce *ClientEncryption) DeleteKey(ctx context.Context, id primitive.Binary) (*DeleteResult, error) {
+	filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build()
+	return ce.keyVaultColl.DeleteOne(ctx, filter)
+}
+
+// GetKeyByAltName returns a key document in the key vault collection with the given keyAltName.
+func (ce *ClientEncryption) GetKeyByAltName(ctx context.Context, keyAltName string) *SingleResult {
+	filter := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build()
+	return ce.keyVaultColl.FindOne(ctx, filter)
+}
+
+// GetKey finds a single key document with the given UUID (BSON binary subtype 0x04). Returns the result of the
+// internal find() operation on the key vault collection.
+func (ce *ClientEncryption) GetKey(ctx context.Context, id primitive.Binary) *SingleResult {
+	filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build()
+	return ce.keyVaultColl.FindOne(ctx, filter)
+}
+
+// GetKeys finds all documents in the key vault collection. Returns the result of the internal find() operation on the
+// key vault collection.
+func (ce *ClientEncryption) GetKeys(ctx context.Context) (*Cursor, error) {
+	return ce.keyVaultColl.Find(ctx, bson.D{})
+}
+
+// RemoveKeyAltName removes a keyAltName from the keyAltNames array of the key document in the key vault collection with
+// the given UUID (BSON binary subtype 0x04). Returns the previous version of the key document.
+func (ce *ClientEncryption) RemoveKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult {
+	filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build()
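+	// The update pipeline below uses $filter to drop keyAltName from the
+	// keyAltNames array, and $cond to substitute $$REMOVE (unsetting the field
+	// entirely) when keyAltName was the array's only element.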
+	update := bson.A{bson.D{{"$set", bson.D{{"keyAltNames", bson.D{{"$cond", bson.A{bson.D{{"$eq",
+		bson.A{"$keyAltNames", bson.A{keyAltName}}}}, "$$REMOVE", bson.D{{"$filter",
+		bson.D{{"input", "$keyAltNames"}, {"cond", bson.D{{"$ne", bson.A{"$$this", keyAltName}}}}}}}}}}}}}}}
+	return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update)
+}
+
+// setRewrapManyDataKeyWriteModels prepares the WriteModel slice for a bulk update of the rewrapped documents.
+func setRewrapManyDataKeyWriteModels(rewrappedDocuments []bsoncore.Document, writeModels *[]WriteModel) error {
+	const idKey = "_id"
+	const keyMaterial = "keyMaterial"
+	const masterKey = "masterKey"
+
+	if writeModels == nil {
+		return fmt.Errorf("writeModels must be a non-nil pointer")
+	}
+
+	// Append a slice of WriteModel with the update document per each rewrappedDoc _id filter.
+	for _, rewrappedDocument := range rewrappedDocuments {
+		// Prepare the new master key for update.
+		masterKeyValue, err := rewrappedDocument.LookupErr(masterKey)
+		if err != nil {
+			return err
+		}
+		masterKeyDoc := masterKeyValue.Document()
+
+		// Prepare the new material key for update.
+		keyMaterialValue, err := rewrappedDocument.LookupErr(keyMaterial)
+		if err != nil {
+			return err
+		}
+		keyMaterialSubtype, keyMaterialData := keyMaterialValue.Binary()
+		keyMaterialBinary := primitive.Binary{Subtype: keyMaterialSubtype, Data: keyMaterialData}
+
+		// Prepare the _id filter for documents to update.
+		id, err := rewrappedDocument.LookupErr(idKey)
+		if err != nil {
+			return err
+		}
+
+		idSubtype, idData, ok := id.BinaryOK()
+		if !ok {
+			return fmt.Errorf("expected to assert %q as binary, got type %T", idKey, id)
+		}
+		binaryID := primitive.Binary{Subtype: idSubtype, Data: idData}
+
+		// Append the mutable document to the slice for bulk update.
+		*writeModels = append(*writeModels, NewUpdateOneModel().
+			SetFilter(bson.D{{idKey, binaryID}}).
+			SetUpdate(
+				bson.D{
+					{"$set", bson.D{{keyMaterial, keyMaterialBinary}, {masterKey, masterKeyDoc}}},
+					{"$currentDate", bson.D{{"updateDate", true}}},
+				},
+			))
+	}
+	return nil
+}
+
+// RewrapManyDataKey decrypts and encrypts all matching data keys with a possibly new masterKey value. For all
+// matching documents, this method overwrites the "masterKey", "updateDate", and "keyMaterial" fields. On error,
+// some matching data keys may have already been rewrapped.
+// libmongocrypt 1.5.2 or newer is required; an error is returned if the detected version is older.
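+//
+// A minimal sketch that rewraps every data key with the "local" provider
+// (filter bson.D{} matches all key documents; the provider choice is illustrative):
+//
+//	result, err := ce.RewrapManyDataKey(ctx, bson.D{},
+//		options.RewrapManyDataKey().SetProvider("local"))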
+func (ce *ClientEncryption) RewrapManyDataKey(ctx context.Context, filter interface{},
+	opts ...*options.RewrapManyDataKeyOptions) (*RewrapManyDataKeyResult, error) {
+
+	// libmongocrypt versions 1.5.0 and 1.5.1 have a severe bug in RewrapManyDataKey.
+	// Check if the version string starts with 1.5.0 or 1.5.1. This accounts for pre-release versions, like 1.5.0-rc0.
+	libmongocryptVersion := mongocrypt.Version()
+	if strings.HasPrefix(libmongocryptVersion, "1.5.0") || strings.HasPrefix(libmongocryptVersion, "1.5.1") {
+		return nil, fmt.Errorf("RewrapManyDataKey requires libmongocrypt 1.5.2 or newer. Detected version: %v", libmongocryptVersion)
+	}
+
+	rmdko := options.MergeRewrapManyDataKeyOptions(opts...)
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// Transfer rmdko options to /x/ package options to publish the mongocrypt feed.
+	co := mcopts.RewrapManyDataKey()
+	if rmdko.MasterKey != nil {
+		keyDoc, err := marshal(
+			rmdko.MasterKey,
+			ce.keyVaultClient.bsonOpts,
+			ce.keyVaultClient.registry)
+		if err != nil {
+			return nil, err
+		}
+		co.SetMasterKey(keyDoc)
+	}
+	if rmdko.Provider != nil {
+		co.SetProvider(*rmdko.Provider)
+	}
+
+	// Prepare the filters and rewrap the data key using mongocrypt.
+	filterdoc, err := marshal(filter, ce.keyVaultClient.bsonOpts, ce.keyVaultClient.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	rewrappedDocuments, err := ce.crypt.RewrapDataKey(ctx, filterdoc, co)
+	if err != nil {
+		return nil, err
+	}
+	if len(rewrappedDocuments) == 0 {
+		// If there are no documents to rewrap, then do nothing.
+		return new(RewrapManyDataKeyResult), nil
+	}
+
+	// Prepare the WriteModel slice for bulk updating the rewrapped data keys.
+	models := []WriteModel{}
+	if err := setRewrapManyDataKeyWriteModels(rewrappedDocuments, &models); err != nil {
+		return nil, err
+	}
+
+	bulkWriteResults, err := ce.keyVaultColl.BulkWrite(ctx, models)
+	return &RewrapManyDataKeyResult{BulkWriteResult: bulkWriteResults}, err
+}
+
+// splitNamespace takes a namespace in the form "database.collection" and returns (database name, collection name)
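+//
+// For example, splitNamespace("encryption.__keyVault") returns
+// ("encryption", "__keyVault"), and splitNamespace("keyVault") returns ("", "keyVault").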
+func splitNamespace(ns string) (string, string) {
+	firstDot := strings.Index(ns, ".")
+	if firstDot == -1 {
+		return "", ns
+	}
+
+	return ns[:firstDot], ns[firstDot+1:]
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go
new file mode 100644
index 0000000000000000000000000000000000000000..dbe238a9e393de03d2d2096ed2d1dfa8785b636a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go
@@ -0,0 +1,1988 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/internal/csfle"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Collection is a handle to a MongoDB collection. It is safe for concurrent use by multiple goroutines.
+type Collection struct {
+	client         *Client
+	db             *Database
+	name           string
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	readPreference *readpref.ReadPref
+	readSelector   description.ServerSelector
+	writeSelector  description.ServerSelector
+	bsonOpts       *options.BSONOptions
+	registry       *bsoncodec.Registry
+}
+
+// aggregateParams is used to store information to configure an Aggregate operation.
+type aggregateParams struct {
+	ctx            context.Context
+	pipeline       interface{}
+	client         *Client
+	bsonOpts       *options.BSONOptions
+	registry       *bsoncodec.Registry
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	retryRead      bool
+	db             string
+	col            string
+	readSelector   description.ServerSelector
+	writeSelector  description.ServerSelector
+	readPreference *readpref.ReadPref
+	opts           []*options.AggregateOptions
+}
+
+func closeImplicitSession(sess *session.Client) {
+	if sess != nil && sess.IsImplicit {
+		sess.EndSession()
+	}
+}
+
+func newCollection(db *Database, name string, opts ...*options.CollectionOptions) *Collection {
+	collOpt := options.MergeCollectionOptions(opts...)
+
+	rc := db.readConcern
+	if collOpt.ReadConcern != nil {
+		rc = collOpt.ReadConcern
+	}
+
+	wc := db.writeConcern
+	if collOpt.WriteConcern != nil {
+		wc = collOpt.WriteConcern
+	}
+
+	rp := db.readPreference
+	if collOpt.ReadPreference != nil {
+		rp = collOpt.ReadPreference
+	}
+
+	bsonOpts := db.bsonOpts
+	if collOpt.BSONOptions != nil {
+		bsonOpts = collOpt.BSONOptions
+	}
+
+	reg := db.registry
+	if collOpt.Registry != nil {
+		reg = collOpt.Registry
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(rp),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	writeSelector := description.CompositeSelector([]description.ServerSelector{
+		description.WriteSelector(),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	coll := &Collection{
+		client:         db.client,
+		db:             db,
+		name:           name,
+		readPreference: rp,
+		readConcern:    rc,
+		writeConcern:   wc,
+		readSelector:   readSelector,
+		writeSelector:  writeSelector,
+		bsonOpts:       bsonOpts,
+		registry:       reg,
+	}
+
+	return coll
+}
+
+func (coll *Collection) copy() *Collection {
+	return &Collection{
+		client:         coll.client,
+		db:             coll.db,
+		name:           coll.name,
+		readConcern:    coll.readConcern,
+		writeConcern:   coll.writeConcern,
+		readPreference: coll.readPreference,
+		readSelector:   coll.readSelector,
+		writeSelector:  coll.writeSelector,
+		registry:       coll.registry,
+	}
+}
+
+// Clone creates a copy of the Collection configured with the given CollectionOptions.
+// The specified options are merged with the existing options on the collection, with the specified options taking
+// precedence.
+func (coll *Collection) Clone(opts ...*options.CollectionOptions) (*Collection, error) {
+	copyColl := coll.copy()
+	optsColl := options.MergeCollectionOptions(opts...)
+
+	if optsColl.ReadConcern != nil {
+		copyColl.readConcern = optsColl.ReadConcern
+	}
+
+	if optsColl.WriteConcern != nil {
+		copyColl.writeConcern = optsColl.WriteConcern
+	}
+
+	if optsColl.ReadPreference != nil {
+		copyColl.readPreference = optsColl.ReadPreference
+	}
+
+	if optsColl.Registry != nil {
+		copyColl.registry = optsColl.Registry
+	}
+
+	copyColl.readSelector = description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(copyColl.readPreference),
+		description.LatencySelector(copyColl.client.localThreshold),
+	})
+
+	return copyColl, nil
+}
+
+// Name returns the name of the collection.
+func (coll *Collection) Name() string {
+	return coll.name
+}
+
+// Database returns the Database that was used to create the Collection.
+func (coll *Collection) Database() *Database {
+	return coll.db
+}
+
+// BulkWrite performs a bulk write operation (https://www.mongodb.com/docs/manual/core/bulk-write-operations/).
+//
+// The models parameter must be a slice of operations to be executed in this bulk write. It cannot be nil or empty.
+// All of the models must be non-nil. See the mongo.WriteModel documentation for a list of valid model types and
+// examples of how they should be used.
+//
+// The opts parameter can be used to specify options for the operation (see the options.BulkWriteOptions documentation.)
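+//
+// A minimal caller-side sketch (the collection handle coll and ctx are assumed):
+//
+//	models := []mongo.WriteModel{
+//		mongo.NewInsertOneModel().SetDocument(bson.D{{"name", "alice"}}),
+//		mongo.NewUpdateOneModel().
+//			SetFilter(bson.D{{"name", "bob"}}).
+//			SetUpdate(bson.D{{"$set", bson.D{{"age", 30}}}}),
+//	}
+//	res, err := coll.BulkWrite(ctx, models, options.BulkWrite().SetOrdered(false))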
+func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel,
+	opts ...*options.BulkWriteOptions) (*BulkWriteResult, error) {
+
+	if len(models) == 0 {
+		return nil, ErrEmptySlice
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err := coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, coll.writeSelector)
+
+	for _, model := range models {
+		if model == nil {
+			return nil, ErrNilDocument
+		}
+	}
+
+	bwo := options.MergeBulkWriteOptions(opts...)
+
+	op := bulkWrite{
+		comment:                  bwo.Comment,
+		ordered:                  bwo.Ordered,
+		bypassDocumentValidation: bwo.BypassDocumentValidation,
+		models:                   models,
+		session:                  sess,
+		collection:               coll,
+		selector:                 selector,
+		writeConcern:             wc,
+		let:                      bwo.Let,
+	}
+
+	err = op.execute(ctx)
+
+	return &op.result, replaceErrors(err)
+}
+
+func (coll *Collection) insert(ctx context.Context, documents []interface{},
+	opts ...*options.InsertManyOptions) ([]interface{}, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	result := make([]interface{}, len(documents))
+	docs := make([]bsoncore.Document, len(documents))
+
+	for i, doc := range documents {
+		bsoncoreDoc, err := marshal(doc, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		bsoncoreDoc, id, err := ensureID(bsoncoreDoc, primitive.NilObjectID, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		docs[i] = bsoncoreDoc
+		result[i] = id
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err := coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, coll.writeSelector)
+
+	op := operation.NewInsert(docs...).
+		Session(sess).WriteConcern(wc).CommandMonitor(coll.client.monitor).
+		ServerSelector(selector).ClusterClock(coll.client.clock).
+		Database(coll.db.name).Collection(coll.name).
+		Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true).
+		ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).Logger(coll.client.logger).
+		Authenticator(coll.client.authenticator)
+	imo := options.MergeInsertManyOptions(opts...)
+	if imo.BypassDocumentValidation != nil && *imo.BypassDocumentValidation {
+		op = op.BypassDocumentValidation(*imo.BypassDocumentValidation)
+	}
+	if imo.Comment != nil {
+		comment, err := marshalValue(imo.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op = op.Comment(comment)
+	}
+	if imo.Ordered != nil {
+		op = op.Ordered(*imo.Ordered)
+	}
+	retry := driver.RetryNone
+	if coll.client.retryWrites {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err = op.Execute(ctx)
+	var wce driver.WriteCommandError
+	if !errors.As(err, &wce) {
+		return result, err
+	}
+
+	// remove the ids that had writeErrors from result
+	for i, we := range wce.WriteErrors {
+		// i earlier write errors have each removed one entry from result, so this document's index is we.Index-i
+		idIndex := int(we.Index) - i
+		// if the insert is ordered, nothing after the error was inserted
+		if imo.Ordered == nil || *imo.Ordered {
+			result = result[:idIndex]
+			break
+		}
+		result = append(result[:idIndex], result[idIndex+1:]...)
+	}
+
+	return result, err
+}
+
+// InsertOne executes an insert command to insert a single document into the collection.
+//
+// The document parameter must be the document to be inserted. It cannot be nil. If the document does not have an _id
+// field when transformed into BSON, one will be added automatically to the marshalled document. The original document
+// will not be modified. The _id can be retrieved from the InsertedID field of the returned InsertOneResult.
+//
+// The opts parameter can be used to specify options for the operation (see the options.InsertOneOptions documentation.)
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/.
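+//
+// A minimal sketch (coll and ctx assumed):
+//
+//	res, err := coll.InsertOne(ctx, bson.D{{"name", "alice"}, {"age", 30}})
+//	if err == nil {
+//		fmt.Println(res.InsertedID)
+//	}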
+func (coll *Collection) InsertOne(ctx context.Context, document interface{},
+	opts ...*options.InsertOneOptions) (*InsertOneResult, error) {
+
+	ioOpts := options.MergeInsertOneOptions(opts...)
+	imOpts := options.InsertMany()
+
+	if ioOpts.BypassDocumentValidation != nil && *ioOpts.BypassDocumentValidation {
+		imOpts.SetBypassDocumentValidation(*ioOpts.BypassDocumentValidation)
+	}
+	if ioOpts.Comment != nil {
+		imOpts.SetComment(ioOpts.Comment)
+	}
+	res, err := coll.insert(ctx, []interface{}{document}, imOpts)
+
+	rr, err := processWriteError(err)
+	if rr&rrOne == 0 {
+		return nil, err
+	}
+	return &InsertOneResult{InsertedID: res[0]}, err
+}
+
+// InsertMany executes an insert command to insert multiple documents into the collection. If write errors occur
+// during the operation (e.g. duplicate key error), this method returns a BulkWriteException error.
+//
+// The documents parameter must be a slice of documents to insert. The slice cannot be nil or empty. The elements must
+// all be non-nil. For any document that does not have an _id field when transformed into BSON, one will be added
+// automatically to the marshalled document. The original document will not be modified. The _id values for the inserted
+// documents can be retrieved from the InsertedIDs field of the returned InsertManyResult.
+//
+// The opts parameter can be used to specify options for the operation (see the options.InsertManyOptions documentation.)
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/.
+func (coll *Collection) InsertMany(ctx context.Context, documents []interface{},
+	opts ...*options.InsertManyOptions) (*InsertManyResult, error) {
+
+	if len(documents) == 0 {
+		return nil, ErrEmptySlice
+	}
+
+	result, err := coll.insert(ctx, documents, opts...)
+	rr, err := processWriteError(err)
+	if rr&rrMany == 0 {
+		return nil, err
+	}
+
+	imResult := &InsertManyResult{InsertedIDs: result}
+	var writeException WriteException
+	if !errors.As(err, &writeException) {
+		return imResult, err
+	}
+
+	// create and return a BulkWriteException
+	bwErrors := make([]BulkWriteError, 0, len(writeException.WriteErrors))
+	for _, we := range writeException.WriteErrors {
+		bwErrors = append(bwErrors, BulkWriteError{
+			WriteError: we,
+			Request:    nil,
+		})
+	}
+
+	return imResult, BulkWriteException{
+		WriteErrors:       bwErrors,
+		WriteConcernError: writeException.WriteConcernError,
+		Labels:            writeException.Labels,
+	}
+}
+
+func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOne bool, expectedRr returnResult,
+	opts ...*options.DeleteOptions) (*DeleteResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, coll.writeSelector)
+
+	var limit int32
+	if deleteOne {
+		limit = 1
+	}
+	do := options.MergeDeleteOptions(opts...)
+	didx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendDocumentElement(doc, "q", f)
+	doc = bsoncore.AppendInt32Element(doc, "limit", limit)
+	if do.Collation != nil {
+		doc = bsoncore.AppendDocumentElement(doc, "collation", do.Collation.ToDocument())
+	}
+	if do.Hint != nil {
+		if isUnorderedMap(do.Hint) {
+			return nil, ErrMapForOrderedArgument{"hint"}
+		}
+		hint, err := marshalValue(do.Hint, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		doc = bsoncore.AppendValueElement(doc, "hint", hint)
+	}
+	doc, _ = bsoncore.AppendDocumentEnd(doc, didx)
+
+	op := operation.NewDelete(doc).
+		Session(sess).WriteConcern(wc).CommandMonitor(coll.client.monitor).
+		ServerSelector(selector).ClusterClock(coll.client.clock).
+		Database(coll.db.name).Collection(coll.name).
+		Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true).
+		ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).Logger(coll.client.logger).
+		Authenticator(coll.client.authenticator)
+	if do.Comment != nil {
+		comment, err := marshalValue(do.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op = op.Comment(comment)
+	}
+	if do.Hint != nil {
+		op = op.Hint(true)
+	}
+	if do.Let != nil {
+		let, err := marshal(do.Let, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op = op.Let(let)
+	}
+
+	// deleteMany cannot be retried
+	retryMode := driver.RetryNone
+	if deleteOne && coll.client.retryWrites {
+		retryMode = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retryMode)
+	rr, err := processWriteError(op.Execute(ctx))
+	if rr&expectedRr == 0 {
+		return nil, err
+	}
+	return &DeleteResult{DeletedCount: op.Result().N}, err
+}
+
+// DeleteOne executes a delete command to delete at most one document from the collection.
+//
+// The filter parameter must be a document containing query operators and can be used to select the document to be
+// deleted. It cannot be nil. If the filter does not match any documents, the operation will succeed and a DeleteResult
+// with a DeletedCount of 0 will be returned. If the filter matches multiple documents, one will be selected from the
+// matched set.
+//
+// The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/.
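+//
+// A minimal sketch (coll and ctx assumed):
+//
+//	res, err := coll.DeleteOne(ctx, bson.D{{"name", "alice"}})
+//	// res.DeletedCount is 0 or 1.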
+func (coll *Collection) DeleteOne(ctx context.Context, filter interface{},
+	opts ...*options.DeleteOptions) (*DeleteResult, error) {
+
+	return coll.delete(ctx, filter, true, rrOne, opts...)
+}
+
+// DeleteMany executes a delete command to delete documents from the collection.
+//
+// The filter parameter must be a document containing query operators and can be used to select the documents to
+// be deleted. It cannot be nil. An empty document (e.g. bson.D{}) should be used to delete all documents in the
+// collection. If the filter does not match any documents, the operation will succeed and a DeleteResult with a
+// DeletedCount of 0 will be returned.
+//
+// The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/.
+func (coll *Collection) DeleteMany(ctx context.Context, filter interface{},
+	opts ...*options.DeleteOptions) (*DeleteResult, error) {
+
+	return coll.delete(ctx, filter, false, rrMany, opts...)
+}
+
+func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Document, update interface{}, multi bool,
+	expectedRr returnResult, checkDollarKey bool, opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	uo := options.MergeUpdateOptions(opts...)
+
+	// collation, arrayFilters, upsert, and hint are included on the individual update documents rather than as part of the
+	// command
+	updateDoc, err := createUpdateDoc(
+		filter,
+		update,
+		uo.Hint,
+		uo.ArrayFilters,
+		uo.Collation,
+		uo.Upsert,
+		multi,
+		checkDollarKey,
+		coll.bsonOpts,
+		coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, coll.writeSelector)
+
+	op := operation.NewUpdate(updateDoc).
+		Session(sess).WriteConcern(wc).CommandMonitor(coll.client.monitor).
+		ServerSelector(selector).ClusterClock(coll.client.clock).
+		Database(coll.db.name).Collection(coll.name).
+		Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Hint(uo.Hint != nil).
+		ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI).
+		Timeout(coll.client.timeout).Logger(coll.client.logger).Authenticator(coll.client.authenticator)
+	if uo.Let != nil {
+		let, err := marshal(uo.Let, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op = op.Let(let)
+	}
+
+	if uo.BypassDocumentValidation != nil && *uo.BypassDocumentValidation {
+		op = op.BypassDocumentValidation(*uo.BypassDocumentValidation)
+	}
+	if uo.Comment != nil {
+		comment, err := marshalValue(uo.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op = op.Comment(comment)
+	}
+	retry := driver.RetryNone
+	// retryable writes are only enabled for updateOne/replaceOne operations
+	if !multi && coll.client.retryWrites {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+	err = op.Execute(ctx)
+
+	rr, err := processWriteError(err)
+	if rr&expectedRr == 0 {
+		return nil, err
+	}
+
+	opRes := op.Result()
+	res := &UpdateResult{
+		MatchedCount:  opRes.N,
+		ModifiedCount: opRes.NModified,
+		UpsertedCount: int64(len(opRes.Upserted)),
+	}
+	if len(opRes.Upserted) > 0 {
+		res.UpsertedID = opRes.Upserted[0].ID
+		res.MatchedCount--
+	}
+
+	return res, err
+}
+
+// UpdateByID executes an update command to update the document whose _id value matches the provided ID in the collection.
+// This is equivalent to running UpdateOne(ctx, bson.D{{"_id", id}}, update, opts...).
+//
+// The id parameter is the _id of the document to be updated. It cannot be nil. If the ID does not match any documents,
+// the operation will succeed and an UpdateResult with a MatchedCount of 0 will be returned.
+//
+// The update parameter must be a document containing update operators
+// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be
+// made to the selected document. It cannot be nil or empty.
+//
+// The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/.
+func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update interface{},
+	opts ...*options.UpdateOptions) (*UpdateResult, error) {
+	if id == nil {
+		return nil, ErrNilValue
+	}
+	return coll.UpdateOne(ctx, bson.D{{"_id", id}}, update, opts...)
+}
+
+// UpdateOne executes an update command to update at most one document in the collection.
+//
+// The filter parameter must be a document containing query operators and can be used to select the document to be
+// updated. It cannot be nil. If the filter does not match any documents, the operation will succeed and an UpdateResult
+// with a MatchedCount of 0 will be returned. If the filter matches multiple documents, one will be selected from the
+// matched set and MatchedCount will equal 1.
+//
+// The update parameter must be a document containing update operators
+// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be
+// made to the selected document. It cannot be nil or empty.
+//
+// The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/.
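+//
+// A minimal sketch that sets a field and upserts when no document matches
+// (coll and ctx assumed):
+//
+//	res, err := coll.UpdateOne(ctx,
+//		bson.D{{"name", "alice"}},
+//		bson.D{{"$set", bson.D{{"age", 31}}}},
+//		options.Update().SetUpsert(true))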
+func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{},
+	opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	return coll.updateOrReplace(ctx, f, update, false, rrOne, true, opts...)
+}
+
+// UpdateMany executes an update command to update documents in the collection.
+//
+// The filter parameter must be a document containing query operators and can be used to select the documents to be
+// updated. It cannot be nil. If the filter does not match any documents, the operation will succeed and an UpdateResult
+// with a MatchedCount of 0 will be returned.
+//
+// The update parameter must be a document containing update operators
+// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made
+// to the selected documents. It cannot be nil or empty.
+//
+// The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/.
+func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{},
+	opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	return coll.updateOrReplace(ctx, f, update, true, rrMany, true, opts...)
+}
+
+// ReplaceOne executes an update command to replace at most one document in the collection.
+//
+// The filter parameter must be a document containing query operators and can be used to select the document to be
+// replaced. It cannot be nil. If the filter does not match any documents, the operation will succeed and an
+// UpdateResult with a MatchedCount of 0 will be returned. If the filter matches multiple documents, one will be
+// selected from the matched set and MatchedCount will equal 1.
+//
+// The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil
+// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/).
+//
+// The opts parameter can be used to specify options for the operation (see the options.ReplaceOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/.
+func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{},
+	replacement interface{}, opts ...*options.ReplaceOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := marshal(replacement, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := ensureNoDollarKey(r); err != nil {
+		return nil, err
+	}
+
+	updateOptions := make([]*options.UpdateOptions, 0, len(opts))
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		uOpts := options.Update()
+		uOpts.BypassDocumentValidation = opt.BypassDocumentValidation
+		uOpts.Collation = opt.Collation
+		uOpts.Upsert = opt.Upsert
+		uOpts.Hint = opt.Hint
+		uOpts.Let = opt.Let
+		uOpts.Comment = opt.Comment
+		updateOptions = append(updateOptions, uOpts)
+	}
+
+	return coll.updateOrReplace(ctx, f, r, false, rrOne, false, updateOptions...)
+}
+
+// Aggregate executes an aggregate command against the collection and returns a cursor over the resulting documents.
+//
+// The pipeline parameter must be an array of documents, each representing an aggregation stage. The pipeline cannot
+// be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the
+// mongo.Pipeline type can be used. See
+// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of
+// valid stages in aggregations.
+//
+// The opts parameter can be used to specify options for the operation (see the options.AggregateOptions documentation.)
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/.
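+//
+// A minimal sketch that groups matching documents by a field (the field and
+// status names are illustrative; coll and ctx assumed):
+//
+//	pipeline := mongo.Pipeline{
+//		{{"$match", bson.D{{"status", "active"}}}},
+//		{{"$group", bson.D{{"_id", "$team"}, {"count", bson.D{{"$sum", 1}}}}}},
+//	}
+//	cur, err := coll.Aggregate(ctx, pipeline)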
+func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{},
+	opts ...*options.AggregateOptions) (*Cursor, error) {
+	a := aggregateParams{
+		ctx:            ctx,
+		pipeline:       pipeline,
+		client:         coll.client,
+		registry:       coll.registry,
+		readConcern:    coll.readConcern,
+		writeConcern:   coll.writeConcern,
+		bsonOpts:       coll.bsonOpts,
+		retryRead:      coll.client.retryReads,
+		db:             coll.db.name,
+		col:            coll.name,
+		readSelector:   coll.readSelector,
+		writeSelector:  coll.writeSelector,
+		readPreference: coll.readPreference,
+		opts:           opts,
+	}
+	return aggregate(a)
+}
+
+// aggregate is the helper method for Aggregate
+func aggregate(a aggregateParams) (cur *Cursor, err error) {
+	if a.ctx == nil {
+		a.ctx = context.Background()
+	}
+
+	pipelineArr, hasOutputStage, err := marshalAggregatePipeline(a.pipeline, a.bsonOpts, a.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(a.ctx)
+	// Always close any created implicit sessions if aggregate returns an error.
+	defer func() {
+		if err != nil && sess != nil {
+			closeImplicitSession(sess)
+		}
+	}()
+	if sess == nil && a.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(a.client.sessionPool, a.client.id)
+	}
+	if err = a.client.validSession(sess); err != nil {
+		return nil, err
+	}
+
+	var wc *writeconcern.WriteConcern
+	if hasOutputStage {
+		wc = a.writeConcern
+	}
+	rc := a.readConcern
+	if sess.TransactionRunning() {
+		wc = nil
+		rc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		closeImplicitSession(sess)
+		sess = nil
+	}
+
+	selector := makeReadPrefSelector(sess, a.readSelector, a.client.localThreshold)
+	if hasOutputStage {
+		selector = makeOutputAggregateSelector(sess, a.readPreference, a.client.localThreshold)
+	}
+
+	ao := options.MergeAggregateOptions(a.opts...)
+
+	cursorOpts := a.client.createBaseCursorOptions()
+
+	cursorOpts.MarshalValueEncoderFn = newEncoderFn(a.bsonOpts, a.registry)
+
+	op := operation.NewAggregate(pipelineArr).
+		Session(sess).
+		WriteConcern(wc).
+		ReadConcern(rc).
+		ReadPreference(a.readPreference).
+		CommandMonitor(a.client.monitor).
+		ServerSelector(selector).
+		ClusterClock(a.client.clock).
+		Database(a.db).
+		Collection(a.col).
+		Deployment(a.client.deployment).
+		Crypt(a.client.cryptFLE).
+		ServerAPI(a.client.serverAPI).
+		HasOutputStage(hasOutputStage).
+		Timeout(a.client.timeout).
+		MaxTime(ao.MaxTime).
+		Authenticator(a.client.authenticator)
+
+	// Omit "maxTimeMS" from operations that return a user-managed cursor to
+	// prevent confusing "cursor not found" errors. To maintain existing
+	// behavior for users who set "timeoutMS" with no context deadline, only
+	// omit "maxTimeMS" when a context deadline is set.
+	//
+	// See DRIVERS-2722 for more detail.
+	_, deadlineSet := a.ctx.Deadline()
+	op.OmitCSOTMaxTimeMS(deadlineSet)
+
+	if ao.AllowDiskUse != nil {
+		op.AllowDiskUse(*ao.AllowDiskUse)
+	}
+	// ignore batchSize of 0 with $out
+	if ao.BatchSize != nil && !(*ao.BatchSize == 0 && hasOutputStage) {
+		op.BatchSize(*ao.BatchSize)
+		cursorOpts.BatchSize = *ao.BatchSize
+	}
+	if ao.BypassDocumentValidation != nil && *ao.BypassDocumentValidation {
+		op.BypassDocumentValidation(*ao.BypassDocumentValidation)
+	}
+	if ao.Collation != nil {
+		op.Collation(bsoncore.Document(ao.Collation.ToDocument()))
+	}
+	if ao.MaxAwaitTime != nil {
+		cursorOpts.MaxTimeMS = int64(*ao.MaxAwaitTime / time.Millisecond)
+	}
+	if ao.Comment != nil {
+		op.Comment(*ao.Comment)
+
+		commentVal, err := marshalValue(ao.Comment, a.bsonOpts, a.registry)
+		if err != nil {
+			return nil, err
+		}
+		cursorOpts.Comment = commentVal
+	}
+	if ao.Hint != nil {
+		if isUnorderedMap(ao.Hint) {
+			return nil, ErrMapForOrderedArgument{"hint"}
+		}
+		hintVal, err := marshalValue(ao.Hint, a.bsonOpts, a.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Hint(hintVal)
+	}
+	if ao.Let != nil {
+		let, err := marshal(ao.Let, a.bsonOpts, a.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Let(let)
+	}
+	if ao.Custom != nil {
+		// Marshal all custom options before passing them to the aggregate operation. Return
+		// any errors from marshaling.
+		customOptions := make(map[string]bsoncore.Value)
+		for optionName, optionValue := range ao.Custom {
+			bsonType, bsonData, err := bson.MarshalValueWithRegistry(a.registry, optionValue)
+			if err != nil {
+				return nil, err
+			}
+			optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData}
+			customOptions[optionName] = optionValueBSON
+		}
+		op.CustomOptions(customOptions)
+	}
+
+	retry := driver.RetryNone
+	if a.retryRead && !hasOutputStage {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err = op.Execute(a.ctx)
+	if err != nil {
+		if wce, ok := err.(driver.WriteCommandError); ok && wce.WriteConcernError != nil {
+			return nil, *convertDriverWriteConcernError(wce.WriteConcernError)
+		}
+		return nil, replaceErrors(err)
+	}
+
+	bc, err := op.Result(cursorOpts)
+	if err != nil {
+		return nil, replaceErrors(err)
+	}
+	cursor, err := newCursorWithSession(bc, a.client.bsonOpts, a.registry, sess)
+	return cursor, replaceErrors(err)
+}
+
+// CountDocuments returns the number of documents in the collection. For a fast count of the documents in the
+// collection, see the EstimatedDocumentCount method.
+//
+// The filter parameter must be a document and can be used to select which documents contribute to the count. It
+// cannot be nil. An empty document (e.g. bson.D{}) should be used to count all documents in the collection. This will
+// result in a full collection scan.
+//
+// The opts parameter can be used to specify options for the operation (see the options.CountOptions documentation).
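+//
+// A minimal sketch (coll and ctx assumed; the limit is illustrative):
+//
+//	n, err := coll.CountDocuments(ctx, bson.D{{"status", "active"}},
+//		options.Count().SetLimit(100))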
+func (coll *Collection) CountDocuments(ctx context.Context, filter interface{},
+	opts ...*options.CountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	countOpts := options.MergeCountOptions(opts...)
+
+	pipelineArr, err := countDocumentsAggregatePipeline(filter, coll.bsonOpts, coll.registry, countOpts)
+	if err != nil {
+		return 0, err
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+	if err = coll.client.validSession(sess); err != nil {
+		return 0, err
+	}
+
+	rc := coll.readConcern
+	if sess.TransactionRunning() {
+		rc = nil
+	}
+
+	selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold)
+	op := operation.NewAggregate(pipelineArr).Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference).
+		CommandMonitor(coll.client.monitor).ServerSelector(selector).ClusterClock(coll.client.clock).Database(coll.db.name).
+		Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI).
+		Timeout(coll.client.timeout).MaxTime(countOpts.MaxTime).Authenticator(coll.client.authenticator)
+	if countOpts.Collation != nil {
+		op.Collation(bsoncore.Document(countOpts.Collation.ToDocument()))
+	}
+	if countOpts.Comment != nil {
+		op.Comment(*countOpts.Comment)
+	}
+	if countOpts.Hint != nil {
+		if isUnorderedMap(countOpts.Hint) {
+			return 0, ErrMapForOrderedArgument{"hint"}
+		}
+		hintVal, err := marshalValue(countOpts.Hint, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return 0, err
+		}
+		op.Hint(hintVal)
+	}
+	retry := driver.RetryNone
+	if coll.client.retryReads {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err = op.Execute(ctx)
+	if err != nil {
+		return 0, replaceErrors(err)
+	}
+
+	batch := op.ResultCursorResponse().FirstBatch
+	if batch == nil {
+		return 0, errors.New("invalid response from server, no 'firstBatch' field")
+	}
+
+	docs, err := batch.Documents()
+	if err != nil || len(docs) == 0 {
+		return 0, nil
+	}
+
+	val, ok := docs[0].Lookup("n").AsInt64OK()
+	if !ok {
+		return 0, errors.New("invalid response from server, no 'n' field")
+	}
+
+	return val, nil
+}
+
+// EstimatedDocumentCount executes a count command and returns an estimate of the number of documents in the collection
+// using collection metadata.
+//
+// The opts parameter can be used to specify options for the operation (see the options.EstimatedDocumentCountOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/count/.
+func (coll *Collection) EstimatedDocumentCount(ctx context.Context,
+	opts ...*options.EstimatedDocumentCountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	var err error
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = coll.client.validSession(sess)
+	if err != nil {
+		return 0, err
+	}
+
+	rc := coll.readConcern
+	if sess.TransactionRunning() {
+		rc = nil
+	}
+
+	co := options.MergeEstimatedDocumentCountOptions(opts...)
+
+	selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold)
+	op := operation.NewCount().Session(sess).ClusterClock(coll.client.clock).
+		Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor).
+		Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference).
+		ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI).
+		Timeout(coll.client.timeout).MaxTime(co.MaxTime).Authenticator(coll.client.authenticator)
+
+	if co.Comment != nil {
+		comment, err := marshalValue(co.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return 0, err
+		}
+		op = op.Comment(comment)
+	}
+
+	retry := driver.RetryNone
+	if coll.client.retryReads {
+		retry = driver.RetryOncePerCommand
+	}
+	op.Retry(retry)
+
+	err = op.Execute(ctx)
+	return op.Result().N, replaceErrors(err)
+}
+
+// Distinct executes a distinct command to find the unique values for a specified field in the collection.
+//
+// The fieldName parameter specifies the field name for which distinct values should be returned.
+//
+// The filter parameter must be a document containing query operators and can be used to select which documents are
+// considered. It cannot be nil. An empty document (e.g. bson.D{}) should be used to select all documents.
+//
+// The opts parameter can be used to specify options for the operation (see the options.DistinctOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/distinct/.
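+//
+// A minimal sketch (coll, ctx, and the field name are assumed):
+//
+//	values, err := coll.Distinct(ctx, "team", bson.D{{"status", "active"}})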
+func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{},
+	opts ...*options.DistinctOptions) ([]interface{}, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	rc := coll.readConcern
+	if sess.TransactionRunning() {
+		rc = nil
+	}
+
+	selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold)
+	option := options.MergeDistinctOptions(opts...)
+
+	op := operation.NewDistinct(fieldName, f).
+		Session(sess).ClusterClock(coll.client.clock).
+		Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor).
+		Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference).
+		ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI).
+		Timeout(coll.client.timeout).MaxTime(option.MaxTime).Authenticator(coll.client.authenticator)
+
+	if option.Collation != nil {
+		op.Collation(bsoncore.Document(option.Collation.ToDocument()))
+	}
+	if option.Comment != nil {
+		comment, err := marshalValue(option.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Comment(comment)
+	}
+	retry := driver.RetryNone
+	if coll.client.retryReads {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err = op.Execute(ctx)
+	if err != nil {
+		return nil, replaceErrors(err)
+	}
+
+	arr, ok := op.Result().Values.ArrayOK()
+	if !ok {
+		return nil, fmt.Errorf("response field 'values' is type array, but received BSON type %s", op.Result().Values.Type)
+	}
+
+	values, err := arr.Values()
+	if err != nil {
+		return nil, err
+	}
+
+	retArray := make([]interface{}, len(values))
+
+	for i, val := range values {
+		raw := bson.RawValue{Type: val.Type, Value: val.Data}
+		err = raw.Unmarshal(&retArray[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return retArray, replaceErrors(err)
+}
+
+// Find executes a find command and returns a Cursor over the matching documents in the collection.
+//
+// The filter parameter must be a document containing query operators and can be used to select which documents are
+// included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include all documents.
+//
+// The opts parameter can be used to specify options for the operation (see the options.FindOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/.
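+//
+// A minimal sketch that iterates the cursor, assuming coll, ctx, and an
+// enclosing function that returns error:
+//
+//	cur, err := coll.Find(ctx,
+//		bson.D{{"age", bson.D{{"$gte", 18}}}},
+//		options.Find().SetSort(bson.D{{"age", 1}}).SetLimit(10))
+//	if err != nil {
+//		return err
+//	}
+//	defer cur.Close(ctx)
+//	for cur.Next(ctx) {
+//		var doc bson.M
+//		if err := cur.Decode(&doc); err != nil {
+//			return err
+//		}
+//	}
+//	return cur.Err()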
+func (coll *Collection) Find(ctx context.Context, filter interface{},
+	opts ...*options.FindOptions) (cur *Cursor, err error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// Omit "maxTimeMS" from operations that return a user-managed cursor to
+	// prevent confusing "cursor not found" errors. To maintain existing
+	// behavior for users who set "timeoutMS" with no context deadline, only
+	// omit "maxTimeMS" when a context deadline is set.
+	//
+	// See DRIVERS-2722 for more detail.
+	_, deadlineSet := ctx.Deadline()
+	return coll.find(ctx, filter, deadlineSet, opts...)
+}
+
+func (coll *Collection) find(
+	ctx context.Context,
+	filter interface{},
+	omitCSOTMaxTimeMS bool,
+	opts ...*options.FindOptions,
+) (cur *Cursor, err error) {
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+	// Always close any created implicit sessions if Find returns an error.
+	defer func() {
+		if err != nil && sess != nil {
+			closeImplicitSession(sess)
+		}
+	}()
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+	}
+
+	err = coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	rc := coll.readConcern
+	if sess.TransactionRunning() {
+		rc = nil
+	}
+
+	fo := options.MergeFindOptions(opts...)
+
+	selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold)
+	op := operation.NewFind(f).
+		Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference).
+		CommandMonitor(coll.client.monitor).ServerSelector(selector).
+		ClusterClock(coll.client.clock).Database(coll.db.name).Collection(coll.name).
+		Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI).
+		Timeout(coll.client.timeout).MaxTime(fo.MaxTime).Logger(coll.client.logger).
+		OmitCSOTMaxTimeMS(omitCSOTMaxTimeMS).Authenticator(coll.client.authenticator)
+
+	cursorOpts := coll.client.createBaseCursorOptions()
+
+	cursorOpts.MarshalValueEncoderFn = newEncoderFn(coll.bsonOpts, coll.registry)
+
+	if fo.AllowDiskUse != nil {
+		op.AllowDiskUse(*fo.AllowDiskUse)
+	}
+	if fo.AllowPartialResults != nil {
+		op.AllowPartialResults(*fo.AllowPartialResults)
+	}
+	if fo.BatchSize != nil {
+		cursorOpts.BatchSize = *fo.BatchSize
+		op.BatchSize(*fo.BatchSize)
+	}
+	if fo.Collation != nil {
+		op.Collation(bsoncore.Document(fo.Collation.ToDocument()))
+	}
+	if fo.Comment != nil {
+		op.Comment(*fo.Comment)
+
+		commentVal, err := marshalValue(fo.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		cursorOpts.Comment = commentVal
+	}
+	if fo.CursorType != nil {
+		switch *fo.CursorType {
+		case options.Tailable:
+			op.Tailable(true)
+		case options.TailableAwait:
+			op.Tailable(true)
+			op.AwaitData(true)
+		}
+	}
+	if fo.Hint != nil {
+		if isUnorderedMap(fo.Hint) {
+			return nil, ErrMapForOrderedArgument{"hint"}
+		}
+		hint, err := marshalValue(fo.Hint, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Hint(hint)
+	}
+	if fo.Let != nil {
+		let, err := marshal(fo.Let, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Let(let)
+	}
+	if fo.Limit != nil {
+		limit := *fo.Limit
+		if limit < 0 {
+			limit = -1 * limit
+			op.SingleBatch(true)
+		}
+		cursorOpts.Limit = int32(limit)
+		op.Limit(limit)
+	}
+	if fo.Max != nil {
+		max, err := marshal(fo.Max, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Max(max)
+	}
+	if fo.MaxAwaitTime != nil {
+		cursorOpts.MaxTimeMS = int64(*fo.MaxAwaitTime / time.Millisecond)
+	}
+	if fo.Min != nil {
+		min, err := marshal(fo.Min, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Min(min)
+	}
+	if fo.NoCursorTimeout != nil {
+		op.NoCursorTimeout(*fo.NoCursorTimeout)
+	}
+	if fo.OplogReplay != nil {
+		op.OplogReplay(*fo.OplogReplay)
+	}
+	if fo.Projection != nil {
+		proj, err := marshal(fo.Projection, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Projection(proj)
+	}
+	if fo.ReturnKey != nil {
+		op.ReturnKey(*fo.ReturnKey)
+	}
+	if fo.ShowRecordID != nil {
+		op.ShowRecordID(*fo.ShowRecordID)
+	}
+	if fo.Skip != nil {
+		op.Skip(*fo.Skip)
+	}
+	if fo.Snapshot != nil {
+		op.Snapshot(*fo.Snapshot)
+	}
+	if fo.Sort != nil {
+		if isUnorderedMap(fo.Sort) {
+			return nil, ErrMapForOrderedArgument{"sort"}
+		}
+		sort, err := marshal(fo.Sort, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Sort(sort)
+	}
+	retry := driver.RetryNone
+	if coll.client.retryReads {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	if err = op.Execute(ctx); err != nil {
+		return nil, replaceErrors(err)
+	}
+
+	bc, err := op.Result(cursorOpts)
+	if err != nil {
+		return nil, replaceErrors(err)
+	}
+	return newCursorWithSession(bc, coll.bsonOpts, coll.registry, sess)
+}
+
+// FindOne executes a find command and returns a SingleResult for one document in the collection.
+//
+// The filter parameter must be a document containing query operators and can be used to select the document to be
+// returned. It cannot be nil. If the filter does not match any documents, a SingleResult with an error set to
+// ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set.
+//
+// The opts parameter can be used to specify options for this operation (see the options.FindOneOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/.
+func (coll *Collection) FindOne(ctx context.Context, filter interface{},
+	opts ...*options.FindOneOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	findOpts := make([]*options.FindOptions, 0, len(opts))
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		findOpts = append(findOpts, &options.FindOptions{
+			AllowPartialResults: opt.AllowPartialResults,
+			BatchSize:           opt.BatchSize,
+			Collation:           opt.Collation,
+			Comment:             opt.Comment,
+			CursorType:          opt.CursorType,
+			Hint:                opt.Hint,
+			Max:                 opt.Max,
+			MaxAwaitTime:        opt.MaxAwaitTime,
+			MaxTime:             opt.MaxTime,
+			Min:                 opt.Min,
+			NoCursorTimeout:     opt.NoCursorTimeout,
+			OplogReplay:         opt.OplogReplay,
+			Projection:          opt.Projection,
+			ReturnKey:           opt.ReturnKey,
+			ShowRecordID:        opt.ShowRecordID,
+			Skip:                opt.Skip,
+			Snapshot:            opt.Snapshot,
+			Sort:                opt.Sort,
+		})
+	}
+	// Unconditionally send a limit to make sure only one document is returned and the cursor is not kept open
+	// by the server.
+	findOpts = append(findOpts, options.Find().SetLimit(-1))
+
+	cursor, err := coll.find(ctx, filter, false, findOpts...)
+	return &SingleResult{
+		ctx:      ctx,
+		cur:      cursor,
+		bsonOpts: coll.bsonOpts,
+		reg:      coll.registry,
+		err:      replaceErrors(err),
+	}
+}
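+
+// Editor's note: an illustrative FindOne sketch (not upstream code). It
+// assumes ctx, coll, and a hypothetical User struct; ErrNoDocuments signals
+// an empty result rather than a failure:
+//
+//	var u User
+//	err := coll.FindOne(ctx, bson.D{{Key: "email", Value: "a@example.com"}}).Decode(&u)
+//	if errors.Is(err, mongo.ErrNoDocuments) {
+//		// no matching document
+//	} else if err != nil {
+//		return err
+//	}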
+
+func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAndModify) *SingleResult {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	var err error
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = coll.client.validSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	wc := coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, coll.writeSelector)
+
+	retry := driver.RetryNone
+	if coll.client.retryWrites {
+		retry = driver.RetryOnce
+	}
+
+	op = op.Session(sess).
+		WriteConcern(wc).
+		CommandMonitor(coll.client.monitor).
+		ServerSelector(selector).
+		ClusterClock(coll.client.clock).
+		Database(coll.db.name).
+		Collection(coll.name).
+		Deployment(coll.client.deployment).
+		Retry(retry).
+		Crypt(coll.client.cryptFLE)
+
+	_, err = processWriteError(op.Execute(ctx))
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	return &SingleResult{
+		ctx:      ctx,
+		rdr:      bson.Raw(op.Result().Value),
+		bsonOpts: coll.bsonOpts,
+		reg:      coll.registry,
+	}
+}
+
+// FindOneAndDelete executes a findAndModify command to delete at most one document in the collection and returns the
+// document as it appeared before deletion.
+//
+// The filter parameter must be a document containing query operators and can be used to select the document to be
+// deleted. It cannot be nil. If the filter does not match any documents, a SingleResult with an error set to
+// ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set.
+//
+// The opts parameter can be used to specify options for the operation (see the options.FindOneAndDeleteOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/.
+func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{},
+	opts ...*options.FindOneAndDeleteOptions) *SingleResult {
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+	fod := options.MergeFindOneAndDeleteOptions(opts...)
+	op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).
+		MaxTime(fod.MaxTime).Authenticator(coll.client.authenticator)
+	if fod.Collation != nil {
+		op = op.Collation(bsoncore.Document(fod.Collation.ToDocument()))
+	}
+	if fod.Comment != nil {
+		comment, err := marshalValue(fod.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Comment(comment)
+	}
+	if fod.Projection != nil {
+		proj, err := marshal(fod.Projection, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Fields(proj)
+	}
+	if fod.Sort != nil {
+		if isUnorderedMap(fod.Sort) {
+			return &SingleResult{err: ErrMapForOrderedArgument{"sort"}}
+		}
+		sort, err := marshal(fod.Sort, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Sort(sort)
+	}
+	if fod.Hint != nil {
+		if isUnorderedMap(fod.Hint) {
+			return &SingleResult{err: ErrMapForOrderedArgument{"hint"}}
+		}
+		hint, err := marshalValue(fod.Hint, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Hint(hint)
+	}
+	if fod.Let != nil {
+		let, err := marshal(fod.Let, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Let(let)
+	}
+
+	return coll.findAndModify(ctx, op)
+}
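+
+// Editor's note: an illustrative sketch (not upstream code) that deletes the
+// oldest matching document and decodes the pre-deletion version, assuming ctx
+// and coll with a "createdAt" field:
+//
+//	var deleted bson.M
+//	err := coll.FindOneAndDelete(ctx,
+//		bson.D{{Key: "status", Value: "expired"}},
+//		options.FindOneAndDelete().SetSort(bson.D{{Key: "createdAt", Value: 1}}),
+//	).Decode(&deleted)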
+
+// FindOneAndReplace executes a findAndModify command to replace at most one document in the collection
+// and returns the document as it appeared before replacement.
+//
+// The filter parameter must be a document containing query operators and can be used to select the document to be
+// replaced. It cannot be nil. If the filter does not match any documents, a SingleResult with an error set to
+// ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set.
+//
+// The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil
+// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/).
+//
+// The opts parameter can be used to specify options for the operation (see the options.FindOneAndReplaceOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/.
+func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{},
+	replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult {
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+	r, err := marshal(replacement, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+	if firstElem, err := r.IndexErr(0); err == nil && strings.HasPrefix(firstElem.Key(), "$") {
+		return &SingleResult{err: errors.New("replacement document cannot contain keys beginning with '$'")}
+	}
+
+	fo := options.MergeFindOneAndReplaceOptions(opts...)
+	op := operation.NewFindAndModify(f).Update(bsoncore.Value{Type: bsontype.EmbeddedDocument, Data: r}).
+		ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).MaxTime(fo.MaxTime).Authenticator(coll.client.authenticator)
+
+	if fo.BypassDocumentValidation != nil && *fo.BypassDocumentValidation {
+		op = op.BypassDocumentValidation(*fo.BypassDocumentValidation)
+	}
+	if fo.Collation != nil {
+		op = op.Collation(bsoncore.Document(fo.Collation.ToDocument()))
+	}
+	if fo.Comment != nil {
+		comment, err := marshalValue(fo.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Comment(comment)
+	}
+	if fo.Projection != nil {
+		proj, err := marshal(fo.Projection, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Fields(proj)
+	}
+	if fo.ReturnDocument != nil {
+		op = op.NewDocument(*fo.ReturnDocument == options.After)
+	}
+	if fo.Sort != nil {
+		if isUnorderedMap(fo.Sort) {
+			return &SingleResult{err: ErrMapForOrderedArgument{"sort"}}
+		}
+		sort, err := marshal(fo.Sort, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Sort(sort)
+	}
+	if fo.Upsert != nil {
+		op = op.Upsert(*fo.Upsert)
+	}
+	if fo.Hint != nil {
+		if isUnorderedMap(fo.Hint) {
+			return &SingleResult{err: ErrMapForOrderedArgument{"hint"}}
+		}
+		hint, err := marshalValue(fo.Hint, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Hint(hint)
+	}
+	if fo.Let != nil {
+		let, err := marshal(fo.Let, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Let(let)
+	}
+
+	return coll.findAndModify(ctx, op)
+}
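+
+// Editor's note: an illustrative sketch (not upstream code); id is a
+// hypothetical placeholder value. SetReturnDocument(options.After) requests
+// the post-replacement document instead of the default pre-replacement one:
+//
+//	var after bson.M
+//	err := coll.FindOneAndReplace(ctx,
+//		bson.D{{Key: "_id", Value: id}},
+//		bson.D{{Key: "name", Value: "replacement"}},
+//		options.FindOneAndReplace().SetReturnDocument(options.After),
+//	).Decode(&after)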
+
+// FindOneAndUpdate executes a findAndModify command to update at most one document in the collection and returns the
+// document as it appeared before updating.
+//
+// The filter parameter must be a document containing query operators and can be used to select the document to be
+// updated. It cannot be nil. If the filter does not match any documents, a SingleResult with an error set to
+// ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set.
+//
+// The update parameter must be a document containing update operators
+// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made
+// to the selected document. It cannot be nil or empty.
+//
+// The opts parameter can be used to specify options for the operation (see the options.FindOneAndUpdateOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/.
+func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{},
+	update interface{}, opts ...*options.FindOneAndUpdateOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := marshal(filter, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	fo := options.MergeFindOneAndUpdateOptions(opts...)
+	op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).
+		MaxTime(fo.MaxTime).Authenticator(coll.client.authenticator)
+
+	u, err := marshalUpdateValue(update, coll.bsonOpts, coll.registry, true)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+	op = op.Update(u)
+
+	if fo.ArrayFilters != nil {
+		af := fo.ArrayFilters
+		reg := coll.registry
+		if af.Registry != nil {
+			reg = af.Registry
+		}
+		filtersDoc, err := marshalValue(af.Filters, coll.bsonOpts, reg)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.ArrayFilters(filtersDoc.Data)
+	}
+	if fo.BypassDocumentValidation != nil && *fo.BypassDocumentValidation {
+		op = op.BypassDocumentValidation(*fo.BypassDocumentValidation)
+	}
+	if fo.Collation != nil {
+		op = op.Collation(bsoncore.Document(fo.Collation.ToDocument()))
+	}
+	if fo.Comment != nil {
+		comment, err := marshalValue(fo.Comment, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Comment(comment)
+	}
+	if fo.Projection != nil {
+		proj, err := marshal(fo.Projection, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Fields(proj)
+	}
+	if fo.ReturnDocument != nil {
+		op = op.NewDocument(*fo.ReturnDocument == options.After)
+	}
+	if fo.Sort != nil {
+		if isUnorderedMap(fo.Sort) {
+			return &SingleResult{err: ErrMapForOrderedArgument{"sort"}}
+		}
+		sort, err := marshal(fo.Sort, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Sort(sort)
+	}
+	if fo.Upsert != nil {
+		op = op.Upsert(*fo.Upsert)
+	}
+	if fo.Hint != nil {
+		if isUnorderedMap(fo.Hint) {
+			return &SingleResult{err: ErrMapForOrderedArgument{"hint"}}
+		}
+		hint, err := marshalValue(fo.Hint, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Hint(hint)
+	}
+	if fo.Let != nil {
+		let, err := marshal(fo.Let, coll.bsonOpts, coll.registry)
+		if err != nil {
+			return &SingleResult{err: err}
+		}
+		op = op.Let(let)
+	}
+
+	return coll.findAndModify(ctx, op)
+}
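+
+// Editor's note: an illustrative sketch (not upstream code); userID is a
+// hypothetical placeholder. The update document must use update operators
+// such as $set, and upsert inserts a document when no document matches:
+//
+//	var updated bson.M
+//	err := coll.FindOneAndUpdate(ctx,
+//		bson.D{{Key: "_id", Value: userID}},
+//		bson.D{{Key: "$set", Value: bson.D{{Key: "active", Value: true}}}},
+//		options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After),
+//	).Decode(&updated)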
+
+// Watch returns a change stream for all changes on the corresponding collection. See
+// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams.
+//
+// The Collection must be configured with read concern majority or no read concern for a change stream to be created
+// successfully.
+//
+// The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be
+// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for
+// a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the
+// mongo.Pipeline{} type can be used.
+//
+// The opts parameter can be used to specify options for change stream creation (see the options.ChangeStreamOptions
+// documentation).
+func (coll *Collection) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	csConfig := changeStreamConfig{
+		readConcern:    coll.readConcern,
+		readPreference: coll.readPreference,
+		client:         coll.client,
+		bsonOpts:       coll.bsonOpts,
+		registry:       coll.registry,
+		streamType:     CollectionStream,
+		collectionName: coll.Name(),
+		databaseName:   coll.db.Name(),
+		crypt:          coll.client.cryptFLE,
+	}
+	return newChangeStream(ctx, csConfig, pipeline, opts...)
+}
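+
+// Editor's note: an illustrative change stream sketch (not upstream code),
+// assuming ctx and coll on a deployment that supports change streams (e.g. a
+// replica set). The $match stage narrows the stream to insert events:
+//
+//	cs, err := coll.Watch(ctx, mongo.Pipeline{
+//		{{Key: "$match", Value: bson.D{{Key: "operationType", Value: "insert"}}}},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer cs.Close(ctx)
+//	for cs.Next(ctx) {
+//		var event bson.M
+//		if err := cs.Decode(&event); err != nil {
+//			return err
+//		}
+//	}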
+
+// Indexes returns an IndexView instance that can be used to perform operations on the indexes for the collection.
+func (coll *Collection) Indexes() IndexView {
+	return IndexView{coll: coll}
+}
+
+// SearchIndexes returns a SearchIndexView instance that can be used to perform operations on the search indexes for the collection.
+func (coll *Collection) SearchIndexes() SearchIndexView {
+	c, _ := coll.Clone() // Clone() always returns a nil error.
+	c.readConcern = nil
+	c.writeConcern = nil
+	return SearchIndexView{
+		coll: c,
+	}
+}
+
+// Drop drops the collection on the server. This method ignores "namespace not found" errors so it is safe to drop
+// a collection that does not exist on the server.
+func (coll *Collection) Drop(ctx context.Context) error {
+	// Follow Client-Side Encryption specification to check for encryptedFields.
+	// Drop does not have an encryptedFields option. See: GODRIVER-2413.
+	// Check for encryptedFields from the client EncryptedFieldsMap.
+	// Check for encryptedFields from the server if EncryptedFieldsMap is set.
+	ef := coll.db.getEncryptedFieldsFromMap(coll.name)
+	if ef == nil && coll.db.client.encryptedFieldsMap != nil {
+		var err error
+		if ef, err = coll.db.getEncryptedFieldsFromServer(ctx, coll.name); err != nil {
+			return err
+		}
+	}
+
+	if ef != nil {
+		return coll.dropEncryptedCollection(ctx, ef)
+	}
+
+	return coll.drop(ctx)
+}
+
+// dropEncryptedCollection drops a collection with EncryptedFields.
+func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interface{}) error {
+	efBSON, err := marshal(ef, coll.bsonOpts, coll.registry)
+	if err != nil {
+		return fmt.Errorf("error transforming document: %w", err)
+	}
+
+	// Drop the two associated encryption-related collections: `escCollection` and `ecocCollection`.
+	// Drop ESCCollection.
+	escCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, coll.name, csfle.EncryptedStateCollection)
+	if err != nil {
+		return err
+	}
+	if err := coll.db.Collection(escCollection).drop(ctx); err != nil {
+		return err
+	}
+
+	// Drop ECOCCollection.
+	ecocCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, coll.name, csfle.EncryptedCompactionCollection)
+	if err != nil {
+		return err
+	}
+	if err := coll.db.Collection(ecocCollection).drop(ctx); err != nil {
+		return err
+	}
+
+	// Drop the data collection.
+	return coll.drop(ctx)
+}
+
+// drop drops a collection without EncryptedFields.
+func (coll *Collection) drop(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(coll.client.sessionPool, coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err := coll.client.validSession(sess)
+	if err != nil {
+		return err
+	}
+
+	wc := coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, coll.writeSelector)
+
+	op := operation.NewDropCollection().
+		Session(sess).WriteConcern(wc).CommandMonitor(coll.client.monitor).
+		ServerSelector(selector).ClusterClock(coll.client.clock).
+		Database(coll.db.name).Collection(coll.name).
+		Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).
+		ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).
+		Authenticator(coll.client.authenticator)
+	err = op.Execute(ctx)
+
+	// ignore namespace not found errors
+	driverErr, ok := err.(driver.Error)
+	if !ok || !driverErr.NamespaceNotFound() {
+		return replaceErrors(err)
+	}
+	return nil
+}
+
+type pinnedServerSelector struct {
+	stringer fmt.Stringer
+	fallback description.ServerSelector
+	session  *session.Client
+}
+
+func (pss pinnedServerSelector) String() string {
+	if pss.stringer == nil {
+		return ""
+	}
+
+	return pss.stringer.String()
+}
+
+func (pss pinnedServerSelector) SelectServer(
+	t description.Topology,
+	svrs []description.Server,
+) ([]description.Server, error) {
+	if pss.session != nil && pss.session.PinnedServer != nil {
+		// If there is a pinned server, try to find it in the list of candidates.
+		for _, candidate := range svrs {
+			if candidate.Addr == pss.session.PinnedServer.Addr {
+				return []description.Server{candidate}, nil
+			}
+		}
+
+		return nil, nil
+	}
+
+	return pss.fallback.SelectServer(t, svrs)
+}
+
+func makePinnedSelector(sess *session.Client, fallback description.ServerSelector) description.ServerSelector {
+	pss := pinnedServerSelector{
+		session:  sess,
+		fallback: fallback,
+	}
+
+	if srvSelectorStringer, ok := fallback.(fmt.Stringer); ok {
+		pss.stringer = srvSelectorStringer
+	}
+
+	return pss
+}
+
+func makeReadPrefSelector(sess *session.Client, selector description.ServerSelector, localThreshold time.Duration) description.ServerSelector {
+	if sess != nil && sess.TransactionRunning() {
+		selector = description.CompositeSelector([]description.ServerSelector{
+			description.ReadPrefSelector(sess.CurrentRp),
+			description.LatencySelector(localThreshold),
+		})
+	}
+
+	return makePinnedSelector(sess, selector)
+}
+
+func makeOutputAggregateSelector(sess *session.Client, rp *readpref.ReadPref, localThreshold time.Duration) description.ServerSelector {
+	if sess != nil && sess.TransactionRunning() {
+		// Use current transaction's read preference if available
+		rp = sess.CurrentRp
+	}
+
+	selector := description.CompositeSelector([]description.ServerSelector{
+		description.OutputAggregateSelector(rp),
+		description.LatencySelector(localThreshold),
+	})
+	return makePinnedSelector(sess, selector)
+}
+
+// isUnorderedMap returns true if val is a map with more than 1 element. It is typically used to
+// check for unordered Go values that are used in nested command documents where different field
+// orders mean different things. Examples are the "sort" and "hint" fields.
+func isUnorderedMap(val interface{}) bool {
+	refValue := reflect.ValueOf(val)
+	return refValue.Kind() == reflect.Map && refValue.Len() > 1
+}
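+
+// Editor's note: an illustrative consequence of isUnorderedMap (not upstream
+// code). Multi-key Go maps have no defined field order, so order-sensitive
+// arguments like sort must use bson.D; the map form fails, assuming ctx and
+// coll:
+//
+//	// Fails with ErrMapForOrderedArgument{"sort"}: map field order is undefined.
+//	_, err := coll.Find(ctx, bson.D{}, options.Find().SetSort(bson.M{"a": 1, "b": -1}))
+//
+//	// Succeeds: bson.D preserves the order of the sort keys.
+//	_, err = coll.Find(ctx, bson.D{}, options.Find().SetSort(bson.D{{Key: "a", Value: 1}, {Key: "b", Value: -1}}))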
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go b/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go
new file mode 100644
index 0000000000000000000000000000000000000000..5e96da731a13fd66a1091f9fa668bfd21db4dbe5
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// keyRetriever gets keys from the key vault collection.
+type keyRetriever struct {
+	coll *Collection
+}
+
+func (kr *keyRetriever) cryptKeys(ctx context.Context, filter bsoncore.Document) ([]bsoncore.Document, error) {
+	// Remove the explicit session from the context if one is set.
+	// The explicit session may be from a different client.
+	ctx = NewSessionContext(ctx, nil)
+	cursor, err := kr.coll.Find(ctx, filter)
+	if err != nil {
+		return nil, EncryptionKeyVaultError{Wrapped: err}
+	}
+	defer cursor.Close(ctx)
+
+	var results []bsoncore.Document
+	for cursor.Next(ctx) {
+		cur := make([]byte, len(cursor.Current))
+		copy(cur, cursor.Current)
+		results = append(results, cur)
+	}
+	if err = cursor.Err(); err != nil {
+		return nil, EncryptionKeyVaultError{Wrapped: err}
+	}
+
+	return results, nil
+}
+
+// collInfoRetriever gets info for collections from a database.
+type collInfoRetriever struct {
+	client *Client
+}
+
+func (cir *collInfoRetriever) cryptCollInfo(ctx context.Context, db string, filter bsoncore.Document) (bsoncore.Document, error) {
+	// Remove the explicit session from the context if one is set.
+	// The explicit session may be from a different client.
+	ctx = NewSessionContext(ctx, nil)
+	cursor, err := cir.client.Database(db).ListCollections(ctx, filter)
+	if err != nil {
+		return nil, err
+	}
+	defer cursor.Close(ctx)
+
+	if !cursor.Next(ctx) {
+		return nil, cursor.Err()
+	}
+
+	res := make([]byte, len(cursor.Current))
+	copy(res, cursor.Current)
+	return res, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e01e398da3abb9d3906f0536f3158428ff5567b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go
@@ -0,0 +1,418 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Cursor is used to iterate over a stream of documents. Each document can be decoded into a Go type via the Decode
+// method or accessed as raw BSON via the Current field. This type is not goroutine safe and must not be used
+// concurrently by multiple goroutines.
+type Cursor struct {
+	// Current contains the BSON bytes of the current change document. This property is only valid until the next call
+	// to Next or TryNext. If continued access is required, a copy must be made.
+	Current bson.Raw
+
+	bc            batchCursor
+	batch         *bsoncore.DocumentSequence
+	batchLength   int
+	bsonOpts      *options.BSONOptions
+	registry      *bsoncodec.Registry
+	clientSession *session.Client
+
+	err error
+}
+
+func newCursor(
+	bc batchCursor,
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+) (*Cursor, error) {
+	return newCursorWithSession(bc, bsonOpts, registry, nil)
+}
+
+func newCursorWithSession(
+	bc batchCursor,
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+	clientSession *session.Client,
+) (*Cursor, error) {
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+	if bc == nil {
+		return nil, errors.New("batch cursor must not be nil")
+	}
+	c := &Cursor{
+		bc:            bc,
+		bsonOpts:      bsonOpts,
+		registry:      registry,
+		clientSession: clientSession,
+	}
+	if bc.ID() == 0 {
+		c.closeImplicitSession()
+	}
+
+	// Initialize just the batchLength here so RemainingBatchLength will return an accurate result. The actual batch
+	// will be pulled up by the first Next/TryNext call.
+	c.batchLength = c.bc.Batch().DocumentCount()
+	return c, nil
+}
+
+func newEmptyCursor() *Cursor {
+	return &Cursor{bc: driver.NewEmptyBatchCursor()}
+}
+
+// NewCursorFromDocuments creates a new Cursor pre-loaded with the provided documents, error and registry. If no registry is provided,
+// bson.DefaultRegistry will be used.
+//
+// The documents parameter must be a slice of documents. The slice may be nil or empty, but all elements must be non-nil.
+func NewCursorFromDocuments(documents []interface{}, err error, registry *bsoncodec.Registry) (*Cursor, error) {
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+
+	// Convert documents slice to a sequence-style byte array.
+	var docsBytes []byte
+	for _, doc := range documents {
+		switch t := doc.(type) {
+		case nil:
+			return nil, ErrNilDocument
+		case []byte:
+			// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+			doc = bson.Raw(t)
+		}
+		var marshalErr error
+		docsBytes, marshalErr = bson.MarshalAppendWithRegistry(registry, docsBytes, doc)
+		if marshalErr != nil {
+			return nil, marshalErr
+		}
+	}
+
+	c := &Cursor{
+		bc:       driver.NewBatchCursorFromDocuments(docsBytes),
+		registry: registry,
+		err:      err,
+	}
+
+	// Initialize batch and batchLength here. The underlying batch cursor will be preloaded with the
+	// provided contents, and thus already has a batch before calls to Next/TryNext.
+	c.batch = c.bc.Batch()
+	c.batchLength = c.bc.Batch().DocumentCount()
+	return c, nil
+}
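+
+// Editor's note: an illustrative sketch (not upstream code); pre-loaded
+// cursors are handy for tests and mocks that must return a *Cursor without a
+// server round trip:
+//
+//	cur, err := NewCursorFromDocuments([]interface{}{
+//		bson.D{{Key: "x", Value: 1}},
+//		bson.D{{Key: "x", Value: 2}},
+//	}, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	for cur.Next(context.Background()) {
+//		// cur.Current holds the raw BSON of each pre-loaded document.
+//	}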
+
+// ID returns the ID of this cursor, or 0 if the cursor has been closed or exhausted.
+func (c *Cursor) ID() int64 { return c.bc.ID() }
+
+// Next gets the next document for this cursor. It returns true if there were no errors and the cursor has not been
+// exhausted.
+//
+// Next blocks until a document is available or an error occurs. If the context expires, the cursor's error will
+// be set to ctx.Err(). In case of an error, Next will return false.
+//
+// If Next returns false, subsequent calls will also return false.
+func (c *Cursor) Next(ctx context.Context) bool {
+	return c.next(ctx, false)
+}
+
+// TryNext attempts to get the next document for this cursor. It returns true if there were no errors and the next
+// document is available. This is only recommended for use with tailable cursors as a non-blocking alternative to
+// Next. See https://www.mongodb.com/docs/manual/core/tailable-cursors/ for more information about tailable cursors.
+//
+// TryNext returns false if the cursor is exhausted, an error occurs when getting results from the server, the next
+// document is not yet available, or ctx expires. If the context expires, the cursor's error will be set to ctx.Err().
+//
+// If TryNext returns false and an error occurred or the cursor has been exhausted (i.e. c.Err() != nil || c.ID() == 0),
+// subsequent attempts will also return false. Otherwise, it is safe to call TryNext again until a document is
+// available.
+//
+// This method requires driver version >= 1.2.0.
+func (c *Cursor) TryNext(ctx context.Context) bool {
+	return c.next(ctx, true)
+}
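+
+// Editor's note: an illustrative polling loop (not upstream code) for a
+// tailable cursor created with options.Find().SetCursorType(options.TailableAwait),
+// assuming ctx and cur:
+//
+//	for {
+//		if cur.TryNext(ctx) {
+//			// process cur.Current
+//			continue
+//		}
+//		if err := cur.Err(); err != nil || cur.ID() == 0 {
+//			break // error occurred or the cursor was exhausted
+//		}
+//		// No document was available yet; do other work, then poll again.
+//	}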
+
+func (c *Cursor) next(ctx context.Context, nonBlocking bool) bool {
+	// return false right away if the cursor has already errored.
+	if c.err != nil {
+		return false
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	doc, err := c.batch.Next()
+	switch {
+	case err == nil:
+		// Consume the next document in the current batch.
+		c.batchLength--
+		c.Current = bson.Raw(doc)
+		return true
+	case errors.Is(err, io.EOF): // Need to do a getMore
+	default:
+		c.err = err
+		return false
+	}
+
+	// call the Next method in a loop until at least one document is returned in the next batch or
+	// the context times out.
+	for {
+		// If we don't have a next batch
+		if !c.bc.Next(ctx) {
+			// Do we have an error? If so we return false.
+			c.err = replaceErrors(c.bc.Err())
+			if c.err != nil {
+				return false
+			}
+			// Is the cursor ID zero?
+			if c.bc.ID() == 0 {
+				c.closeImplicitSession()
+				return false
+			}
+			// empty batch, but cursor is still valid.
+			// use nonBlocking to determine if we should continue or return control to the caller.
+			if nonBlocking {
+				return false
+			}
+			continue
+		}
+
+		// close the implicit session if this was the last getMore
+		if c.bc.ID() == 0 {
+			c.closeImplicitSession()
+		}
+
+		// Use the new batch to update the batch and batchLength fields. Consume the first document in the batch.
+		c.batch = c.bc.Batch()
+		c.batchLength = c.batch.DocumentCount()
+		doc, err = c.batch.Next()
+		switch {
+		case err == nil:
+			c.batchLength--
+			c.Current = bson.Raw(doc)
+			return true
+		case errors.Is(err, io.EOF): // Empty batch so we continue
+		default:
+			c.err = err
+			return false
+		}
+	}
+}
+
+func getDecoder(
+	data []byte,
+	opts *options.BSONOptions,
+	reg *bsoncodec.Registry,
+) (*bson.Decoder, error) {
+	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
+	if err != nil {
+		return nil, err
+	}
+
+	if opts != nil {
+		if opts.AllowTruncatingDoubles {
+			dec.AllowTruncatingDoubles()
+		}
+		if opts.BinaryAsSlice {
+			dec.BinaryAsSlice()
+		}
+		if opts.DefaultDocumentD {
+			dec.DefaultDocumentD()
+		}
+		if opts.DefaultDocumentM {
+			dec.DefaultDocumentM()
+		}
+		if opts.UseJSONStructTags {
+			dec.UseJSONStructTags()
+		}
+		if opts.UseLocalTimeZone {
+			dec.UseLocalTimeZone()
+		}
+		if opts.ZeroMaps {
+			dec.ZeroMaps()
+		}
+		if opts.ZeroStructs {
+			dec.ZeroStructs()
+		}
+	}
+
+	if reg != nil {
+		// TODO:(GODRIVER-2719): Remove error handling.
+		if err := dec.SetRegistry(reg); err != nil {
+			return nil, err
+		}
+	}
+
+	return dec, nil
+}
+
+// Decode will unmarshal the current document into val and return any errors from the unmarshalling process without any
+// modification. If val is nil or is a typed nil, an error will be returned.
+func (c *Cursor) Decode(val interface{}) error {
+	dec, err := getDecoder(c.Current, c.bsonOpts, c.registry)
+	if err != nil {
+		return fmt.Errorf("error configuring BSON decoder: %w", err)
+	}
+
+	return dec.Decode(val)
+}
+
+// Err returns the last error seen by the Cursor, or nil if no error has occurred.
+func (c *Cursor) Err() error { return c.err }
+
+// Close closes this cursor. Next and TryNext must not be called after Close has been called. Close is idempotent. After
+// the first call, any subsequent calls will not change the state.
+func (c *Cursor) Close(ctx context.Context) error {
+	defer c.closeImplicitSession()
+	return replaceErrors(c.bc.Close(ctx))
+}
+
+// All iterates the cursor and decodes each document into results. The results parameter must be a pointer to a slice.
+// The slice pointed to by results will be completely overwritten. A nil slice pointer will not be modified if the cursor
+// has been closed, exhausted, or is empty. This method will close the cursor after retrieving all documents. If the
+// cursor has been iterated, any previously iterated documents will not be included in results.
+//
+// This method requires driver version >= 1.1.0.
+func (c *Cursor) All(ctx context.Context, results interface{}) error {
+	resultsVal := reflect.ValueOf(results)
+	if resultsVal.Kind() != reflect.Ptr {
+		return fmt.Errorf("results argument must be a pointer to a slice, but was a %s", resultsVal.Kind())
+	}
+
+	sliceVal := resultsVal.Elem()
+	if sliceVal.Kind() == reflect.Interface {
+		sliceVal = sliceVal.Elem()
+	}
+
+	if sliceVal.Kind() != reflect.Slice {
+		return fmt.Errorf("results argument must be a pointer to a slice, but was a pointer to %s", sliceVal.Kind())
+	}
+
+	elementType := sliceVal.Type().Elem()
+	var index int
+	var err error
+
+	// Defer a call to Close to try to clean up the cursor server-side when all
+	// documents have not been exhausted. Use context.Background() to ensure Close
+	// completes even if the context passed to All has errored.
+	defer c.Close(context.Background())
+
+	batch := c.batch // exhaust the current batch before iterating the batch cursor
+	for {
+		sliceVal, index, err = c.addFromBatch(sliceVal, elementType, batch, index)
+		if err != nil {
+			return err
+		}
+
+		if !c.bc.Next(ctx) {
+			break
+		}
+
+		batch = c.bc.Batch()
+	}
+
+	if err = replaceErrors(c.bc.Err()); err != nil {
+		return err
+	}
+
+	resultsVal.Elem().Set(sliceVal.Slice(0, index))
+	return nil
+}
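+
+// Editor's note: an illustrative sketch (not upstream code); All is the usual
+// way to drain a result set that comfortably fits in memory, assuming ctx and
+// coll:
+//
+//	cur, err := coll.Find(ctx, bson.D{})
+//	if err != nil {
+//		return err
+//	}
+//	var results []bson.M
+//	if err := cur.All(ctx, &results); err != nil {
+//		return err
+//	}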
+
+// RemainingBatchLength returns the number of documents left in the current batch. If this returns zero, the subsequent
+// call to Next or TryNext will do a network request to fetch the next batch.
+func (c *Cursor) RemainingBatchLength() int {
+	return c.batchLength
+}
+
+// addFromBatch adds all documents from batch to sliceVal starting at the given index. It returns the new slice value,
+// the next empty index in the slice, and an error if one occurs.
+func (c *Cursor) addFromBatch(sliceVal reflect.Value, elemType reflect.Type, batch *bsoncore.DocumentSequence,
+	index int) (reflect.Value, int, error) {
+
+	docs, err := batch.Documents()
+	if err != nil {
+		return sliceVal, index, err
+	}
+
+	for _, doc := range docs {
+		if sliceVal.Len() == index {
+			// slice is full
+			newElem := reflect.New(elemType)
+			sliceVal = reflect.Append(sliceVal, newElem.Elem())
+			sliceVal = sliceVal.Slice(0, sliceVal.Cap())
+		}
+
+		currElem := sliceVal.Index(index).Addr().Interface()
+		dec, err := getDecoder(doc, c.bsonOpts, c.registry)
+		if err != nil {
+			return sliceVal, index, fmt.Errorf("error configuring BSON decoder: %w", err)
+		}
+		err = dec.Decode(currElem)
+		if err != nil {
+			return sliceVal, index, err
+		}
+
+		index++
+	}
+
+	return sliceVal, index, nil
+}
+
+func (c *Cursor) closeImplicitSession() {
+	if c.clientSession != nil && c.clientSession.IsImplicit {
+		c.clientSession.EndSession()
+	}
+}
+
+// SetBatchSize sets the number of documents to fetch from the database with
+// each iteration of the cursor's "Next" method. Note that some operations set
+// an initial cursor batch size, so this setting only affects subsequent
+// document batches fetched from the database.
+func (c *Cursor) SetBatchSize(batchSize int32) {
+	c.bc.SetBatchSize(batchSize)
+}
+
+// SetMaxTime will set the maximum amount of time the server will allow the
+// operations to execute. The server will error if this field is set but the
+// cursor is not configured with awaitData=true.
+//
+// The time.Duration value passed by this setter will be converted and rounded
+// down to the nearest millisecond.
+func (c *Cursor) SetMaxTime(dur time.Duration) {
+	c.bc.SetMaxTime(dur)
+}
+
+// SetComment will set a user-configurable comment that can be used to identify
+// the operation in server logs.
+func (c *Cursor) SetComment(comment interface{}) {
+	c.bc.SetComment(comment)
+}
+
+// BatchCursorFromCursor returns a driver.BatchCursor for the given Cursor. If there is no underlying
+// driver.BatchCursor, nil is returned.
+//
+// Deprecated: This is an unstable function because the driver.BatchCursor type exists in the "x" package. Neither this
+// function nor the driver.BatchCursor type should be used by applications and may be changed or removed in any release.
+func BatchCursorFromCursor(c *Cursor) *driver.BatchCursor {
+	bc, _ := c.bc.(*driver.BatchCursor)
+	return bc
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go
new file mode 100644
index 0000000000000000000000000000000000000000..5344c9641e1411e722cff8fbae431f03baa9e189
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go
@@ -0,0 +1,849 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/internal/csfle"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+var (
+	defaultRunCmdOpts = []*options.RunCmdOptions{options.RunCmd().SetReadPreference(readpref.Primary())}
+)
+
+// Database is a handle to a MongoDB database. It is safe for concurrent use by multiple goroutines.
+type Database struct {
+	client         *Client
+	name           string
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	readPreference *readpref.ReadPref
+	readSelector   description.ServerSelector
+	writeSelector  description.ServerSelector
+	bsonOpts       *options.BSONOptions
+	registry       *bsoncodec.Registry
+}
+
+func newDatabase(client *Client, name string, opts ...*options.DatabaseOptions) *Database {
+	dbOpt := options.MergeDatabaseOptions(opts...)
+
+	rc := client.readConcern
+	if dbOpt.ReadConcern != nil {
+		rc = dbOpt.ReadConcern
+	}
+
+	rp := client.readPreference
+	if dbOpt.ReadPreference != nil {
+		rp = dbOpt.ReadPreference
+	}
+
+	wc := client.writeConcern
+	if dbOpt.WriteConcern != nil {
+		wc = dbOpt.WriteConcern
+	}
+
+	bsonOpts := client.bsonOpts
+	if dbOpt.BSONOptions != nil {
+		bsonOpts = dbOpt.BSONOptions
+	}
+
+	reg := client.registry
+	if dbOpt.Registry != nil {
+		reg = dbOpt.Registry
+	}
+
+	db := &Database{
+		client:         client,
+		name:           name,
+		readPreference: rp,
+		readConcern:    rc,
+		writeConcern:   wc,
+		bsonOpts:       bsonOpts,
+		registry:       reg,
+	}
+
+	db.readSelector = description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(db.readPreference),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	db.writeSelector = description.CompositeSelector([]description.ServerSelector{
+		description.WriteSelector(),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	return db
+}
+
+// Client returns the Client the Database was created from.
+func (db *Database) Client() *Client {
+	return db.client
+}
+
+// Name returns the name of the database.
+func (db *Database) Name() string {
+	return db.name
+}
+
+// Collection gets a handle for a collection with the given name configured with the given CollectionOptions.
+func (db *Database) Collection(name string, opts ...*options.CollectionOptions) *Collection {
+	return newCollection(db, name, opts...)
+}
+
+// Aggregate executes an aggregate command against the database. This requires MongoDB version >= 3.6 and driver version >=
+// 1.1.0.
+//
+// The pipeline parameter must be a slice of documents, each representing an aggregation stage. The pipeline
+// cannot be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the
+// mongo.Pipeline type can be used. See
+// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid
+// stages in database-level aggregations.
+//
+// The opts parameter can be used to specify options for this operation (see the options.AggregateOptions documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/.
+func (db *Database) Aggregate(ctx context.Context, pipeline interface{},
+	opts ...*options.AggregateOptions) (*Cursor, error) {
+	a := aggregateParams{
+		ctx:            ctx,
+		pipeline:       pipeline,
+		client:         db.client,
+		registry:       db.registry,
+		readConcern:    db.readConcern,
+		writeConcern:   db.writeConcern,
+		retryRead:      db.client.retryReads,
+		db:             db.name,
+		readSelector:   db.readSelector,
+		writeSelector:  db.writeSelector,
+		readPreference: db.readPreference,
+		opts:           opts,
+	}
+	return aggregate(a)
+}
+
+func (db *Database) processRunCommand(ctx context.Context, cmd interface{},
+	cursorCommand bool, opts ...*options.RunCmdOptions) (*operation.Command, *session.Client, error) {
+	sess := sessionFromContext(ctx)
+	if sess == nil && db.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(db.client.sessionPool, db.client.id)
+	}
+
+	err := db.client.validSession(sess)
+	if err != nil {
+		return nil, sess, err
+	}
+
+	ro := options.MergeRunCmdOptions(append(defaultRunCmdOpts, opts...)...)
+	if sess != nil && sess.TransactionRunning() && ro.ReadPreference != nil && ro.ReadPreference.Mode() != readpref.PrimaryMode {
+		return nil, sess, errors.New("read preference in a transaction must be primary")
+	}
+
+	if isUnorderedMap(cmd) {
+		return nil, sess, ErrMapForOrderedArgument{"cmd"}
+	}
+
+	runCmdDoc, err := marshal(cmd, db.bsonOpts, db.registry)
+	if err != nil {
+		return nil, sess, err
+	}
+	readSelect := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(ro.ReadPreference),
+		description.LatencySelector(db.client.localThreshold),
+	})
+	if sess != nil && sess.PinnedServer != nil {
+		readSelect = makePinnedSelector(sess, readSelect)
+	}
+
+	var op *operation.Command
+	if cursorCommand {
+		cursorOpts := db.client.createBaseCursorOptions()
+		cursorOpts.MarshalValueEncoderFn = newEncoderFn(db.bsonOpts, db.registry)
+		op = operation.NewCursorCommand(runCmdDoc, cursorOpts)
+	} else {
+		op = operation.NewCommand(runCmdDoc)
+	}
+
+	return op.Session(sess).CommandMonitor(db.client.monitor).
+		ServerSelector(readSelect).ClusterClock(db.client.clock).
+		Database(db.name).Deployment(db.client.deployment).
+		Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI).
+		Timeout(db.client.timeout).Logger(db.client.logger).Authenticator(db.client.authenticator), sess, nil
+}
+
+// RunCommand executes the given command against the database.
+//
+// This function does not obey the Database's readPreference. To specify a read
+// preference, the RunCmdOptions.ReadPreference option must be used.
+//
+// This function does not obey the Database's readConcern or writeConcern. A
+// user must supply these values manually in the user-provided runCommand
+// parameter.
+//
+// The runCommand parameter must be a document for the command to be executed. It cannot be nil.
+// This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid.
+//
+// The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation).
+//
+// The behavior of RunCommand is undefined if the command document contains any of the following:
+// - A session ID or any transaction-specific fields
+// - API versioning options when an API version is already declared on the Client
+// - maxTimeMS when Timeout is set on the Client
+func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	op, sess, err := db.processRunCommand(ctx, runCommand, false, opts...)
+	defer closeImplicitSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	err = op.Execute(ctx)
+	// RunCommand can be used to run a write, thus execute may return a write error
+	_, convErr := processWriteError(err)
+	return &SingleResult{
+		ctx:      ctx,
+		err:      convErr,
+		rdr:      bson.Raw(op.Result()),
+		bsonOpts: db.bsonOpts,
+		reg:      db.registry,
+	}
+}
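+
+// Editor's note: an illustrative sketch (not upstream code); the command must
+// be an order-preserving bson.D, never bson.M, because the command name has
+// to be the first field. Assumes ctx and db:
+//
+//	var result bson.M
+//	if err := db.RunCommand(ctx, bson.D{{Key: "ping", Value: 1}}).Decode(&result); err != nil {
+//		return err
+//	}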
+
+// RunCommandCursor executes the given command against the database and parses the response as a cursor. If the command
+// being executed does not return a cursor (e.g. insert), the command will be executed on the server and an error will
+// be returned because the server response cannot be parsed as a cursor. This function does not obey the Database's read
+// preference. To specify a read preference, the RunCmdOptions.ReadPreference option must be used.
+//
+// The runCommand parameter must be a document for the command to be executed. It cannot be nil.
+// This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid.
+//
+// The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation).
+//
+// The behavior of RunCommandCursor is undefined if the command document contains any of the following:
+// - A session ID or any transaction-specific fields
+// - API versioning options when an API version is already declared on the Client
+// - maxTimeMS when Timeout is set on the Client
+func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	op, sess, err := db.processRunCommand(ctx, runCommand, true, opts...)
+	if err != nil {
+		closeImplicitSession(sess)
+		return nil, replaceErrors(err)
+	}
+
+	if err = op.Execute(ctx); err != nil {
+		closeImplicitSession(sess)
+		if errors.Is(err, driver.ErrNoCursor) {
+			return nil, errors.New(
+				"database response does not contain a cursor; try using RunCommand instead")
+		}
+		return nil, replaceErrors(err)
+	}
+
+	bc, err := op.ResultCursor()
+	if err != nil {
+		closeImplicitSession(sess)
+		return nil, replaceErrors(err)
+	}
+	cursor, err := newCursorWithSession(bc, db.bsonOpts, db.registry, sess)
+	return cursor, replaceErrors(err)
+}
+
+// Drop drops the database on the server. This method ignores "namespace not found" errors so it is safe to drop
+// a database that does not exist on the server.
+func (db *Database) Drop(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && db.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(db.client.sessionPool, db.client.id)
+		defer sess.EndSession()
+	}
+
+	err := db.client.validSession(sess)
+	if err != nil {
+		return err
+	}
+
+	wc := db.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, db.writeSelector)
+
+	op := operation.NewDropDatabase().
+		Session(sess).WriteConcern(wc).CommandMonitor(db.client.monitor).
+		ServerSelector(selector).ClusterClock(db.client.clock).
+		Database(db.name).Deployment(db.client.deployment).Crypt(db.client.cryptFLE).
+		ServerAPI(db.client.serverAPI).Authenticator(db.client.authenticator)
+
+	err = op.Execute(ctx)
+
+	driverErr, ok := err.(driver.Error)
+	if err != nil && (!ok || !driverErr.NamespaceNotFound()) {
+		return replaceErrors(err)
+	}
+	return nil
+}
+
+// ListCollectionSpecifications executes a listCollections command and returns a slice of CollectionSpecification
+// instances representing the collections in the database.
+//
+// The filter parameter must be a document containing query operators and can be used to select which collections
+// are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include all
+// collections.
+//
+// The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/.
+//
+// BUG(benjirewis): ListCollectionSpecifications prevents listing more than 100 collections per database when running
+// against MongoDB version 2.6.
+func (db *Database) ListCollectionSpecifications(ctx context.Context, filter interface{},
+	opts ...*options.ListCollectionsOptions) ([]*CollectionSpecification, error) {
+
+	cursor, err := db.ListCollections(ctx, filter, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	var specs []*CollectionSpecification
+	err = cursor.All(ctx, &specs)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, spec := range specs {
+		// Pre-4.4 servers report a namespace in their responses, so we only set Namespace manually if it was not in
+		// the response.
+		if spec.IDIndex != nil && spec.IDIndex.Namespace == "" {
+			spec.IDIndex.Namespace = db.name + "." + spec.Name
+		}
+	}
+	return specs, nil
+}
+
+// ListCollections executes a listCollections command and returns a cursor over the collections in the database.
+//
+// The filter parameter must be a document containing query operators and can be used to select which collections
+// are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include all
+// collections.
+//
+// The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/.
+//
+// BUG(benjirewis): ListCollections prevents listing more than 100 collections per database when running against
+// MongoDB version 2.6.
+func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	filterDoc, err := marshal(filter, db.bsonOpts, db.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && db.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(db.client.sessionPool, db.client.id)
+	}
+
+	err = db.client.validSession(sess)
+	if err != nil {
+		closeImplicitSession(sess)
+		return nil, err
+	}
+
+	selector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(db.client.localThreshold),
+	})
+	selector = makeReadPrefSelector(sess, selector, db.client.localThreshold)
+
+	lco := options.MergeListCollectionsOptions(opts...)
+	op := operation.NewListCollections(filterDoc).
+		Session(sess).ReadPreference(db.readPreference).CommandMonitor(db.client.monitor).
+		ServerSelector(selector).ClusterClock(db.client.clock).
+		Database(db.name).Deployment(db.client.deployment).Crypt(db.client.cryptFLE).
+		ServerAPI(db.client.serverAPI).Timeout(db.client.timeout).Authenticator(db.client.authenticator)
+
+	cursorOpts := db.client.createBaseCursorOptions()
+
+	cursorOpts.MarshalValueEncoderFn = newEncoderFn(db.bsonOpts, db.registry)
+
+	if lco.NameOnly != nil {
+		op = op.NameOnly(*lco.NameOnly)
+	}
+	if lco.BatchSize != nil {
+		cursorOpts.BatchSize = *lco.BatchSize
+		op = op.BatchSize(*lco.BatchSize)
+	}
+	if lco.AuthorizedCollections != nil {
+		op = op.AuthorizedCollections(*lco.AuthorizedCollections)
+	}
+
+	retry := driver.RetryNone
+	if db.client.retryReads {
+		retry = driver.RetryOncePerCommand
+	}
+	op = op.Retry(retry)
+
+	err = op.Execute(ctx)
+	if err != nil {
+		closeImplicitSession(sess)
+		return nil, replaceErrors(err)
+	}
+
+	bc, err := op.Result(cursorOpts)
+	if err != nil {
+		closeImplicitSession(sess)
+		return nil, replaceErrors(err)
+	}
+	cursor, err := newCursorWithSession(bc, db.bsonOpts, db.registry, sess)
+	return cursor, replaceErrors(err)
+}
+
+// ListCollectionNames executes a listCollections command and returns a slice containing the names of the collections
+// in the database. This method requires driver version >= 1.1.0.
+//
+// The filter parameter must be a document containing query operators and can be used to select which collections
+// are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include all
+// collections.
+//
+// The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/.
+//
+// BUG(benjirewis): ListCollectionNames prevents listing more than 100 collections per database when running against
+// MongoDB version 2.6.
+func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]string, error) {
+	opts = append(opts, options.ListCollections().SetNameOnly(true))
+
+	res, err := db.ListCollections(ctx, filter, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	defer res.Close(ctx)
+
+	names := make([]string, 0)
+	for res.Next(ctx) {
+		elem, err := res.Current.LookupErr("name")
+		if err != nil {
+			return nil, err
+		}
+
+		if elem.Type != bson.TypeString {
+			return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type, bson.TypeString)
+		}
+
+		elemName := elem.StringValue()
+		names = append(names, elemName)
+	}
+
+	if err := res.Err(); err != nil {
+		return nil, err
+	}
+	return names, nil
+}
+
+// ReadConcern returns the read concern used to configure the Database object.
+func (db *Database) ReadConcern() *readconcern.ReadConcern {
+	return db.readConcern
+}
+
+// ReadPreference returns the read preference used to configure the Database object.
+func (db *Database) ReadPreference() *readpref.ReadPref {
+	return db.readPreference
+}
+
+// WriteConcern returns the write concern used to configure the Database object.
+func (db *Database) WriteConcern() *writeconcern.WriteConcern {
+	return db.writeConcern
+}
+
+// Watch returns a change stream for all changes to the corresponding database. See
+// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams.
+//
+// The Database must be configured with read concern majority or no read concern for a change stream to be created
+// successfully.
+//
+// The pipeline parameter must be a slice of documents, each representing a pipeline stage. The pipeline cannot be
+// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for
+// a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the
+// mongo.Pipeline{} type can be used.
+//
+// The opts parameter can be used to specify options for change stream creation (see the options.ChangeStreamOptions
+// documentation).
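+//
+// A minimal usage sketch (assuming a connected *Client named client and a
+// context.Context named ctx):
+//
+//	stream, err := client.Database("db").Watch(ctx, mongo.Pipeline{})
+//	if err != nil {
+//		return err
+//	}
+//	defer stream.Close(ctx)
+//	for stream.Next(ctx) {
+//		fmt.Println(stream.Current)
+//	}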
+func (db *Database) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	csConfig := changeStreamConfig{
+		readConcern:    db.readConcern,
+		readPreference: db.readPreference,
+		client:         db.client,
+		registry:       db.registry,
+		streamType:     DatabaseStream,
+		databaseName:   db.Name(),
+		crypt:          db.client.cryptFLE,
+	}
+	return newChangeStream(ctx, csConfig, pipeline, opts...)
+}
+
+// CreateCollection executes a create command to explicitly create a new collection with the specified name on the
+// server. If the collection being created already exists, this method will return a mongo.CommandError. This method
+// requires driver version 1.4.0 or higher.
+//
+// The opts parameter can be used to specify options for the operation (see the options.CreateCollectionOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/create/.
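+//
+// A minimal usage sketch (assuming a connected *Client named client and a
+// context.Context named ctx):
+//
+//	opts := options.CreateCollection().SetCapped(true).SetSizeInBytes(1024 * 1024)
+//	err := client.Database("db").CreateCollection(ctx, "events", opts)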
+func (db *Database) CreateCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error {
+	cco := options.MergeCreateCollectionOptions(opts...)
+	// Follow Client-Side Encryption specification to check for encryptedFields.
+	// Check for encryptedFields from create options.
+	ef := cco.EncryptedFields
+	// Check for encryptedFields from the client EncryptedFieldsMap.
+	if ef == nil {
+		ef = db.getEncryptedFieldsFromMap(name)
+	}
+	if ef != nil {
+		return db.createCollectionWithEncryptedFields(ctx, name, ef, opts...)
+	}
+
+	return db.createCollection(ctx, name, opts...)
+}
+
+// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by running the "listCollections" command.
+// Returns nil and no error if the listCollections command succeeds, but "encryptedFields" is not present.
+func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collectionName string) (interface{}, error) {
+	// Check if collection has an EncryptedFields configured server-side.
+	collSpecs, err := db.ListCollectionSpecifications(ctx, bson.D{{"name", collectionName}})
+	if err != nil {
+		return nil, err
+	}
+	if len(collSpecs) == 0 {
+		return nil, nil
+	}
+	if len(collSpecs) > 1 {
+		return nil, fmt.Errorf("expected 1 or 0 results from listCollections, got %v", len(collSpecs))
+	}
+	collSpec := collSpecs[0]
+	rawValue, err := collSpec.Options.LookupErr("encryptedFields")
+	if errors.Is(err, bsoncore.ErrElementNotFound) {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	encryptedFields, ok := rawValue.DocumentOK()
+	if !ok {
+		return nil, fmt.Errorf("expected encryptedFields of %v to be document, got %v", collectionName, rawValue.Type)
+	}
+
+	return encryptedFields, nil
+}
+
+// getEncryptedFieldsFromMap tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap.
+// Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName.
+func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} {
+	// Check the EncryptedFieldsMap
+	efMap := db.client.encryptedFieldsMap
+	if efMap == nil {
+		return nil
+	}
+
+	namespace := db.name + "." + collectionName
+
+	ef, ok := efMap[namespace]
+	if ok {
+		return ef
+	}
+	return nil
+}
+
+// createCollectionWithEncryptedFields creates a collection with an EncryptedFields.
+func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, name string, ef interface{}, opts ...*options.CreateCollectionOptions) error {
+	efBSON, err := marshal(ef, db.bsonOpts, db.registry)
+	if err != nil {
+		return fmt.Errorf("error transforming document: %w", err)
+	}
+
+	// Check the wire version to ensure server is 7.0.0 or newer.
+	// After the wire version check, and before creating the collections, it is possible for the server state to
+	// change. That is OK: this check is a best-effort attempt to inform users earlier if they are using a QEv2
+	// driver against a QEv1 server.
+	{
+		const QEv2WireVersion = 21
+		server, err := db.client.deployment.SelectServer(ctx, description.WriteSelector())
+		if err != nil {
+			return fmt.Errorf("error selecting server to check maxWireVersion: %w", err)
+		}
+		conn, err := server.Connection(ctx)
+		if err != nil {
+			return fmt.Errorf("error getting connection to check maxWireVersion: %w", err)
+		}
+		defer conn.Close()
+		wireVersionRange := conn.Description().WireVersion
+		if wireVersionRange.Max < QEv2WireVersion {
+			return fmt.Errorf("Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption. Got maxWireVersion %v but need maxWireVersion >= %v", wireVersionRange.Max, QEv2WireVersion)
+		}
+	}
+
+	// Create the two encryption-related, associated collections: `escCollection` and `ecocCollection`.
+
+	stateCollectionOpts := options.CreateCollection().
+		SetClusteredIndex(bson.D{{"key", bson.D{{"_id", 1}}}, {"unique", true}})
+	// Create ESCCollection.
+	escCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, name, csfle.EncryptedStateCollection)
+	if err != nil {
+		return err
+	}
+
+	if err := db.createCollection(ctx, escCollection, stateCollectionOpts); err != nil {
+		return err
+	}
+
+	// Create ECOCCollection.
+	ecocCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, name, csfle.EncryptedCompactionCollection)
+	if err != nil {
+		return err
+	}
+
+	if err := db.createCollection(ctx, ecocCollection, stateCollectionOpts); err != nil {
+		return err
+	}
+
+	// Create a data collection with the 'encryptedFields' option.
+	op, err := db.createCollectionOperation(name, opts...)
+	if err != nil {
+		return err
+	}
+
+	op.EncryptedFields(efBSON)
+	if err := db.executeCreateOperation(ctx, op); err != nil {
+		return err
+	}
+
+	// Create an index on the __safeContent__ field in the newly created data collection.
+	if _, err := db.Collection(name).Indexes().CreateOne(ctx, IndexModel{Keys: bson.D{{"__safeContent__", 1}}}); err != nil {
+		return fmt.Errorf("error creating safeContent index: %w", err)
+	}
+
+	return nil
+}
+
+// createCollection creates a collection without EncryptedFields.
+func (db *Database) createCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error {
+	op, err := db.createCollectionOperation(name, opts...)
+	if err != nil {
+		return err
+	}
+	return db.executeCreateOperation(ctx, op)
+}
+
+func (db *Database) createCollectionOperation(name string, opts ...*options.CreateCollectionOptions) (*operation.Create, error) {
+	cco := options.MergeCreateCollectionOptions(opts...)
+	op := operation.NewCreate(name).ServerAPI(db.client.serverAPI).Authenticator(db.client.authenticator)
+
+	if cco.Capped != nil {
+		op.Capped(*cco.Capped)
+	}
+	if cco.Collation != nil {
+		op.Collation(bsoncore.Document(cco.Collation.ToDocument()))
+	}
+	if cco.ChangeStreamPreAndPostImages != nil {
+		csppi, err := marshal(cco.ChangeStreamPreAndPostImages, db.bsonOpts, db.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.ChangeStreamPreAndPostImages(csppi)
+	}
+	if cco.DefaultIndexOptions != nil {
+		idx, doc := bsoncore.AppendDocumentStart(nil)
+		if cco.DefaultIndexOptions.StorageEngine != nil {
+			storageEngine, err := marshal(cco.DefaultIndexOptions.StorageEngine, db.bsonOpts, db.registry)
+			if err != nil {
+				return nil, err
+			}
+
+			doc = bsoncore.AppendDocumentElement(doc, "storageEngine", storageEngine)
+		}
+		doc, err := bsoncore.AppendDocumentEnd(doc, idx)
+		if err != nil {
+			return nil, err
+		}
+
+		op.IndexOptionDefaults(doc)
+	}
+	if cco.MaxDocuments != nil {
+		op.Max(*cco.MaxDocuments)
+	}
+	if cco.SizeInBytes != nil {
+		op.Size(*cco.SizeInBytes)
+	}
+	if cco.StorageEngine != nil {
+		storageEngine, err := marshal(cco.StorageEngine, db.bsonOpts, db.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.StorageEngine(storageEngine)
+	}
+	if cco.ValidationAction != nil {
+		op.ValidationAction(*cco.ValidationAction)
+	}
+	if cco.ValidationLevel != nil {
+		op.ValidationLevel(*cco.ValidationLevel)
+	}
+	if cco.Validator != nil {
+		validator, err := marshal(cco.Validator, db.bsonOpts, db.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.Validator(validator)
+	}
+	if cco.ExpireAfterSeconds != nil {
+		op.ExpireAfterSeconds(*cco.ExpireAfterSeconds)
+	}
+	if cco.TimeSeriesOptions != nil {
+		idx, doc := bsoncore.AppendDocumentStart(nil)
+		doc = bsoncore.AppendStringElement(doc, "timeField", cco.TimeSeriesOptions.TimeField)
+
+		if cco.TimeSeriesOptions.MetaField != nil {
+			doc = bsoncore.AppendStringElement(doc, "metaField", *cco.TimeSeriesOptions.MetaField)
+		}
+		if cco.TimeSeriesOptions.Granularity != nil {
+			doc = bsoncore.AppendStringElement(doc, "granularity", *cco.TimeSeriesOptions.Granularity)
+		}
+
+		if cco.TimeSeriesOptions.BucketMaxSpan != nil {
+			bmss := int64(*cco.TimeSeriesOptions.BucketMaxSpan / time.Second)
+
+			doc = bsoncore.AppendInt64Element(doc, "bucketMaxSpanSeconds", bmss)
+		}
+
+		if cco.TimeSeriesOptions.BucketRounding != nil {
+			brs := int64(*cco.TimeSeriesOptions.BucketRounding / time.Second)
+
+			doc = bsoncore.AppendInt64Element(doc, "bucketRoundingSeconds", brs)
+		}
+
+		doc, err := bsoncore.AppendDocumentEnd(doc, idx)
+		if err != nil {
+			return nil, err
+		}
+
+		op.TimeSeries(doc)
+	}
+	if cco.ClusteredIndex != nil {
+		clusteredIndex, err := marshal(cco.ClusteredIndex, db.bsonOpts, db.registry)
+		if err != nil {
+			return nil, err
+		}
+		op.ClusteredIndex(clusteredIndex)
+	}
+
+	return op, nil
+}
+
+// CreateView executes a create command to explicitly create a view on the server. See
+// https://www.mongodb.com/docs/manual/core/views/ for more information about views. This method requires driver version >=
+// 1.4.0 and MongoDB version >= 3.4.
+//
+// The viewName parameter specifies the name of the view to create.
+//
+// The viewOn parameter specifies the name of the collection or view on which this view will be created.
+//
+// The pipeline parameter specifies an aggregation pipeline that will be executed against the source collection or
+// view to create this view.
+//
+// The opts parameter can be used to specify options for the operation (see the options.CreateViewOptions
+// documentation).
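+//
+// A minimal usage sketch (assuming a connected *Client named client and a
+// context.Context named ctx):
+//
+//	pipeline := mongo.Pipeline{
+//		{{"$match", bson.D{{"status", "active"}}}},
+//	}
+//	err := client.Database("db").CreateView(ctx, "activeUsers", "users", pipeline)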
+func (db *Database) CreateView(ctx context.Context, viewName, viewOn string, pipeline interface{},
+	opts ...*options.CreateViewOptions) error {
+
+	pipelineArray, _, err := marshalAggregatePipeline(pipeline, db.bsonOpts, db.registry)
+	if err != nil {
+		return err
+	}
+
+	op := operation.NewCreate(viewName).
+		ViewOn(viewOn).
+		Pipeline(pipelineArray).
+		ServerAPI(db.client.serverAPI).
+		Authenticator(db.client.authenticator)
+	cvo := options.MergeCreateViewOptions(opts...)
+	if cvo.Collation != nil {
+		op.Collation(bsoncore.Document(cvo.Collation.ToDocument()))
+	}
+
+	return db.executeCreateOperation(ctx, op)
+}
+
+func (db *Database) executeCreateOperation(ctx context.Context, op *operation.Create) error {
+	sess := sessionFromContext(ctx)
+	if sess == nil && db.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(db.client.sessionPool, db.client.id)
+		defer sess.EndSession()
+	}
+
+	err := db.client.validSession(sess)
+	if err != nil {
+		return err
+	}
+
+	wc := db.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, db.writeSelector)
+	op = op.Session(sess).
+		WriteConcern(wc).
+		CommandMonitor(db.client.monitor).
+		ServerSelector(selector).
+		ClusterClock(db.client.clock).
+		Database(db.name).
+		Deployment(db.client.deployment).
+		Crypt(db.client.cryptFLE)
+
+	return replaceErrors(op.Execute(ctx))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go
new file mode 100644
index 0000000000000000000000000000000000000000..e750e33b1470787c1565a2353d166a91c96b58df
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go
@@ -0,0 +1,11 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package description contains types and functions for describing the state of MongoDB clusters.
+package description // import "go.mongodb.org/mongo-driver/mongo/description"
+
+// Unknown is an unknown server or topology kind.
+const Unknown = 0
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..19f2760e2f46a3f56d25b206bcdcc55a7089bd88
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go
@@ -0,0 +1,504 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/internal/bsonutil"
+	"go.mongodb.org/mongo-driver/internal/handshake"
+	"go.mongodb.org/mongo-driver/internal/ptrutil"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/tag"
+)
+
+// SelectedServer augments the Server type by also including the TopologyKind of the topology that includes the server.
+// This type should be used to track the state of a server that was selected to perform an operation.
+type SelectedServer struct {
+	Server
+	Kind TopologyKind
+}
+
+// Server contains information about a node in a cluster. This is created from hello command responses. If the value
+// of the Kind field is LoadBalancer, only the Addr and Kind fields will be set. All other fields will be set to the
+// zero value of the field's type.
+type Server struct {
+	Addr address.Address
+
+	Arbiters          []string
+	AverageRTT        time.Duration
+	AverageRTTSet     bool
+	Compression       []string // compression methods returned by server
+	CanonicalAddr     address.Address
+	ElectionID        primitive.ObjectID
+	HeartbeatInterval time.Duration
+	HelloOK           bool
+	Hosts             []string
+	IsCryptd          bool
+	LastError         error
+	LastUpdateTime    time.Time
+	LastWriteTime     time.Time
+	MaxBatchCount     uint32
+	MaxDocumentSize   uint32
+	MaxMessageSize    uint32
+	Members           []address.Address
+	Passives          []string
+	Passive           bool
+	Primary           address.Address
+	ReadOnly          bool
+	ServiceID         *primitive.ObjectID // Only set for servers that are deployed behind a load balancer.
+	// Deprecated: Use SessionTimeoutMinutesPtr instead.
+	SessionTimeoutMinutes    uint32
+	SessionTimeoutMinutesPtr *int64
+	SetName                  string
+	SetVersion               uint32
+	Tags                     tag.Set
+	TopologyVersion          *TopologyVersion
+	Kind                     ServerKind
+	WireVersion              *VersionRange
+}
+
+// NewServer creates a new server description from the given hello command response.
+func NewServer(addr address.Address, response bson.Raw) Server {
+	desc := Server{Addr: addr, CanonicalAddr: addr, LastUpdateTime: time.Now().UTC()}
+	elements, err := response.Elements()
+	if err != nil {
+		desc.LastError = err
+		return desc
+	}
+	var ok bool
+	var isReplicaSet, isWritablePrimary, hidden, secondary, arbiterOnly bool
+	var msg string
+	var versionRange VersionRange
+	for _, element := range elements {
+		switch element.Key() {
+		case "arbiters":
+			var err error
+			desc.Arbiters, err = stringSliceFromRawElement(element)
+			if err != nil {
+				desc.LastError = err
+				return desc
+			}
+		case "arbiterOnly":
+			arbiterOnly, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'arbiterOnly' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "compression":
+			var err error
+			desc.Compression, err = stringSliceFromRawElement(element)
+			if err != nil {
+				desc.LastError = err
+				return desc
+			}
+		case "electionId":
+			desc.ElectionID, ok = element.Value().ObjectIDOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'electionId' to be an ObjectID but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "iscryptd":
+			desc.IsCryptd, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'iscryptd' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "helloOk":
+			desc.HelloOK, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'helloOk' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "hidden":
+			hidden, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'hidden' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "hosts":
+			var err error
+			desc.Hosts, err = stringSliceFromRawElement(element)
+			if err != nil {
+				desc.LastError = err
+				return desc
+			}
+		case "isWritablePrimary":
+			isWritablePrimary, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'isWritablePrimary' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case handshake.LegacyHelloLowercase:
+			isWritablePrimary, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected legacy hello to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "isreplicaset":
+			isReplicaSet, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'isreplicaset' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "lastWrite":
+			lastWrite, ok := element.Value().DocumentOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'lastWrite' to be a document but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			dateTime, err := lastWrite.LookupErr("lastWriteDate")
+			if err == nil {
+				dt, ok := dateTime.DateTimeOK()
+				if !ok {
+					desc.LastError = fmt.Errorf("expected 'lastWriteDate' to be a datetime but it's a BSON %s", dateTime.Type)
+					return desc
+				}
+				desc.LastWriteTime = time.Unix(dt/1000, dt%1000*1000000).UTC()
+			}
+		case "logicalSessionTimeoutMinutes":
+			i64, ok := element.Value().AsInt64OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'logicalSessionTimeoutMinutes' to be an integer but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+
+			desc.SessionTimeoutMinutes = uint32(i64)
+			desc.SessionTimeoutMinutesPtr = &i64
+		case "maxBsonObjectSize":
+			i64, ok := element.Value().AsInt64OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'maxBsonObjectSize' to be an integer but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			desc.MaxDocumentSize = uint32(i64)
+		case "maxMessageSizeBytes":
+			i64, ok := element.Value().AsInt64OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'maxMessageSizeBytes' to be an integer but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			desc.MaxMessageSize = uint32(i64)
+		case "maxWriteBatchSize":
+			i64, ok := element.Value().AsInt64OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'maxWriteBatchSize' to be an integer but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			desc.MaxBatchCount = uint32(i64)
+		case "me":
+			me, ok := element.Value().StringValueOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'me' to be a string but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			desc.CanonicalAddr = address.Address(me).Canonicalize()
+		case "maxWireVersion":
+			versionRange.Max, ok = element.Value().AsInt32OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'maxWireVersion' to be an integer but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "minWireVersion":
+			versionRange.Min, ok = element.Value().AsInt32OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'minWireVersion' to be an integer but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "msg":
+			msg, ok = element.Value().StringValueOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'msg' to be a string but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "ok":
+			okay, ok := element.Value().AsInt32OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'ok' to be a number but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			if okay != 1 {
+				desc.LastError = errors.New("not ok")
+				return desc
+			}
+		case "passives":
+			var err error
+			desc.Passives, err = stringSliceFromRawElement(element)
+			if err != nil {
+				desc.LastError = err
+				return desc
+			}
+		case "passive":
+			desc.Passive, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'passive' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "primary":
+			primary, ok := element.Value().StringValueOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'primary' to be a string but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			desc.Primary = address.Address(primary)
+		case "readOnly":
+			desc.ReadOnly, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'readOnly' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "secondary":
+			secondary, ok = element.Value().BooleanOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'secondary' to be a boolean but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "serviceId":
+			oid, ok := element.Value().ObjectIDOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'serviceId' to be an ObjectId but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			desc.ServiceID = &oid
+		case "setName":
+			desc.SetName, ok = element.Value().StringValueOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'setName' to be a string but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+		case "setVersion":
+			i64, ok := element.Value().AsInt64OK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'setVersion' to be an integer but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+			desc.SetVersion = uint32(i64)
+		case "tags":
+			m, err := decodeStringMap(element, "tags")
+			if err != nil {
+				desc.LastError = err
+				return desc
+			}
+			desc.Tags = tag.NewTagSetFromMap(m)
+		case "topologyVersion":
+			doc, ok := element.Value().DocumentOK()
+			if !ok {
+				desc.LastError = fmt.Errorf("expected 'topologyVersion' to be a document but it's a BSON %s", element.Value().Type)
+				return desc
+			}
+
+			desc.TopologyVersion, err = NewTopologyVersion(doc)
+			if err != nil {
+				desc.LastError = err
+				return desc
+			}
+		}
+	}
+
+	for _, host := range desc.Hosts {
+		desc.Members = append(desc.Members, address.Address(host).Canonicalize())
+	}
+
+	for _, passive := range desc.Passives {
+		desc.Members = append(desc.Members, address.Address(passive).Canonicalize())
+	}
+
+	for _, arbiter := range desc.Arbiters {
+		desc.Members = append(desc.Members, address.Address(arbiter).Canonicalize())
+	}
+
+	desc.Kind = Standalone
+
+	switch {
+	case isReplicaSet:
+		desc.Kind = RSGhost
+	case desc.SetName != "":
+		switch {
+		case isWritablePrimary:
+			desc.Kind = RSPrimary
+		case hidden:
+			desc.Kind = RSMember
+		case secondary:
+			desc.Kind = RSSecondary
+		case arbiterOnly:
+			desc.Kind = RSArbiter
+		default:
+			desc.Kind = RSMember
+		}
+	case msg == "isdbgrid":
+		desc.Kind = Mongos
+	}
+
+	desc.WireVersion = &versionRange
+
+	return desc
+}
+
+// NewDefaultServer creates a new unknown server description with the given address.
+func NewDefaultServer(addr address.Address) Server {
+	return NewServerFromError(addr, nil, nil)
+}
+
+// NewServerFromError creates a new unknown server description with the given parameters.
+func NewServerFromError(addr address.Address, err error, tv *TopologyVersion) Server {
+	return Server{
+		Addr:            addr,
+		LastError:       err,
+		Kind:            Unknown,
+		TopologyVersion: tv,
+	}
+}
+
+// SetAverageRTT sets the average round trip time for this server description.
+func (s Server) SetAverageRTT(rtt time.Duration) Server {
+	s.AverageRTT = rtt
+	s.AverageRTTSet = true
+	return s
+}
+
+// DataBearing returns true if the server is a data bearing server.
+func (s Server) DataBearing() bool {
+	return s.Kind == RSPrimary ||
+		s.Kind == RSSecondary ||
+		s.Kind == Mongos ||
+		s.Kind == Standalone
+}
+
+// LoadBalanced returns true if the server is a load balancer or is behind a load balancer.
+func (s Server) LoadBalanced() bool {
+	return s.Kind == LoadBalancer || s.ServiceID != nil
+}
+
+// String implements the Stringer interface
+func (s Server) String() string {
+	str := fmt.Sprintf("Addr: %s, Type: %s",
+		s.Addr, s.Kind)
+	if len(s.Tags) != 0 {
+		str += fmt.Sprintf(", Tag sets: %s", s.Tags)
+	}
+
+	if s.AverageRTTSet {
+		str += fmt.Sprintf(", Average RTT: %d", s.AverageRTT)
+	}
+
+	if s.LastError != nil {
+		str += fmt.Sprintf(", Last error: %s", s.LastError)
+	}
+	return str
+}
+
+func decodeStringMap(element bson.RawElement, name string) (map[string]string, error) {
+	doc, ok := element.Value().DocumentOK()
+	if !ok {
+		return nil, fmt.Errorf("expected '%s' to be a document but it's a BSON %s", name, element.Value().Type)
+	}
+	elements, err := doc.Elements()
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string)
+	for _, element := range elements {
+		key := element.Key()
+		value, ok := element.Value().StringValueOK()
+		if !ok {
+			return nil, fmt.Errorf("expected '%s' to be a document of strings, but found a BSON %s", name, element.Value().Type)
+		}
+		m[key] = value
+	}
+	return m, nil
+}
+
+// Equal compares two server descriptions and returns true if they are equal
+func (s Server) Equal(other Server) bool {
+	if s.CanonicalAddr.String() != other.CanonicalAddr.String() {
+		return false
+	}
+
+	if !sliceStringEqual(s.Arbiters, other.Arbiters) {
+		return false
+	}
+
+	if !sliceStringEqual(s.Hosts, other.Hosts) {
+		return false
+	}
+
+	if !sliceStringEqual(s.Passives, other.Passives) {
+		return false
+	}
+
+	if s.Primary != other.Primary {
+		return false
+	}
+
+	if s.SetName != other.SetName {
+		return false
+	}
+
+	if s.Kind != other.Kind {
+		return false
+	}
+
+	if s.LastError != nil || other.LastError != nil {
+		if s.LastError == nil || other.LastError == nil {
+			return false
+		}
+		if s.LastError.Error() != other.LastError.Error() {
+			return false
+		}
+	}
+
+	if !s.WireVersion.Equals(other.WireVersion) {
+		return false
+	}
+
+	if len(s.Tags) != len(other.Tags) || !s.Tags.ContainsAll(other.Tags) {
+		return false
+	}
+
+	if s.SetVersion != other.SetVersion {
+		return false
+	}
+
+	if s.ElectionID != other.ElectionID {
+		return false
+	}
+
+	if ptrutil.CompareInt64(s.SessionTimeoutMinutesPtr, other.SessionTimeoutMinutesPtr) != 0 {
+		return false
+	}
+
+	// If TopologyVersion is nil for both servers, CompareToIncoming will return -1 because it assumes that the
+	// incoming response is newer. We want the descriptions to be considered equal in this case, though, so an
+	// explicit check is required.
+	if s.TopologyVersion == nil && other.TopologyVersion == nil {
+		return true
+	}
+	return s.TopologyVersion.CompareToIncoming(other.TopologyVersion) == 0
+}
+
+func sliceStringEqual(a []string, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if v != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// stringSliceFromRawElement decodes the provided BSON element into a []string.
+// This internally calls StringSliceFromRawValue on the element's value. The
+// error conditions outlined in that function's documentation apply for this
+// function as well.
+func stringSliceFromRawElement(element bson.RawElement) ([]string, error) {
+	return bsonutil.StringSliceFromRawValue(element.Key(), element.Value())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_kind.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_kind.go
new file mode 100644
index 0000000000000000000000000000000000000000..b71d29d8b5667c2fcb9308bd5f4aea3d19637e9f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_kind.go
@@ -0,0 +1,46 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+// ServerKind represents the type of a single server in a topology.
+type ServerKind uint32
+
+// These constants are the possible types of servers.
+const (
+	Standalone   ServerKind = 1
+	RSMember     ServerKind = 2
+	RSPrimary    ServerKind = 4 + RSMember
+	RSSecondary  ServerKind = 8 + RSMember
+	RSArbiter    ServerKind = 16 + RSMember
+	RSGhost      ServerKind = 32 + RSMember
+	Mongos       ServerKind = 256
+	LoadBalancer ServerKind = 512
+)
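+
+// Every replica set kind above shares the RSMember bit, so kind&RSMember != 0
+// reports whether a server is a replica set member of any role.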
+
+// String returns a stringified version of the kind or "Unknown" if the kind is invalid.
+func (kind ServerKind) String() string {
+	switch kind {
+	case Standalone:
+		return "Standalone"
+	case RSMember:
+		return "RSOther"
+	case RSPrimary:
+		return "RSPrimary"
+	case RSSecondary:
+		return "RSSecondary"
+	case RSArbiter:
+		return "RSArbiter"
+	case RSGhost:
+		return "RSGhost"
+	case Mongos:
+		return "Mongos"
+	case LoadBalancer:
+		return "LoadBalancer"
+	}
+
+	return "Unknown"
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go
new file mode 100644
index 0000000000000000000000000000000000000000..176f0fb53a86d7d265f8c2dd07fda9ef0247c5ea
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go
@@ -0,0 +1,420 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/tag"
+)
+
+// ServerSelector is an interface implemented by types that can perform server selection given a topology description
+// and list of candidate servers. The selector should filter the provided candidates list and return a subset that
+// matches some criteria.
+type ServerSelector interface {
+	SelectServer(Topology, []Server) ([]Server, error)
+}
+
+// ServerSelectorFunc is a function that can be used as a ServerSelector.
+type ServerSelectorFunc func(Topology, []Server) ([]Server, error)
+
+// SelectServer implements the ServerSelector interface.
+func (ssf ServerSelectorFunc) SelectServer(t Topology, s []Server) ([]Server, error) {
+	return ssf(t, s)
+}
+
+// serverSelectorInfo contains metadata concerning the server selector for the
+// purpose of publication.
+type serverSelectorInfo struct {
+	Type      string
+	Data      string               `json:",omitempty"`
+	Selectors []serverSelectorInfo `json:",omitempty"`
+}
+
+// String returns the JSON string representation of the serverSelectorInfo.
+func (sss serverSelectorInfo) String() string {
+	bytes, _ := json.Marshal(sss)
+
+	return string(bytes)
+}
+
+// serverSelectorInfoGetter is an interface that defines an info() method to
+// get the serverSelectorInfo.
+type serverSelectorInfoGetter interface {
+	info() serverSelectorInfo
+}
+
+type compositeSelector struct {
+	selectors []ServerSelector
+}
+
+func (cs *compositeSelector) info() serverSelectorInfo {
+	csInfo := serverSelectorInfo{Type: "compositeSelector"}
+
+	for _, sel := range cs.selectors {
+		if getter, ok := sel.(serverSelectorInfoGetter); ok {
+			csInfo.Selectors = append(csInfo.Selectors, getter.info())
+		}
+	}
+
+	return csInfo
+}
+
+// String returns the JSON string representation of the compositeSelector.
+func (cs *compositeSelector) String() string {
+	return cs.info().String()
+}
+
+// CompositeSelector combines multiple selectors into a single selector by applying them in order to the candidates
+// list.
+//
+// For example, if the initial candidates list is [s0, s1, s2, s3] and two selectors are provided where the first
+// matches s0 and s1 and the second matches s1 and s2, the following would occur during server selection:
+//
+// 1. firstSelector([s0, s1, s2, s3]) -> [s0, s1]
+// 2. secondSelector([s0, s1]) -> [s1]
+//
+// The final list of candidates returned by the composite selector would be [s1].
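+//
+// A sketch of typical composition (mirroring how the driver builds selectors for reads):
+//
+//	selector := CompositeSelector([]ServerSelector{
+//		ReadPrefSelector(readpref.Primary()),
+//		LatencySelector(15 * time.Millisecond),
+//	})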
+func CompositeSelector(selectors []ServerSelector) ServerSelector {
+	return &compositeSelector{selectors: selectors}
+}
+
+func (cs *compositeSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	var err error
+	for _, sel := range cs.selectors {
+		candidates, err = sel.SelectServer(t, candidates)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return candidates, nil
+}
+
+type latencySelector struct {
+	latency time.Duration
+}
+
+// LatencySelector creates a ServerSelector which selects servers based on their average RTT values.
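+// Only candidates whose average RTT is within the configured latency of the fastest
+// candidate's average RTT are kept; for example, with a 15ms window and a minimum
+// average RTT of 10ms, servers averaging up to 25ms remain eligible.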
+func LatencySelector(latency time.Duration) ServerSelector {
+	return &latencySelector{latency: latency}
+}
+
+func (latencySelector) info() serverSelectorInfo {
+	return serverSelectorInfo{Type: "latencySelector"}
+}
+
+func (selector latencySelector) String() string {
+	return selector.info().String()
+}
+
+func (selector *latencySelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	if selector.latency < 0 {
+		return candidates, nil
+	}
+	if t.Kind == LoadBalanced {
+		// In LoadBalanced mode, there should only be one server in the topology and it must be selected.
+		return candidates, nil
+	}
+
+	switch len(candidates) {
+	case 0, 1:
+		return candidates, nil
+	default:
+		min := time.Duration(math.MaxInt64)
+		for _, candidate := range candidates {
+			if candidate.AverageRTTSet {
+				if candidate.AverageRTT < min {
+					min = candidate.AverageRTT
+				}
+			}
+		}
+
+		if min == math.MaxInt64 {
+			return candidates, nil
+		}
+
+		max := min + selector.latency
+
+		viableIndexes := make([]int, 0, len(candidates))
+		for i, candidate := range candidates {
+			if candidate.AverageRTTSet {
+				if candidate.AverageRTT <= max {
+					viableIndexes = append(viableIndexes, i)
+				}
+			}
+		}
+		if len(viableIndexes) == len(candidates) {
+			return candidates, nil
+		}
+		result := make([]Server, len(viableIndexes))
+		for i, idx := range viableIndexes {
+			result[i] = candidates[idx]
+		}
+		return result, nil
+	}
+}
+
+type writeServerSelector struct{}
+
+// WriteSelector selects all the writable servers.
+func WriteSelector() ServerSelector {
+	return writeServerSelector{}
+}
+
+func (writeServerSelector) info() serverSelectorInfo {
+	return serverSelectorInfo{Type: "writeSelector"}
+}
+
+func (selector writeServerSelector) String() string {
+	return selector.info().String()
+}
+
+func (writeServerSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	switch t.Kind {
+	case Single, LoadBalanced:
+		return candidates, nil
+	default:
+		// Determine the capacity of the results slice.
+		selected := 0
+		for _, candidate := range candidates {
+			switch candidate.Kind {
+			case Mongos, RSPrimary, Standalone:
+				selected++
+			}
+		}
+
+		// Append candidates to the results slice.
+		result := make([]Server, 0, selected)
+		for _, candidate := range candidates {
+			switch candidate.Kind {
+			case Mongos, RSPrimary, Standalone:
+				result = append(result, candidate)
+			}
+		}
+		return result, nil
+	}
+}
+
+type readPrefServerSelector struct {
+	rp                *readpref.ReadPref
+	isOutputAggregate bool
+}
+
+// ReadPrefSelector selects servers based on the provided read preference.
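+// For example, ReadPrefSelector(readpref.SecondaryPreferred()) keeps matching
+// secondaries when any are available and falls back to the primary otherwise.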
+func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector {
+	return readPrefServerSelector{
+		rp:                rp,
+		isOutputAggregate: false,
+	}
+}
+
+func (selector readPrefServerSelector) info() serverSelectorInfo {
+	return serverSelectorInfo{
+		Type: "readPrefSelector",
+		Data: selector.rp.String(),
+	}
+}
+
+func (selector readPrefServerSelector) String() string {
+	return selector.info().String()
+}
+
+func (selector readPrefServerSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	if t.Kind == LoadBalanced {
+		// In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check
+		// this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate
+		// server wouldn't have a wire version set, which would result in an error.
+		return candidates, nil
+	}
+
+	switch t.Kind {
+	case Single:
+		return candidates, nil
+	case ReplicaSetNoPrimary, ReplicaSetWithPrimary:
+		return selectForReplicaSet(selector.rp, selector.isOutputAggregate, t, candidates)
+	case Sharded:
+		return selectByKind(candidates, Mongos), nil
+	}
+
+	return nil, nil
+}
+
+// OutputAggregateSelector selects servers based on the provided read preference
+// given that the underlying operation is aggregate with an output stage.
+func OutputAggregateSelector(rp *readpref.ReadPref) ServerSelector {
+	return readPrefServerSelector{
+		rp:                rp,
+		isOutputAggregate: true,
+	}
+}
+
+func selectForReplicaSet(rp *readpref.ReadPref, isOutputAggregate bool, t Topology, candidates []Server) ([]Server, error) {
+	if err := verifyMaxStaleness(rp, t); err != nil {
+		return nil, err
+	}
+
+	// If underlying operation is an aggregate with an output stage, only apply read preference
+	// if all candidates are 5.0+. Otherwise, operate under primary read preference.
+	if isOutputAggregate {
+		for _, s := range candidates {
+			if s.WireVersion.Max < 13 {
+				return selectByKind(candidates, RSPrimary), nil
+			}
+		}
+	}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		return selectByKind(candidates, RSPrimary), nil
+	case readpref.PrimaryPreferredMode:
+		selected := selectByKind(candidates, RSPrimary)
+
+		if len(selected) == 0 {
+			selected = selectSecondaries(rp, candidates)
+			return selectByTagSet(selected, rp.TagSets()), nil
+		}
+
+		return selected, nil
+	case readpref.SecondaryPreferredMode:
+		selected := selectSecondaries(rp, candidates)
+		selected = selectByTagSet(selected, rp.TagSets())
+		if len(selected) > 0 {
+			return selected, nil
+		}
+		return selectByKind(candidates, RSPrimary), nil
+	case readpref.SecondaryMode:
+		selected := selectSecondaries(rp, candidates)
+		return selectByTagSet(selected, rp.TagSets()), nil
+	case readpref.NearestMode:
+		selected := selectByKind(candidates, RSPrimary)
+		selected = append(selected, selectSecondaries(rp, candidates)...)
+		return selectByTagSet(selected, rp.TagSets()), nil
+	}
+
+	return nil, fmt.Errorf("unsupported mode: %d", rp.Mode())
+}
+
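+// selectSecondaries returns the secondary candidates, filtering by max staleness when
+// one is set on the read preference. With a primary P available, a secondary S's
+// staleness is estimated as:
+//
+//	(S.LastUpdateTime - S.LastWriteTime) - (P.LastUpdateTime - P.LastWriteTime) + HeartbeatInterval
+//
+// Without a primary, the most recent secondary write time is used as the baseline.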
+func selectSecondaries(rp *readpref.ReadPref, candidates []Server) []Server {
+	secondaries := selectByKind(candidates, RSSecondary)
+	if len(secondaries) == 0 {
+		return secondaries
+	}
+	if maxStaleness, set := rp.MaxStaleness(); set {
+		primaries := selectByKind(candidates, RSPrimary)
+		if len(primaries) == 0 {
+			baseTime := secondaries[0].LastWriteTime
+			for i := 1; i < len(secondaries); i++ {
+				if secondaries[i].LastWriteTime.After(baseTime) {
+					baseTime = secondaries[i].LastWriteTime
+				}
+			}
+
+			var selected []Server
+			for _, secondary := range secondaries {
+				estimatedStaleness := baseTime.Sub(secondary.LastWriteTime) + secondary.HeartbeatInterval
+				if estimatedStaleness <= maxStaleness {
+					selected = append(selected, secondary)
+				}
+			}
+
+			return selected
+		}
+
+		primary := primaries[0]
+
+		var selected []Server
+		for _, secondary := range secondaries {
+			estimatedStaleness := secondary.LastUpdateTime.Sub(secondary.LastWriteTime) - primary.LastUpdateTime.Sub(primary.LastWriteTime) + secondary.HeartbeatInterval
+			if estimatedStaleness <= maxStaleness {
+				selected = append(selected, secondary)
+			}
+		}
+		return selected
+	}
+
+	return secondaries
+}
+
+func selectByTagSet(candidates []Server, tagSets []tag.Set) []Server {
+	if len(tagSets) == 0 {
+		return candidates
+	}
+
+	for _, ts := range tagSets {
+		// If this tag set is empty, we can take a fast path because the empty list is a subset of all tag sets, so
+		// all candidate servers will be selected.
+		if len(ts) == 0 {
+			return candidates
+		}
+
+		var results []Server
+		for _, s := range candidates {
+			// ts is non-empty, so only servers with a non-empty set of tags need to be checked.
+			if len(s.Tags) > 0 && s.Tags.ContainsAll(ts) {
+				results = append(results, s)
+			}
+		}
+
+		if len(results) > 0 {
+			return results
+		}
+	}
+
+	return []Server{}
+}
+
+func selectByKind(candidates []Server, kind ServerKind) []Server {
+	// Record the indices of viable candidates first and then append those to the returned slice
+	// to avoid appending costly Server structs directly as an optimization.
+	viableIndexes := make([]int, 0, len(candidates))
+	for i, s := range candidates {
+		if s.Kind == kind {
+			viableIndexes = append(viableIndexes, i)
+		}
+	}
+	if len(viableIndexes) == len(candidates) {
+		return candidates
+	}
+	result := make([]Server, len(viableIndexes))
+	for i, idx := range viableIndexes {
+		result[i] = candidates[idx]
+	}
+	return result
+}
+
+func verifyMaxStaleness(rp *readpref.ReadPref, t Topology) error {
+	maxStaleness, set := rp.MaxStaleness()
+	if !set {
+		return nil
+	}
+
+	if maxStaleness < 90*time.Second {
+		return fmt.Errorf("max staleness (%s) must be greater than or equal to 90s", maxStaleness)
+	}
+
+	if len(t.Servers) < 1 {
+		// Maybe we should return an error here instead?
+		return nil
+	}
+
+	// we'll assume all candidates have the same heartbeat interval.
+	s := t.Servers[0]
+	idleWritePeriod := 10 * time.Second
+
+	if maxStaleness < s.HeartbeatInterval+idleWritePeriod {
+		return fmt.Errorf(
+			"max staleness (%s) must be greater than or equal to the heartbeat interval (%s) plus idle write period (%s)",
+			maxStaleness, s.HeartbeatInterval, idleWritePeriod,
+		)
+	}
+
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go
new file mode 100644
index 0000000000000000000000000000000000000000..b082515e53b5f25e65a45328e8fe4093f5e15d9b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go
@@ -0,0 +1,144 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+)
+
+// Topology contains information about a MongoDB cluster.
+type Topology struct {
+	Servers []Server
+	SetName string
+	Kind    TopologyKind
+	// Deprecated: Use SessionTimeoutMinutesPtr instead.
+	SessionTimeoutMinutes    uint32
+	SessionTimeoutMinutesPtr *int64
+	CompatibilityErr         error
+}
+
+// String implements the Stringer interface.
+func (t Topology) String() string {
+	var serversStr string
+	for _, s := range t.Servers {
+		serversStr += "{ " + s.String() + " }, "
+	}
+	return fmt.Sprintf("Type: %s, Servers: [%s]", t.Kind, serversStr)
+}
+
+// Equal compares two topology descriptions and returns true if they are equal.
+func (t Topology) Equal(other Topology) bool {
+	if t.Kind != other.Kind {
+		return false
+	}
+
+	topoServers := make(map[string]Server)
+	for _, s := range t.Servers {
+		topoServers[s.Addr.String()] = s
+	}
+
+	otherServers := make(map[string]Server)
+	for _, s := range other.Servers {
+		otherServers[s.Addr.String()] = s
+	}
+
+	if len(topoServers) != len(otherServers) {
+		return false
+	}
+
+	for _, server := range topoServers {
+		otherServer := otherServers[server.Addr.String()]
+
+		if !server.Equal(otherServer) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// HasReadableServer returns true if the topology contains a server suitable for reading.
+//
+// If the Topology's kind is Single or Sharded, the mode parameter is ignored and the function returns true if any of
+// the servers in the Topology are of a known type.
+//
+// For replica sets, the function returns true if the cluster contains a server that matches the provided read
+// preference mode.
+func (t Topology) HasReadableServer(mode readpref.Mode) bool {
+	switch t.Kind {
+	case Single, Sharded:
+		return hasAvailableServer(t.Servers, 0)
+	case ReplicaSetWithPrimary:
+		return hasAvailableServer(t.Servers, mode)
+	case ReplicaSetNoPrimary, ReplicaSet:
+		if mode == readpref.PrimaryMode {
+			return false
+		}
+		// invalid read preference
+		if !mode.IsValid() {
+			return false
+		}
+
+		return hasAvailableServer(t.Servers, mode)
+	}
+	return false
+}
+
+// HasWritableServer returns true if a topology has a server available for writing.
+//
+// If the Topology's kind is Single or Sharded, this function returns true if any of the servers in the Topology are of
+// a known type.
+//
+// For replica sets, the function returns true if the replica set contains a primary.
+func (t Topology) HasWritableServer() bool {
+	return t.HasReadableServer(readpref.PrimaryMode)
+}
+
+// hasAvailableServer returns true if any servers are available based on the read preference.
+func hasAvailableServer(servers []Server, mode readpref.Mode) bool {
+	switch mode {
+	case readpref.PrimaryMode:
+		for _, s := range servers {
+			if s.Kind == RSPrimary {
+				return true
+			}
+		}
+		return false
+	case readpref.PrimaryPreferredMode, readpref.SecondaryPreferredMode, readpref.NearestMode:
+		for _, s := range servers {
+			if s.Kind == RSPrimary || s.Kind == RSSecondary {
+				return true
+			}
+		}
+		return false
+	case readpref.SecondaryMode:
+		for _, s := range servers {
+			if s.Kind == RSSecondary {
+				return true
+			}
+		}
+		return false
+	}
+
+	// read preference is not specified
+	for _, s := range servers {
+		switch s.Kind {
+		case Standalone,
+			RSMember,
+			RSPrimary,
+			RSSecondary,
+			RSArbiter,
+			RSGhost,
+			Mongos:
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_kind.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_kind.go
new file mode 100644
index 0000000000000000000000000000000000000000..6d60c4d874e9cb1d65132a1456cdab050484bcd4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_kind.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+// TopologyKind represents a specific topology configuration.
+type TopologyKind uint32
+
+// These constants are the available topology configurations.
+const (
+	Single                TopologyKind = 1
+	ReplicaSet            TopologyKind = 2
+	ReplicaSetNoPrimary   TopologyKind = 4 + ReplicaSet
+	ReplicaSetWithPrimary TopologyKind = 8 + ReplicaSet
+	Sharded               TopologyKind = 256
+	LoadBalanced          TopologyKind = 512
+)
+
+// String implements the fmt.Stringer interface.
+func (kind TopologyKind) String() string {
+	switch kind {
+	case Single:
+		return "Single"
+	case ReplicaSet:
+		return "ReplicaSet"
+	case ReplicaSetNoPrimary:
+		return "ReplicaSetNoPrimary"
+	case ReplicaSetWithPrimary:
+		return "ReplicaSetWithPrimary"
+	case Sharded:
+		return "Sharded"
+	case LoadBalanced:
+		return "LoadBalanced"
+	}
+
+	return "Unknown"
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_version.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_version.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6674ea762441c651265293a517a78b41c449c6f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_version.go
@@ -0,0 +1,66 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// TopologyVersion represents the topology version of a server, as reported in hello command responses.
+type TopologyVersion struct {
+	ProcessID primitive.ObjectID
+	Counter   int64
+}
+
+// NewTopologyVersion creates a TopologyVersion based on doc
+func NewTopologyVersion(doc bson.Raw) (*TopologyVersion, error) {
+	elements, err := doc.Elements()
+	if err != nil {
+		return nil, err
+	}
+	var tv TopologyVersion
+	var ok bool
+	for _, element := range elements {
+		switch element.Key() {
+		case "processId":
+			tv.ProcessID, ok = element.Value().ObjectIDOK()
+			if !ok {
+				return nil, fmt.Errorf("expected 'processId' to be an ObjectID but it's a BSON %s", element.Value().Type)
+			}
+		case "counter":
+			tv.Counter, ok = element.Value().Int64OK()
+			if !ok {
+				return nil, fmt.Errorf("expected 'counter' to be an int64 but it's a BSON %s", element.Value().Type)
+			}
+		}
+	}
+	return &tv, nil
+}
+
+// CompareToIncoming compares the receiver, which represents the currently known TopologyVersion for a server, to an
+// incoming TopologyVersion extracted from a server command response.
+//
+// This returns -1 if the receiver version is less than the response, 0 if the versions are equal, and 1 if the
+// receiver version is greater than the response. This comparison is not commutative.
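+//
+// For example, with equal ProcessIDs, a receiver Counter of 2 against an incoming
+// Counter of 3 yields -1, equal Counters yield 0, and a receiver Counter of 3 against
+// an incoming Counter of 2 yields 1. Mismatched ProcessIDs (or a nil version on
+// either side) always yield -1.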
+func (tv *TopologyVersion) CompareToIncoming(responseTV *TopologyVersion) int {
+	if tv == nil || responseTV == nil {
+		return -1
+	}
+	if tv.ProcessID != responseTV.ProcessID {
+		return -1
+	}
+	if tv.Counter == responseTV.Counter {
+		return 0
+	}
+	if tv.Counter < responseTV.Counter {
+		return -1
+	}
+	return 1
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/version_range.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/version_range.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d6270c521dd006cc40479e1436cd6696363186f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/version_range.go
@@ -0,0 +1,42 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import "fmt"
+
+// VersionRange represents a range of versions.
+type VersionRange struct {
+	Min int32
+	Max int32
+}
+
+// NewVersionRange creates a new VersionRange given a min and a max.
+func NewVersionRange(min, max int32) VersionRange {
+	return VersionRange{Min: min, Max: max}
+}
+
+// Includes returns a bool indicating whether the supplied integer is included
+// in the range.
+func (vr VersionRange) Includes(v int32) bool {
+	return v >= vr.Min && v <= vr.Max
+}
+
+// Equals returns a bool indicating whether the supplied VersionRange is equal to this one.
+func (vr *VersionRange) Equals(other *VersionRange) bool {
+	if vr == nil && other == nil {
+		return true
+	}
+	if vr == nil || other == nil {
+		return false
+	}
+	return vr.Min == other.Min && vr.Max == other.Max
+}
+
+// String implements the fmt.Stringer interface.
+func (vr VersionRange) String() string {
+	return fmt.Sprintf("[%d, %d]", vr.Min, vr.Max)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..e0a5d66ac2c3d406ee87db02cc84231a382e25eb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go
@@ -0,0 +1,157 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// NOTE: This documentation should be kept in line with the Example* test functions.
+
+// Package mongo provides a MongoDB Driver API for Go.
+//
+// Basic usage of the driver starts with creating a Client from a connection
+// string. To do so, call Connect:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+//	defer cancel()
+//	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://foo:bar@localhost:27017"))
+//	if err != nil { return err }
+//
+// This will create a new client and start monitoring the MongoDB server on localhost.
+// The Database and Collection types can be used to access the database:
+//
+//	collection := client.Database("baz").Collection("qux")
+//
+// A Collection can be used to query the database or insert documents:
+//
+//	res, err := collection.InsertOne(context.Background(), bson.M{"hello": "world"})
+//	if err != nil { return err }
+//	id := res.InsertedID
+//
+// Several methods return a cursor, which can be used like this:
+//
+//	cur, err := collection.Find(context.Background(), bson.D{})
+//	if err != nil { log.Fatal(err) }
+//	defer cur.Close(context.Background())
+//	for cur.Next(context.Background()) {
+//	  // To decode into a struct, use cursor.Decode()
+//	  result := struct{
+//	    Foo string
+//	    Bar int32
+//	  }{}
+//	  err := cur.Decode(&result)
+//	  if err != nil { log.Fatal(err) }
+//	  // do something with result...
+//
+//	  // To get the raw bson bytes use cursor.Current
+//	  raw := cur.Current
+//	  // do something with raw...
+//	}
+//	if err := cur.Err(); err != nil {
+//	  return err
+//	}
+//
+// Cursor.All will decode all of the returned elements at once:
+//
+//	var results []struct{
+//	  Foo string
+//	  Bar int32
+//	}
+//	if err = cur.All(context.Background(), &results); err != nil {
+//	  log.Fatal(err)
+//	}
+//	// do something with results...
+//
+// Methods that only return a single document will return a *SingleResult, which works
+// like a *sql.Row:
+//
+//	result := struct{
+//	  Foo string
+//	  Bar int32
+//	}{}
+//	filter := bson.D{{"hello", "world"}}
+//	err := collection.FindOne(context.Background(), filter).Decode(&result)
+//	if err != nil { return err }
+//	// do something with result...
+//
+// All Client, Collection, and Database methods that take parameters of type interface{}
+// will return ErrNilDocument if nil is passed in for an interface{}.
+//
+// Additional examples can be found under the examples directory in the driver's repository and
+// on the MongoDB website.
+//
+// # Error Handling
+//
+// Errors from the MongoDB server will implement the ServerError interface, which has functions to check for specific
+// error codes, labels, and message substrings. These can be used to check for and handle specific errors. Some methods,
+// like InsertMany and BulkWrite, can return an error representing multiple errors, and in those cases the ServerError
+// functions will return true if any of the contained errors satisfy the check.
+//
+// There are also helper functions to check for certain specific types of errors:
+//
+//	IsDuplicateKeyError(error)
+//	IsNetworkError(error)
+//	IsTimeout(error)
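+//
+// For example, a minimal sketch of handling a duplicate key error on insert
+// (the collection and document shown are illustrative):
+//
+//	_, err := collection.InsertOne(context.Background(), bson.M{"_id": 1})
+//	if mongo.IsDuplicateKeyError(err) {
+//		// A document with this _id already exists; handle that case here.
+//	}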
+//
+// # Potential DNS Issues
+//
+// Building with Go 1.11+ and using connection strings with the "mongodb+srv"[1] scheme is unfortunately
+// incompatible with some DNS servers in the wild due to the change introduced in
+// https://github.com/golang/go/issues/10622. You may receive an error with the message "cannot unmarshal DNS message"
+// while running an operation when using DNS servers that non-compliantly compress SRV records. Old versions of kube-dns
+// and the native DNS resolver (systemd-resolved) on Ubuntu 18.04 are known to be non-compliant in this manner. We suggest
+// using a different DNS server (8.8.8.8 is the common default), and, if that's not possible, avoiding the "mongodb+srv"
+// scheme.
+//
+// # Client Side Encryption
+//
+// Client-side encryption is a feature introduced in MongoDB 4.2 that allows specific data fields to be encrypted. Using
+// this feature requires specifying the "cse" build tag during compilation:
+//
+//	go build -tags cse
+//
+// Note: Auto encryption is an enterprise- and Atlas-only feature.
+//
+// The libmongocrypt C library is required when using client-side encryption. Specific versions of libmongocrypt
+// are required for different versions of the Go Driver:
+//
+// - Go Driver v1.2.0 requires libmongocrypt v1.0.0 or higher
+//
+// - Go Driver v1.5.0 requires libmongocrypt v1.1.0 or higher
+//
+// - Go Driver v1.8.0 requires libmongocrypt v1.3.0 or higher
+//
+// - Go Driver v1.10.0 requires libmongocrypt v1.5.0 or higher.
+// There is a severe bug when calling RewrapManyDataKey with libmongocrypt versions less than 1.5.2.
+// This bug may result in data corruption.
+// Please use libmongocrypt 1.5.2 or higher when calling RewrapManyDataKey.
+//
+// - Go Driver v1.12.0 requires libmongocrypt v1.8.0 or higher.
+//
+// To install libmongocrypt, follow the instructions for your
+// operating system:
+//
+// 1. Linux: follow the instructions listed at
+// https://github.com/mongodb/libmongocrypt#installing-libmongocrypt-from-distribution-packages to install the correct
+// deb/rpm package.
+//
+// 2. Mac: Follow the instructions listed at https://github.com/mongodb/libmongocrypt#installing-libmongocrypt-on-macos
+// to install packages via brew and compile the libmongocrypt source code.
+//
+// 3. Windows:
+//
+//	mkdir -p c:/libmongocrypt/bin
+//	mkdir -p c:/libmongocrypt/include
+//
+//	// Run the curl command in an empty directory as it will create new directories when unpacked.
+//	curl https://s3.amazonaws.com/mciuploads/libmongocrypt/windows/latest_release/libmongocrypt.tar.gz --output libmongocrypt.tar.gz
+//	tar -xvzf libmongocrypt.tar.gz
+//
+//	cp ./bin/mongocrypt.dll c:/libmongocrypt/bin
+//	cp ./include/mongocrypt/*.h c:/libmongocrypt/include
+//	export PATH=$PATH:/cygdrive/c/libmongocrypt/bin
+//
+// libmongocrypt communicates with the mongocryptd process or mongo_crypt shared library for automatic encryption.
+// See AutoEncryptionOpts.SetExtraOptions for options to configure use of mongocryptd or mongo_crypt.
+//
+// [1] See https://www.mongodb.com/docs/manual/reference/connection-string/#dns-seedlist-connection-format
+package mongo
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..d92c9ca9bd70b65e7de3f8fc4483a21cb9637c2f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go
@@ -0,0 +1,682 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/internal/codecutil"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
+)
+
+// ErrUnacknowledgedWrite is returned by operations that have an unacknowledged write concern.
+var ErrUnacknowledgedWrite = errors.New("unacknowledged write")
+
+// ErrClientDisconnected is returned when a disconnected Client is used to run an operation.
+var ErrClientDisconnected = errors.New("client is disconnected")
+
+// ErrNilDocument is returned when a nil document is passed to a CRUD method.
+var ErrNilDocument = errors.New("document is nil")
+
+// ErrNilValue is returned when a nil value is passed to a CRUD method.
+var ErrNilValue = errors.New("value is nil")
+
+// ErrEmptySlice is returned when an empty slice is passed to a CRUD method that requires a non-empty slice.
+var ErrEmptySlice = errors.New("must provide at least one element in input slice")
+
+// ErrMapForOrderedArgument is returned when a map with multiple keys is passed to a CRUD method for an ordered parameter.
+type ErrMapForOrderedArgument struct {
+	ParamName string
+}
+
+// Error implements the error interface.
+func (e ErrMapForOrderedArgument) Error() string {
+	return fmt.Sprintf("multi-key map passed in for ordered parameter %v", e.ParamName)
+}
+
+func replaceErrors(err error) error {
+	// Return nil when err is nil to avoid costly reflection logic below.
+	if err == nil {
+		return nil
+	}
+
+	if errors.Is(err, topology.ErrTopologyClosed) {
+		return ErrClientDisconnected
+	}
+	if de, ok := err.(driver.Error); ok {
+		return CommandError{
+			Code:    de.Code,
+			Message: de.Message,
+			Labels:  de.Labels,
+			Name:    de.Name,
+			Wrapped: de.Wrapped,
+			Raw:     bson.Raw(de.Raw),
+		}
+	}
+	if qe, ok := err.(driver.QueryFailureError); ok {
+		// qe.Message is "command failure"
+		ce := CommandError{
+			Name:    qe.Message,
+			Wrapped: qe.Wrapped,
+			Raw:     bson.Raw(qe.Response),
+		}
+
+		dollarErr, err := qe.Response.LookupErr("$err")
+		if err == nil {
+			ce.Message, _ = dollarErr.StringValueOK()
+		}
+		code, err := qe.Response.LookupErr("code")
+		if err == nil {
+			ce.Code, _ = code.Int32OK()
+		}
+
+		return ce
+	}
+	if me, ok := err.(mongocrypt.Error); ok {
+		return MongocryptError{Code: me.Code, Message: me.Message}
+	}
+
+	if errors.Is(err, codecutil.ErrNilValue) {
+		return ErrNilValue
+	}
+
+	if marshalErr, ok := err.(codecutil.MarshalError); ok {
+		return MarshalError{
+			Value: marshalErr.Value,
+			Err:   marshalErr.Err,
+		}
+	}
+
+	return err
+}
+
+// IsDuplicateKeyError returns true if err is a duplicate key error.
+func IsDuplicateKeyError(err error) bool {
+	if se := ServerError(nil); errors.As(err, &se) {
+		return se.HasErrorCode(11000) || // Duplicate key error.
+			se.HasErrorCode(11001) || // Duplicate key error on update.
+			// Duplicate key error in a capped collection. See SERVER-7164.
+			se.HasErrorCode(12582) ||
+			// Mongos insert error caused by a duplicate key error. See
+			// SERVER-11493.
+			se.HasErrorCodeWithMessage(16460, " E11000 ")
+	}
+	return false
+}
+
+// timeoutErrs is a list of error values that indicate a timeout happened.
+var timeoutErrs = [...]error{
+	context.DeadlineExceeded,
+	driver.ErrDeadlineWouldBeExceeded,
+	topology.ErrServerSelectionTimeout,
+}
+
+// IsTimeout returns true if err was caused by a timeout. For error chains,
+// IsTimeout returns true if any error in the chain was caused by a timeout.
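+//
+// For example, a sketch of branching on a timeout, where ctx, coll, filter,
+// and result are illustrative:
+//
+//	err := coll.FindOne(ctx, filter).Decode(&result)
+//	if IsTimeout(err) {
+//		// The operation timed out; retry or surface the error as appropriate.
+//	}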
+func IsTimeout(err error) bool {
+	// Check if the error chain contains any of the timeout error values.
+	for _, target := range timeoutErrs {
+		if errors.Is(err, target) {
+			return true
+		}
+	}
+
+	// Check if the error chain contains any error types that can indicate
+	// timeout.
+	if errors.As(err, &topology.WaitQueueTimeoutError{}) {
+		return true
+	}
+	if ce := (CommandError{}); errors.As(err, &ce) && ce.IsMaxTimeMSExpiredError() {
+		return true
+	}
+	if we := (WriteException{}); errors.As(err, &we) && we.WriteConcernError != nil && we.WriteConcernError.IsMaxTimeMSExpiredError() {
+		return true
+	}
+	if ne := net.Error(nil); errors.As(err, &ne) {
+		return ne.Timeout()
+	}
+	// Check timeout error labels.
+	if le := LabeledError(nil); errors.As(err, &le) {
+		if le.HasErrorLabel("NetworkTimeoutError") || le.HasErrorLabel("ExceededTimeLimitError") {
+			return true
+		}
+	}
+
+	return false
+}
+
+// unwrap returns the inner error if err implements Unwrap(), otherwise it returns nil.
+func unwrap(err error) error {
+	u, ok := err.(interface {
+		Unwrap() error
+	})
+	if !ok {
+		return nil
+	}
+	return u.Unwrap()
+}
+
+// errorHasLabel returns true if err contains the specified label.
+func errorHasLabel(err error, label string) bool {
+	for ; err != nil; err = unwrap(err) {
+		if le, ok := err.(LabeledError); ok && le.HasErrorLabel(label) {
+			return true
+		}
+	}
+	return false
+}
+
+// IsNetworkError returns true if err is a network error.
+func IsNetworkError(err error) bool {
+	return errorHasLabel(err, "NetworkError")
+}
+
+// MongocryptError represents a libmongocrypt error during client-side encryption.
+type MongocryptError struct {
+	Code    int32
+	Message string
+}
+
+// Error implements the error interface.
+func (m MongocryptError) Error() string {
+	return fmt.Sprintf("mongocrypt error %d: %v", m.Code, m.Message)
+}
+
+// EncryptionKeyVaultError represents an error while communicating with the key vault collection during client-side
+// encryption.
+type EncryptionKeyVaultError struct {
+	Wrapped error
+}
+
+// Error implements the error interface.
+func (ekve EncryptionKeyVaultError) Error() string {
+	return fmt.Sprintf("key vault communication error: %v", ekve.Wrapped)
+}
+
+// Unwrap returns the underlying error.
+func (ekve EncryptionKeyVaultError) Unwrap() error {
+	return ekve.Wrapped
+}
+
+// MongocryptdError represents an error while communicating with mongocryptd during client-side encryption.
+type MongocryptdError struct {
+	Wrapped error
+}
+
+// Error implements the error interface.
+func (e MongocryptdError) Error() string {
+	return fmt.Sprintf("mongocryptd communication error: %v", e.Wrapped)
+}
+
+// Unwrap returns the underlying error.
+func (e MongocryptdError) Unwrap() error {
+	return e.Wrapped
+}
+
+// LabeledError is an interface for errors with labels.
+type LabeledError interface {
+	error
+	// HasErrorLabel returns true if the error contains the specified label.
+	HasErrorLabel(string) bool
+}
+
+// ServerError is the interface implemented by errors returned from the server. Custom implementations of this
+// interface should not be used in production.
+type ServerError interface {
+	LabeledError
+	// HasErrorCode returns true if the error has the specified code.
+	HasErrorCode(int) bool
+	// HasErrorMessage returns true if the error contains the specified message.
+	HasErrorMessage(string) bool
+	// HasErrorCodeWithMessage returns true if any of the contained errors have the specified code and message.
+	HasErrorCodeWithMessage(int, string) bool
+
+	serverError()
+}
+
+var _ ServerError = CommandError{}
+var _ ServerError = WriteError{}
+var _ ServerError = WriteException{}
+var _ ServerError = BulkWriteException{}
+
+// CommandError represents a server error during execution of a command. This can be returned by any operation.
+type CommandError struct {
+	Code    int32
+	Message string
+	Labels  []string // Categories to which the error belongs
+	Name    string   // A human-readable name corresponding to the error code
+	Wrapped error    // The underlying error, if one exists.
+	Raw     bson.Raw // The original server response containing the error.
+}
+
+// Error implements the error interface.
+func (e CommandError) Error() string {
+	if e.Name != "" {
+		return fmt.Sprintf("(%v) %v", e.Name, e.Message)
+	}
+	return e.Message
+}
+
+// Unwrap returns the underlying error.
+func (e CommandError) Unwrap() error {
+	return e.Wrapped
+}
+
+// HasErrorCode returns true if the error has the specified code.
+func (e CommandError) HasErrorCode(code int) bool {
+	return int(e.Code) == code
+}
+
+// HasErrorLabel returns true if the error contains the specified label.
+func (e CommandError) HasErrorLabel(label string) bool {
+	if e.Labels != nil {
+		for _, l := range e.Labels {
+			if l == label {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// HasErrorMessage returns true if the error contains the specified message.
+func (e CommandError) HasErrorMessage(message string) bool {
+	return strings.Contains(e.Message, message)
+}
+
+// HasErrorCodeWithMessage returns true if the error has the specified code and Message contains the specified message.
+func (e CommandError) HasErrorCodeWithMessage(code int, message string) bool {
+	return int(e.Code) == code && strings.Contains(e.Message, message)
+}
+
+// IsMaxTimeMSExpiredError returns true if the error is a MaxTimeMSExpired error.
+func (e CommandError) IsMaxTimeMSExpiredError() bool {
+	return e.Code == 50 || e.Name == "MaxTimeMSExpired"
+}
+
+// serverError implements the ServerError interface.
+func (e CommandError) serverError() {}
+
+// WriteError is an error that occurred during execution of a write operation. This error type is only returned as part
+// of a WriteException or BulkWriteException.
+type WriteError struct {
+	// The index of the write in the slice passed to an InsertMany or BulkWrite operation that caused this error.
+	Index int
+
+	Code    int
+	Message string
+	Details bson.Raw
+
+	// The original write error from the server response.
+	Raw bson.Raw
+}
+
+func (we WriteError) Error() string {
+	msg := we.Message
+	if len(we.Details) > 0 {
+		msg = fmt.Sprintf("%s: %s", msg, we.Details.String())
+	}
+	return msg
+}
+
+// HasErrorCode returns true if the error has the specified code.
+func (we WriteError) HasErrorCode(code int) bool {
+	return we.Code == code
+}
+
+// HasErrorLabel returns true if the error contains the specified label. WriteErrors do not contain labels,
+// so we always return false.
+func (we WriteError) HasErrorLabel(string) bool {
+	return false
+}
+
+// HasErrorMessage returns true if the error contains the specified message.
+func (we WriteError) HasErrorMessage(message string) bool {
+	return strings.Contains(we.Message, message)
+}
+
+// HasErrorCodeWithMessage returns true if the error has the specified code and Message contains the specified message.
+func (we WriteError) HasErrorCodeWithMessage(code int, message string) bool {
+	return we.Code == code && strings.Contains(we.Message, message)
+}
+
+// serverError implements the ServerError interface.
+func (we WriteError) serverError() {}
+
+// WriteErrors is a group of write errors that occurred during execution of a write operation.
+type WriteErrors []WriteError
+
+// Error implements the error interface.
+func (we WriteErrors) Error() string {
+	errs := make([]error, len(we))
+	for i := 0; i < len(we); i++ {
+		errs[i] = we[i]
+	}
+	// WriteErrors isn't returned from batch operations, but we can still use the same formatter.
+	return "write errors: " + joinBatchErrors(errs)
+}
+
+func writeErrorsFromDriverWriteErrors(errs driver.WriteErrors) WriteErrors {
+	wes := make(WriteErrors, 0, len(errs))
+	for _, err := range errs {
+		wes = append(wes, WriteError{
+			Index:   int(err.Index),
+			Code:    int(err.Code),
+			Message: err.Message,
+			Details: bson.Raw(err.Details),
+			Raw:     bson.Raw(err.Raw),
+		})
+	}
+	return wes
+}
+
+// WriteConcernError represents a write concern failure during execution of a write operation. This error type is only
+// returned as part of a WriteException or a BulkWriteException.
+type WriteConcernError struct {
+	Name    string
+	Code    int
+	Message string
+	Details bson.Raw
+	Raw     bson.Raw // The original write concern error from the server response.
+}
+
+// Error implements the error interface.
+func (wce WriteConcernError) Error() string {
+	if wce.Name != "" {
+		return fmt.Sprintf("(%v) %v", wce.Name, wce.Message)
+	}
+	return wce.Message
+}
+
+// IsMaxTimeMSExpiredError returns true if the error is a MaxTimeMSExpired error.
+func (wce WriteConcernError) IsMaxTimeMSExpiredError() bool {
+	return wce.Code == 50
+}
+
+// WriteException is the error type returned by the InsertOne, DeleteOne, DeleteMany, UpdateOne, UpdateMany, and
+// ReplaceOne operations.
+type WriteException struct {
+	// The write concern error that occurred, or nil if there was none.
+	WriteConcernError *WriteConcernError
+
+	// The write errors that occurred during operation execution.
+	WriteErrors WriteErrors
+
+	// The categories to which the exception belongs.
+	Labels []string
+
+	// The original server response containing the error.
+	Raw bson.Raw
+}
+
+// Error implements the error interface.
+func (mwe WriteException) Error() string {
+	causes := make([]string, 0, 2)
+	if mwe.WriteConcernError != nil {
+		causes = append(causes, "write concern error: "+mwe.WriteConcernError.Error())
+	}
+	if len(mwe.WriteErrors) > 0 {
+		// The WriteErrors error message already starts with "write errors:", so don't add it to the
+		// error message again.
+		causes = append(causes, mwe.WriteErrors.Error())
+	}
+
+	message := "write exception: "
+	if len(causes) == 0 {
+		return message + "no causes"
+	}
+	return message + strings.Join(causes, ", ")
+}
+
+// HasErrorCode returns true if the error has the specified code.
+func (mwe WriteException) HasErrorCode(code int) bool {
+	if mwe.WriteConcernError != nil && mwe.WriteConcernError.Code == code {
+		return true
+	}
+	for _, we := range mwe.WriteErrors {
+		if we.Code == code {
+			return true
+		}
+	}
+	return false
+}
+
+// HasErrorLabel returns true if the error contains the specified label.
+func (mwe WriteException) HasErrorLabel(label string) bool {
+	if mwe.Labels != nil {
+		for _, l := range mwe.Labels {
+			if l == label {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// HasErrorMessage returns true if the error contains the specified message.
+func (mwe WriteException) HasErrorMessage(message string) bool {
+	if mwe.WriteConcernError != nil && strings.Contains(mwe.WriteConcernError.Message, message) {
+		return true
+	}
+	for _, we := range mwe.WriteErrors {
+		if strings.Contains(we.Message, message) {
+			return true
+		}
+	}
+	return false
+}
+
+// HasErrorCodeWithMessage returns true if any of the contained errors have the specified code and message.
+func (mwe WriteException) HasErrorCodeWithMessage(code int, message string) bool {
+	if mwe.WriteConcernError != nil &&
+		mwe.WriteConcernError.Code == code && strings.Contains(mwe.WriteConcernError.Message, message) {
+		return true
+	}
+	for _, we := range mwe.WriteErrors {
+		if we.Code == code && strings.Contains(we.Message, message) {
+			return true
+		}
+	}
+	return false
+}
+
+// serverError implements the ServerError interface.
+func (mwe WriteException) serverError() {}
+
+func convertDriverWriteConcernError(wce *driver.WriteConcernError) *WriteConcernError {
+	if wce == nil {
+		return nil
+	}
+
+	return &WriteConcernError{
+		Name:    wce.Name,
+		Code:    int(wce.Code),
+		Message: wce.Message,
+		Details: bson.Raw(wce.Details),
+		Raw:     bson.Raw(wce.Raw),
+	}
+}
+
+// BulkWriteError is an error that occurred during execution of one operation in a BulkWrite. This error type is only
+// returned as part of a BulkWriteException.
+type BulkWriteError struct {
+	WriteError            // The WriteError that occurred.
+	Request    WriteModel // The WriteModel that caused this error.
+}
+
+// Error implements the error interface.
+func (bwe BulkWriteError) Error() string {
+	return bwe.WriteError.Error()
+}
+
+// BulkWriteException is the error type returned by BulkWrite and InsertMany operations.
+type BulkWriteException struct {
+	// The write concern error that occurred, or nil if there was none.
+	WriteConcernError *WriteConcernError
+
+	// The write errors that occurred during operation execution.
+	WriteErrors []BulkWriteError
+
+	// The categories to which the exception belongs.
+	Labels []string
+}
+
+// Error implements the error interface.
+func (bwe BulkWriteException) Error() string {
+	causes := make([]string, 0, 2)
+	if bwe.WriteConcernError != nil {
+		causes = append(causes, "write concern error: "+bwe.WriteConcernError.Error())
+	}
+	if len(bwe.WriteErrors) > 0 {
+		errs := make([]error, len(bwe.WriteErrors))
+		for i := 0; i < len(bwe.WriteErrors); i++ {
+			errs[i] = &bwe.WriteErrors[i]
+		}
+		causes = append(causes, "write errors: "+joinBatchErrors(errs))
+	}
+
+	message := "bulk write exception: "
+	if len(causes) == 0 {
+		return message + "no causes"
+	}
+	return "bulk write exception: " + strings.Join(causes, ", ")
+}
+
+// HasErrorCode returns true if any of the errors have the specified code.
+func (bwe BulkWriteException) HasErrorCode(code int) bool {
+	if bwe.WriteConcernError != nil && bwe.WriteConcernError.Code == code {
+		return true
+	}
+	for _, we := range bwe.WriteErrors {
+		if we.Code == code {
+			return true
+		}
+	}
+	return false
+}
+
+// HasErrorLabel returns true if the error contains the specified label.
+func (bwe BulkWriteException) HasErrorLabel(label string) bool {
+	if bwe.Labels != nil {
+		for _, l := range bwe.Labels {
+			if l == label {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// HasErrorMessage returns true if the error contains the specified message.
+func (bwe BulkWriteException) HasErrorMessage(message string) bool {
+	if bwe.WriteConcernError != nil && strings.Contains(bwe.WriteConcernError.Message, message) {
+		return true
+	}
+	for _, we := range bwe.WriteErrors {
+		if strings.Contains(we.Message, message) {
+			return true
+		}
+	}
+	return false
+}
+
+// HasErrorCodeWithMessage returns true if any of the contained errors have the specified code and message.
+func (bwe BulkWriteException) HasErrorCodeWithMessage(code int, message string) bool {
+	if bwe.WriteConcernError != nil &&
+		bwe.WriteConcernError.Code == code && strings.Contains(bwe.WriteConcernError.Message, message) {
+		return true
+	}
+	for _, we := range bwe.WriteErrors {
+		if we.Code == code && strings.Contains(we.Message, message) {
+			return true
+		}
+	}
+	return false
+}
+
+// serverError implements the ServerError interface.
+func (bwe BulkWriteException) serverError() {}
+
+// returnResult is used to determine if a function calling processWriteError should return
+// the result or return nil. Since the processWriteError function is used by many different
+// methods, both *One and *Many, we need a way to indicate whether the method should return
+// the result along with the error.
+type returnResult int
+
+const (
+	rrNone returnResult = 1 << iota // None means do not return the result ever.
+	rrOne                           // One means return the result if this was called by a *One method.
+	rrMany                          // Many means return the result if this was called by a *Many method.
+
+	rrAll returnResult = rrOne | rrMany // All means always return the result.
+)
+
+// processWriteError handles processing the result of a write operation. If the returnResult matches
+// the calling method's type, it should return the result object in addition to the error.
+// This function will wrap the errors from other packages and return them as errors from this package.
+//
+// WriteConcernError will be returned over WriteErrors if both are present.
+func processWriteError(err error) (returnResult, error) {
+	switch {
+	case errors.Is(err, driver.ErrUnacknowledgedWrite):
+		return rrAll, ErrUnacknowledgedWrite
+	case err != nil:
+		switch tt := err.(type) {
+		case driver.WriteCommandError:
+			return rrMany, WriteException{
+				WriteConcernError: convertDriverWriteConcernError(tt.WriteConcernError),
+				WriteErrors:       writeErrorsFromDriverWriteErrors(tt.WriteErrors),
+				Labels:            tt.Labels,
+				Raw:               bson.Raw(tt.Raw),
+			}
+		default:
+			return rrNone, replaceErrors(err)
+		}
+	default:
+		return rrAll, nil
+	}
+}
+
+// batchErrorsTargetLength is the target length of error messages returned by batch operation
+// error types. Try to limit batch error messages to 2kb to prevent problems when printing error
+// messages from large batch operations.
+const batchErrorsTargetLength = 2000
+
+// joinBatchErrors appends messages from the given errors to a comma-separated string. If the
+// string exceeds 2kb, it stops appending error messages and appends the message "+N more errors..."
+// to the end.
+//
+// Example format:
+//
+//	"[message 1, message 2, +8 more errors...]"
+func joinBatchErrors(errs []error) string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "[")
+	for idx, err := range errs {
+		if idx != 0 {
+			fmt.Fprint(&buf, ", ")
+		}
+		// If the error message has exceeded the target error message length, stop appending errors
+		// to the message and append the number of remaining errors instead.
+		if buf.Len() > batchErrorsTargetLength {
+			fmt.Fprintf(&buf, "+%d more errors...", len(errs)-idx)
+			break
+		}
+		fmt.Fprint(&buf, err.Error())
+	}
+	fmt.Fprint(&buf, "]")
+
+	return buf.String()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_options_builder.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_options_builder.go
new file mode 100644
index 0000000000000000000000000000000000000000..d12deaee283cf6ba44dbbc0c161beb0dd26722ee
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_options_builder.go
@@ -0,0 +1,176 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+// IndexOptionsBuilder specifies options for a new index.
+//
+// Deprecated: Use the IndexOptions type in the mongo/options package instead.
+type IndexOptionsBuilder struct {
+	document bson.D
+}
+
+// NewIndexOptionsBuilder creates a new IndexOptionsBuilder.
+//
+// Deprecated: Use the Index function in mongo/options instead.
+func NewIndexOptionsBuilder() *IndexOptionsBuilder {
+	return &IndexOptionsBuilder{}
+}
+
+// Background specifies a value for the background option.
+//
+// Deprecated: Use the IndexOptions.SetBackground function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Background(background bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"background", background})
+	return iob
+}
+
+// ExpireAfterSeconds specifies a value for the expireAfterSeconds option.
+//
+// Deprecated: Use the IndexOptions.SetExpireAfterSeconds function in mongo/options instead.
+func (iob *IndexOptionsBuilder) ExpireAfterSeconds(expireAfterSeconds int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"expireAfterSeconds", expireAfterSeconds})
+	return iob
+}
+
+// Name specifies a value for the name option.
+//
+// Deprecated: Use the IndexOptions.SetName function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Name(name string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"name", name})
+	return iob
+}
+
+// Sparse specifies a value for the sparse option.
+//
+// Deprecated: Use the IndexOptions.SetSparse function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Sparse(sparse bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"sparse", sparse})
+	return iob
+}
+
+// StorageEngine specifies a value for the storageEngine option.
+//
+// Deprecated: Use the IndexOptions.SetStorageEngine function in mongo/options instead.
+func (iob *IndexOptionsBuilder) StorageEngine(storageEngine interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"storageEngine", storageEngine})
+	return iob
+}
+
+// Unique specifies a value for the unique option.
+//
+// Deprecated: Use the IndexOptions.SetUnique function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Unique(unique bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"unique", unique})
+	return iob
+}
+
+// Version specifies a value for the version option.
+//
+// Deprecated: Use the IndexOptions.SetVersion function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Version(version int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"v", version})
+	return iob
+}
+
+// DefaultLanguage specifies a value for the default_language option.
+//
+// Deprecated: Use the IndexOptions.SetDefaultLanguage function in mongo/options instead.
+func (iob *IndexOptionsBuilder) DefaultLanguage(defaultLanguage string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"default_language", defaultLanguage})
+	return iob
+}
+
+// LanguageOverride specifies a value for the language_override option.
+//
+// Deprecated: Use the IndexOptions.SetLanguageOverride function in mongo/options instead.
+func (iob *IndexOptionsBuilder) LanguageOverride(languageOverride string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"language_override", languageOverride})
+	return iob
+}
+
+// TextVersion specifies a value for the textIndexVersion option.
+//
+// Deprecated: Use the IndexOptions.SetTextVersion function in mongo/options instead.
+func (iob *IndexOptionsBuilder) TextVersion(textVersion int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"textIndexVersion", textVersion})
+	return iob
+}
+
+// Weights specifies a value for the weights option.
+//
+// Deprecated: Use the IndexOptions.SetWeights function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Weights(weights interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"weights", weights})
+	return iob
+}
+
+// SphereVersion specifies a value for the 2dsphereIndexVersion option.
+//
+// Deprecated: Use the IndexOptions.SetSphereVersion function in mongo/options instead.
+func (iob *IndexOptionsBuilder) SphereVersion(sphereVersion int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"2dsphereIndexVersion", sphereVersion})
+	return iob
+}
+
+// Bits specifies a value for the bits option.
+//
+// Deprecated: Use the IndexOptions.SetBits function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Bits(bits int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"bits", bits})
+	return iob
+}
+
+// Max specifies a value for the max option.
+//
+// Deprecated: Use the IndexOptions.SetMax function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Max(max float64) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"max", max})
+	return iob
+}
+
+// Min specifies a value for the min option.
+//
+// Deprecated: Use the IndexOptions.SetMin function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Min(min float64) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"min", min})
+	return iob
+}
+
+// BucketSize specifies a value for the bucketSize option.
+//
+// Deprecated: Use the IndexOptions.SetBucketSize function in mongo/options instead.
+func (iob *IndexOptionsBuilder) BucketSize(bucketSize int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"bucketSize", bucketSize})
+	return iob
+}
+
+// PartialFilterExpression specifies a value for the partialFilterExpression option.
+//
+// Deprecated: Use the IndexOptions.SetPartialFilterExpression function in mongo/options instead.
+func (iob *IndexOptionsBuilder) PartialFilterExpression(partialFilterExpression interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"partialFilterExpression", partialFilterExpression})
+	return iob
+}
+
+// Collation specifies a value for the collation option.
+//
+// Deprecated: Use the IndexOptions.SetCollation function in mongo/options instead.
+func (iob *IndexOptionsBuilder) Collation(collation interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"collation", collation})
+	return iob
+}
+
+// Build finishes constructing the builder and returns the resulting options document.
+//
+// Deprecated: Use the IndexOptions type in the mongo/options package instead.
+func (iob *IndexOptionsBuilder) Build() bson.D {
+	return iob.document
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go
new file mode 100644
index 0000000000000000000000000000000000000000..db65f75072ac5e513fe0c760b09c203aa5fe0abd
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go
@@ -0,0 +1,517 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// ErrInvalidIndexValue is returned if an index is created with a keys document that has a value that is not a number
+// or string.
+var ErrInvalidIndexValue = errors.New("invalid index value")
+
+// ErrNonStringIndexName is returned if an index is created with a name that is not a string.
+var ErrNonStringIndexName = errors.New("index name must be a string")
+
+// ErrMultipleIndexDrop is returned if multiple indexes would be dropped from a call to IndexView.DropOne.
+var ErrMultipleIndexDrop = errors.New("multiple indexes would be dropped")
+
+// IndexView is a type that can be used to create, drop, and list indexes on a collection. An IndexView for a collection
+// can be created by a call to Collection.Indexes().
+type IndexView struct {
+	coll *Collection
+}
+
+// IndexModel represents a new index to be created.
+type IndexModel struct {
+	// A document describing which keys should be used for the index. It cannot be nil. This must be an order-preserving
+	// type such as bson.D. Map types such as bson.M are not valid. See https://www.mongodb.com/docs/manual/indexes/#indexes
+	// for examples of valid documents.
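+	//
+	// For example, a compound index over ascending "user_id" and descending
+	// "created_at" (the field names are illustrative) could be expressed as
+	// bson.D{{Key: "user_id", Value: 1}, {Key: "created_at", Value: -1}}.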
+	Keys interface{}
+
+	// The options to use to create the index.
+	Options *options.IndexOptions
+}
+
+func isNamespaceNotFoundError(err error) bool {
+	if de, ok := err.(driver.Error); ok {
+		return de.Code == 26
+	}
+	return false
+}
+
+// List executes a listIndexes command and returns a cursor over the indexes in the collection.
+//
+// The opts parameter can be used to specify options for this operation (see the options.ListIndexesOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listIndexes/.
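+//
+// A minimal usage sketch, where coll is an illustrative *mongo.Collection:
+//
+//	cur, err := coll.Indexes().List(context.Background())
+//	if err != nil { return err }
+//	var specs []bson.M
+//	if err := cur.All(context.Background(), &specs); err != nil { return err }
+//	// specs now holds one document per index, e.g. {"name": "_id_", "key": {"_id": 1}, ...}.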
+func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && iv.coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(iv.coll.client.sessionPool, iv.coll.client.id)
+	}
+
+	err := iv.coll.client.validSession(sess)
+	if err != nil {
+		closeImplicitSession(sess)
+		return nil, err
+	}
+
+	selector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(iv.coll.client.localThreshold),
+	})
+	selector = makeReadPrefSelector(sess, selector, iv.coll.client.localThreshold)
+
+	// TODO(GODRIVER-3038): This operation should pass CSE to the ListIndexes
+	// Crypt setter to be applied to the operation.
+	op := operation.NewListIndexes().
+		Session(sess).CommandMonitor(iv.coll.client.monitor).
+		ServerSelector(selector).ClusterClock(iv.coll.client.clock).
+		Database(iv.coll.db.name).Collection(iv.coll.name).
+		Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI).
+		Timeout(iv.coll.client.timeout).Authenticator(iv.coll.client.authenticator)
+
+	cursorOpts := iv.coll.client.createBaseCursorOptions()
+
+	cursorOpts.MarshalValueEncoderFn = newEncoderFn(iv.coll.bsonOpts, iv.coll.registry)
+
+	lio := options.MergeListIndexesOptions(opts...)
+	if lio.BatchSize != nil {
+		op = op.BatchSize(*lio.BatchSize)
+		cursorOpts.BatchSize = *lio.BatchSize
+	}
+	op = op.MaxTime(lio.MaxTime)
+	retry := driver.RetryNone
+	if iv.coll.client.retryReads {
+		retry = driver.RetryOncePerCommand
+	}
+	op.Retry(retry)
+
+	err = op.Execute(ctx)
+	if err != nil {
+		// for namespaceNotFound errors, return an empty cursor and do not throw an error
+		closeImplicitSession(sess)
+		if isNamespaceNotFoundError(err) {
+			return newEmptyCursor(), nil
+		}
+
+		return nil, replaceErrors(err)
+	}
+
+	bc, err := op.Result(cursorOpts)
+	if err != nil {
+		closeImplicitSession(sess)
+		return nil, replaceErrors(err)
+	}
+	cursor, err := newCursorWithSession(bc, iv.coll.bsonOpts, iv.coll.registry, sess)
+	return cursor, replaceErrors(err)
+}
+
+// ListSpecifications executes a listIndexes command and returns a slice of the returned IndexSpecifications.
+func (iv IndexView) ListSpecifications(ctx context.Context, opts ...*options.ListIndexesOptions) ([]*IndexSpecification, error) {
+	cursor, err := iv.List(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	var results []*IndexSpecification
+	err = cursor.All(ctx, &results)
+	if err != nil {
+		return nil, err
+	}
+
+	ns := iv.coll.db.Name() + "." + iv.coll.Name()
+	for _, res := range results {
+		// Pre-4.4 servers report a namespace in their responses, so set Namespace explicitly here to
+		// ensure the value is populated consistently regardless of server version.
+		res.Namespace = ns
+	}
+
+	return results, nil
+}
+
+// CreateOne executes a createIndexes command to create an index on the collection and returns the name of the new
+// index. See the IndexView.CreateMany documentation for more information and an example.
+func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*options.CreateIndexesOptions) (string, error) {
+	names, err := iv.CreateMany(ctx, []IndexModel{model}, opts...)
+	if err != nil {
+		return "", err
+	}
+
+	return names[0], nil
+}
+
+// CreateMany executes a createIndexes command to create multiple indexes on the collection and returns the names of
+// the new indexes.
+//
+// For each IndexModel in the models parameter, the index name can be specified via the Options field. If a name is not
+// given, it will be generated from the Keys document.
+//
+// The opts parameter can be used to specify options for this operation (see the options.CreateIndexesOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/createIndexes/.
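+//
+// A minimal usage sketch, where coll is an illustrative *mongo.Collection and
+// the key names and options are examples only:
+//
+//	models := []mongo.IndexModel{
+//		{Keys: bson.D{{Key: "email", Value: 1}}, Options: options.Index().SetUnique(true)},
+//		{Keys: bson.D{{Key: "createdAt", Value: -1}}},
+//	}
+//	names, err := coll.Indexes().CreateMany(context.Background(), models)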
+func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) {
+	names := make([]string, 0, len(models))
+
+	var indexes bsoncore.Document
+	aidx, indexes := bsoncore.AppendArrayStart(indexes)
+
+	for i, model := range models {
+		if model.Keys == nil {
+			return nil, errors.New("index model keys cannot be nil")
+		}
+
+		if isUnorderedMap(model.Keys) {
+			return nil, ErrMapForOrderedArgument{"keys"}
+		}
+
+		keys, err := marshal(model.Keys, iv.coll.bsonOpts, iv.coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		name, err := getOrGenerateIndexName(keys, model)
+		if err != nil {
+			return nil, err
+		}
+
+		names = append(names, name)
+
+		var iidx int32
+		iidx, indexes = bsoncore.AppendDocumentElementStart(indexes, strconv.Itoa(i))
+		indexes = bsoncore.AppendDocumentElement(indexes, "key", keys)
+
+		if model.Options == nil {
+			model.Options = options.Index()
+		}
+		model.Options.SetName(name)
+
+		optsDoc, err := iv.createOptionsDoc(model.Options)
+		if err != nil {
+			return nil, err
+		}
+
+		indexes = bsoncore.AppendDocument(indexes, optsDoc)
+
+		indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	indexes, err := bsoncore.AppendArrayEnd(indexes, aidx)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	if sess == nil && iv.coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(iv.coll.client.sessionPool, iv.coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = iv.coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := iv.coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, iv.coll.writeSelector)
+
+	option := options.MergeCreateIndexesOptions(opts...)
+
+	// TODO(GODRIVER-3038): This operation should pass CSE to the CreateIndexes
+	// Crypt setter to be applied to the operation.
+	//
+	// This was added in GODRIVER-2413 for the 2.0 major release.
+	op := operation.NewCreateIndexes(indexes).
+		Session(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock).
+		Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor).
+		Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI).
+		Timeout(iv.coll.client.timeout).MaxTime(option.MaxTime).Authenticator(iv.coll.client.authenticator)
+	if option.CommitQuorum != nil {
+		commitQuorum, err := marshalValue(option.CommitQuorum, iv.coll.bsonOpts, iv.coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		op.CommitQuorum(commitQuorum)
+	}
+
+	err = op.Execute(ctx)
+	if err != nil {
+		_, err = processWriteError(err)
+		return nil, err
+	}
+
+	return names, nil
+}
+
+func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsoncore.Document, error) {
+	optsDoc := bsoncore.Document{}
+	if opts.Background != nil {
+		optsDoc = bsoncore.AppendBooleanElement(optsDoc, "background", *opts.Background)
+	}
+	if opts.ExpireAfterSeconds != nil {
+		optsDoc = bsoncore.AppendInt32Element(optsDoc, "expireAfterSeconds", *opts.ExpireAfterSeconds)
+	}
+	if opts.Name != nil {
+		optsDoc = bsoncore.AppendStringElement(optsDoc, "name", *opts.Name)
+	}
+	if opts.Sparse != nil {
+		optsDoc = bsoncore.AppendBooleanElement(optsDoc, "sparse", *opts.Sparse)
+	}
+	if opts.StorageEngine != nil {
+		doc, err := marshal(opts.StorageEngine, iv.coll.bsonOpts, iv.coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = bsoncore.AppendDocumentElement(optsDoc, "storageEngine", doc)
+	}
+	if opts.Unique != nil {
+		optsDoc = bsoncore.AppendBooleanElement(optsDoc, "unique", *opts.Unique)
+	}
+	if opts.Version != nil {
+		optsDoc = bsoncore.AppendInt32Element(optsDoc, "v", *opts.Version)
+	}
+	if opts.DefaultLanguage != nil {
+		optsDoc = bsoncore.AppendStringElement(optsDoc, "default_language", *opts.DefaultLanguage)
+	}
+	if opts.LanguageOverride != nil {
+		optsDoc = bsoncore.AppendStringElement(optsDoc, "language_override", *opts.LanguageOverride)
+	}
+	if opts.TextVersion != nil {
+		optsDoc = bsoncore.AppendInt32Element(optsDoc, "textIndexVersion", *opts.TextVersion)
+	}
+	if opts.Weights != nil {
+		doc, err := marshal(opts.Weights, iv.coll.bsonOpts, iv.coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = bsoncore.AppendDocumentElement(optsDoc, "weights", doc)
+	}
+	if opts.SphereVersion != nil {
+		optsDoc = bsoncore.AppendInt32Element(optsDoc, "2dsphereIndexVersion", *opts.SphereVersion)
+	}
+	if opts.Bits != nil {
+		optsDoc = bsoncore.AppendInt32Element(optsDoc, "bits", *opts.Bits)
+	}
+	if opts.Max != nil {
+		optsDoc = bsoncore.AppendDoubleElement(optsDoc, "max", *opts.Max)
+	}
+	if opts.Min != nil {
+		optsDoc = bsoncore.AppendDoubleElement(optsDoc, "min", *opts.Min)
+	}
+	if opts.BucketSize != nil {
+		optsDoc = bsoncore.AppendInt32Element(optsDoc, "bucketSize", *opts.BucketSize)
+	}
+	if opts.PartialFilterExpression != nil {
+		doc, err := marshal(opts.PartialFilterExpression, iv.coll.bsonOpts, iv.coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = bsoncore.AppendDocumentElement(optsDoc, "partialFilterExpression", doc)
+	}
+	if opts.Collation != nil {
+		optsDoc = bsoncore.AppendDocumentElement(optsDoc, "collation", bsoncore.Document(opts.Collation.ToDocument()))
+	}
+	if opts.WildcardProjection != nil {
+		doc, err := marshal(opts.WildcardProjection, iv.coll.bsonOpts, iv.coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = bsoncore.AppendDocumentElement(optsDoc, "wildcardProjection", doc)
+	}
+	if opts.Hidden != nil {
+		optsDoc = bsoncore.AppendBooleanElement(optsDoc, "hidden", *opts.Hidden)
+	}
+
+	return optsDoc, nil
+}
+
+func (iv IndexView) drop(ctx context.Context, index any, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && iv.coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(iv.coll.client.sessionPool, iv.coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err := iv.coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := iv.coll.writeConcern
+	if sess.TransactionRunning() {
+		wc = nil
+	}
+	if !writeconcern.AckWrite(wc) {
+		sess = nil
+	}
+
+	selector := makePinnedSelector(sess, iv.coll.writeSelector)
+
+	dio := options.MergeDropIndexesOptions(opts...)
+
+	// TODO(GODRIVER-3038): This operation should pass CSE to the DropIndexes
+	// Crypt setter to be applied to the operation.
+	op := operation.NewDropIndexes(index).Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor).
+		ServerSelector(selector).ClusterClock(iv.coll.client.clock).
+		Database(iv.coll.db.name).Collection(iv.coll.name).
+		Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI).
+		Timeout(iv.coll.client.timeout).MaxTime(dio.MaxTime).
+		Authenticator(iv.coll.client.authenticator)
+
+	err = op.Execute(ctx)
+	if err != nil {
+		return nil, replaceErrors(err)
+	}
+
+	// TODO: it's weird to return a bson.Raw here because we have to convert the result back to BSON
+	ridx, res := bsoncore.AppendDocumentStart(nil)
+	res = bsoncore.AppendInt32Element(res, "nIndexesWas", op.Result().NIndexesWas)
+	res, _ = bsoncore.AppendDocumentEnd(res, ridx)
+	return res, nil
+}
+
+// DropOne executes a dropIndexes operation to drop an index on the collection. If the operation succeeds, this returns
+// a BSON document in the form {nIndexesWas: <int32>}. The "nIndexesWas" field in the response contains the number of
+// indexes that existed prior to the drop.
+//
+// The name parameter should be the name of the index to drop. If the name is "*", ErrMultipleIndexDrop will be returned
+// without running the command because doing so would drop all indexes.
+//
+// The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/.
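+//
+// For example, assuming an index named "email_1" exists on coll:
+//
+//	res, err := coll.Indexes().DropOne(context.Background(), "email_1")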
+func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	if name == "*" {
+		return nil, ErrMultipleIndexDrop
+	}
+
+	return iv.drop(ctx, name, opts...)
+}
+
+// DropOneWithKey drops a collection index by key using the dropIndexes operation. If the operation succeeds, this returns
+// a BSON document in the form {nIndexesWas: <int32>}. The "nIndexesWas" field in the response contains the number of
+// indexes that existed prior to the drop.
+//
+// This function is useful to drop an index using its key specification instead of its name.
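+//
+// For example, where the key specification shown is illustrative:
+//
+//	res, err := coll.Indexes().DropOneWithKey(context.Background(), bson.D{{Key: "email", Value: 1}})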
+func (iv IndexView) DropOneWithKey(ctx context.Context, keySpecDocument interface{}, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	doc, err := marshal(keySpecDocument, iv.coll.bsonOpts, iv.coll.registry)
+	if err != nil {
+		return nil, err
+	}
+
+	return iv.drop(ctx, doc, opts...)
+}
+
+// DropAll executes a dropIndexes operation to drop all indexes on the collection. If the operation succeeds, this
+// returns a BSON document in the form {nIndexesWas: <int32>}. The "nIndexesWas" field in the response contains the
+// number of indexes that existed prior to the drop.
+//
+// The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions
+// documentation).
+//
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/.
+func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	return iv.drop(ctx, "*", opts...)
+}
+
+func getOrGenerateIndexName(keySpecDocument bsoncore.Document, model IndexModel) (string, error) {
+	if model.Options != nil && model.Options.Name != nil {
+		return *model.Options.Name, nil
+	}
+
+	name := bytes.NewBufferString("")
+	first := true
+
+	elems, err := keySpecDocument.Elements()
+	if err != nil {
+		return "", err
+	}
+	for _, elem := range elems {
+		if !first {
+			_, err := name.WriteRune('_')
+			if err != nil {
+				return "", err
+			}
+		}
+
+		_, err := name.WriteString(elem.Key())
+		if err != nil {
+			return "", err
+		}
+
+		_, err = name.WriteRune('_')
+		if err != nil {
+			return "", err
+		}
+
+		var value string
+
+		bsonValue := elem.Value()
+		switch bsonValue.Type {
+		case bsontype.Int32:
+			value = fmt.Sprintf("%d", bsonValue.Int32())
+		case bsontype.Int64:
+			value = fmt.Sprintf("%d", bsonValue.Int64())
+		case bsontype.String:
+			value = bsonValue.StringValue()
+		default:
+			return "", ErrInvalidIndexValue
+		}
+
+		_, err = name.WriteString(value)
+		if err != nil {
+			return "", err
+		}
+
+		first = false
+	}
+
+	return name.String(), nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec8e817c7305158e7c054e0fc11b34d456158e0f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go
@@ -0,0 +1,485 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo // import "go.mongodb.org/mongo-driver/mongo"
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/internal/codecutil"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// Dialer is used to make network connections.
+type Dialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// BSONAppender is an interface implemented by types that can marshal a
+// provided type into BSON bytes and append those bytes to the provided []byte.
+// The AppendBSON method can return a non-nil error and a non-nil []byte. The
+// AppendBSON method may also write incomplete BSON to the []byte.
+//
+// Deprecated: BSONAppender is unused and will be removed in Go Driver 2.0.
+type BSONAppender interface {
+	AppendBSON([]byte, interface{}) ([]byte, error)
+}
+
+// BSONAppenderFunc is an adapter function that allows any function that
+// satisfies the AppendBSON method signature to be used where a BSONAppender is
+// used.
+//
+// Deprecated: BSONAppenderFunc is unused and will be removed in Go Driver 2.0.
+type BSONAppenderFunc func([]byte, interface{}) ([]byte, error)
+
+// AppendBSON implements the BSONAppender interface
+//
+// Deprecated: BSONAppenderFunc is unused and will be removed in Go Driver 2.0.
+func (baf BSONAppenderFunc) AppendBSON(dst []byte, val interface{}) ([]byte, error) {
+	return baf(dst, val)
+}
+
+// MarshalError is returned when attempting to marshal a value into a document
+// results in an error.
+type MarshalError struct {
+	Value interface{}
+	Err   error
+}
+
+// Error implements the error interface.
+func (me MarshalError) Error() string {
+	return fmt.Sprintf("cannot marshal type %s to a BSON Document: %v", reflect.TypeOf(me.Value), me.Err)
+}
+
+// Pipeline is a type that makes creating aggregation pipelines easier. It is a
+// helper and is intended for serializing to BSON.
+//
+// Example usage:
+//
+//	mongo.Pipeline{
+//		{{"$group", bson.D{{"_id", "$state"}, {"totalPop", bson.D{{"$sum", "$pop"}}}}}},
+//		{{"$match", bson.D{{"totalPop", bson.D{{"$gte", 10*1000*1000}}}}}},
+//	}
+type Pipeline []bson.D
+
+// bvwPool is a pool of reusable BSON value writers.
+var bvwPool = bsonrw.NewBSONValueWriterPool()
+
+// getEncoder takes a writer, BSON options, and a BSON registry and returns a properly configured
+// bson.Encoder that writes to the given writer.
+func getEncoder(
+	w io.Writer,
+	opts *options.BSONOptions,
+	reg *bsoncodec.Registry,
+) (*bson.Encoder, error) {
+	vw := bvwPool.Get(w)
+	enc, err := bson.NewEncoder(vw)
+	if err != nil {
+		return nil, err
+	}
+
+	if opts != nil {
+		if opts.ErrorOnInlineDuplicates {
+			enc.ErrorOnInlineDuplicates()
+		}
+		if opts.IntMinSize {
+			enc.IntMinSize()
+		}
+		if opts.NilByteSliceAsEmpty {
+			enc.NilByteSliceAsEmpty()
+		}
+		if opts.NilMapAsEmpty {
+			enc.NilMapAsEmpty()
+		}
+		if opts.NilSliceAsEmpty {
+			enc.NilSliceAsEmpty()
+		}
+		if opts.OmitZeroStruct {
+			enc.OmitZeroStruct()
+		}
+		if opts.StringifyMapKeysWithFmt {
+			enc.StringifyMapKeysWithFmt()
+		}
+		if opts.UseJSONStructTags {
+			enc.UseJSONStructTags()
+		}
+	}
+
+	if reg != nil {
+		// TODO:(GODRIVER-2719): Remove error handling.
+		if err := enc.SetRegistry(reg); err != nil {
+			return nil, err
+		}
+	}
+
+	return enc, nil
+}
+
+// newEncoderFn will return a function for constructing an encoder based on the
+// provided codec options.
+func newEncoderFn(opts *options.BSONOptions, registry *bsoncodec.Registry) codecutil.EncoderFn {
+	return func(w io.Writer) (*bson.Encoder, error) {
+		return getEncoder(w, opts, registry)
+	}
+}
+
+// marshal marshals the given value as a BSON document. Byte slices are always converted to a
+// bson.Raw before marshaling.
+//
+// If bsonOpts and registry are specified, the encoder is configured with the requested behaviors.
+// If they are nil, the default behaviors are used.
+func marshal(
+	val interface{},
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+) (bsoncore.Document, error) {
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+	if val == nil {
+		return nil, ErrNilDocument
+	}
+	if bs, ok := val.([]byte); ok {
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(bs)
+	}
+
+	buf := new(bytes.Buffer)
+	enc, err := getEncoder(buf, bsonOpts, registry)
+	if err != nil {
+		return nil, fmt.Errorf("error configuring BSON encoder: %w", err)
+	}
+
+	err = enc.Encode(val)
+	if err != nil {
+		return nil, MarshalError{Value: val, Err: err}
+	}
+
+	return buf.Bytes(), nil
+}
+
+// ensureID inserts the given ObjectID as an element named "_id" at the
+// beginning of the given BSON document if there is not an "_id" already.
+// If the given ObjectID is primitive.NilObjectID, a new object ID will be
+// generated with time.Now().
+//
+// If there is already an element named "_id", the document is not modified. It
+// returns the resulting document and the decoded Go value of the "_id" element.
+func ensureID(
+	doc bsoncore.Document,
+	oid primitive.ObjectID,
+	bsonOpts *options.BSONOptions,
+	reg *bsoncodec.Registry,
+) (bsoncore.Document, interface{}, error) {
+	if reg == nil {
+		reg = bson.DefaultRegistry
+	}
+
+	// Try to find the "_id" element. If it exists, try to unmarshal just the
+	// "_id" field as an interface{} and return it along with the unmodified
+	// BSON document.
+	if _, err := doc.LookupErr("_id"); err == nil {
+		var id struct {
+			ID interface{} `bson:"_id"`
+		}
+		dec, err := getDecoder(doc, bsonOpts, reg)
+		if err != nil {
+			return nil, nil, fmt.Errorf("error configuring BSON decoder: %w", err)
+		}
+		err = dec.Decode(&id)
+		if err != nil {
+			return nil, nil, fmt.Errorf("error unmarshaling BSON document: %w", err)
+		}
+
+		return doc, id.ID, nil
+	}
+
+	// We couldn't find an "_id" element, so add one with the value of the
+	// provided ObjectID.
+
+	olddoc := doc
+
+	// Reserve an extra 17 bytes for the "_id" field we're about to add:
+	// type (1) + "_id" (3) + terminator (1) + object ID (12)
+	const extraSpace = 17
+	doc = make(bsoncore.Document, 0, len(olddoc)+extraSpace)
+	_, doc = bsoncore.ReserveLength(doc)
+	if oid.IsZero() {
+		oid = primitive.NewObjectID()
+	}
+	doc = bsoncore.AppendObjectIDElement(doc, "_id", oid)
+
+	// Remove and re-write the BSON document length header.
+	const int32Len = 4
+	doc = append(doc, olddoc[int32Len:]...)
+	doc = bsoncore.UpdateLength(doc, 0, int32(len(doc)))
+
+	return doc, oid, nil
+}
+
+func ensureDollarKey(doc bsoncore.Document) error {
+	firstElem, err := doc.IndexErr(0)
+	if err != nil {
+		return errors.New("update document must have at least one element")
+	}
+
+	if !strings.HasPrefix(firstElem.Key(), "$") {
+		return errors.New("update document must contain key beginning with '$'")
+	}
+	return nil
+}
+
+func ensureNoDollarKey(doc bsoncore.Document) error {
+	if elem, err := doc.IndexErr(0); err == nil && strings.HasPrefix(elem.Key(), "$") {
+		return errors.New("replacement document cannot contain keys beginning with '$'")
+	}
+
+	return nil
+}
+
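+// marshalAggregatePipeline marshals the given pipeline into BSON array bytes
+// (returned as a bsoncore.Document) and reports whether the last stage is an
+// output stage ("$out" or "$merge"). A minimal sketch of a typical call, using
+// the Pipeline helper defined above:
+//
+//	arr, hasOutputStage, err := marshalAggregatePipeline(Pipeline{
+//		{{"$match", bson.D{{"x", 1}}}},
+//		{{"$out", "target"}},
+//	}, nil, nil)
+//	// hasOutputStage == true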
+func marshalAggregatePipeline(
+	pipeline interface{},
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+) (bsoncore.Document, bool, error) {
+	switch t := pipeline.(type) {
+	case bsoncodec.ValueMarshaler:
+		btype, val, err := t.MarshalBSONValue()
+		if err != nil {
+			return nil, false, err
+		}
+		if btype != bsontype.Array {
+			return nil, false, fmt.Errorf("ValueMarshaler returned a %v, but was expecting %v", btype, bsontype.Array)
+		}
+
+		var hasOutputStage bool
+		pipelineDoc := bsoncore.Document(val)
+		values, _ := pipelineDoc.Values()
+		if pipelineLen := len(values); pipelineLen > 0 {
+			if finalDoc, ok := values[pipelineLen-1].DocumentOK(); ok {
+				if elem, err := finalDoc.IndexErr(0); err == nil && (elem.Key() == "$out" || elem.Key() == "$merge") {
+					hasOutputStage = true
+				}
+			}
+		}
+
+		return pipelineDoc, hasOutputStage, nil
+	default:
+		val := reflect.ValueOf(t)
+		if !val.IsValid() || (val.Kind() != reflect.Slice && val.Kind() != reflect.Array) {
+			return nil, false, fmt.Errorf("can only marshal slices and arrays into aggregation pipelines, but got %v", val.Kind())
+		}
+
+		var hasOutputStage bool
+		valLen := val.Len()
+
+		switch t := pipeline.(type) {
+		// Explicitly forbid non-empty pipelines that are semantically single documents
+		// and are implemented as slices.
+		case bson.D, bson.Raw, bsoncore.Document:
+			if valLen > 0 {
+				return nil, false,
+					fmt.Errorf("%T is not an allowed pipeline type as it represents a single document. Use bson.A or mongo.Pipeline instead", t)
+			}
+		// bsoncore.Arrays do not need to be marshaled. Only check validity and presence of output stage.
+		case bsoncore.Array:
+			if err := t.Validate(); err != nil {
+				return nil, false, err
+			}
+
+			values, err := t.Values()
+			if err != nil {
+				return nil, false, err
+			}
+
+			numVals := len(values)
+			if numVals == 0 {
+				return bsoncore.Document(t), false, nil
+			}
+
+			// If not empty, check if first value of the last stage is $out or $merge.
+			if lastStage, ok := values[numVals-1].DocumentOK(); ok {
+				if elem, err := lastStage.IndexErr(0); err == nil && (elem.Key() == "$out" || elem.Key() == "$merge") {
+					hasOutputStage = true
+				}
+			}
+			return bsoncore.Document(t), hasOutputStage, nil
+		}
+
+		aidx, arr := bsoncore.AppendArrayStart(nil)
+		for idx := 0; idx < valLen; idx++ {
+			doc, err := marshal(val.Index(idx).Interface(), bsonOpts, registry)
+			if err != nil {
+				return nil, false, err
+			}
+
+			if idx == valLen-1 {
+				if elem, err := doc.IndexErr(0); err == nil && (elem.Key() == "$out" || elem.Key() == "$merge") {
+					hasOutputStage = true
+				}
+			}
+			arr = bsoncore.AppendDocumentElement(arr, strconv.Itoa(idx), doc)
+		}
+		arr, _ = bsoncore.AppendArrayEnd(arr, aidx)
+		return arr, hasOutputStage, nil
+	}
+}
+
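+// marshalUpdateValue marshals an update argument into a bsoncore.Value. The
+// result is either an embedded document (a classic update document, validated
+// with ensureDollarKey or ensureNoDollarKey depending on dollarKeysAllowed) or
+// an array (an update pipeline, with each stage document validated the same way).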
+func marshalUpdateValue(
+	update interface{},
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+	dollarKeysAllowed bool,
+) (bsoncore.Value, error) {
+	documentCheckerFunc := ensureDollarKey
+	if !dollarKeysAllowed {
+		documentCheckerFunc = ensureNoDollarKey
+	}
+
+	var u bsoncore.Value
+	var err error
+	switch t := update.(type) {
+	case nil:
+		return u, ErrNilDocument
+	case primitive.D:
+		u.Type = bsontype.EmbeddedDocument
+		u.Data, err = marshal(update, bsonOpts, registry)
+		if err != nil {
+			return u, err
+		}
+
+		return u, documentCheckerFunc(u.Data)
+	case bson.Raw:
+		u.Type = bsontype.EmbeddedDocument
+		u.Data = t
+		return u, documentCheckerFunc(u.Data)
+	case bsoncore.Document:
+		u.Type = bsontype.EmbeddedDocument
+		u.Data = t
+		return u, documentCheckerFunc(u.Data)
+	case []byte:
+		u.Type = bsontype.EmbeddedDocument
+		u.Data = t
+		return u, documentCheckerFunc(u.Data)
+	case bsoncodec.Marshaler:
+		u.Type = bsontype.EmbeddedDocument
+		u.Data, err = t.MarshalBSON()
+		if err != nil {
+			return u, err
+		}
+
+		return u, documentCheckerFunc(u.Data)
+	case bsoncodec.ValueMarshaler:
+		u.Type, u.Data, err = t.MarshalBSONValue()
+		if err != nil {
+			return u, err
+		}
+		if u.Type != bsontype.Array && u.Type != bsontype.EmbeddedDocument {
+			return u, fmt.Errorf("ValueMarshaler returned a %v, but was expecting %v or %v", u.Type, bsontype.Array, bsontype.EmbeddedDocument)
+		}
+		return u, err
+	default:
+		val := reflect.ValueOf(t)
+		if !val.IsValid() {
+			return u, fmt.Errorf("can only marshal slices and arrays into update pipelines, but got %v", val.Kind())
+		}
+		if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
+			u.Type = bsontype.EmbeddedDocument
+			u.Data, err = marshal(update, bsonOpts, registry)
+			if err != nil {
+				return u, err
+			}
+
+			return u, documentCheckerFunc(u.Data)
+		}
+
+		u.Type = bsontype.Array
+		aidx, arr := bsoncore.AppendArrayStart(nil)
+		valLen := val.Len()
+		for idx := 0; idx < valLen; idx++ {
+			doc, err := marshal(val.Index(idx).Interface(), bsonOpts, registry)
+			if err != nil {
+				return u, err
+			}
+
+			if err := documentCheckerFunc(doc); err != nil {
+				return u, err
+			}
+
+			arr = bsoncore.AppendDocumentElement(arr, strconv.Itoa(idx), doc)
+		}
+		u.Data, _ = bsoncore.AppendArrayEnd(arr, aidx)
+		return u, err
+	}
+}
+
+func marshalValue(
+	val interface{},
+	bsonOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+) (bsoncore.Value, error) {
+	return codecutil.MarshalValue(val, newEncoderFn(bsonOpts, registry))
+}
+
+// countDocumentsAggregatePipeline builds the aggregation pipeline for the CountDocuments operation.
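+// The resulting pipeline has the following shape; the $skip and $limit stages
+// are only appended when the corresponding options are set:
+//
+//	[
+//		{"$match": <filter>},
+//		{"$skip": <opts.Skip>},
+//		{"$limit": <opts.Limit>},
+//		{"$group": {"_id": 1, "n": {"$sum": 1}}}
+//	]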
+func countDocumentsAggregatePipeline(
+	filter interface{},
+	encOpts *options.BSONOptions,
+	registry *bsoncodec.Registry,
+	opts *options.CountOptions,
+) (bsoncore.Document, error) {
+	filterDoc, err := marshal(filter, encOpts, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	aidx, arr := bsoncore.AppendArrayStart(nil)
+	didx, arr := bsoncore.AppendDocumentElementStart(arr, strconv.Itoa(0))
+	arr = bsoncore.AppendDocumentElement(arr, "$match", filterDoc)
+	arr, _ = bsoncore.AppendDocumentEnd(arr, didx)
+
+	index := 1
+	if opts != nil {
+		if opts.Skip != nil {
+			didx, arr = bsoncore.AppendDocumentElementStart(arr, strconv.Itoa(index))
+			arr = bsoncore.AppendInt64Element(arr, "$skip", *opts.Skip)
+			arr, _ = bsoncore.AppendDocumentEnd(arr, didx)
+			index++
+		}
+		if opts.Limit != nil {
+			didx, arr = bsoncore.AppendDocumentElementStart(arr, strconv.Itoa(index))
+			arr = bsoncore.AppendInt64Element(arr, "$limit", *opts.Limit)
+			arr, _ = bsoncore.AppendDocumentEnd(arr, didx)
+			index++
+		}
+	}
+
+	didx, arr = bsoncore.AppendDocumentElementStart(arr, strconv.Itoa(index))
+	iidx, arr := bsoncore.AppendDocumentElementStart(arr, "$group")
+	arr = bsoncore.AppendInt32Element(arr, "_id", 1)
+	iiidx, arr := bsoncore.AppendDocumentElementStart(arr, "n")
+	arr = bsoncore.AppendInt32Element(arr, "$sum", 1)
+	arr, _ = bsoncore.AppendDocumentEnd(arr, iiidx)
+	arr, _ = bsoncore.AppendDocumentEnd(arr, iidx)
+	arr, _ = bsoncore.AppendDocumentEnd(arr, didx)
+
+	return bsoncore.AppendArrayEnd(arr, aidx)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go
new file mode 100644
index 0000000000000000000000000000000000000000..2603a3918d99cc4ef97c24ab0a21d7f54b9b3df1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go
@@ -0,0 +1,164 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"os/exec"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+const (
+	defaultServerSelectionTimeout = 10 * time.Second
+	defaultURI                    = "mongodb://localhost:27020"
+	defaultPath                   = "mongocryptd"
+	serverSelectionTimeoutStr     = "server selection error"
+)
+
+var defaultTimeoutArgs = []string{"--idleShutdownTimeoutSecs=60"}
+var databaseOpts = options.Database().SetReadConcern(readconcern.New()).SetReadPreference(readpref.Primary())
+
+type mongocryptdClient struct {
+	bypassSpawn bool
+	client      *Client
+	path        string
+	spawnArgs   []string
+}
+
+// newMongocryptdClient creates a client to mongocryptd.
+// newMongocryptdClient should not be called if the crypt_shared library is
+// available, because the crypt_shared library replaces all mongocryptd functionality.
+func newMongocryptdClient(opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) {
+	// create the mongocryptdClient instance and spawn the mongocryptd process if necessary
+	var bypassSpawn bool
+	var bypassAutoEncryption bool
+
+	if bypass, ok := opts.ExtraOptions["mongocryptdBypassSpawn"]; ok {
+		bypassSpawn = bypass.(bool)
+	}
+	if opts.BypassAutoEncryption != nil {
+		bypassAutoEncryption = *opts.BypassAutoEncryption
+	}
+
+	bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis
+
+	mc := &mongocryptdClient{
+		// mongocryptd should not be spawned if any of these conditions are true:
+		// - mongocryptdBypassSpawn is passed
+		// - bypassAutoEncryption is true because mongocryptd is not used during decryption
+		// - bypassQueryAnalysis is true because mongocryptd is only used for query analysis
+		bypassSpawn: bypassSpawn || bypassAutoEncryption || bypassQueryAnalysis,
+	}
+
+	if !mc.bypassSpawn {
+		mc.path, mc.spawnArgs = createSpawnArgs(opts.ExtraOptions)
+		if err := mc.spawnProcess(); err != nil {
+			return nil, err
+		}
+	}
+
+	// get connection string
+	uri := defaultURI
+	if u, ok := opts.ExtraOptions["mongocryptdURI"]; ok {
+		uri = u.(string)
+	}
+
+	// create client
+	client, err := NewClient(options.Client().ApplyURI(uri).SetServerSelectionTimeout(defaultServerSelectionTimeout))
+	if err != nil {
+		return nil, err
+	}
+	mc.client = client
+
+	return mc, nil
+}
+
+// markCommand executes the given command on mongocryptd.
+func (mc *mongocryptdClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) {
+	// Remove the explicit session from the context if one is set.
+	// The explicit session will be from a different client.
+	// If an explicit session is set, it is applied after automatic encryption.
+	ctx = NewSessionContext(ctx, nil)
+	db := mc.client.Database(dbName, databaseOpts)
+
+	res, err := db.RunCommand(ctx, cmd).Raw()
+	// propagate original result
+	if err == nil {
+		return bsoncore.Document(res), nil
+	}
+	// wrap original error
+	if mc.bypassSpawn || !strings.Contains(err.Error(), serverSelectionTimeoutStr) {
+		return nil, MongocryptdError{Wrapped: err}
+	}
+
+	// re-spawn and retry
+	if err = mc.spawnProcess(); err != nil {
+		return nil, err
+	}
+	res, err = db.RunCommand(ctx, cmd).Raw()
+	if err != nil {
+		return nil, MongocryptdError{Wrapped: err}
+	}
+	return bsoncore.Document(res), nil
+}
+
+// connect connects the underlying Client instance. This must be called before performing any mark operations.
+func (mc *mongocryptdClient) connect(ctx context.Context) error {
+	return mc.client.Connect(ctx)
+}
+
+// disconnect disconnects the underlying Client instance. This should be called after all operations have completed.
+func (mc *mongocryptdClient) disconnect(ctx context.Context) error {
+	return mc.client.Disconnect(ctx)
+}
+
+func (mc *mongocryptdClient) spawnProcess() error {
+	// Ignore gosec warning about subprocess launched with externally-provided path variable.
+	/* #nosec G204 */
+	cmd := exec.Command(mc.path, mc.spawnArgs...)
+	cmd.Stdout = nil
+	cmd.Stderr = nil
+	return cmd.Start()
+}
+
+// createSpawnArgs creates the arguments used to spawn the mongocryptd process. It returns the path and a slice of arguments.
+func createSpawnArgs(opts map[string]interface{}) (string, []string) {
+	var spawnArgs []string
+
+	// get command path
+	path := defaultPath
+	if p, ok := opts["mongocryptdSpawnPath"]; ok {
+		path = p.(string)
+	}
+
+	// add specified options
+	if sa, ok := opts["mongocryptdSpawnArgs"]; ok {
+		spawnArgs = append(spawnArgs, sa.([]string)...)
+	}
+
+	// add timeout options if necessary
+	var foundTimeout bool
+	for _, arg := range spawnArgs {
+		// need to use HasPrefix instead of doing an exact equality check because
+		// mongocryptd supports both [--idleShutdownTimeoutSecs, 0] and [--idleShutdownTimeoutSecs=0]
+		if strings.HasPrefix(arg, "--idleShutdownTimeoutSecs") {
+			foundTimeout = true
+			break
+		}
+	}
+	if !foundTimeout {
+		spawnArgs = append(spawnArgs, defaultTimeoutArgs...)
+	}
+
+	return path, spawnArgs
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..20e1c70439ea15e52b07c33bf6d600fbab6f63d1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go
@@ -0,0 +1,184 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+// AggregateOptions represents options that can be used to configure an Aggregate operation.
+type AggregateOptions struct {
+	// If true, the operation can write to temporary files in the _tmp subdirectory of the database directory path on
+	// the server. The default value is false.
+	AllowDiskUse *bool
+
+	// The maximum number of documents to be included in each batch returned by the server.
+	BatchSize *int32
+
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query.
+	// This option is only valid for MongoDB versions >= 3.2 and is ignored for previous server versions.
+	MaxAwaitTime *time.Duration
+
+	// A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation.
+	// The default is nil, which means that no comment will be included in the logs.
+	Comment *string
+
+	// The index to use for the aggregation. This should either be the index name as a string or the index specification
+	// as a document. The hint does not apply to $lookup and $graphLookup aggregation stages. The driver will return an
+	// error if the hint parameter is a multi-key map. The default value is nil, which means that no hint will be sent.
+	Hint interface{}
+
+	// Specifies parameters for the aggregate expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+
+	// Custom options to be added to aggregate expression. Key-value pairs of the BSON map should correlate with desired
+	// option names and values. Values must be Marshalable. Custom options may conflict with non-custom options, and custom
+	// options bypass client-side validation. Prefer using non-custom options where possible.
+	Custom bson.M
+}
+
+// Aggregate creates a new AggregateOptions instance.
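+//
+// A minimal usage sketch, chaining the setters defined below:
+//
+//	opts := options.Aggregate().
+//		SetAllowDiskUse(true).
+//		SetBatchSize(100)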
+func Aggregate() *AggregateOptions {
+	return &AggregateOptions{}
+}
+
+// SetAllowDiskUse sets the value for the AllowDiskUse field.
+func (ao *AggregateOptions) SetAllowDiskUse(b bool) *AggregateOptions {
+	ao.AllowDiskUse = &b
+	return ao
+}
+
+// SetBatchSize sets the value for the BatchSize field.
+func (ao *AggregateOptions) SetBatchSize(i int32) *AggregateOptions {
+	ao.BatchSize = &i
+	return ao
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (ao *AggregateOptions) SetBypassDocumentValidation(b bool) *AggregateOptions {
+	ao.BypassDocumentValidation = &b
+	return ao
+}
+
+// SetCollation sets the value for the Collation field.
+func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions {
+	ao.Collation = c
+	return ao
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions {
+	ao.MaxTime = &d
+	return ao
+}
+
+// SetMaxAwaitTime sets the value for the MaxAwaitTime field.
+func (ao *AggregateOptions) SetMaxAwaitTime(d time.Duration) *AggregateOptions {
+	ao.MaxAwaitTime = &d
+	return ao
+}
+
+// SetComment sets the value for the Comment field.
+func (ao *AggregateOptions) SetComment(s string) *AggregateOptions {
+	ao.Comment = &s
+	return ao
+}
+
+// SetHint sets the value for the Hint field.
+func (ao *AggregateOptions) SetHint(h interface{}) *AggregateOptions {
+	ao.Hint = h
+	return ao
+}
+
+// SetLet sets the value for the Let field.
+func (ao *AggregateOptions) SetLet(let interface{}) *AggregateOptions {
+	ao.Let = let
+	return ao
+}
+
+// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate
+// with desired option names and values. Values must be Marshalable. Custom options may conflict
+// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom
+// options where possible.
+func (ao *AggregateOptions) SetCustom(c bson.M) *AggregateOptions {
+	ao.Custom = c
+	return ao
+}
+
+// MergeAggregateOptions combines the given AggregateOptions instances into a single AggregateOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions {
+	aggOpts := Aggregate()
+	for _, ao := range opts {
+		if ao == nil {
+			continue
+		}
+		if ao.AllowDiskUse != nil {
+			aggOpts.AllowDiskUse = ao.AllowDiskUse
+		}
+		if ao.BatchSize != nil {
+			aggOpts.BatchSize = ao.BatchSize
+		}
+		if ao.BypassDocumentValidation != nil {
+			aggOpts.BypassDocumentValidation = ao.BypassDocumentValidation
+		}
+		if ao.Collation != nil {
+			aggOpts.Collation = ao.Collation
+		}
+		if ao.MaxTime != nil {
+			aggOpts.MaxTime = ao.MaxTime
+		}
+		if ao.MaxAwaitTime != nil {
+			aggOpts.MaxAwaitTime = ao.MaxAwaitTime
+		}
+		if ao.Comment != nil {
+			aggOpts.Comment = ao.Comment
+		}
+		if ao.Hint != nil {
+			aggOpts.Hint = ao.Hint
+		}
+		if ao.Let != nil {
+			aggOpts.Let = ao.Let
+		}
+		if ao.Custom != nil {
+			aggOpts.Custom = ao.Custom
+		}
+	}
+
+	return aggOpts
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..15d513862d049b7a20dcd16088af167712c53272
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go
@@ -0,0 +1,210 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"crypto/tls"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/internal/httputil"
+)
+
+// AutoEncryptionOptions represents options used to configure auto encryption/decryption behavior for a mongo.Client
+// instance.
+//
+// Automatic encryption is an enterprise-only feature that only applies to operations on a collection. Automatic
+// encryption is not supported for operations on a database or view, and operations that are not bypassed will result
+// in an error. To bypass automatic encryption for all operations, set BypassAutoEncryption=true.
+//
+// Auto encryption requires the authenticated user to have the listCollections privilege action.
+//
+// If automatic encryption fails on an operation, use a MongoClient configured with bypassAutoEncryption=true and use
+// ClientEncryption.encrypt() to manually encrypt values.
+//
+// Enabling Client Side Encryption reduces the maximum document and message size (using a maxBsonObjectSize of 2MiB and
+// maxMessageSizeBytes of 6MB) and may have a negative performance impact.
+type AutoEncryptionOptions struct {
+	KeyVaultClientOptions *ClientOptions
+	KeyVaultNamespace     string
+	KmsProviders          map[string]map[string]interface{}
+	SchemaMap             map[string]interface{}
+	BypassAutoEncryption  *bool
+	ExtraOptions          map[string]interface{}
+	TLSConfig             map[string]*tls.Config
+	HTTPClient            *http.Client
+	EncryptedFieldsMap    map[string]interface{}
+	BypassQueryAnalysis   *bool
+}
+
+// AutoEncryption creates a new AutoEncryptionOptions configured with default values.
+func AutoEncryption() *AutoEncryptionOptions {
+	return &AutoEncryptionOptions{
+		HTTPClient: httputil.DefaultHTTPClient,
+	}
+}
+
+// SetKeyVaultClientOptions specifies options for the client used to communicate with the key vault collection.
+//
+// If this is set, it is used to create an internal mongo.Client.
+// Otherwise, if the target mongo.Client being configured has an unlimited connection pool size (i.e. maxPoolSize=0),
+// it is reused to interact with the key vault collection.
+// Otherwise, if the target mongo.Client has a limited connection pool size, a separate internal mongo.Client is used
+// (and created if necessary). The internal mongo.Client may be shared during automatic encryption (if
+// BypassAutomaticEncryption is false). The internal mongo.Client is configured with the same options as the target
+// mongo.Client except minPoolSize is set to 0 and AutoEncryptionOptions is omitted.
+func (a *AutoEncryptionOptions) SetKeyVaultClientOptions(opts *ClientOptions) *AutoEncryptionOptions {
+	a.KeyVaultClientOptions = opts
+	return a
+}
+
+// SetKeyVaultNamespace specifies the namespace of the key vault collection. This is required.
+func (a *AutoEncryptionOptions) SetKeyVaultNamespace(ns string) *AutoEncryptionOptions {
+	a.KeyVaultNamespace = ns
+	return a
+}
+
+// SetKmsProviders specifies options for KMS providers. This is required.
+func (a *AutoEncryptionOptions) SetKmsProviders(providers map[string]map[string]interface{}) *AutoEncryptionOptions {
+	a.KmsProviders = providers
+	return a
+}
+
+// SetSchemaMap specifies a map from namespace to local schema document. Schemas supplied in the schemaMap only apply
+// to configuring automatic encryption for client side encryption. Other validation rules in the JSON schema will not
+// be enforced by the driver and will result in an error.
+//
+// Supplying a schemaMap provides more security than relying on JSON Schemas obtained from the server. It protects
+// against a malicious server advertising a false JSON Schema, which could trick the client into sending unencrypted
+// data that should be encrypted.
+func (a *AutoEncryptionOptions) SetSchemaMap(schemaMap map[string]interface{}) *AutoEncryptionOptions {
+	a.SchemaMap = schemaMap
+	return a
+}
+
+// SetBypassAutoEncryption specifies whether or not auto encryption should be done.
+//
+// If this is unset or false and the target mongo.Client being configured has an unlimited connection pool size
+// (i.e. maxPoolSize=0), it is reused in the process of auto encryption.
+// Otherwise, if the target mongo.Client has a limited connection pool size, a separate internal mongo.Client is used
+// (and created if necessary). The internal mongo.Client may be shared for key vault operations (if KeyVaultClient is
+// unset). The internal mongo.Client is configured with the same options as the target mongo.Client except minPoolSize
+// is set to 0 and AutoEncryptionOptions is omitted.
+func (a *AutoEncryptionOptions) SetBypassAutoEncryption(bypass bool) *AutoEncryptionOptions {
+	a.BypassAutoEncryption = &bypass
+	return a
+}
+
+// SetExtraOptions specifies a map of options to configure the mongocryptd process or mongo_crypt shared library.
+//
+// # Supported Extra Options
+//
+// "mongocryptdURI" - The mongocryptd URI. Allows setting a custom URI used to communicate with the
+// mongocryptd process. The default is "mongodb://localhost:27020", which works with the default
+// mongocryptd process spawned by the Client. Must be a string.
+//
+// "mongocryptdBypassSpawn" - If set to true, the Client will not attempt to spawn a mongocryptd
+// process. Must be a bool.
+//
+// "mongocryptdSpawnPath" - The path used when spawning mongocryptd.
+// Defaults to empty string and spawns mongocryptd from system path. Must be a string.
+//
+// "mongocryptdSpawnArgs" - Command line arguments passed when spawning mongocryptd.
+// Defaults to ["--idleShutdownTimeoutSecs=60"]. Must be an array of strings.
+//
+// "cryptSharedLibRequired" - If set to true, Client creation will return an error if the
+// crypt_shared library is not loaded. If unset or set to false, Client creation will not return an
+// error if the crypt_shared library is not loaded. The default is unset. Must be a bool.
+//
+// "cryptSharedLibPath" - The crypt_shared library override path. This must be the path to the
+// crypt_shared dynamic library file (for example, a .so, .dll, or .dylib file), not the directory
+// that contains it. If the override path is a relative path, it will be resolved relative to the
+// working directory of the process. If the override path is a relative path and the first path
+// component is the literal string "$ORIGIN", the "$ORIGIN" component will be replaced by the
+// absolute path to the directory containing the linked libmongocrypt library. Setting an override
+// path disables the default system library search path. If an override path is specified but the
+// crypt_shared library cannot be loaded, Client creation will return an error. Must be a string.
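+//
+// A minimal sketch of a valid map, using the option names documented above:
+//
+//	extra := map[string]interface{}{
+//		"mongocryptdBypassSpawn": true,
+//		"mongocryptdURI":         "mongodb://localhost:27020",
+//	}
+//	aeOpts := options.AutoEncryption().SetExtraOptions(extra)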
+func (a *AutoEncryptionOptions) SetExtraOptions(extraOpts map[string]interface{}) *AutoEncryptionOptions {
+	a.ExtraOptions = extraOpts
+	return a
+}
+
+// SetTLSConfig specifies tls.Config instances for each KMS provider to use to configure TLS on all connections created
+// to the KMS provider.
+//
+// This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12.
+func (a *AutoEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *AutoEncryptionOptions {
+	tlsConfigs := make(map[string]*tls.Config)
+	for provider, config := range tlsOpts {
+		// use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites
+		if config.MinVersion == 0 {
+			config.MinVersion = tls.VersionTLS12
+		}
+		tlsConfigs[provider] = config
+	}
+	a.TLSConfig = tlsConfigs
+	return a
+}
+
+// SetEncryptedFieldsMap specifies a map from namespace to local EncryptedFieldsMap document.
+// EncryptedFieldsMap is used for Queryable Encryption.
+func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptions {
+	a.EncryptedFieldsMap = ef
+	return a
+}
+
+// SetBypassQueryAnalysis specifies whether or not query analysis should be used for automatic encryption.
+// Use this option when using explicit encryption with Queryable Encryption.
+func (a *AutoEncryptionOptions) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptions {
+	a.BypassQueryAnalysis = &bypass
+	return a
+}
+
+// MergeAutoEncryptionOptions combines the argued AutoEncryptionOptions in a last-one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionOptions {
+	aeo := AutoEncryption()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.KeyVaultClientOptions != nil {
+			aeo.KeyVaultClientOptions = opt.KeyVaultClientOptions
+		}
+		if opt.KeyVaultNamespace != "" {
+			aeo.KeyVaultNamespace = opt.KeyVaultNamespace
+		}
+		if opt.KmsProviders != nil {
+			aeo.KmsProviders = opt.KmsProviders
+		}
+		if opt.SchemaMap != nil {
+			aeo.SchemaMap = opt.SchemaMap
+		}
+		if opt.BypassAutoEncryption != nil {
+			aeo.BypassAutoEncryption = opt.BypassAutoEncryption
+		}
+		if opt.ExtraOptions != nil {
+			aeo.ExtraOptions = opt.ExtraOptions
+		}
+		if opt.TLSConfig != nil {
+			aeo.TLSConfig = opt.TLSConfig
+		}
+		if opt.EncryptedFieldsMap != nil {
+			aeo.EncryptedFieldsMap = opt.EncryptedFieldsMap
+		}
+		if opt.BypassQueryAnalysis != nil {
+			aeo.BypassQueryAnalysis = opt.BypassQueryAnalysis
+		}
+		if opt.HTTPClient != nil {
+			aeo.HTTPClient = opt.HTTPClient
+		}
+	}
+
+	return aeo
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..153de0c7351057b4986e82e9a43dd400f6167705
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go
@@ -0,0 +1,94 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// DefaultOrdered is the default value for the Ordered option in BulkWriteOptions.
+var DefaultOrdered = true
+
+// BulkWriteOptions represents options that can be used to configure a BulkWrite operation.
+type BulkWriteOptions struct {
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// If true, no writes will be executed after one fails. The default value is true.
+	Ordered *bool
+
+	// Specifies parameters for all update and delete commands in the BulkWrite. This option is only valid for MongoDB
+	// versions >= 5.0. Older servers will report an error for using this option. This must be a document mapping
+	// parameter names to values. Values must be constant or closed expressions that do not reference document fields.
+	// Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// BulkWrite creates a new *BulkWriteOptions instance.
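+//
+// A minimal usage sketch (unordered writes with a comment attached):
+//
+//	opts := options.BulkWrite().
+//		SetOrdered(false).
+//		SetComment("example bulk write")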
+func BulkWrite() *BulkWriteOptions {
+	return &BulkWriteOptions{
+		Ordered: &DefaultOrdered,
+	}
+}
+
+// SetComment sets the value for the Comment field.
+func (b *BulkWriteOptions) SetComment(comment interface{}) *BulkWriteOptions {
+	b.Comment = comment
+	return b
+}
+
+// SetOrdered sets the value for the Ordered field.
+func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions {
+	b.Ordered = &ordered
+	return b
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOptions {
+	b.BypassDocumentValidation = &bypass
+	return b
+}
+
+// SetLet sets the value for the Let field. Let specifies parameters for all update and delete commands in the BulkWrite.
+// This option is only valid for MongoDB versions >= 5.0. Older servers will report an error for using this option.
+// This must be a document mapping parameter names to values. Values must be constant or closed expressions that do not
+// reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var").
+func (b *BulkWriteOptions) SetLet(let interface{}) *BulkWriteOptions {
+	b.Let = &let
+	return b
+}
+
+// MergeBulkWriteOptions combines the given BulkWriteOptions instances into a single BulkWriteOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions {
+	b := BulkWrite()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Comment != nil {
+			b.Comment = opt.Comment
+		}
+		if opt.Ordered != nil {
+			b.Ordered = opt.Ordered
+		}
+		if opt.BypassDocumentValidation != nil {
+			b.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Let != nil {
+			b.Let = opt.Let
+		}
+	}
+
+	return b
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..3d06a668e5a4eedd4f115f4b0300ffda5c847ed7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go
@@ -0,0 +1,207 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ChangeStreamOptions represents options that can be used to configure a Watch operation.
+type ChangeStreamOptions struct {
+	// The maximum number of documents to be included in each batch returned by the server.
+	BatchSize *int32
+
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation.
+	// The default is nil, which means that no comment will be included in the logs.
+	Comment *string
+
+	// Specifies how the updated document should be returned in change notifications for update operations. The default
+	// is options.Default, which means that only partial update deltas will be included in the change notification.
+	FullDocument *FullDocument
+
+	// Specifies how the pre-update document should be returned in change notifications for update operations. The default
+	// is options.Off, which means that the pre-update document will not be included in the change notification.
+	FullDocumentBeforeChange *FullDocument
+
+	// The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query.
+	MaxAwaitTime *time.Duration
+
+	// A document specifying the logical starting point for the change stream. Only changes corresponding to an oplog
+	// entry immediately after the resume token will be returned. If this is specified, StartAtOperationTime and
+	// StartAfter must not be set.
+	ResumeAfter interface{}
+
+	// ShowExpandedEvents specifies whether the server will return an expanded list of change stream events. Additional
+	// events include: createIndexes, dropIndexes, modify, create, shardCollection, reshardCollection and
+	// refineCollectionShardKey. This option is only valid for MongoDB versions >= 6.0.
+	ShowExpandedEvents *bool
+
+	// If specified, the change stream will only return changes that occurred at or after the given timestamp. This
+	// option is only valid for MongoDB versions >= 4.0. If this is specified, ResumeAfter and StartAfter must not be
+	// set.
+	StartAtOperationTime *primitive.Timestamp
+
+	// A document specifying the logical starting point for the change stream. This is similar to the ResumeAfter
+	// option, but allows a resume token from an "invalidate" notification to be used. This allows a change stream on a
+	// collection to be resumed after the collection has been dropped and recreated or renamed. Only changes
+	// corresponding to an oplog entry immediately after the specified token will be returned. If this is specified,
+	// ResumeAfter and StartAtOperationTime must not be set. This option is only valid for MongoDB versions >= 4.1.1.
+	StartAfter interface{}
+
+	// Custom options to be added to the initial aggregate for the change stream. Key-value pairs of the BSON map should
+	// correlate with desired option names and values. Values must be Marshalable. Custom options may conflict with
+	// non-custom options, and custom options bypass client-side validation. Prefer using non-custom options where possible.
+	Custom bson.M
+
+	// Custom options to be added to the $changeStream stage in the initial aggregate. Key-value pairs of the BSON map should
+	// correlate with desired option names and values. Values must be Marshalable. Custom pipeline options bypass client-side
+	// validation. Prefer using non-custom options where possible.
+	CustomPipeline bson.M
+}
+
+// ChangeStream creates a new ChangeStreamOptions instance.
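+//
+// A minimal usage sketch (UpdateLookup is a FullDocument constant defined
+// elsewhere in this package):
+//
+//	opts := options.ChangeStream().
+//		SetFullDocument(options.UpdateLookup).
+//		SetBatchSize(32)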
+func ChangeStream() *ChangeStreamOptions {
+	cso := &ChangeStreamOptions{}
+	return cso
+}
+
+// SetBatchSize sets the value for the BatchSize field.
+func (cso *ChangeStreamOptions) SetBatchSize(i int32) *ChangeStreamOptions {
+	cso.BatchSize = &i
+	return cso
+}
+
+// SetCollation sets the value for the Collation field.
+func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions {
+	cso.Collation = &c
+	return cso
+}
+
+// SetComment sets the value for the Comment field.
+func (cso *ChangeStreamOptions) SetComment(comment string) *ChangeStreamOptions {
+	cso.Comment = &comment
+	return cso
+}
+
+// SetFullDocument sets the value for the FullDocument field.
+func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions {
+	cso.FullDocument = &fd
+	return cso
+}
+
+// SetFullDocumentBeforeChange sets the value for the FullDocumentBeforeChange field.
+func (cso *ChangeStreamOptions) SetFullDocumentBeforeChange(fdbc FullDocument) *ChangeStreamOptions {
+	cso.FullDocumentBeforeChange = &fdbc
+	return cso
+}
+
+// SetMaxAwaitTime sets the value for the MaxAwaitTime field.
+func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions {
+	cso.MaxAwaitTime = &d
+	return cso
+}
+
+// SetResumeAfter sets the value for the ResumeAfter field.
+func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOptions {
+	cso.ResumeAfter = rt
+	return cso
+}
+
+// SetShowExpandedEvents sets the value for the ShowExpandedEvents field.
+func (cso *ChangeStreamOptions) SetShowExpandedEvents(see bool) *ChangeStreamOptions {
+	cso.ShowExpandedEvents = &see
+	return cso
+}
+
+// SetStartAtOperationTime sets the value for the StartAtOperationTime field.
+func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions {
+	cso.StartAtOperationTime = t
+	return cso
+}
+
+// SetStartAfter sets the value for the StartAfter field.
+func (cso *ChangeStreamOptions) SetStartAfter(sa interface{}) *ChangeStreamOptions {
+	cso.StartAfter = sa
+	return cso
+}
+
+// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate
+// with desired option names and values. Values must be Marshalable. Custom options may conflict
+// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom
+// options where possible.
+func (cso *ChangeStreamOptions) SetCustom(c bson.M) *ChangeStreamOptions {
+	cso.Custom = c
+	return cso
+}
+
+// SetCustomPipeline sets the value for the CustomPipeline field. Key-value pairs of the BSON map
+// should correlate with desired option names and values. Values must be Marshalable. Custom pipeline
+// options bypass client-side validation. Prefer using non-custom options where possible.
+func (cso *ChangeStreamOptions) SetCustomPipeline(cp bson.M) *ChangeStreamOptions {
+	cso.CustomPipeline = cp
+	return cso
+}
+
+// MergeChangeStreamOptions combines the given ChangeStreamOptions instances into a single ChangeStreamOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions {
+	csOpts := ChangeStream()
+	for _, cso := range opts {
+		if cso == nil {
+			continue
+		}
+		if cso.BatchSize != nil {
+			csOpts.BatchSize = cso.BatchSize
+		}
+		if cso.Collation != nil {
+			csOpts.Collation = cso.Collation
+		}
+		if cso.Comment != nil {
+			csOpts.Comment = cso.Comment
+		}
+		if cso.FullDocument != nil {
+			csOpts.FullDocument = cso.FullDocument
+		}
+		if cso.FullDocumentBeforeChange != nil {
+			csOpts.FullDocumentBeforeChange = cso.FullDocumentBeforeChange
+		}
+		if cso.MaxAwaitTime != nil {
+			csOpts.MaxAwaitTime = cso.MaxAwaitTime
+		}
+		if cso.ResumeAfter != nil {
+			csOpts.ResumeAfter = cso.ResumeAfter
+		}
+		if cso.ShowExpandedEvents != nil {
+			csOpts.ShowExpandedEvents = cso.ShowExpandedEvents
+		}
+		if cso.StartAtOperationTime != nil {
+			csOpts.StartAtOperationTime = cso.StartAtOperationTime
+		}
+		if cso.StartAfter != nil {
+			csOpts.StartAfter = cso.StartAfter
+		}
+		if cso.Custom != nil {
+			csOpts.Custom = cso.Custom
+		}
+		if cso.CustomPipeline != nil {
+			csOpts.CustomPipeline = cso.CustomPipeline
+		}
+	}
+
+	return csOpts
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..2457f682bab28ca0a4276bfd4c4206dc0b2439c8
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go
@@ -0,0 +1,150 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/internal/httputil"
+)
+
+// ClientEncryptionOptions represents all possible options used to configure a ClientEncryption instance.
+type ClientEncryptionOptions struct {
+	KeyVaultNamespace string
+	KmsProviders      map[string]map[string]interface{}
+	TLSConfig         map[string]*tls.Config
+	HTTPClient        *http.Client
+}
+
+// ClientEncryption creates a new ClientEncryptionOptions instance.
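+//
+// A minimal sketch, where localKey is a placeholder for a 96-byte local master
+// key:
+//
+//	kms := map[string]map[string]interface{}{"local": {"key": localKey}}
+//	ceOpts := options.ClientEncryption().
+//		SetKeyVaultNamespace("encryption.__keyVault").
+//		SetKmsProviders(kms)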
+func ClientEncryption() *ClientEncryptionOptions {
+	return &ClientEncryptionOptions{
+		HTTPClient: httputil.DefaultHTTPClient,
+	}
+}
+
+// SetKeyVaultNamespace specifies the namespace of the key vault collection. This is required.
+func (c *ClientEncryptionOptions) SetKeyVaultNamespace(ns string) *ClientEncryptionOptions {
+	c.KeyVaultNamespace = ns
+	return c
+}
+
+// SetKmsProviders specifies options for KMS providers. This is required.
+func (c *ClientEncryptionOptions) SetKmsProviders(providers map[string]map[string]interface{}) *ClientEncryptionOptions {
+	c.KmsProviders = providers
+	return c
+}
+
+// SetTLSConfig specifies tls.Config instances for each KMS provider to use to configure TLS on all connections created
+// to the KMS provider.
+//
+// This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12.
+func (c *ClientEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *ClientEncryptionOptions {
+	tlsConfigs := make(map[string]*tls.Config)
+	for provider, config := range tlsOpts {
+		// use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites
+		if config.MinVersion == 0 {
+			config.MinVersion = tls.VersionTLS12
+		}
+		tlsConfigs[provider] = config
+	}
+	c.TLSConfig = tlsConfigs
+	return c
+}
+
+// BuildTLSConfig builds a tls.Config from a document of TLS options, for use in configuring TLS on connections
+// created to a KMS provider. The input map should contain the necessary options keyed by option name, as follows:
+//
+//	{
+//		"tlsCertificateKeyFile": "foo.pem",
+//		"tlsCAFile": "fooCA.pem"
+//	}
+//
+// Currently, the following TLS options are supported:
+//
+// 1. "tlsCertificateKeyFile" (or "sslClientCertificateKeyFile"): The "tlsCertificateKeyFile" option specifies a path to
+// the client certificate and private key, which must be concatenated into one file.
+//
+// 2. "tlsCertificateKeyFilePassword" (or "sslClientCertificateKeyPassword"): Specify the password to decrypt the client
+// private key file (e.g. "tlsCertificateKeyFilePassword=password").
+//
+// 3. "tlsCaFile" (or "sslCertificateAuthorityFile"): Specify the path to a single or bundle of certificate authorities
+// to be considered trusted when making a TLS connection (e.g. "tlsCaFile=/path/to/caFile").
+//
+// This should only be used to set custom TLS options. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12.
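+//
+// A minimal usage sketch (the file path is a placeholder):
+//
+//	cfg, err := options.BuildTLSConfig(map[string]interface{}{
+//		"tlsCAFile": "/path/to/caFile",
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = cfg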
+func BuildTLSConfig(tlsOpts map[string]interface{}) (*tls.Config, error) {
+	// use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites
+	cfg := &tls.Config{MinVersion: tls.VersionTLS12}
+
+	for name := range tlsOpts {
+		var err error
+		switch name {
+		case "tlsCertificateKeyFile", "sslClientCertificateKeyFile":
+			clientCertPath, ok := tlsOpts[name].(string)
+			if !ok {
+				return nil, fmt.Errorf("expected %q value to be of type string, got %T", name, tlsOpts[name])
+			}
+			// apply custom key file password if found, otherwise use empty string
+			if keyPwd, found := tlsOpts["tlsCertificateKeyFilePassword"].(string); found {
+				_, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, keyPwd)
+			} else if keyPwd, found := tlsOpts["sslClientCertificateKeyPassword"].(string); found {
+				_, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, keyPwd)
+			} else {
+				_, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, "")
+			}
+		case "tlsCertificateKeyFilePassword", "sslClientCertificateKeyPassword":
+			continue
+		case "tlsCAFile", "sslCertificateAuthorityFile":
+			caPath, ok := tlsOpts[name].(string)
+			if !ok {
+				return nil, fmt.Errorf("expected %q value to be of type string, got %T", name, tlsOpts[name])
+			}
+			err = addCACertFromFile(cfg, caPath)
+		default:
+			return nil, fmt.Errorf("unrecognized TLS option %v", name)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cfg, nil
+}
+
+// MergeClientEncryptionOptions combines the argued ClientEncryptionOptions in a last-one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeClientEncryptionOptions(opts ...*ClientEncryptionOptions) *ClientEncryptionOptions {
+	ceo := ClientEncryption()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.KeyVaultNamespace != "" {
+			ceo.KeyVaultNamespace = opt.KeyVaultNamespace
+		}
+		if opt.KmsProviders != nil {
+			ceo.KmsProviders = opt.KmsProviders
+		}
+		if opt.TLSConfig != nil {
+			ceo.TLSConfig = opt.TLSConfig
+		}
+		if opt.HTTPClient != nil {
+			ceo.HTTPClient = opt.HTTPClient
+		}
+	}
+
+	return ceo
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3a9d439e97778c38fb5de93c15dec4f47799ff7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go
@@ -0,0 +1,1377 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options // import "go.mongodb.org/mongo-driver/mongo/options"
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/youmark/pkcs8"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/httputil"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/tag"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/auth"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage"
+)
+
+const (
+	// ServerMonitoringModeAuto indicates that the client will behave like "poll"
+	// mode when running on a FaaS (Function as a Service) platform, or like
+	// "stream" mode otherwise. The client detects its execution environment by
+	// following the rules for generating the "client.env" handshake metadata field
+	// as specified in the MongoDB Handshake specification. This is the default
+	// mode.
+	ServerMonitoringModeAuto = connstring.ServerMonitoringModeAuto
+
+	// ServerMonitoringModePoll indicates that the client will periodically check
+	// the server using a hello or legacy hello command and then sleep for
+	// heartbeatFrequencyMS milliseconds before running another check.
+	ServerMonitoringModePoll = connstring.ServerMonitoringModePoll
+
+	// ServerMonitoringModeStream indicates that the client will use a streaming
+	// protocol when the server supports it. The streaming protocol optimally
+	// reduces the time it takes for a client to discover server state changes.
+	ServerMonitoringModeStream = connstring.ServerMonitoringModeStream
+)
+
+// ContextDialer is an interface that can be implemented by types that can create connections. It should be used to
+// provide a custom dialer when configuring a Client.
+//
+// DialContext should return a connection to the provided address on the given network.
+type ContextDialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// Credential can be used to provide authentication options when configuring a Client.
+//
+// AuthMechanism: the mechanism to use for authentication. Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1",
+// "MONGODB-CR", "PLAIN", "GSSAPI", "MONGODB-X509", and "MONGODB-AWS". This can also be set through the "authMechanism"
+// URI option. (e.g. "authMechanism=PLAIN"). For more information, see
+// https://www.mongodb.com/docs/manual/core/authentication-mechanisms/.
+//
+// AuthMechanismProperties can be used to specify additional configuration options for certain mechanisms. They can also
+// be set through the "authMechanismProperties" URI option
+// (e.g. "authMechanismProperties=SERVICE_NAME:service,CANONICALIZE_HOST_NAME:true"). Supported properties are:
+//
+// 1. SERVICE_NAME: The service name to use for GSSAPI authentication. The default is "mongodb".
+//
+// 2. CANONICALIZE_HOST_NAME: If "true", the driver will canonicalize the host name for GSSAPI authentication. The default
+// is "false".
+//
+// 3. SERVICE_REALM: The service realm for GSSAPI authentication.
+//
+// 4. SERVICE_HOST: The host name to use for GSSAPI authentication. This should be specified if the host name to use for
+// authentication is different than the one given for Client construction.
+//
+// 5. AWS_SESSION_TOKEN: The AWS token for MONGODB-AWS authentication. This is optional and used for authentication with
+// temporary credentials.
+//
+// The SERVICE_HOST and CANONICALIZE_HOST_NAME properties must not be used at the same time on Linux and Darwin
+// systems.
+//
+// AuthSource: the name of the database to use for authentication. This defaults to "$external" for MONGODB-AWS,
+// MONGODB-OIDC, MONGODB-X509, GSSAPI, and PLAIN. It defaults to "admin" for all other auth mechanisms. This can
+// also be set through the "authSource" URI option (e.g. "authSource=otherDb").
+//
+// Username: the username for authentication. This can also be set through the URI as a username:password pair before
+// the first @ character. For example, a URI for user "user", password "pwd", and host "localhost:27017" would be
+// "mongodb://user:pwd@localhost:27017". This is optional for X509 authentication and will be extracted from the
+// client certificate if not specified.
+//
+// Password: the password for authentication. This must not be specified for X509 and is optional for GSSAPI
+// authentication.
+//
+// PasswordSet: For GSSAPI, this must be true if a password is specified, even if the password is the empty string, and
+// false if no password is specified, indicating that the password should be taken from the context of the running
+// process. For other mechanisms, this field is ignored.
+type Credential struct {
+	AuthMechanism           string
+	AuthMechanismProperties map[string]string
+	AuthSource              string
+	Username                string
+	Password                string
+	PasswordSet             bool
+	OIDCMachineCallback     OIDCCallback
+	OIDCHumanCallback       OIDCCallback
+}
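+
+// A minimal illustrative sketch (not part of the upstream file) of a SCRAM
+// credential; the username, password, and source are placeholders:
+//
+//	cred := options.Credential{
+//		AuthMechanism: "SCRAM-SHA-256",
+//		AuthSource:    "admin",
+//		Username:      "appUser",
+//		Password:      "appPass",
+//	}
+//	opts := options.Client().SetAuth(cred)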
+
+// OIDCCallback is the type for both Human and Machine Callback flows.
+// RefreshToken will always be nil in the OIDCArgs for the Machine flow.
+type OIDCCallback func(context.Context, *OIDCArgs) (*OIDCCredential, error)
+
+// OIDCArgs contains the arguments for the OIDC callback.
+type OIDCArgs struct {
+	Version      int
+	IDPInfo      *IDPInfo
+	RefreshToken *string
+}
+
+// OIDCCredential contains the access token and refresh token.
+type OIDCCredential struct {
+	AccessToken  string
+	ExpiresAt    *time.Time
+	RefreshToken *string
+}
+
+// IDPInfo contains the information needed to perform OIDC authentication with
+// an Identity Provider.
+type IDPInfo struct {
+	Issuer        string
+	ClientID      string
+	RequestScopes []string
+}
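+
+// A minimal illustrative sketch (not part of the upstream file) of a machine
+// callback; readTokenFromFile is a hypothetical helper standing in for a real
+// token source:
+//
+//	cb := func(ctx context.Context, args *options.OIDCArgs) (*options.OIDCCredential, error) {
+//		token, err := readTokenFromFile() // hypothetical token source
+//		if err != nil {
+//			return nil, err
+//		}
+//		return &options.OIDCCredential{AccessToken: token}, nil
+//	}
+//	cred := options.Credential{AuthMechanism: "MONGODB-OIDC", OIDCMachineCallback: cb}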
+
+// BSONOptions are optional BSON marshaling and unmarshaling behaviors.
+type BSONOptions struct {
+	// UseJSONStructTags causes the driver to fall back to using the "json"
+	// struct tag if a "bson" struct tag is not specified.
+	UseJSONStructTags bool
+
+	// ErrorOnInlineDuplicates causes the driver to return an error if there is
+	// a duplicate field in the marshaled BSON when the "inline" struct tag
+	// option is set.
+	ErrorOnInlineDuplicates bool
+
+	// IntMinSize causes the driver to marshal Go integer values (int, int8,
+	// int16, int32, int64, uint, uint8, uint16, uint32, or uint64) as the
+	// minimum BSON int size (either 32 or 64 bits) that can represent the
+	// integer value.
+	IntMinSize bool
+
+	// NilMapAsEmpty causes the driver to marshal nil Go maps as empty BSON
+	// documents instead of BSON null.
+	//
+	// Empty BSON documents take up slightly more space than BSON null, but
+	// preserve the ability to use document update operations like "$set" that
+	// do not work on BSON null.
+	NilMapAsEmpty bool
+
+	// NilSliceAsEmpty causes the driver to marshal nil Go slices as empty BSON
+	// arrays instead of BSON null.
+	//
+	// Empty BSON arrays take up slightly more space than BSON null, but
+	// preserve the ability to use array update operations like "$push" or
+	// "$addToSet" that do not work on BSON null.
+	NilSliceAsEmpty bool
+
+	// NilByteSliceAsEmpty causes the driver to marshal nil Go byte slices as
+	// empty BSON binary values instead of BSON null.
+	NilByteSliceAsEmpty bool
+
+	// OmitZeroStruct causes the driver to consider the zero value for a struct
+	// (e.g. MyStruct{}) as empty and omit it from the marshaled BSON when the
+	// "omitempty" struct tag option is set.
+	OmitZeroStruct bool
+
+	// StringifyMapKeysWithFmt causes the driver to convert Go map keys to BSON
+	// document field name strings using fmt.Sprint instead of the default
+	// string conversion logic.
+	StringifyMapKeysWithFmt bool
+
+	// AllowTruncatingDoubles causes the driver to truncate the fractional part
+	// of BSON "double" values when attempting to unmarshal them into a Go
+	// integer (int, int8, int16, int32, or int64) struct field. The truncation
+	// logic does not apply to BSON "decimal128" values.
+	AllowTruncatingDoubles bool
+
+	// BinaryAsSlice causes the driver to unmarshal BSON binary field values
+	// that are the "Generic" or "Old" BSON binary subtype as a Go byte slice
+	// instead of a primitive.Binary.
+	BinaryAsSlice bool
+
+	// DefaultDocumentD causes the driver to always unmarshal documents into the
+	// primitive.D type. This behavior is restricted to data typed as
+	// "interface{}" or "map[string]interface{}".
+	DefaultDocumentD bool
+
+	// DefaultDocumentM causes the driver to always unmarshal documents into the
+	// primitive.M type. This behavior is restricted to data typed as
+	// "interface{}" or "map[string]interface{}".
+	DefaultDocumentM bool
+
+	// UseLocalTimeZone causes the driver to unmarshal time.Time values in the
+	// local timezone instead of the UTC timezone.
+	UseLocalTimeZone bool
+
+	// ZeroMaps causes the driver to delete any existing values from Go maps in
+	// the destination value before unmarshaling BSON documents into them.
+	ZeroMaps bool
+
+	// ZeroStructs causes the driver to delete any existing values from Go
+	// structs in the destination value before unmarshaling BSON documents into
+	// them.
+	ZeroStructs bool
+}
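+
+// A minimal illustrative sketch (not part of the upstream file): enabling a
+// few of these behaviors on a client:
+//
+//	opts := options.Client().SetBSONOptions(&options.BSONOptions{
+//		UseJSONStructTags: true,
+//		NilSliceAsEmpty:   true,
+//		DefaultDocumentM:  true,
+//	})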
+
+// ClientOptions contains options to configure a Client instance. Each option can be set through setter functions. See
+// documentation for each setter function for an explanation of the option.
+type ClientOptions struct {
+	AppName                  *string
+	Auth                     *Credential
+	AutoEncryptionOptions    *AutoEncryptionOptions
+	ConnectTimeout           *time.Duration
+	Compressors              []string
+	Dialer                   ContextDialer
+	Direct                   *bool
+	DisableOCSPEndpointCheck *bool
+	HeartbeatInterval        *time.Duration
+	Hosts                    []string
+	HTTPClient               *http.Client
+	LoadBalanced             *bool
+	LocalThreshold           *time.Duration
+	LoggerOptions            *LoggerOptions
+	MaxConnIdleTime          *time.Duration
+	MaxPoolSize              *uint64
+	MinPoolSize              *uint64
+	MaxConnecting            *uint64
+	PoolMonitor              *event.PoolMonitor
+	Monitor                  *event.CommandMonitor
+	ServerMonitor            *event.ServerMonitor
+	ReadConcern              *readconcern.ReadConcern
+	ReadPreference           *readpref.ReadPref
+	BSONOptions              *BSONOptions
+	Registry                 *bsoncodec.Registry
+	ReplicaSet               *string
+	RetryReads               *bool
+	RetryWrites              *bool
+	ServerAPIOptions         *ServerAPIOptions
+	ServerMonitoringMode     *string
+	ServerSelectionTimeout   *time.Duration
+	SRVMaxHosts              *int
+	SRVServiceName           *string
+	Timeout                  *time.Duration
+	TLSConfig                *tls.Config
+	WriteConcern             *writeconcern.WriteConcern
+	ZlibLevel                *int
+	ZstdLevel                *int
+
+	err error
+	cs  *connstring.ConnString
+
+	// AuthenticateToAnything skips server type checks when deciding if authentication is possible.
+	//
+	// Deprecated: This option is for internal use only and should not be set. It may be changed or removed in any
+	// release.
+	AuthenticateToAnything *bool
+
+	// Crypt specifies a custom driver.Crypt to be used to encrypt and decrypt documents. The default is no
+	// encryption.
+	//
+	// Deprecated: This option is for internal use only and should not be set (see GODRIVER-2149). It may be
+	// changed or removed in any release.
+	Crypt driver.Crypt
+
+	// Deployment specifies a custom deployment to use for the new Client.
+	//
+	// Deprecated: This option is for internal use only and should not be set. It may be changed or removed in any
+	// release.
+	Deployment driver.Deployment
+
+	// SocketTimeout specifies the timeout to be used for the Client's socket reads and writes.
+	//
+	// NOTE(benjirewis): SocketTimeout will be deprecated in a future release. The more general Timeout option
+	// may be used in its place to control the amount of time that a single operation can run before returning
+	// an error. Setting SocketTimeout and Timeout on a single client will result in undefined behavior.
+	SocketTimeout *time.Duration
+}
+
+// Client creates a new ClientOptions instance.
+func Client() *ClientOptions {
+	return &ClientOptions{
+		HTTPClient: httputil.DefaultHTTPClient,
+	}
+}
+
+// Validate validates the client options. This method will return the first error found.
+func (c *ClientOptions) Validate() error {
+	if c.err != nil {
+		return c.err
+	}
+	c.err = c.validate()
+	return c.err
+}
+
+func (c *ClientOptions) validate() error {
+	// Direct connections cannot be made if multiple hosts are specified or an SRV URI is used.
+	if c.Direct != nil && *c.Direct {
+		if len(c.Hosts) > 1 {
+			return errors.New("a direct connection cannot be made if multiple hosts are specified")
+		}
+		if c.cs != nil && c.cs.Scheme == connstring.SchemeMongoDBSRV {
+			return errors.New("a direct connection cannot be made if an SRV URI is used")
+		}
+	}
+
+	if c.MaxPoolSize != nil && c.MinPoolSize != nil && *c.MaxPoolSize != 0 && *c.MinPoolSize > *c.MaxPoolSize {
+		return fmt.Errorf("minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=%d maxPoolSize=%d", *c.MinPoolSize, *c.MaxPoolSize)
+	}
+
+	// verify server API version if ServerAPIOptions are passed in.
+	if c.ServerAPIOptions != nil {
+		if err := c.ServerAPIOptions.ServerAPIVersion.Validate(); err != nil {
+			return err
+		}
+	}
+
+	// Validation for load-balanced mode.
+	if c.LoadBalanced != nil && *c.LoadBalanced {
+		if len(c.Hosts) > 1 {
+			return connstring.ErrLoadBalancedWithMultipleHosts
+		}
+		if c.ReplicaSet != nil {
+			return connstring.ErrLoadBalancedWithReplicaSet
+		}
+		if c.Direct != nil && *c.Direct {
+			return connstring.ErrLoadBalancedWithDirectConnection
+		}
+	}
+
+	// Validation for srvMaxHosts.
+	if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 {
+		if c.ReplicaSet != nil {
+			return connstring.ErrSRVMaxHostsWithReplicaSet
+		}
+		if c.LoadBalanced != nil && *c.LoadBalanced {
+			return connstring.ErrSRVMaxHostsWithLoadBalanced
+		}
+	}
+
+	if mode := c.ServerMonitoringMode; mode != nil && !connstring.IsValidServerMonitoringMode(*mode) {
+		return fmt.Errorf("invalid server monitoring mode: %q", *mode)
+	}
+
+	// OIDC Validation
+	if c.Auth != nil && c.Auth.AuthMechanism == auth.MongoDBOIDC {
+		if c.Auth.Password != "" {
+			return fmt.Errorf("password must not be set for the %s auth mechanism", auth.MongoDBOIDC)
+		}
+		if c.Auth.OIDCMachineCallback != nil && c.Auth.OIDCHumanCallback != nil {
+			return fmt.Errorf("cannot set both OIDCMachineCallback and OIDCHumanCallback, only one may be specified")
+		}
+		if c.Auth.OIDCHumanCallback == nil && c.Auth.AuthMechanismProperties[auth.AllowedHostsProp] != "" {
+			return fmt.Errorf("Cannot specify ALLOWED_HOSTS without an OIDCHumanCallback")
+		}
+		if env, ok := c.Auth.AuthMechanismProperties[auth.EnvironmentProp]; ok {
+			switch env {
+			case auth.GCPEnvironmentValue, auth.AzureEnvironmentValue:
+				if c.Auth.OIDCMachineCallback != nil {
+					return fmt.Errorf("OIDCMachineCallback cannot be specified with the %s %q", env, auth.EnvironmentProp)
+				}
+				if c.Auth.OIDCHumanCallback != nil {
+					return fmt.Errorf("OIDCHumanCallback cannot be specified with the %s %q", env, auth.EnvironmentProp)
+				}
+				if c.Auth.AuthMechanismProperties[auth.ResourceProp] == "" {
+					return fmt.Errorf("%q must be set for the %s %q", auth.ResourceProp, env, auth.EnvironmentProp)
+				}
+			default:
+				if c.Auth.AuthMechanismProperties[auth.ResourceProp] != "" {
+					return fmt.Errorf("%q must not be set for the %s %q", auth.ResourceProp, env, auth.EnvironmentProp)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// GetURI returns the original URI used to configure the ClientOptions instance. If ApplyURI was not called during
+// construction, this returns "".
+func (c *ClientOptions) GetURI() string {
+	if c.cs == nil {
+		return ""
+	}
+	return c.cs.Original
+}
+
+// ApplyURI parses the given URI and sets options accordingly. The URI can contain host names, IPv4/IPv6 literals, or
+// an SRV record that will be resolved when the Client is created. When using an SRV record, TLS support is
+// implicitly enabled. Specify the "tls=false" URI option to override this.
+//
+// If the connection string contains any options that have previously been set, it will overwrite them. Options that
+// correspond to multiple URI parameters, such as WriteConcern, will be completely overwritten if any of the query
+// parameters are specified. If an option is set on ClientOptions after this method is called, that option will override
+// any option applied via the connection string.
+//
+// If the URI format is incorrect or there are conflicting options specified in the URI, an error will be recorded and
+// can be retrieved by calling Validate.
+//
+// For more information about the URI format, see https://www.mongodb.com/docs/manual/reference/connection-string/. See
+// mongo.Connect documentation for examples of using URIs for different Client configurations.
+func (c *ClientOptions) ApplyURI(uri string) *ClientOptions {
+	if c.err != nil {
+		return c
+	}
+
+	cs, err := connstring.ParseAndValidate(uri)
+	if err != nil {
+		c.err = err
+		return c
+	}
+	c.cs = cs
+
+	if cs.AppName != "" {
+		c.AppName = &cs.AppName
+	}
+
+	// Only create a Credential if there is a request for authentication via non-empty credentials in the URI.
+	if cs.HasAuthParameters() {
+		c.Auth = &Credential{
+			AuthMechanism:           cs.AuthMechanism,
+			AuthMechanismProperties: cs.AuthMechanismProperties,
+			AuthSource:              cs.AuthSource,
+			Username:                cs.Username,
+			Password:                cs.Password,
+			PasswordSet:             cs.PasswordSet,
+		}
+	}
+
+	if cs.ConnectSet {
+		direct := cs.Connect == connstring.SingleConnect
+		c.Direct = &direct
+	}
+
+	if cs.DirectConnectionSet {
+		c.Direct = &cs.DirectConnection
+	}
+
+	if cs.ConnectTimeoutSet {
+		c.ConnectTimeout = &cs.ConnectTimeout
+	}
+
+	if len(cs.Compressors) > 0 {
+		c.Compressors = cs.Compressors
+		if stringSliceContains(c.Compressors, "zlib") {
+			defaultLevel := wiremessage.DefaultZlibLevel
+			c.ZlibLevel = &defaultLevel
+		}
+		if stringSliceContains(c.Compressors, "zstd") {
+			defaultLevel := wiremessage.DefaultZstdLevel
+			c.ZstdLevel = &defaultLevel
+		}
+	}
+
+	if cs.HeartbeatIntervalSet {
+		c.HeartbeatInterval = &cs.HeartbeatInterval
+	}
+
+	c.Hosts = cs.Hosts
+
+	if cs.LoadBalancedSet {
+		c.LoadBalanced = &cs.LoadBalanced
+	}
+
+	if cs.LocalThresholdSet {
+		c.LocalThreshold = &cs.LocalThreshold
+	}
+
+	if cs.MaxConnIdleTimeSet {
+		c.MaxConnIdleTime = &cs.MaxConnIdleTime
+	}
+
+	if cs.MaxPoolSizeSet {
+		c.MaxPoolSize = &cs.MaxPoolSize
+	}
+
+	if cs.MinPoolSizeSet {
+		c.MinPoolSize = &cs.MinPoolSize
+	}
+
+	if cs.MaxConnectingSet {
+		c.MaxConnecting = &cs.MaxConnecting
+	}
+
+	if cs.ReadConcernLevel != "" {
+		c.ReadConcern = readconcern.New(readconcern.Level(cs.ReadConcernLevel))
+	}
+
+	if cs.ReadPreference != "" || len(cs.ReadPreferenceTagSets) > 0 || cs.MaxStalenessSet {
+		opts := make([]readpref.Option, 0, 1)
+
+		tagSets := tag.NewTagSetsFromMaps(cs.ReadPreferenceTagSets)
+		if len(tagSets) > 0 {
+			opts = append(opts, readpref.WithTagSets(tagSets...))
+		}
+
+		if cs.MaxStaleness != 0 {
+			opts = append(opts, readpref.WithMaxStaleness(cs.MaxStaleness))
+		}
+
+		mode, err := readpref.ModeFromString(cs.ReadPreference)
+		if err != nil {
+			c.err = err
+			return c
+		}
+
+		c.ReadPreference, c.err = readpref.New(mode, opts...)
+		if c.err != nil {
+			return c
+		}
+	}
+
+	if cs.RetryWritesSet {
+		c.RetryWrites = &cs.RetryWrites
+	}
+
+	if cs.RetryReadsSet {
+		c.RetryReads = &cs.RetryReads
+	}
+
+	if cs.ReplicaSet != "" {
+		c.ReplicaSet = &cs.ReplicaSet
+	}
+
+	if cs.ServerSelectionTimeoutSet {
+		c.ServerSelectionTimeout = &cs.ServerSelectionTimeout
+	}
+
+	if cs.SocketTimeoutSet {
+		c.SocketTimeout = &cs.SocketTimeout
+	}
+
+	if cs.SRVMaxHosts != 0 {
+		c.SRVMaxHosts = &cs.SRVMaxHosts
+	}
+
+	if cs.SRVServiceName != "" {
+		c.SRVServiceName = &cs.SRVServiceName
+	}
+
+	if cs.SSL {
+		tlsConfig := new(tls.Config)
+
+		if cs.SSLCaFileSet {
+			c.err = addCACertFromFile(tlsConfig, cs.SSLCaFile)
+			if c.err != nil {
+				return c
+			}
+		}
+
+		if cs.SSLInsecure {
+			tlsConfig.InsecureSkipVerify = true
+		}
+
+		var x509Subject string
+		var keyPasswd string
+		if cs.SSLClientCertificateKeyPasswordSet && cs.SSLClientCertificateKeyPassword != nil {
+			keyPasswd = cs.SSLClientCertificateKeyPassword()
+		}
+		if cs.SSLClientCertificateKeyFileSet {
+			x509Subject, err = addClientCertFromConcatenatedFile(tlsConfig, cs.SSLClientCertificateKeyFile, keyPasswd)
+		} else if cs.SSLCertificateFileSet || cs.SSLPrivateKeyFileSet {
+			x509Subject, err = addClientCertFromSeparateFiles(tlsConfig, cs.SSLCertificateFile,
+				cs.SSLPrivateKeyFile, keyPasswd)
+		}
+		if err != nil {
+			c.err = err
+			return c
+		}
+
+		// If a username wasn't specified for X509, add one from the certificate.
+		if c.Auth != nil && strings.ToLower(c.Auth.AuthMechanism) == "mongodb-x509" &&
+			c.Auth.Username == "" {
+
+			// The Go x509 package gives the subject with the pairs in reverse order that we want.
+			c.Auth.Username = extractX509UsernameFromSubject(x509Subject)
+		}
+
+		c.TLSConfig = tlsConfig
+	}
+
+	if cs.JSet || cs.WString != "" || cs.WNumberSet || cs.WTimeoutSet {
+		opts := make([]writeconcern.Option, 0, 1)
+
+		if len(cs.WString) > 0 {
+			opts = append(opts, writeconcern.WTagSet(cs.WString))
+		} else if cs.WNumberSet {
+			opts = append(opts, writeconcern.W(cs.WNumber))
+		}
+
+		if cs.JSet {
+			opts = append(opts, writeconcern.J(cs.J))
+		}
+
+		if cs.WTimeoutSet {
+			opts = append(opts, writeconcern.WTimeout(cs.WTimeout))
+		}
+
+		c.WriteConcern = writeconcern.New(opts...)
+	}
+
+	if cs.ZlibLevelSet {
+		c.ZlibLevel = &cs.ZlibLevel
+	}
+	if cs.ZstdLevelSet {
+		c.ZstdLevel = &cs.ZstdLevel
+	}
+
+	if cs.SSLDisableOCSPEndpointCheckSet {
+		c.DisableOCSPEndpointCheck = &cs.SSLDisableOCSPEndpointCheck
+	}
+
+	if cs.TimeoutSet {
+		c.Timeout = &cs.Timeout
+	}
+
+	return c
+}
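+
+// A minimal illustrative sketch (not part of the upstream file): URI options
+// are applied first, a setter then takes precedence, and any error recorded
+// during parsing surfaces via Validate; the URI and app name are placeholders:
+//
+//	opts := options.Client().
+//		ApplyURI("mongodb://user:pwd@localhost:27017/?retryWrites=true").
+//		SetAppName("example_application")
+//	if err := opts.Validate(); err != nil {
+//		// handle the parse or validation error
+//	}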
+
+// SetAppName specifies an application name that is sent to the server when creating new connections. It is used by the
+// server to log connection and profiling information (e.g. slow query logs). This can also be set through the "appName"
+// URI option (e.g "appName=example_application"). The default is empty, meaning no app name will be sent.
+func (c *ClientOptions) SetAppName(s string) *ClientOptions {
+	c.AppName = &s
+	return c
+}
+
+// SetAuth specifies a Credential containing options for configuring authentication. See the options.Credential
+// documentation for more information about Credential fields. The default is an empty Credential, meaning no
+// authentication will be configured.
+func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions {
+	c.Auth = &auth
+	return c
+}
+
+// SetCompressors sets the compressors that can be used when communicating with a server. Valid values are:
+//
+// 1. "snappy" - requires server version >= 3.4
+//
+// 2. "zlib" - requires server version >= 3.6
+//
+// 3. "zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver
+// version >= 1.3.0 without cgo.
+//
+// If this option is specified, the driver will perform a negotiation with the server to determine a common list of
+// compressors and will use the first one in that list when performing operations. See
+// https://www.mongodb.com/docs/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more
+// information about configuring compression on the server and the server-side defaults.
+//
+// This can also be set through the "compressors" URI option (e.g. "compressors=zstd,zlib,snappy"). The default is
+// an empty slice, meaning no compression will be enabled.
+func (c *ClientOptions) SetCompressors(comps []string) *ClientOptions {
+	c.Compressors = comps
+
+	return c
+}
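+
+// A minimal illustrative sketch (not part of the upstream file): the slice
+// order expresses preference, and the first compressor both client and server
+// support is used:
+//
+//	opts := options.Client().SetCompressors([]string{"zstd", "zlib", "snappy"})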
+
+// SetConnectTimeout specifies a timeout that is used for creating connections to the server. This can be set through
+// ApplyURI with the "connectTimeoutMS" (e.g "connectTimeoutMS=30") option. If set to 0, no timeout will be used. The
+// default is 30 seconds.
+func (c *ClientOptions) SetConnectTimeout(d time.Duration) *ClientOptions {
+	c.ConnectTimeout = &d
+	return c
+}
+
+// SetDialer specifies a custom ContextDialer to be used to create new connections to the server. This method overrides
+// the default net.Dialer, so dialer options such as Timeout, KeepAlive, Resolver, etc can be set.
+// See https://golang.org/pkg/net/#Dialer for more information about the net.Dialer type.
+func (c *ClientOptions) SetDialer(d ContextDialer) *ClientOptions {
+	c.Dialer = d
+	return c
+}
+
+// SetDirect specifies whether or not a direct connect should be made. If set to true, the driver will only connect to
+// the host provided in the URI and will not discover other hosts in the cluster. This can also be set through the
+// "directConnection" URI option. This option cannot be set to true if multiple hosts are specified, either through
+// ApplyURI or SetHosts, or an SRV URI is used.
+//
+// As of driver version 1.4, the "connect" URI option has been deprecated and replaced with "directConnection". The
+// "connect" URI option has two values:
+//
+// 1. "connect=direct" for direct connections. This corresponds to "directConnection=true".
+//
+// 2. "connect=automatic" for automatic discovery. This corresponds to "directConnection=false"
+//
+// If the "connect" and "directConnection" URI options are both specified in the connection string, their values must
+// not conflict. Direct connections are not valid if multiple hosts are specified or an SRV URI is used. The default
+// value for this option is false.
+func (c *ClientOptions) SetDirect(b bool) *ClientOptions {
+	c.Direct = &b
+	return c
+}
+
+// SetHeartbeatInterval specifies the amount of time to wait between periodic background server checks. This can also be
+// set through the "heartbeatIntervalMS" URI option (e.g. "heartbeatIntervalMS=10000"). The default is 10 seconds.
+func (c *ClientOptions) SetHeartbeatInterval(d time.Duration) *ClientOptions {
+	c.HeartbeatInterval = &d
+	return c
+}
+
+// SetHosts specifies a list of host names or IP addresses for servers in a cluster. Both IPv4 and IPv6 addresses are
+// supported. IPv6 literals must be enclosed in '[]' following RFC-2732 syntax.
+//
+// Hosts can also be specified as a comma-separated list in a URI. For example, to include "localhost:27017" and
+// "localhost:27018", a URI could be "mongodb://localhost:27017,localhost:27018". The default is ["localhost:27017"]
+func (c *ClientOptions) SetHosts(s []string) *ClientOptions {
+	c.Hosts = s
+	return c
+}
+
+// SetLoadBalanced specifies whether or not the MongoDB deployment is hosted behind a load balancer. This can also be
+// set through the "loadBalanced" URI option. The driver will error during Client configuration if this option is set
+// to true and one of the following conditions are met:
+//
+// 1. Multiple hosts are specified, either via the ApplyURI or SetHosts methods. This includes the case where an SRV
+// URI is used and the SRV record resolves to multiple hostnames.
+// 2. A replica set name is specified, either via the URI or the SetReplicaSet method.
+// 3. The options specify whether or not a direct connection should be made, either via the URI or the SetDirect method.
+//
+// The default value is false.
+func (c *ClientOptions) SetLoadBalanced(lb bool) *ClientOptions {
+	c.LoadBalanced = &lb
+	return c
+}
+
+// SetLocalThreshold specifies the width of the 'latency window': when choosing between multiple suitable servers for an
+// operation, this is the acceptable non-negative delta between shortest and longest average round-trip times. A server
+// within the latency window is selected randomly. This can also be set through the "localThresholdMS" URI option (e.g.
+// "localThresholdMS=15000"). The default is 15 milliseconds.
+func (c *ClientOptions) SetLocalThreshold(d time.Duration) *ClientOptions {
+	c.LocalThreshold = &d
+	return c
+}
+
+// SetLoggerOptions specifies a LoggerOptions containing options for
+// configuring a logger.
+func (c *ClientOptions) SetLoggerOptions(opts *LoggerOptions) *ClientOptions {
+	c.LoggerOptions = opts
+
+	return c
+}
+
+// SetMaxConnIdleTime specifies the maximum amount of time that a connection will remain idle in a connection pool
+// before it is removed from the pool and closed. This can also be set through the "maxIdleTimeMS" URI option (e.g.
+// "maxIdleTimeMS=10000"). The default is 0, meaning a connection can remain unused indefinitely.
+func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions {
+	c.MaxConnIdleTime = &d
+	return c
+}
+
+// SetMaxPoolSize specifies the maximum number of connections allowed in the driver's connection pool to each server.
+// Requests to a server will block if this maximum is reached. This can also be set through the "maxPoolSize" URI option
+// (e.g. "maxPoolSize=100"). If this is 0, maximum connection pool size is not limited. The default is 100.
+func (c *ClientOptions) SetMaxPoolSize(u uint64) *ClientOptions {
+	c.MaxPoolSize = &u
+	return c
+}
+
+// SetMinPoolSize specifies the minimum number of connections allowed in the driver's connection pool to each server. If
+// this is non-zero, each server's pool will be maintained in the background to ensure that the size does not fall below
+// the minimum. This can also be set through the "minPoolSize" URI option (e.g. "minPoolSize=100"). The default is 0.
+func (c *ClientOptions) SetMinPoolSize(u uint64) *ClientOptions {
+	c.MinPoolSize = &u
+	return c
+}
+
+// SetMaxConnecting specifies the maximum number of connections a connection pool may establish simultaneously. This can
+// also be set through the "maxConnecting" URI option (e.g. "maxConnecting=2"). If this is 0, the default is used. The
+// default is 2. Values greater than 100 are not recommended.
+func (c *ClientOptions) SetMaxConnecting(u uint64) *ClientOptions {
+	c.MaxConnecting = &u
+	return c
+}
+
+// SetPoolMonitor specifies a PoolMonitor to receive connection pool events. See the event.PoolMonitor documentation
+// for more information about the structure of the monitor and events that can be received.
+func (c *ClientOptions) SetPoolMonitor(m *event.PoolMonitor) *ClientOptions {
+	c.PoolMonitor = m
+	return c
+}
+
+// SetMonitor specifies a CommandMonitor to receive command events. See the event.CommandMonitor documentation for more
+// information about the structure of the monitor and events that can be received.
+func (c *ClientOptions) SetMonitor(m *event.CommandMonitor) *ClientOptions {
+	c.Monitor = m
+	return c
+}
+
+// SetServerMonitor specifies an SDAM monitor used to monitor SDAM events.
+func (c *ClientOptions) SetServerMonitor(m *event.ServerMonitor) *ClientOptions {
+	c.ServerMonitor = m
+	return c
+}
+
+// SetReadConcern specifies the read concern to use for read operations. A read concern level can also be set through
+// the "readConcernLevel" URI option (e.g. "readConcernLevel=majority"). The default is nil, meaning the server will use
+// its configured default.
+func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptions {
+	c.ReadConcern = rc
+
+	return c
+}
+
+// SetReadPreference specifies the read preference to use for read operations. This can also be set through the
+// following URI options:
+//
+// 1. "readPreference" - Specify the read preference mode (e.g. "readPreference=primary").
+//
+// 2. "readPreferenceTags": Specify one or more read preference tags
+// (e.g. "readPreferenceTags=region:south,datacenter:A").
+//
+// 3. "maxStalenessSeconds" (or "maxStaleness"): Specify a maximum replication lag for reads from secondaries in a
+// replica set (e.g. "maxStalenessSeconds=10").
+//
+// The default is readpref.Primary(). See https://www.mongodb.com/docs/manual/core/read-preference/#read-preference for
+// more information about read preferences.
+func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions {
+	c.ReadPreference = rp
+
+	return c
+}
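+
+// A minimal illustrative sketch (not part of the upstream file): preferring
+// secondaries while bounding replication lag; the 90-second value is a
+// placeholder:
+//
+//	rp := readpref.SecondaryPreferred(readpref.WithMaxStaleness(90 * time.Second))
+//	opts := options.Client().SetReadPreference(rp)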
+
+// SetBSONOptions configures optional BSON marshaling and unmarshaling behavior.
+func (c *ClientOptions) SetBSONOptions(opts *BSONOptions) *ClientOptions {
+	c.BSONOptions = opts
+	return c
+}
+
+// SetRegistry specifies the BSON registry to use for BSON marshalling/unmarshalling operations. The default is
+// bson.DefaultRegistry.
+func (c *ClientOptions) SetRegistry(registry *bsoncodec.Registry) *ClientOptions {
+	c.Registry = registry
+	return c
+}
+
+// SetReplicaSet specifies the replica set name for the cluster. If specified, the cluster will be treated as a replica
+// set and the driver will automatically discover all servers in the set, starting with the nodes specified through
+// ApplyURI or SetHosts. All nodes in the replica set must have the same replica set name, or they will not be
+// considered as part of the set by the Client. This can also be set through the "replicaSet" URI option (e.g.
+// "replicaSet=replset"). The default is empty.
+func (c *ClientOptions) SetReplicaSet(s string) *ClientOptions {
+	c.ReplicaSet = &s
+	return c
+}
+
+// SetRetryWrites specifies whether supported write operations should be retried once on certain errors, such as network
+// errors.
+//
+// Supported operations are InsertOne, UpdateOne, ReplaceOne, DeleteOne, FindOneAndDelete, FindOneAndReplace,
+// FindOneAndUpdate, InsertMany, and BulkWrite. Note that BulkWrite requests must not include UpdateManyModel or
+// DeleteManyModel instances to be considered retryable. Unacknowledged writes will not be retried, even if this option
+// is set to true.
+//
+// This option requires server version >= 3.6 and a replica set or sharded cluster and will be ignored for any other
+// cluster type. This can also be set through the "retryWrites" URI option (e.g. "retryWrites=true"). The default is
+// true.
+func (c *ClientOptions) SetRetryWrites(b bool) *ClientOptions {
+	c.RetryWrites = &b
+
+	return c
+}
+
+// SetRetryReads specifies whether supported read operations should be retried once on certain errors, such as network
+// errors.
+//
+// Supported operations are Find, FindOne, Aggregate without a $out stage, Distinct, CountDocuments,
+// EstimatedDocumentCount, Watch (for Client, Database, and Collection), ListCollections, and ListDatabases. Note that
+// operations run through RunCommand are not retried.
+//
+// This option requires server version >= 3.6 and driver version >= 1.1.0. The default is true.
+func (c *ClientOptions) SetRetryReads(b bool) *ClientOptions {
+	c.RetryReads = &b
+	return c
+}
+
+// SetServerSelectionTimeout specifies how long the driver will wait to find an available, suitable server to execute an
+// operation. This can also be set through the "serverSelectionTimeoutMS" URI option (e.g.
+// "serverSelectionTimeoutMS=30000"). The default value is 30 seconds.
+func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOptions {
+	c.ServerSelectionTimeout = &d
+	return c
+}
+
+// SetSocketTimeout specifies how long the driver will wait for a socket read or write to return before returning a
+// network error. This can also be set through the "socketTimeoutMS" URI option (e.g. "socketTimeoutMS=1000"). The
+// default value is 0, meaning no timeout is used and socket operations can block indefinitely.
+//
+// NOTE(benjirewis): SocketTimeout will be deprecated in a future release. The more general Timeout option may be used
+// in its place to control the amount of time that a single operation can run before returning an error. Setting
+// SocketTimeout and Timeout on a single client will result in undefined behavior.
+func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions {
+	c.SocketTimeout = &d
+	return c
+}
+
+// SetTimeout specifies the amount of time that a single operation run on this Client can execute before returning an error.
+// The deadline of any operation run through the Client will be honored above any Timeout set on the Client; Timeout will only
+// be honored if there is no deadline on the operation Context. Timeout can also be set through the "timeoutMS" URI option
+// (e.g. "timeoutMS=1000"). The default value is nil, meaning operations do not inherit a timeout from the Client.
+//
+// If any Timeout is set (even 0) on the Client, the values of MaxTime on operation options, TransactionOptions.MaxCommitTime and
+// SessionOptions.DefaultMaxCommitTime will be ignored. Setting Timeout and SocketTimeout or WriteConcern.wTimeout will result
+// in undefined behavior.
+//
+// NOTE(benjirewis): SetTimeout represents unstable, provisional API. The behavior of the driver when a Timeout is specified is
+// subject to change.
+func (c *ClientOptions) SetTimeout(d time.Duration) *ClientOptions {
+	c.Timeout = &d
+	return c
+}
+
+// SetTLSConfig specifies a tls.Config instance to use to configure TLS on all connections created to the cluster.
+// This can also be set through the following URI options:
+//
+// 1. "tls" (or "ssl"): Specify if TLS should be used (e.g. "tls=true").
+//
+// 2. Either "tlsCertificateKeyFile" (or "sslClientCertificateKeyFile") or a combination of "tlsCertificateFile" and
+// "tlsPrivateKeyFile". The "tlsCertificateKeyFile" option specifies a path to the client certificate and private key,
+// which must be concatenated into one file. The "tlsCertificateFile" and "tlsPrivateKeyFile" combination specifies separate
+// paths to the client certificate and private key, respectively. Note that if "tlsCertificateKeyFile" is used, the
+// other two options must not be specified. Only the subject name of the first certificate is honored as the username
+// for X509 auth in a file with multiple certs.
+//
+// 3. "tlsCertificateKeyFilePassword" (or "sslClientCertificateKeyPassword"): Specify the password to decrypt the client
+// private key file (e.g. "tlsCertificateKeyFilePassword=password").
+//
+// 4. "tlsCaFile" (or "sslCertificateAuthorityFile"): Specify the path to a single or bundle of certificate authorities
+// to be considered trusted when making a TLS connection (e.g. "tlsCaFile=/path/to/caFile").
+//
+// 5. "tlsInsecure" (or "sslInsecure"): Specifies whether or not certificates and hostnames received from the server
+// should be validated. If true (e.g. "tlsInsecure=true"), the TLS library will accept any certificate presented by the
+// server and any host name in that certificate. Note that setting this to true makes TLS susceptible to
+// man-in-the-middle attacks and should only be done for testing.
+//
+// The default is nil, meaning no TLS will be enabled.
+func (c *ClientOptions) SetTLSConfig(cfg *tls.Config) *ClientOptions {
+	c.TLSConfig = cfg
+	return c
+}
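+
+// A minimal illustrative sketch (not part of the upstream file): enforcing a
+// minimum TLS version while keeping default certificate verification:
+//
+//	opts := options.Client().SetTLSConfig(&tls.Config{MinVersion: tls.VersionTLS12})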
+
+// SetHTTPClient specifies the http.Client to be used for any HTTP requests.
+//
+// This should only be used to set custom HTTP client configurations. By default, httputil.DefaultHTTPClient is used.
+func (c *ClientOptions) SetHTTPClient(client *http.Client) *ClientOptions {
+	c.HTTPClient = client
+	return c
+}
+
+// SetWriteConcern specifies the write concern to use for write operations. This can also be set through the following
+// URI options:
+//
+// 1. "w": Specify the number of nodes in the cluster that must acknowledge write operations before the operation
+// returns or "majority" to specify that a majority of the nodes must acknowledge writes. This can either be an integer
+// (e.g. "w=10") or the string "majority" (e.g. "w=majority").
+//
+// 2. "wTimeoutMS": Specify how long write operations should wait for the correct number of nodes to acknowledge the
+// operation (e.g. "wTimeoutMS=1000").
+//
+// 3. "journal": Specifies whether or not write operations should be written to an on-disk journal on the server before
+// returning (e.g. "journal=true").
+//
+// The default is nil, meaning the server will use its configured default.
+func (c *ClientOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *ClientOptions {
+	c.WriteConcern = wc
+
+	return c
+}
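+
+// A minimal illustrative sketch (not part of the upstream file): pairing a
+// majority write concern with a majority read concern:
+//
+//	opts := options.Client().
+//		SetWriteConcern(writeconcern.Majority()).
+//		SetReadConcern(readconcern.Majority())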
+
+// SetZlibLevel specifies the level for the zlib compressor. This option is ignored if zlib is not specified as a
+// compressor through ApplyURI or SetCompressors. Supported values are -1 through 9, inclusive. -1 tells the zlib
+// library to use its default, 0 means no compression, 1 means best speed, and 9 means best compression.
+// This can also be set through the "zlibCompressionLevel" URI option (e.g. "zlibCompressionLevel=-1"). Defaults to -1.
+func (c *ClientOptions) SetZlibLevel(level int) *ClientOptions {
+	c.ZlibLevel = &level
+
+	return c
+}
+
+// SetZstdLevel sets the level for the zstd compressor. This option is ignored if zstd is not specified as a compressor
+// through ApplyURI or SetCompressors. Supported values are 1 through 20, inclusive. 1 means best speed and 20 means
+// best compression. This can also be set through the "zstdCompressionLevel" URI option. Defaults to 6.
+func (c *ClientOptions) SetZstdLevel(level int) *ClientOptions {
+	c.ZstdLevel = &level
+	return c
+}
+
+// SetAutoEncryptionOptions specifies an AutoEncryptionOptions instance to automatically encrypt and decrypt commands
+// and their results. See the options.AutoEncryptionOptions documentation for more information about the supported
+// options.
+func (c *ClientOptions) SetAutoEncryptionOptions(opts *AutoEncryptionOptions) *ClientOptions {
+	c.AutoEncryptionOptions = opts
+	return c
+}
+
+// SetDisableOCSPEndpointCheck specifies whether or not the driver should reach out to OCSP responders to verify the
+// certificate status for certificates presented by the server that contain a list of OCSP responders.
+//
+// If set to true, the driver will verify the status of the certificate using a response stapled by the server, if there
+// is one, but will not send an HTTP request to any responders if there is no staple. In this case, the driver will
+// continue the connection even though the certificate status is not known.
+//
+// This can also be set through the tlsDisableOCSPEndpointCheck URI option. Both this URI option and tlsInsecure must
+// not be set at the same time and will error if they are. The default value is false.
+func (c *ClientOptions) SetDisableOCSPEndpointCheck(disableCheck bool) *ClientOptions {
+	c.DisableOCSPEndpointCheck = &disableCheck
+	return c
+}
+
+// SetServerAPIOptions specifies a ServerAPIOptions instance used to configure the API version sent to the server
+// when running commands. See the options.ServerAPIOptions documentation for more information about the supported
+// options.
+func (c *ClientOptions) SetServerAPIOptions(opts *ServerAPIOptions) *ClientOptions {
+	c.ServerAPIOptions = opts
+	return c
+}
+
+// SetServerMonitoringMode specifies the server monitoring protocol to use. See
+// the helper constants ServerMonitoringModeAuto, ServerMonitoringModePoll, and
+// ServerMonitoringModeStream for more information about valid server
+// monitoring modes.
+func (c *ClientOptions) SetServerMonitoringMode(mode string) *ClientOptions {
+	c.ServerMonitoringMode = &mode
+
+	return c
+}
+
+// SetSRVMaxHosts specifies the maximum number of SRV results to randomly select during polling. To limit the number
+// of hosts selected in SRV discovery, this function must be called before ApplyURI. This can also be set through
+// the "srvMaxHosts" URI option.
+func (c *ClientOptions) SetSRVMaxHosts(srvMaxHosts int) *ClientOptions {
+	c.SRVMaxHosts = &srvMaxHosts
+	return c
+}
+
+// SetSRVServiceName specifies a custom SRV service name to use in SRV polling. To use a custom SRV service name
+// in SRV discovery, this function must be called before ApplyURI. This can also be set through the "srvServiceName"
+// URI option.
+func (c *ClientOptions) SetSRVServiceName(srvName string) *ClientOptions {
+	c.SRVServiceName = &srvName
+	return c
+}
+
+// MergeClientOptions combines the given *ClientOptions into a single *ClientOptions in a last one wins fashion.
+// The specified options are merged with the existing options on the client, with the specified options taking
+// precedence.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeClientOptions(opts ...*ClientOptions) *ClientOptions {
+	c := Client()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.Dialer != nil {
+			c.Dialer = opt.Dialer
+		}
+		if opt.AppName != nil {
+			c.AppName = opt.AppName
+		}
+		if opt.Auth != nil {
+			c.Auth = opt.Auth
+		}
+		if opt.AuthenticateToAnything != nil {
+			c.AuthenticateToAnything = opt.AuthenticateToAnything
+		}
+		if opt.Compressors != nil {
+			c.Compressors = opt.Compressors
+		}
+		if opt.ConnectTimeout != nil {
+			c.ConnectTimeout = opt.ConnectTimeout
+		}
+		if opt.Crypt != nil {
+			c.Crypt = opt.Crypt
+		}
+		if opt.HeartbeatInterval != nil {
+			c.HeartbeatInterval = opt.HeartbeatInterval
+		}
+		if len(opt.Hosts) > 0 {
+			c.Hosts = opt.Hosts
+		}
+		if opt.HTTPClient != nil {
+			c.HTTPClient = opt.HTTPClient
+		}
+		if opt.LoadBalanced != nil {
+			c.LoadBalanced = opt.LoadBalanced
+		}
+		if opt.LocalThreshold != nil {
+			c.LocalThreshold = opt.LocalThreshold
+		}
+		if opt.MaxConnIdleTime != nil {
+			c.MaxConnIdleTime = opt.MaxConnIdleTime
+		}
+		if opt.MaxPoolSize != nil {
+			c.MaxPoolSize = opt.MaxPoolSize
+		}
+		if opt.MinPoolSize != nil {
+			c.MinPoolSize = opt.MinPoolSize
+		}
+		if opt.MaxConnecting != nil {
+			c.MaxConnecting = opt.MaxConnecting
+		}
+		if opt.PoolMonitor != nil {
+			c.PoolMonitor = opt.PoolMonitor
+		}
+		if opt.Monitor != nil {
+			c.Monitor = opt.Monitor
+		}
+		if opt.ServerAPIOptions != nil {
+			c.ServerAPIOptions = opt.ServerAPIOptions
+		}
+		if opt.ServerMonitor != nil {
+			c.ServerMonitor = opt.ServerMonitor
+		}
+		if opt.ReadConcern != nil {
+			c.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			c.ReadPreference = opt.ReadPreference
+		}
+		if opt.BSONOptions != nil {
+			c.BSONOptions = opt.BSONOptions
+		}
+		if opt.Registry != nil {
+			c.Registry = opt.Registry
+		}
+		if opt.ReplicaSet != nil {
+			c.ReplicaSet = opt.ReplicaSet
+		}
+		if opt.RetryWrites != nil {
+			c.RetryWrites = opt.RetryWrites
+		}
+		if opt.RetryReads != nil {
+			c.RetryReads = opt.RetryReads
+		}
+		if opt.ServerSelectionTimeout != nil {
+			c.ServerSelectionTimeout = opt.ServerSelectionTimeout
+		}
+		if opt.Direct != nil {
+			c.Direct = opt.Direct
+		}
+		if opt.SocketTimeout != nil {
+			c.SocketTimeout = opt.SocketTimeout
+		}
+		if opt.SRVMaxHosts != nil {
+			c.SRVMaxHosts = opt.SRVMaxHosts
+		}
+		if opt.SRVServiceName != nil {
+			c.SRVServiceName = opt.SRVServiceName
+		}
+		if opt.Timeout != nil {
+			c.Timeout = opt.Timeout
+		}
+		if opt.TLSConfig != nil {
+			c.TLSConfig = opt.TLSConfig
+		}
+		if opt.WriteConcern != nil {
+			c.WriteConcern = opt.WriteConcern
+		}
+		if opt.ZlibLevel != nil {
+			c.ZlibLevel = opt.ZlibLevel
+		}
+		if opt.ZstdLevel != nil {
+			c.ZstdLevel = opt.ZstdLevel
+		}
+		if opt.AutoEncryptionOptions != nil {
+			c.AutoEncryptionOptions = opt.AutoEncryptionOptions
+		}
+		if opt.Deployment != nil {
+			c.Deployment = opt.Deployment
+		}
+		if opt.DisableOCSPEndpointCheck != nil {
+			c.DisableOCSPEndpointCheck = opt.DisableOCSPEndpointCheck
+		}
+		if opt.err != nil {
+			c.err = opt.err
+		}
+		if opt.cs != nil {
+			c.cs = opt.cs
+		}
+		if opt.LoggerOptions != nil {
+			c.LoggerOptions = opt.LoggerOptions
+		}
+		if opt.ServerMonitoringMode != nil {
+			c.ServerMonitoringMode = opt.ServerMonitoringMode
+		}
+	}
+
+	return c
+}
+
+// addCACertFromFile adds a root CA certificate to the configuration given a path
+// to the containing file.
+func addCACertFromFile(cfg *tls.Config, file string) error {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		return err
+	}
+
+	if cfg.RootCAs == nil {
+		cfg.RootCAs = x509.NewCertPool()
+	}
+	if !cfg.RootCAs.AppendCertsFromPEM(data) {
+		return errors.New("the specified CA file does not contain any valid certificates")
+	}
+
+	return nil
+}
+
+func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassword string) (string, error) {
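+	// Note: the call site in ApplyURI passes the certificate file as keyFile and
+	// the private key file as certFile; the swapped order is harmless because
+	// addClientCertFromBytes scans every PEM block regardless of position.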
+	keyData, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return "", err
+	}
+	certData, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return "", err
+	}
+
+	keySize := len(keyData)
+	if keySize > 64*1024*1024 {
+		return "", errors.New("X.509 key must be less than 64 MiB")
+	}
+	certSize := len(certData)
+	if certSize > 64*1024*1024 {
+		return "", errors.New("X.509 certificate must be less than 64 MiB")
+	}
+	dataSize := keySize + certSize + 1
+	if dataSize > math.MaxInt {
+		return "", errors.New("size overflow")
+	}
+	data := make([]byte, 0, dataSize)
+	data = append(data, keyData...)
+	data = append(data, '\n')
+	data = append(data, certData...)
+	return addClientCertFromBytes(cfg, data, keyPassword)
+}
+
+func addClientCertFromConcatenatedFile(cfg *tls.Config, certKeyFile, keyPassword string) (string, error) {
+	data, err := ioutil.ReadFile(certKeyFile)
+	if err != nil {
+		return "", err
+	}
+
+	return addClientCertFromBytes(cfg, data, keyPassword)
+}
+
+// addClientCertFromBytes adds client certificates to the configuration given the PEM-encoded bytes of
+// the certificate(s) and private key, and returns the subject name in the first certificate.
+func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (string, error) {
+	var currentBlock *pem.Block
+	var certDecodedBlock []byte
+	var certBlocks, keyBlocks [][]byte
+
+	remaining := data
+	start := 0
+	for {
+		currentBlock, remaining = pem.Decode(remaining)
+		if currentBlock == nil {
+			break
+		}
+
+		if currentBlock.Type == "CERTIFICATE" {
+			certBlock := data[start : len(data)-len(remaining)]
+			certBlocks = append(certBlocks, certBlock)
+			// Assign certDecodedBlock only on the first certificate encountered,
+			// so only the first certificate is honored in a file with multiple certs.
+			if certDecodedBlock == nil {
+				certDecodedBlock = currentBlock.Bytes
+			}
+			start += len(certBlock)
+		} else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") {
+			isEncrypted := x509.IsEncryptedPEMBlock(currentBlock) || strings.Contains(currentBlock.Type, "ENCRYPTED PRIVATE KEY")
+			if isEncrypted {
+				if keyPasswd == "" {
+					return "", fmt.Errorf("no password provided to decrypt private key")
+				}
+
+				var keyBytes []byte
+				var err error
+				// Process the X.509-encrypted or PKCS-encrypted PEM block.
+				if x509.IsEncryptedPEMBlock(currentBlock) {
+					// Only covers encrypted PEM data with a DEK-Info header.
+					keyBytes, err = x509.DecryptPEMBlock(currentBlock, []byte(keyPasswd))
+					if err != nil {
+						return "", err
+					}
+				} else if strings.Contains(currentBlock.Type, "ENCRYPTED") {
+					// The pkcs8 package only handles the PKCS #5 v2.0 scheme.
+					decrypted, err := pkcs8.ParsePKCS8PrivateKey(currentBlock.Bytes, []byte(keyPasswd))
+					if err != nil {
+						return "", err
+					}
+					keyBytes, err = x509.MarshalPKCS8PrivateKey(decrypted)
+					if err != nil {
+						return "", err
+					}
+				}
+				var encoded bytes.Buffer
+				err = pem.Encode(&encoded, &pem.Block{Type: currentBlock.Type, Bytes: keyBytes})
+				if err != nil {
+					return "", fmt.Errorf("error encoding private key as PEM: %w", err)
+				}
+				keyBlock := encoded.Bytes()
+				keyBlocks = append(keyBlocks, keyBlock)
+				start = len(data) - len(remaining)
+			} else {
+				keyBlock := data[start : len(data)-len(remaining)]
+				keyBlocks = append(keyBlocks, keyBlock)
+				start += len(keyBlock)
+			}
+		}
+	}
+	if len(certBlocks) == 0 {
+		return "", fmt.Errorf("failed to find CERTIFICATE")
+	}
+	if len(keyBlocks) == 0 {
+		return "", fmt.Errorf("failed to find PRIVATE KEY")
+	}
+
+	cert, err := tls.X509KeyPair(bytes.Join(certBlocks, []byte("\n")), bytes.Join(keyBlocks, []byte("\n")))
+	if err != nil {
+		return "", err
+	}
+
+	cfg.Certificates = append(cfg.Certificates, cert)
+
+	// The documentation for tls.X509KeyPair indicates that the Leaf certificate is not
+	// retained, so parse the first certificate again to extract the subject.
+	crt, err := x509.ParseCertificate(certDecodedBlock)
+	if err != nil {
+		return "", err
+	}
+
+	return crt.Subject.String(), nil
+}
+
+func stringSliceContains(source []string, target string) bool {
+	for _, str := range source {
+		if str == target {
+			return true
+		}
+	}
+	return false
+}
+
+// extractX509UsernameFromSubject creates a username for X509 authentication from an X509
+// certificate subject.
+func extractX509UsernameFromSubject(subject string) string {
+	// the Go x509 package gives the subject with the pairs in the reverse order from what we want.
+	pairs := strings.Split(subject, ",")
+	for left, right := 0, len(pairs)-1; left < right; left, right = left+1, right-1 {
+		pairs[left], pairs[right] = pairs[right], pairs[left]
+	}
+
+	return strings.Join(pairs, ",")
+}
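+
+// For example (illustrative subject): the Go x509 package returns a subject such as
+// "CN=client,OU=eng,O=acme", which this helper reverses to "O=acme,OU=eng,CN=client",
+// the RDN order expected for the X509 username.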
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..7904dbd6725ef9edb23fb33c7250015d37d7ae3d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go
@@ -0,0 +1,104 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+)
+
+// CollectionOptions represents options that can be used to configure a Collection.
+type CollectionOptions struct {
+	// ReadConcern is the read concern to use for operations executed on the Collection. The default value is nil, which means that
+	// the read concern of the Database used to configure the Collection will be used.
+	ReadConcern *readconcern.ReadConcern
+
+	// WriteConcern is the write concern to use for operations executed on the Collection. The default value is nil, which means that
+	// the write concern of the Database used to configure the Collection will be used.
+	WriteConcern *writeconcern.WriteConcern
+
+	// ReadPreference is the read preference to use for operations executed on the Collection. The default value is nil, which means that
+	// the read preference of the Database used to configure the Collection will be used.
+	ReadPreference *readpref.ReadPref
+
+	// BSONOptions configures optional BSON marshaling and unmarshaling
+	// behavior.
+	BSONOptions *BSONOptions
+
+	// Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value
+	// is nil, which means that the registry of the Database used to configure the Collection will be used.
+	Registry *bsoncodec.Registry
+}
+
+// Collection creates a new CollectionOptions instance.
+func Collection() *CollectionOptions {
+	return &CollectionOptions{}
+}
+
+// SetReadConcern sets the value for the ReadConcern field.
+func (c *CollectionOptions) SetReadConcern(rc *readconcern.ReadConcern) *CollectionOptions {
+	c.ReadConcern = rc
+	return c
+}
+
+// SetWriteConcern sets the value for the WriteConcern field.
+func (c *CollectionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *CollectionOptions {
+	c.WriteConcern = wc
+	return c
+}
+
+// SetReadPreference sets the value for the ReadPreference field.
+func (c *CollectionOptions) SetReadPreference(rp *readpref.ReadPref) *CollectionOptions {
+	c.ReadPreference = rp
+	return c
+}
+
+// SetBSONOptions configures optional BSON marshaling and unmarshaling behavior.
+func (c *CollectionOptions) SetBSONOptions(opts *BSONOptions) *CollectionOptions {
+	c.BSONOptions = opts
+	return c
+}
+
+// SetRegistry sets the value for the Registry field.
+func (c *CollectionOptions) SetRegistry(r *bsoncodec.Registry) *CollectionOptions {
+	c.Registry = r
+	return c
+}
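+
+// A minimal illustrative sketch (not part of the upstream file): overriding
+// the read concern for one collection handle; db is an assumed *mongo.Database:
+//
+//	coll := db.Collection("orders", options.Collection().
+//		SetReadConcern(readconcern.Majority()))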
+
+// MergeCollectionOptions combines the given CollectionOptions instances into a single *CollectionOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeCollectionOptions(opts ...*CollectionOptions) *CollectionOptions {
+	c := Collection()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadConcern != nil {
+			c.ReadConcern = opt.ReadConcern
+		}
+		if opt.WriteConcern != nil {
+			c.WriteConcern = opt.WriteConcern
+		}
+		if opt.ReadPreference != nil {
+			c.ReadPreference = opt.ReadPreference
+		}
+		if opt.Registry != nil {
+			c.Registry = opt.Registry
+		}
+		if opt.BSONOptions != nil {
+			c.BSONOptions = opt.BSONOptions
+		}
+	}
+
+	return c
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb765d950daf4eb8b74bb381a4093f394df913a5
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go
@@ -0,0 +1,122 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// CountOptions represents options that can be used to configure a CountDocuments operation.
+type CountOptions struct {
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// TODO(GODRIVER-2386): The CountOptions executor uses aggregation under the hood, which means this type has to
+	// be a string for now. This can be replaced with `Comment interface{}` once GODRIVER-2386 is implemented.
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation. The default is nil, which means that no comment will be included in the logs.
+	Comment *string
+
+	// The index to use for the aggregation. This should either be the index name as a string or the index specification
+	// as a document. The driver will return an error if the hint parameter is a multi-key map. The default value is nil,
+	// which means that no hint will be sent.
+	Hint interface{}
+
+	// The maximum number of documents to count. The default value is 0, which means that there is no limit and all
+	// documents matching the filter will be counted.
+	Limit *int64
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there is
+	// no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used in
+	// its place to control the amount of time that a single operation can run before returning an error. MaxTime is
+	// ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// The number of documents to skip before counting. The default value is 0.
+	Skip *int64
+}
+
+// Count creates a new CountOptions instance.
+func Count() *CountOptions {
+	return &CountOptions{}
+}
+
+// SetCollation sets the value for the Collation field.
+func (co *CountOptions) SetCollation(c *Collation) *CountOptions {
+	co.Collation = c
+	return co
+}
+
+// SetComment sets the value for the Comment field.
+func (co *CountOptions) SetComment(c string) *CountOptions {
+	co.Comment = &c
+	return co
+}
+
+// SetHint sets the value for the Hint field.
+func (co *CountOptions) SetHint(h interface{}) *CountOptions {
+	co.Hint = h
+	return co
+}
+
+// SetLimit sets the value for the Limit field.
+func (co *CountOptions) SetLimit(i int64) *CountOptions {
+	co.Limit = &i
+	return co
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (co *CountOptions) SetMaxTime(d time.Duration) *CountOptions {
+	co.MaxTime = &d
+	return co
+}
+
+// SetSkip sets the value for the Skip field.
+func (co *CountOptions) SetSkip(i int64) *CountOptions {
+	co.Skip = &i
+	return co
+}
+
+// MergeCountOptions combines the given CountOptions instances into a single CountOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeCountOptions(opts ...*CountOptions) *CountOptions {
+	countOpts := Count()
+	for _, co := range opts {
+		if co == nil {
+			continue
+		}
+		if co.Collation != nil {
+			countOpts.Collation = co.Collation
+		}
+		if co.Comment != nil {
+			countOpts.Comment = co.Comment
+		}
+		if co.Hint != nil {
+			countOpts.Hint = co.Hint
+		}
+		if co.Limit != nil {
+			countOpts.Limit = co.Limit
+		}
+		if co.MaxTime != nil {
+			countOpts.MaxTime = co.MaxTime
+		}
+		if co.Skip != nil {
+			countOpts.Skip = co.Skip
+		}
+	}
+
+	return countOpts
+}
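+
+// Illustrative usage (a hedged sketch, not part of the upstream file): counting
+// matching documents with chained options. The *mongo.Collection "coll", the
+// context "ctx", and the "status" field are assumptions for the example.
+//
+//	opts := options.Count().SetLimit(100).SetSkip(10).SetMaxTime(2 * time.Second)
+//	n, err := coll.CountDocuments(ctx, bson.D{{Key: "status", Value: "active"}}, opts)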
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8ffaaf33787aada774b19916418fcb61c5c22e4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go
@@ -0,0 +1,360 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// DefaultIndexOptions represents the default options for a collection to apply on new indexes. This type can be used
+// when creating a new collection through the CreateCollectionOptions.SetDefaultIndexOptions method.
+type DefaultIndexOptions struct {
+	// Specifies the storage engine to use for the index. The value must be a document in the form
+	// {<storage engine name>: <options>}. The default value is nil, which means that the default storage engine
+	// will be used.
+	StorageEngine interface{}
+}
+
+// DefaultIndex creates a new DefaultIndexOptions instance.
+func DefaultIndex() *DefaultIndexOptions {
+	return &DefaultIndexOptions{}
+}
+
+// SetStorageEngine sets the value for the StorageEngine field.
+func (d *DefaultIndexOptions) SetStorageEngine(storageEngine interface{}) *DefaultIndexOptions {
+	d.StorageEngine = storageEngine
+	return d
+}
+
+// TimeSeriesOptions specifies options on a time-series collection.
+type TimeSeriesOptions struct {
+	// TimeField is the top-level field to be used for time. Inserted documents must have this field,
+	// and the field must be of the BSON UTC datetime type (0x9).
+	TimeField string
+
+	// MetaField is the name of the top-level field describing the series. This field is used to group
+	// related data and may be of any BSON type, except for array. This name may not be the same
+	// as the TimeField or _id. This field is optional.
+	MetaField *string
+
+	// Granularity is the granularity of time-series data. Allowed granularity options are
+	// "seconds", "minutes" and "hours". This field is optional.
+	Granularity *string
+
+	// BucketMaxSpan is the maximum range of time values for a bucket. The
+	// time.Duration is rounded down to the nearest second and applied as
+	// the command option: "bucketRoundingSeconds". This field is optional.
+	BucketMaxSpan *time.Duration
+
+	// BucketRounding is used to determine the minimum time boundary when
+	// opening a new bucket by rounding the first timestamp down to the next
+	// multiple of this value. The time.Duration is rounded down to the
+	// nearest second and applied as the command option:
+	// "bucketRoundingSeconds". This field is optional.
+	BucketRounding *time.Duration
+}
+
+// TimeSeries creates a new TimeSeriesOptions instance.
+func TimeSeries() *TimeSeriesOptions {
+	return &TimeSeriesOptions{}
+}
+
+// SetTimeField sets the value for the TimeField.
+func (tso *TimeSeriesOptions) SetTimeField(timeField string) *TimeSeriesOptions {
+	tso.TimeField = timeField
+	return tso
+}
+
+// SetMetaField sets the value for the MetaField.
+func (tso *TimeSeriesOptions) SetMetaField(metaField string) *TimeSeriesOptions {
+	tso.MetaField = &metaField
+	return tso
+}
+
+// SetGranularity sets the value for Granularity.
+func (tso *TimeSeriesOptions) SetGranularity(granularity string) *TimeSeriesOptions {
+	tso.Granularity = &granularity
+	return tso
+}
+
+// SetBucketMaxSpan sets the value for BucketMaxSpan.
+func (tso *TimeSeriesOptions) SetBucketMaxSpan(dur time.Duration) *TimeSeriesOptions {
+	tso.BucketMaxSpan = &dur
+
+	return tso
+}
+
+// SetBucketRounding sets the value for BucketRounding.
+func (tso *TimeSeriesOptions) SetBucketRounding(dur time.Duration) *TimeSeriesOptions {
+	tso.BucketRounding = &dur
+
+	return tso
+}
+
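+// Illustrative usage (a hedged sketch, not part of the upstream file): building
+// time-series options for a collection keyed on a hypothetical "timestamp"
+// field and grouped by a "sensorId" meta field.
+//
+//	tso := options.TimeSeries().
+//		SetTimeField("timestamp").
+//		SetMetaField("sensorId").
+//		SetGranularity("minutes")
+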
+// CreateCollectionOptions represents options that can be used to configure a CreateCollection operation.
+type CreateCollectionOptions struct {
+	// Specifies if the collection is capped (see https://www.mongodb.com/docs/manual/core/capped-collections/). If true,
+	// the SizeInBytes option must also be specified. The default value is false.
+	Capped *bool
+
+	// Specifies the default collation for the new collection. This option is only valid for MongoDB versions >= 3.4.
+	// For previous server versions, the driver will return an error if this option is used. The default value is nil.
+	Collation *Collation
+
+	// Specifies how change streams opened against the collection can return pre- and post-images of updated
+	// documents. The value must be a document in the form {<option name>: <options>}. This option is only valid for
+	// MongoDB versions >= 6.0. The default value is nil, which means that change streams opened against the collection
+	// will not return pre- and post-images of updated documents in any way.
+	ChangeStreamPreAndPostImages interface{}
+
+	// Specifies a default configuration for indexes on the collection. This option is only valid for MongoDB versions
+	// >= 3.4. The default value is nil, meaning indexes will be configured using server defaults.
+	DefaultIndexOptions *DefaultIndexOptions
+
+	// Specifies the maximum number of documents allowed in a capped collection. The limit specified by the SizeInBytes
+	// option takes precedence over this option. If a capped collection reaches its size limit, old documents will be
+	// removed, regardless of the number of documents in the collection. The default value is 0, meaning the maximum
+	// number of documents is unbounded.
+	MaxDocuments *int64
+
+	// Specifies the maximum size in bytes for a capped collection. The default value is 0.
+	SizeInBytes *int64
+
+	// Specifies the storage engine to use for the new collection. The value must be a document in the form
+	// {<storage engine name>: <options>}. The default value is nil, which means that the default storage engine
+	// will be used.
+	StorageEngine interface{}
+
+	// Specifies what should happen if a document being inserted does not pass validation. Valid values are "error" and
+	// "warn". See https://www.mongodb.com/docs/manual/core/schema-validation/#accept-or-reject-invalid-documents for more
+	// information. This option is only valid for MongoDB versions >= 3.2. The default value is "error".
+	ValidationAction *string
+
+	// Specifies how strictly the server applies validation rules to existing documents in the collection during update
+	// operations. Valid values are "off", "strict", and "moderate". See
+	// https://www.mongodb.com/docs/manual/core/schema-validation/#existing-documents for more information. This option is
+	// only valid for MongoDB versions >= 3.2. The default value is "strict".
+	ValidationLevel *string
+
+	// A document specifying validation rules for the collection. See
+	// https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about schema validation. This option
+	// is only valid for MongoDB versions >= 3.2. The default value is nil, meaning no validator will be used for the
+	// collection.
+	Validator interface{}
+
+	// Value indicating after how many seconds old time-series data should be deleted. See
+	// https://www.mongodb.com/docs/manual/reference/command/create/ for supported options, and
+	// https://www.mongodb.com/docs/manual/core/timeseries-collections/ for more information on time-series
+	// collections.
+	//
+	// This option is only valid for MongoDB versions >= 5.0
+	ExpireAfterSeconds *int64
+
+	// Options for specifying a time-series collection. See
+	// https://www.mongodb.com/docs/manual/reference/command/create/ for supported options, and
+	// https://www.mongodb.com/docs/manual/core/timeseries-collections/ for more information on time-series
+	// collections.
+	//
+	// This option is only valid for MongoDB versions >= 5.0
+	TimeSeriesOptions *TimeSeriesOptions
+
+	// EncryptedFields configures encrypted fields.
+	//
+	// This option is only valid for MongoDB versions >= 6.0
+	EncryptedFields interface{}
+
+	// ClusteredIndex is used to create a collection with a clustered index.
+	//
+	// This option is only valid for MongoDB versions >= 5.3
+	ClusteredIndex interface{}
+}
+
+// CreateCollection creates a new CreateCollectionOptions instance.
+func CreateCollection() *CreateCollectionOptions {
+	return &CreateCollectionOptions{}
+}
+
+// SetCapped sets the value for the Capped field.
+func (c *CreateCollectionOptions) SetCapped(capped bool) *CreateCollectionOptions {
+	c.Capped = &capped
+	return c
+}
+
+// SetCollation sets the value for the Collation field.
+func (c *CreateCollectionOptions) SetCollation(collation *Collation) *CreateCollectionOptions {
+	c.Collation = collation
+	return c
+}
+
+// SetChangeStreamPreAndPostImages sets the value for the ChangeStreamPreAndPostImages field.
+func (c *CreateCollectionOptions) SetChangeStreamPreAndPostImages(csppi interface{}) *CreateCollectionOptions {
+	c.ChangeStreamPreAndPostImages = &csppi
+	return c
+}
+
+// SetDefaultIndexOptions sets the value for the DefaultIndexOptions field.
+func (c *CreateCollectionOptions) SetDefaultIndexOptions(opts *DefaultIndexOptions) *CreateCollectionOptions {
+	c.DefaultIndexOptions = opts
+	return c
+}
+
+// SetMaxDocuments sets the value for the MaxDocuments field.
+func (c *CreateCollectionOptions) SetMaxDocuments(max int64) *CreateCollectionOptions {
+	c.MaxDocuments = &max
+	return c
+}
+
+// SetSizeInBytes sets the value for the SizeInBytes field.
+func (c *CreateCollectionOptions) SetSizeInBytes(size int64) *CreateCollectionOptions {
+	c.SizeInBytes = &size
+	return c
+}
+
+// SetStorageEngine sets the value for the StorageEngine field.
+func (c *CreateCollectionOptions) SetStorageEngine(storageEngine interface{}) *CreateCollectionOptions {
+	c.StorageEngine = &storageEngine
+	return c
+}
+
+// SetValidationAction sets the value for the ValidationAction field.
+func (c *CreateCollectionOptions) SetValidationAction(action string) *CreateCollectionOptions {
+	c.ValidationAction = &action
+	return c
+}
+
+// SetValidationLevel sets the value for the ValidationLevel field.
+func (c *CreateCollectionOptions) SetValidationLevel(level string) *CreateCollectionOptions {
+	c.ValidationLevel = &level
+	return c
+}
+
+// SetValidator sets the value for the Validator field.
+func (c *CreateCollectionOptions) SetValidator(validator interface{}) *CreateCollectionOptions {
+	c.Validator = validator
+	return c
+}
+
+// SetExpireAfterSeconds sets the value for the ExpireAfterSeconds field.
+func (c *CreateCollectionOptions) SetExpireAfterSeconds(eas int64) *CreateCollectionOptions {
+	c.ExpireAfterSeconds = &eas
+	return c
+}
+
+// SetTimeSeriesOptions sets the options for time-series collections.
+func (c *CreateCollectionOptions) SetTimeSeriesOptions(timeSeriesOpts *TimeSeriesOptions) *CreateCollectionOptions {
+	c.TimeSeriesOptions = timeSeriesOpts
+	return c
+}
+
+// SetEncryptedFields sets the encrypted fields for encrypted collections.
+func (c *CreateCollectionOptions) SetEncryptedFields(encryptedFields interface{}) *CreateCollectionOptions {
+	c.EncryptedFields = encryptedFields
+	return c
+}
+
+// SetClusteredIndex sets the value for the ClusteredIndex field.
+func (c *CreateCollectionOptions) SetClusteredIndex(clusteredIndex interface{}) *CreateCollectionOptions {
+	c.ClusteredIndex = clusteredIndex
+	return c
+}
+
+// MergeCreateCollectionOptions combines the given CreateCollectionOptions instances into a single
+// CreateCollectionOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeCreateCollectionOptions(opts ...*CreateCollectionOptions) *CreateCollectionOptions {
+	cc := CreateCollection()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.Capped != nil {
+			cc.Capped = opt.Capped
+		}
+		if opt.Collation != nil {
+			cc.Collation = opt.Collation
+		}
+		if opt.ChangeStreamPreAndPostImages != nil {
+			cc.ChangeStreamPreAndPostImages = opt.ChangeStreamPreAndPostImages
+		}
+		if opt.DefaultIndexOptions != nil {
+			cc.DefaultIndexOptions = opt.DefaultIndexOptions
+		}
+		if opt.MaxDocuments != nil {
+			cc.MaxDocuments = opt.MaxDocuments
+		}
+		if opt.SizeInBytes != nil {
+			cc.SizeInBytes = opt.SizeInBytes
+		}
+		if opt.StorageEngine != nil {
+			cc.StorageEngine = opt.StorageEngine
+		}
+		if opt.ValidationAction != nil {
+			cc.ValidationAction = opt.ValidationAction
+		}
+		if opt.ValidationLevel != nil {
+			cc.ValidationLevel = opt.ValidationLevel
+		}
+		if opt.Validator != nil {
+			cc.Validator = opt.Validator
+		}
+		if opt.ExpireAfterSeconds != nil {
+			cc.ExpireAfterSeconds = opt.ExpireAfterSeconds
+		}
+		if opt.TimeSeriesOptions != nil {
+			cc.TimeSeriesOptions = opt.TimeSeriesOptions
+		}
+		if opt.EncryptedFields != nil {
+			cc.EncryptedFields = opt.EncryptedFields
+		}
+		if opt.ClusteredIndex != nil {
+			cc.ClusteredIndex = opt.ClusteredIndex
+		}
+	}
+
+	return cc
+}
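+
+// Illustrative usage (a hedged sketch, not part of the upstream file): creating
+// a capped collection and a time-series collection, assuming a *mongo.Database
+// "db", a context "ctx", and the TimeSeriesOptions value "tso" built above.
+//
+//	capped := options.CreateCollection().SetCapped(true).SetSizeInBytes(1 << 20).SetMaxDocuments(1000)
+//	err := db.CreateCollection(ctx, "events", capped)
+//
+//	ts := options.CreateCollection().SetTimeSeriesOptions(tso).SetExpireAfterSeconds(86400)
+//	err = db.CreateCollection(ctx, "metrics", ts)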
+
+// CreateViewOptions represents options that can be used to configure a CreateView operation.
+type CreateViewOptions struct {
+	// Specifies the default collation for the new collection. This option is only valid for MongoDB versions >= 3.4.
+	// For previous server versions, the driver will return an error if this option is used. The default value is nil.
+	Collation *Collation
+}
+
+// CreateView creates a new CreateViewOptions instance.
+func CreateView() *CreateViewOptions {
+	return &CreateViewOptions{}
+}
+
+// SetCollation sets the value for the Collation field.
+func (c *CreateViewOptions) SetCollation(collation *Collation) *CreateViewOptions {
+	c.Collation = collation
+	return c
+}
+
+// MergeCreateViewOptions combines the given CreateViewOptions instances into a single CreateViewOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeCreateViewOptions(opts ...*CreateViewOptions) *CreateViewOptions {
+	cv := CreateView()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.Collation != nil {
+			cv.Collation = opt.Collation
+		}
+	}
+
+	return cv
+}
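+
+// Illustrative usage (a hedged sketch, not part of the upstream file): creating
+// a read-only view over a hypothetical "orders" collection, assuming a
+// *mongo.Database "db" and a context "ctx".
+//
+//	pipeline := mongo.Pipeline{bson.D{{Key: "$match", Value: bson.D{{Key: "status", Value: "open"}}}}}
+//	err := db.CreateView(ctx, "openOrders", "orders", pipeline, options.CreateView())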
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..5afe8a2480ca946387efa264a4b3012e343aca58
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go
@@ -0,0 +1,104 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// DataKeyOptions represents all possible options used to create a new data key.
+type DataKeyOptions struct {
+	MasterKey   interface{}
+	KeyAltNames []string
+
+	// KeyMaterial is used to encrypt data. If omitted, keyMaterial is generated from a cryptographically secure random
+	// source. "Key Material" is used interchangeably with "dataKey" and "Data Encryption Key" (DEK).
+	KeyMaterial []byte
+}
+
+// DataKey creates a new DataKeyOptions instance.
+func DataKey() *DataKeyOptions {
+	return &DataKeyOptions{}
+}
+
+// SetMasterKey specifies a KMS-specific key used to encrypt the new data key.
+//
+// If being used with a local KMS provider, this option is not applicable and should not be specified.
+//
+// For the AWS, Azure, and GCP KMS providers, this option is required and must be a document. For each, the value of the
+// "endpoint" or "keyVaultEndpoint" must be a host name with an optional port number (e.g. "foo.com" or "foo.com:443").
+//
+// When using AWS, the document must have the format:
+//
+//	{
+//	  region: <string>,
+//	  key: <string>,             // The Amazon Resource Name (ARN) to the AWS customer master key (CMK).
+//	  endpoint: Optional<string> // An alternate host identifier to send KMS requests to.
+//	}
+//
+// If unset, the "endpoint" defaults to "kms.<region>.amazonaws.com".
+//
+// When using Azure, the document must have the format:
+//
+//	{
+//	  keyVaultEndpoint: <string>,  // A host identifier to send KMS requests to.
+//	  keyName: <string>,
+//	  keyVersion: Optional<string> // A specific version of the named key.
+//	}
+//
+// If unset, "keyVersion" defaults to the key's primary version.
+//
+// When using GCP, the document must have the format:
+//
+//	{
+//	  projectId: <string>,
+//	  location: <string>,
+//	  keyRing: <string>,
+//	  keyName: <string>,
+//	  keyVersion: Optional<string>, // A specific version of the named key.
+//	  endpoint: Optional<string>    // An alternate host identifier to send KMS requests to.
+//	}
+//
+// If unset, "keyVersion" defaults to the key's primary version and "endpoint" defaults to "cloudkms.googleapis.com".
+func (dk *DataKeyOptions) SetMasterKey(masterKey interface{}) *DataKeyOptions {
+	dk.MasterKey = masterKey
+	return dk
+}
+
+// SetKeyAltNames specifies an optional list of string alternate names used to reference a key. If a key is created
+// with alternate names, encryption may refer to the key by a unique alternate name instead of by _id.
+func (dk *DataKeyOptions) SetKeyAltNames(keyAltNames []string) *DataKeyOptions {
+	dk.KeyAltNames = keyAltNames
+	return dk
+}
+
+// SetKeyMaterial will set a custom keyMaterial to DataKeyOptions which can be used to encrypt data.
+func (dk *DataKeyOptions) SetKeyMaterial(keyMaterial []byte) *DataKeyOptions {
+	dk.KeyMaterial = keyMaterial
+	return dk
+}
+
+// MergeDataKeyOptions combines the given DataKeyOptions instances into a single DataKeyOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeDataKeyOptions(opts ...*DataKeyOptions) *DataKeyOptions {
+	dko := DataKey()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.MasterKey != nil {
+			dko.MasterKey = opt.MasterKey
+		}
+		if opt.KeyAltNames != nil {
+			dko.KeyAltNames = opt.KeyAltNames
+		}
+		if opt.KeyMaterial != nil {
+			dko.KeyMaterial = opt.KeyMaterial
+		}
+	}
+
+	return dko
+}
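+
+// Illustrative usage (a hedged sketch, not part of the upstream file): creating
+// an AWS data key through a *mongo.ClientEncryption handle "ce"; the region and
+// the elided ARN are placeholders.
+//
+//	masterKey := bson.D{
+//		{Key: "region", Value: "us-east-1"},
+//		{Key: "key", Value: "arn:aws:kms:us-east-1:...:key/..."},
+//	}
+//	keyID, err := ce.CreateDataKey(ctx, "aws", options.DataKey().
+//		SetMasterKey(masterKey).
+//		SetKeyAltNames([]string{"appDataKey"}))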
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..38ee13550b41b7e0500bc7d804949519e6d36e60
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go
@@ -0,0 +1,104 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+)
+
+// DatabaseOptions represents options that can be used to configure a Database.
+type DatabaseOptions struct {
+	// ReadConcern is the read concern to use for operations executed on the Database. The default value is nil, which means that
+	// the read concern of the Client used to configure the Database will be used.
+	ReadConcern *readconcern.ReadConcern
+
+	// WriteConcern is the write concern to use for operations executed on the Database. The default value is nil, which means that the
+	// write concern of the Client used to configure the Database will be used.
+	WriteConcern *writeconcern.WriteConcern
+
+	// ReadPreference is the read preference to use for operations executed on the Database. The default value is nil, which means that
+	// the read preference of the Client used to configure the Database will be used.
+	ReadPreference *readpref.ReadPref
+
+	// BSONOptions configures optional BSON marshaling and unmarshaling
+	// behavior.
+	BSONOptions *BSONOptions
+
+	// Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Database. The default value
+	// is nil, which means that the registry of the Client used to configure the Database will be used.
+	Registry *bsoncodec.Registry
+}
+
+// Database creates a new DatabaseOptions instance.
+func Database() *DatabaseOptions {
+	return &DatabaseOptions{}
+}
+
+// SetReadConcern sets the value for the ReadConcern field.
+func (d *DatabaseOptions) SetReadConcern(rc *readconcern.ReadConcern) *DatabaseOptions {
+	d.ReadConcern = rc
+	return d
+}
+
+// SetWriteConcern sets the value for the WriteConcern field.
+func (d *DatabaseOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *DatabaseOptions {
+	d.WriteConcern = wc
+	return d
+}
+
+// SetReadPreference sets the value for the ReadPreference field.
+func (d *DatabaseOptions) SetReadPreference(rp *readpref.ReadPref) *DatabaseOptions {
+	d.ReadPreference = rp
+	return d
+}
+
+// SetBSONOptions configures optional BSON marshaling and unmarshaling behavior.
+func (d *DatabaseOptions) SetBSONOptions(opts *BSONOptions) *DatabaseOptions {
+	d.BSONOptions = opts
+	return d
+}
+
+// SetRegistry sets the value for the Registry field.
+func (d *DatabaseOptions) SetRegistry(r *bsoncodec.Registry) *DatabaseOptions {
+	d.Registry = r
+	return d
+}
+
+// MergeDatabaseOptions combines the given DatabaseOptions instances into a single DatabaseOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeDatabaseOptions(opts ...*DatabaseOptions) *DatabaseOptions {
+	d := Database()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadConcern != nil {
+			d.ReadConcern = opt.ReadConcern
+		}
+		if opt.WriteConcern != nil {
+			d.WriteConcern = opt.WriteConcern
+		}
+		if opt.ReadPreference != nil {
+			d.ReadPreference = opt.ReadPreference
+		}
+		if opt.Registry != nil {
+			d.Registry = opt.Registry
+		}
+		if opt.BSONOptions != nil {
+			d.BSONOptions = opt.BSONOptions
+		}
+	}
+
+	return d
+}
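+
+// Illustrative usage (a hedged sketch, not part of the upstream file): opening
+// a database handle with majority read and write concerns, assuming a
+// *mongo.Client "client" and the readconcern/writeconcern packages.
+//
+//	db := client.Database("appdb", options.Database().
+//		SetReadConcern(readconcern.Majority()).
+//		SetWriteConcern(writeconcern.Majority()))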
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..59aaef91533712d1e21b3c68e6e243fe85fe2669
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// DeleteOptions represents options that can be used to configure DeleteOne and DeleteMany operations.
+type DeleteOptions struct {
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The index to use for the operation. This should either be the index name as a string or the index specification
+	// as a document. This option is only valid for MongoDB versions >= 4.4. Server versions >= 3.4 and < 4.4 will return
+	// an error if this option is specified. For server versions < 3.4, the driver will return a client-side error if this option
+	// is specified. The driver will return an error if this option is specified during an unacknowledged write
+	// operation. The driver will return an error if the hint parameter is a multi-key map. The default value is nil,
+	// which means that no hint will be sent.
+	Hint interface{}
+
+	// Specifies parameters for the delete expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// Delete creates a new DeleteOptions instance.
+func Delete() *DeleteOptions {
+	return &DeleteOptions{}
+}
+
+// SetCollation sets the value for the Collation field.
+func (do *DeleteOptions) SetCollation(c *Collation) *DeleteOptions {
+	do.Collation = c
+	return do
+}
+
+// SetComment sets the value for the Comment field.
+func (do *DeleteOptions) SetComment(comment interface{}) *DeleteOptions {
+	do.Comment = comment
+	return do
+}
+
+// SetHint sets the value for the Hint field.
+func (do *DeleteOptions) SetHint(hint interface{}) *DeleteOptions {
+	do.Hint = hint
+	return do
+}
+
+// SetLet sets the value for the Let field.
+func (do *DeleteOptions) SetLet(let interface{}) *DeleteOptions {
+	do.Let = let
+	return do
+}
+
+// MergeDeleteOptions combines the given DeleteOptions instances into a single DeleteOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions {
+	dOpts := Delete()
+	for _, do := range opts {
+		if do == nil {
+			continue
+		}
+		if do.Collation != nil {
+			dOpts.Collation = do.Collation
+		}
+		if do.Comment != nil {
+			dOpts.Comment = do.Comment
+		}
+		if do.Hint != nil {
+			dOpts.Hint = do.Hint
+		}
+		if do.Let != nil {
+			dOpts.Let = do.Let
+		}
+	}
+
+	return dOpts
+}
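+
+// Illustrative usage (a hedged sketch, not part of the upstream file): deleting
+// documents with a hint and a trace comment, assuming a *mongo.Collection
+// "coll", a context "ctx", and a hypothetical "status_1" index.
+//
+//	opts := options.Delete().SetHint("status_1").SetComment("purge expired sessions")
+//	res, err := coll.DeleteMany(ctx, bson.D{{Key: "status", Value: "expired"}}, opts)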
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..819f2a9a8f22ab9face64a521bbeb38d149bb84e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go
@@ -0,0 +1,81 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// DistinctOptions represents options that can be used to configure a Distinct operation.
+type DistinctOptions struct {
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation. The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be
+	// used in its place to control the amount of time that a single operation can run before returning an error.
+	// MaxTime is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+}
+
+// Distinct creates a new DistinctOptions instance.
+func Distinct() *DistinctOptions {
+	return &DistinctOptions{}
+}
+
+// SetCollation sets the value for the Collation field.
+func (do *DistinctOptions) SetCollation(c *Collation) *DistinctOptions {
+	do.Collation = c
+	return do
+}
+
+// SetComment sets the value for the Comment field.
+func (do *DistinctOptions) SetComment(comment interface{}) *DistinctOptions {
+	do.Comment = comment
+	return do
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (do *DistinctOptions) SetMaxTime(d time.Duration) *DistinctOptions {
+	do.MaxTime = &d
+	return do
+}
+
+// MergeDistinctOptions combines the given DistinctOptions instances into a single DistinctOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeDistinctOptions(opts ...*DistinctOptions) *DistinctOptions {
+	distinctOpts := Distinct()
+	for _, do := range opts {
+		if do == nil {
+			continue
+		}
+		if do.Collation != nil {
+			distinctOpts.Collation = do.Collation
+		}
+		if do.Comment != nil {
+			distinctOpts.Comment = do.Comment
+		}
+		if do.MaxTime != nil {
+			distinctOpts.MaxTime = do.MaxTime
+		}
+	}
+
+	return distinctOpts
+}
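+
+// Illustrative usage (a hedged sketch, not part of the upstream file): listing
+// the distinct values of a hypothetical "category" field, assuming a
+// *mongo.Collection "coll" and a context "ctx".
+//
+//	values, err := coll.Distinct(ctx, "category", bson.D{},
+//		options.Distinct().SetMaxTime(2*time.Second))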
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..23ef4c30a6bf7f5903a32674fba77e31bbaac856
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/doc.go
@@ -0,0 +1,8 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package options defines the optional configurations for the MongoDB Go Driver.
+package options
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..68278ba45e6cdfbdda37fa3d16859a26052a131c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go
@@ -0,0 +1,153 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// These constants specify valid values for QueryType.
+// QueryType is used for Queryable Encryption.
+const (
+	QueryTypeEquality string = "equality"
+)
+
+// RangeOptions specifies index options for a Queryable Encryption field supporting "range" queries.
+type RangeOptions struct {
+	Min        *bson.RawValue
+	Max        *bson.RawValue
+	Sparsity   *int64
+	TrimFactor *int32
+	Precision  *int32
+}
+
+// EncryptOptions represents options to explicitly encrypt a value.
+type EncryptOptions struct {
+	KeyID            *primitive.Binary
+	KeyAltName       *string
+	Algorithm        string
+	QueryType        string
+	ContentionFactor *int64
+	RangeOptions     *RangeOptions
+}
+
+// Encrypt creates a new EncryptOptions instance.
+func Encrypt() *EncryptOptions {
+	return &EncryptOptions{}
+}
+
+// SetKeyID specifies an _id of a data key. This should be a UUID (a primitive.Binary with subtype 4).
+func (e *EncryptOptions) SetKeyID(keyID primitive.Binary) *EncryptOptions {
+	e.KeyID = &keyID
+	return e
+}
+
+// SetKeyAltName identifies a key vault document by 'keyAltName'.
+func (e *EncryptOptions) SetKeyAltName(keyAltName string) *EncryptOptions {
+	e.KeyAltName = &keyAltName
+	return e
+}
+
+// SetAlgorithm specifies an algorithm to use for encryption. This should be one of the following:
+// - AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic
+// - AEAD_AES_256_CBC_HMAC_SHA_512-Random
+// - Indexed
+// - Unindexed
+// - Range
+// This is required.
+// Indexed and Unindexed are used for Queryable Encryption.
+func (e *EncryptOptions) SetAlgorithm(algorithm string) *EncryptOptions {
+	e.Algorithm = algorithm
+	return e
+}
+
+// SetQueryType specifies the intended query type. It is only valid to set if algorithm is "Indexed".
+// This should be one of the following:
+// - equality
+// QueryType is used for Queryable Encryption.
+func (e *EncryptOptions) SetQueryType(queryType string) *EncryptOptions {
+	e.QueryType = queryType
+	return e
+}
+
+// SetContentionFactor specifies the contention factor. It is only valid to set if algorithm is "Indexed".
+// ContentionFactor is used for Queryable Encryption.
+func (e *EncryptOptions) SetContentionFactor(contentionFactor int64) *EncryptOptions {
+	e.ContentionFactor = &contentionFactor
+	return e
+}
+
+// SetRangeOptions specifies the options to use for explicit encryption with range. It is only valid to set if algorithm is "Range".
+func (e *EncryptOptions) SetRangeOptions(ro RangeOptions) *EncryptOptions {
+	e.RangeOptions = &ro
+	return e
+}
+
+// SetMin sets the range index minimum value.
+func (ro *RangeOptions) SetMin(min bson.RawValue) *RangeOptions {
+	ro.Min = &min
+	return ro
+}
+
+// SetMax sets the range index maximum value.
+func (ro *RangeOptions) SetMax(max bson.RawValue) *RangeOptions {
+	ro.Max = &max
+	return ro
+}
+
+// SetSparsity sets the range index sparsity.
+func (ro *RangeOptions) SetSparsity(sparsity int64) *RangeOptions {
+	ro.Sparsity = &sparsity
+	return ro
+}
+
+// SetTrimFactor sets the range index trim factor.
+func (ro *RangeOptions) SetTrimFactor(trimFactor int32) *RangeOptions {
+	ro.TrimFactor = &trimFactor
+	return ro
+}
+
+// SetPrecision sets the range index precision.
+func (ro *RangeOptions) SetPrecision(precision int32) *RangeOptions {
+	ro.Precision = &precision
+	return ro
+}
+
+// MergeEncryptOptions combines the given EncryptOptions instances into a single EncryptOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeEncryptOptions(opts ...*EncryptOptions) *EncryptOptions {
+	eo := Encrypt()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.KeyID != nil {
+			eo.KeyID = opt.KeyID
+		}
+		if opt.KeyAltName != nil {
+			eo.KeyAltName = opt.KeyAltName
+		}
+		if opt.Algorithm != "" {
+			eo.Algorithm = opt.Algorithm
+		}
+		if opt.QueryType != "" {
+			eo.QueryType = opt.QueryType
+		}
+		if opt.ContentionFactor != nil {
+			eo.ContentionFactor = opt.ContentionFactor
+		}
+		if opt.RangeOptions != nil {
+			eo.RangeOptions = opt.RangeOptions
+		}
+	}
+
+	return eo
+}
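+
+// Illustrative usage (a hedged sketch, not part of the upstream file):
+// deterministically encrypting a value through a *mongo.ClientEncryption handle
+// "ce", reusing the "keyID" returned by CreateDataKey; assumes the bsontype and
+// bsoncore packages are imported.
+//
+//	rawVal := bson.RawValue{Type: bsontype.String, Value: bsoncore.AppendString(nil, "555-55-5555")}
+//	encrypted, err := ce.Encrypt(ctx, rawVal, options.Encrypt().
+//		SetAlgorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").
+//		SetKeyID(keyID))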
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..d088af9c9a644243b0d3198bc3ce40a3ad3082e0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go
@@ -0,0 +1,67 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// EstimatedDocumentCountOptions represents options that can be used to configure an EstimatedDocumentCount operation.
+type EstimatedDocumentCountOptions struct {
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+}
+
+// EstimatedDocumentCount creates a new EstimatedDocumentCountOptions instance.
+func EstimatedDocumentCount() *EstimatedDocumentCountOptions {
+	return &EstimatedDocumentCountOptions{}
+}
+
+// SetComment sets the value for the Comment field.
+func (eco *EstimatedDocumentCountOptions) SetComment(comment interface{}) *EstimatedDocumentCountOptions {
+	eco.Comment = comment
+	return eco
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option
+// may be used in its place to control the amount of time that a single operation can run before
+// returning an error. MaxTime is ignored if Timeout is set on the client.
+func (eco *EstimatedDocumentCountOptions) SetMaxTime(d time.Duration) *EstimatedDocumentCountOptions {
+	eco.MaxTime = &d
+	return eco
+}
+
+// MergeEstimatedDocumentCountOptions combines the given EstimatedDocumentCountOptions instances into a single
+// EstimatedDocumentCountOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeEstimatedDocumentCountOptions(opts ...*EstimatedDocumentCountOptions) *EstimatedDocumentCountOptions {
+	e := EstimatedDocumentCount()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Comment != nil {
+			e.Comment = opt.Comment
+		}
+		if opt.MaxTime != nil {
+			e.MaxTime = opt.MaxTime
+		}
+	}
+
+	return e
+}
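+
+// Illustrative usage (a hedged sketch, not part of the upstream file): a fast,
+// metadata-based count that does not scan documents, assuming a
+// *mongo.Collection "coll" and a context "ctx".
+//
+//	n, err := coll.EstimatedDocumentCount(ctx,
+//		options.EstimatedDocumentCount().SetMaxTime(time.Second))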
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..fa3bf1197a6695fea901720fcc283e07f3d082a3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go
@@ -0,0 +1,1110 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+)
+
+// FindOptions represents options that can be used to configure a Find operation.
+type FindOptions struct {
+	// AllowDiskUse specifies whether the server can write temporary data to disk while executing the Find operation.
+	// This option is only valid for MongoDB versions >= 4.4. Server versions >= 3.2 and < 4.4 will report an error if this option
+	// is specified. For server versions < 3.2, the driver will return a client-side error if this option is specified.
+	// The default value is false.
+	AllowDiskUse *bool
+
+	// AllowPartialResults specifies whether the Find operation on a sharded cluster can return partial results if some
+	// shards are down rather than returning an error. The default value is false.
+	AllowPartialResults *bool
+
+	// BatchSize is the maximum number of documents to be included in each batch returned by the server.
+	BatchSize *int32
+
+	// Collation specifies a collation to use for string comparisons during the operation. This option is only valid for
+	// MongoDB versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation.
+	// The default is nil, which means that no comment will be included in the logs.
+	Comment *string
+
+	// CursorType specifies the type of cursor that should be created for the operation. The default is NonTailable, which
+	// means that the cursor will be closed by the server when the last batch of documents is retrieved.
+	CursorType *CursorType
+
+	// Hint is the index to use for the Find operation. This should either be the index name as a string or the index
+	// specification as a document. The driver will return an error if the hint parameter is a multi-key map. The default
+	// value is nil, which means that no hint will be sent.
+	Hint interface{}
+
+	// Limit is the maximum number of documents to return. The default value is 0, which means that all documents matching the
+	// filter will be returned. A negative limit specifies that the resulting documents should be returned in a single
+	// batch.
+	Limit *int64
+
+	// Max is a document specifying the exclusive upper bound for a specific index. The default value is nil, which means that
+	// there is no maximum value.
+	Max interface{}
+
+	// MaxAwaitTime is the maximum amount of time that the server should wait for new documents to satisfy a tailable cursor
+	// query. This option is only valid for tailable await cursors (see the CursorType option for more information) and
+	// MongoDB versions >= 3.2. For other cursor types or previous server versions, this option is ignored.
+	MaxAwaitTime *time.Duration
+
+	// MaxTime is the maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used in its
+	// place to control the amount of time that a single operation can run before returning an error. MaxTime is ignored if
+	// Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// Min is a document specifying the inclusive lower bound for a specific index. The default value is nil, which means that
+	// there is no minimum value.
+	Min interface{}
+
+	// NoCursorTimeout specifies whether the cursor created by the operation will not timeout after a period of inactivity.
+	// The default value is false.
+	NoCursorTimeout *bool
+
+	// OplogReplay is for internal replication use only and should not be set.
+	//
+	// Deprecated: This option has been deprecated in MongoDB version 4.4 and will be ignored by the server if it is
+	// set.
+	OplogReplay *bool
+
+	// Project is a document describing which fields will be included in the documents returned by the Find operation. The
+	// default value is nil, which means all fields will be included.
+	Projection interface{}
+
+	// ReturnKey specifies whether the documents returned by the Find operation will only contain fields corresponding to the
+	// index used. The default value is false.
+	ReturnKey *bool
+
+	// ShowRecordID specifies whether a $recordId field with a record identifier will be included in the documents returned by
+	// the Find operation. The default value is false.
+	ShowRecordID *bool
+
+	// Skip is the number of documents to skip before adding documents to the result. The default value is 0.
+	Skip *int64
+
+	// Snapshot specifies whether the cursor will not return a document more than once because of an intervening write operation.
+	// The default value is false.
+	//
+	// Deprecated: This option has been deprecated in MongoDB version 3.6 and removed in MongoDB version 4.0.
+	Snapshot *bool
+
+	// Sort is a document specifying the order in which documents should be returned.  The driver will return an error if the
+	// sort parameter is a multi-key map.
+	Sort interface{}
+
+	// Let specifies parameters for the find expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// Find creates a new FindOptions instance.
+func Find() *FindOptions {
+	return &FindOptions{}
+}
+
+// SetAllowDiskUse sets the value for the AllowDiskUse field.
+func (f *FindOptions) SetAllowDiskUse(b bool) *FindOptions {
+	f.AllowDiskUse = &b
+	return f
+}
+
+// SetAllowPartialResults sets the value for the AllowPartialResults field.
+func (f *FindOptions) SetAllowPartialResults(b bool) *FindOptions {
+	f.AllowPartialResults = &b
+	return f
+}
+
+// SetBatchSize sets the value for the BatchSize field.
+func (f *FindOptions) SetBatchSize(i int32) *FindOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetCollation sets the value for the Collation field.
+func (f *FindOptions) SetCollation(collation *Collation) *FindOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment sets the value for the Comment field.
+func (f *FindOptions) SetComment(comment string) *FindOptions {
+	f.Comment = &comment
+	return f
+}
+
+// SetCursorType sets the value for the CursorType field.
+func (f *FindOptions) SetCursorType(ct CursorType) *FindOptions {
+	f.CursorType = &ct
+	return f
+}
+
+// SetHint sets the value for the Hint field.
+func (f *FindOptions) SetHint(hint interface{}) *FindOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetLet sets the value for the Let field.
+func (f *FindOptions) SetLet(let interface{}) *FindOptions {
+	f.Let = let
+	return f
+}
+
+// SetLimit sets the value for the Limit field.
+func (f *FindOptions) SetLimit(i int64) *FindOptions {
+	f.Limit = &i
+	return f
+}
+
+// SetMax sets the value for the Max field.
+func (f *FindOptions) SetMax(max interface{}) *FindOptions {
+	f.Max = max
+	return f
+}
+
+// SetMaxAwaitTime sets the value for the MaxAwaitTime field.
+func (f *FindOptions) SetMaxAwaitTime(d time.Duration) *FindOptions {
+	f.MaxAwaitTime = &d
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation
+// can run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (f *FindOptions) SetMaxTime(d time.Duration) *FindOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetMin sets the value for the Min field.
+func (f *FindOptions) SetMin(min interface{}) *FindOptions {
+	f.Min = min
+	return f
+}
+
+// SetNoCursorTimeout sets the value for the NoCursorTimeout field.
+func (f *FindOptions) SetNoCursorTimeout(b bool) *FindOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetOplogReplay sets the value for the OplogReplay field.
+//
+// Deprecated: This option has been deprecated in MongoDB version 4.4 and will be ignored by the server if it is set.
+func (f *FindOptions) SetOplogReplay(b bool) *FindOptions {
+	f.OplogReplay = &b
+	return f
+}
+
+// SetProjection sets the value for the Projection field.
+func (f *FindOptions) SetProjection(projection interface{}) *FindOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnKey sets the value for the ReturnKey field.
+func (f *FindOptions) SetReturnKey(b bool) *FindOptions {
+	f.ReturnKey = &b
+	return f
+}
+
+// SetShowRecordID sets the value for the ShowRecordID field.
+func (f *FindOptions) SetShowRecordID(b bool) *FindOptions {
+	f.ShowRecordID = &b
+	return f
+}
+
+// SetSkip sets the value for the Skip field.
+func (f *FindOptions) SetSkip(i int64) *FindOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSnapshot sets the value for the Snapshot field.
+//
+// Deprecated: This option has been deprecated in MongoDB version 3.6 and removed in MongoDB version 4.0.
+func (f *FindOptions) SetSnapshot(b bool) *FindOptions {
+	f.Snapshot = &b
+	return f
+}
+
+// SetSort sets the value for the Sort field.
+func (f *FindOptions) SetSort(sort interface{}) *FindOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOptions combines the given FindOptions instances into a single FindOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeFindOptions(opts ...*FindOptions) *FindOptions {
+	fo := Find()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.AllowDiskUse != nil {
+			fo.AllowDiskUse = opt.AllowDiskUse
+		}
+		if opt.AllowPartialResults != nil {
+			fo.AllowPartialResults = opt.AllowPartialResults
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.CursorType != nil {
+			fo.CursorType = opt.CursorType
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Let != nil {
+			fo.Let = opt.Let
+		}
+		if opt.Limit != nil {
+			fo.Limit = opt.Limit
+		}
+		if opt.Max != nil {
+			fo.Max = opt.Max
+		}
+		if opt.MaxAwaitTime != nil {
+			fo.MaxAwaitTime = opt.MaxAwaitTime
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Min != nil {
+			fo.Min = opt.Min
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.OplogReplay != nil {
+			fo.OplogReplay = opt.OplogReplay
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnKey != nil {
+			fo.ReturnKey = opt.ReturnKey
+		}
+		if opt.ShowRecordID != nil {
+			fo.ShowRecordID = opt.ShowRecordID
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Snapshot != nil {
+			fo.Snapshot = opt.Snapshot
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
+
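+// Illustrative usage (a hedged sketch, not part of the upstream file): a sorted,
+// paginated query projecting a hypothetical "title" field, assuming a
+// *mongo.Collection "coll" and a context "ctx".
+//
+//	opts := options.Find().
+//		SetSort(bson.D{{Key: "createdAt", Value: -1}}).
+//		SetSkip(20).
+//		SetLimit(10).
+//		SetProjection(bson.D{{Key: "title", Value: 1}})
+//	cur, err := coll.Find(ctx, bson.D{}, opts)
+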
+// FindOneOptions represents options that can be used to configure a FindOne operation.
+type FindOneOptions struct {
+	// If true, an operation on a sharded cluster can return partial results if some shards are down rather than
+	// returning an error. The default value is false.
+	AllowPartialResults *bool
+
+	// The maximum number of documents to be included in each batch returned by the server.
+	//
+	// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+	BatchSize *int32
+
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation.
+	// The default is nil, which means that no comment will be included in the logs.
+	Comment *string
+
+	// Specifies the type of cursor that should be created for the operation. The default is NonTailable, which means
+	// that the cursor will be closed by the server when the last batch of documents is retrieved.
+	//
+	// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+	CursorType *CursorType
+
+	// The index to use for the operation. This should either be the index name as a string or the index specification
+	// as a document. The driver will return an error if the hint parameter is a multi-key map. The default value is nil,
+	// which means that no hint will be sent.
+	Hint interface{}
+
+	// A document specifying the exclusive upper bound for a specific index. The default value is nil, which means that
+	// there is no maximum value.
+	Max interface{}
+
+	// The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query.
+	// This option is only valid for tailable await cursors (see the CursorType option for more information) and
+	// MongoDB versions >= 3.2. For other cursor types or previous server versions, this option is ignored.
+	//
+	// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+	MaxAwaitTime *time.Duration
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// A document specifying the inclusive lower bound for a specific index. The default value is nil, which means that
+	// there is no minimum value.
+	Min interface{}
+
+	// If true, the cursor created by the operation will not timeout after a period of inactivity. The default value
+	// is false.
+	//
+	// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+	NoCursorTimeout *bool
+
+	// This option is for internal replication use only and should not be set.
+	//
+	// Deprecated: This option has been deprecated in MongoDB version 4.4 and will be ignored by the server if it is
+	// set.
+	OplogReplay *bool
+
+	// A document describing which fields will be included in the document returned by the operation. The default value
+	// is nil, which means all fields will be included.
+	Projection interface{}
+
+	// If true, the document returned by the operation will only contain fields corresponding to the index used. The
+	// default value is false.
+	ReturnKey *bool
+
+	// If true, a $recordId field with a record identifier will be included in the document returned by the operation.
+	// The default value is false.
+	ShowRecordID *bool
+
+	// The number of documents to skip before selecting the document to be returned. The default value is 0.
+	Skip *int64
+
+	// If true, the cursor will not return a document more than once because of an intervening write operation. The
+	// default value is false.
+	//
+	// Deprecated: This option has been deprecated in MongoDB version 3.6 and removed in MongoDB version 4.0.
+	Snapshot *bool
+
+	// A document specifying the sort order to apply to the query. The first document in the sorted order will be
+	// returned. The driver will return an error if the sort parameter is a multi-key map.
+	Sort interface{}
+}
+
+// FindOne creates a new FindOneOptions instance.
+func FindOne() *FindOneOptions {
+	return &FindOneOptions{}
+}
+
+// SetAllowPartialResults sets the value for the AllowPartialResults field.
+func (f *FindOneOptions) SetAllowPartialResults(b bool) *FindOneOptions {
+	f.AllowPartialResults = &b
+	return f
+}
+
+// SetBatchSize sets the value for the BatchSize field.
+//
+// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+func (f *FindOneOptions) SetBatchSize(i int32) *FindOneOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetCollation sets the value for the Collation field.
+func (f *FindOneOptions) SetCollation(collation *Collation) *FindOneOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment sets the value for the Comment field.
+func (f *FindOneOptions) SetComment(comment string) *FindOneOptions {
+	f.Comment = &comment
+	return f
+}
+
+// SetCursorType sets the value for the CursorType field.
+//
+// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+func (f *FindOneOptions) SetCursorType(ct CursorType) *FindOneOptions {
+	f.CursorType = &ct
+	return f
+}
+
+// SetHint sets the value for the Hint field.
+func (f *FindOneOptions) SetHint(hint interface{}) *FindOneOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetMax sets the value for the Max field.
+func (f *FindOneOptions) SetMax(max interface{}) *FindOneOptions {
+	f.Max = max
+	return f
+}
+
+// SetMaxAwaitTime sets the value for the MaxAwaitTime field.
+//
+// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+func (f *FindOneOptions) SetMaxAwaitTime(d time.Duration) *FindOneOptions {
+	f.MaxAwaitTime = &d
+	return f
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (f *FindOneOptions) SetMaxTime(d time.Duration) *FindOneOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetMin sets the value for the Min field.
+func (f *FindOneOptions) SetMin(min interface{}) *FindOneOptions {
+	f.Min = min
+	return f
+}
+
+// SetNoCursorTimeout sets the value for the NoCursorTimeout field.
+//
+// Deprecated: This option is not valid for a findOne operation, as no cursor is actually created.
+func (f *FindOneOptions) SetNoCursorTimeout(b bool) *FindOneOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetOplogReplay sets the value for the OplogReplay field.
+//
+// Deprecated: This option has been deprecated in MongoDB version 4.4 and will be ignored by the server if it is
+// set.
+func (f *FindOneOptions) SetOplogReplay(b bool) *FindOneOptions {
+	f.OplogReplay = &b
+	return f
+}
+
+// SetProjection sets the value for the Projection field.
+func (f *FindOneOptions) SetProjection(projection interface{}) *FindOneOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnKey sets the value for the ReturnKey field.
+func (f *FindOneOptions) SetReturnKey(b bool) *FindOneOptions {
+	f.ReturnKey = &b
+	return f
+}
+
+// SetShowRecordID sets the value for the ShowRecordID field.
+func (f *FindOneOptions) SetShowRecordID(b bool) *FindOneOptions {
+	f.ShowRecordID = &b
+	return f
+}
+
+// SetSkip sets the value for the Skip field.
+func (f *FindOneOptions) SetSkip(i int64) *FindOneOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSnapshot sets the value for the Snapshot field.
+//
+// Deprecated: This option has been deprecated in MongoDB version 3.6 and removed in MongoDB version 4.0.
+func (f *FindOneOptions) SetSnapshot(b bool) *FindOneOptions {
+	f.Snapshot = &b
+	return f
+}
+
+// SetSort sets the value for the Sort field.
+func (f *FindOneOptions) SetSort(sort interface{}) *FindOneOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOneOptions combines the given FindOneOptions instances into a single FindOneOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeFindOneOptions(opts ...*FindOneOptions) *FindOneOptions {
+	fo := FindOne()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.AllowPartialResults != nil {
+			fo.AllowPartialResults = opt.AllowPartialResults
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.CursorType != nil {
+			fo.CursorType = opt.CursorType
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Max != nil {
+			fo.Max = opt.Max
+		}
+		if opt.MaxAwaitTime != nil {
+			fo.MaxAwaitTime = opt.MaxAwaitTime
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Min != nil {
+			fo.Min = opt.Min
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.OplogReplay != nil {
+			fo.OplogReplay = opt.OplogReplay
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnKey != nil {
+			fo.ReturnKey = opt.ReturnKey
+		}
+		if opt.ShowRecordID != nil {
+			fo.ShowRecordID = opt.ShowRecordID
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Snapshot != nil {
+			fo.Snapshot = opt.Snapshot
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
+
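+// Example (sketch): assuming coll is a *mongo.Collection and ctx is a
+// context.Context, and using illustrative field names, a typical fluent
+// construction looks like:
+//
+//	opts := options.FindOne().
+//		SetProjection(bson.D{{Key: "name", Value: 1}, {Key: "age", Value: 1}}).
+//		SetSort(bson.D{{Key: "age", Value: -1}}).
+//		SetSkip(1)
+//	var result bson.M
+//	err := coll.FindOne(ctx, bson.D{{Key: "active", Value: true}}, opts).Decode(&result)
+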
+// FindOneAndReplaceOptions represents options that can be used to configure a FindOneAndReplace operation.
+type FindOneAndReplaceOptions struct {
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// A document describing which fields will be included in the document returned by the operation. The default value
+	// is nil, which means all fields will be included.
+	Projection interface{}
+
+	// Specifies whether the original or replaced document should be returned by the operation. The default value is
+	// Before, which means the document as it appeared before the replacement will be returned.
+	ReturnDocument *ReturnDocument
+
+	// A document specifying which document should be replaced if the filter used by the operation matches multiple
+	// documents in the collection. If set, the first document in the sorted order will be replaced. The driver will
+	// return an error if the sort parameter is a multi-key map. The default value is nil.
+	Sort interface{}
+
+	// If true, a new document will be inserted if the filter does not match any documents in the collection. The
+	// default value is false.
+	Upsert *bool
+
+	// The index to use for the operation. This should either be the index name as a string or the index specification
+	// as a document. This option is only valid for MongoDB versions >= 4.4. MongoDB version 4.2 will report an error if
+	// this option is specified. For server versions < 4.2, the driver will return an error if this option is specified.
+	// The driver will return an error if this option is used during an unacknowledged write operation. The driver
+	// will return an error if the hint parameter is a multi-key map. The default value is nil, which means that no hint
+	// will be sent.
+	Hint interface{}
+
+	// Specifies parameters for the find one and replace expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// FindOneAndReplace creates a new FindOneAndReplaceOptions instance.
+func FindOneAndReplace() *FindOneAndReplaceOptions {
+	return &FindOneAndReplaceOptions{}
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (f *FindOneAndReplaceOptions) SetBypassDocumentValidation(b bool) *FindOneAndReplaceOptions {
+	f.BypassDocumentValidation = &b
+	return f
+}
+
+// SetCollation sets the value for the Collation field.
+func (f *FindOneAndReplaceOptions) SetCollation(collation *Collation) *FindOneAndReplaceOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment sets the value for the Comment field.
+func (f *FindOneAndReplaceOptions) SetComment(comment interface{}) *FindOneAndReplaceOptions {
+	f.Comment = comment
+	return f
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (f *FindOneAndReplaceOptions) SetMaxTime(d time.Duration) *FindOneAndReplaceOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetProjection sets the value for the Projection field.
+func (f *FindOneAndReplaceOptions) SetProjection(projection interface{}) *FindOneAndReplaceOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnDocument sets the value for the ReturnDocument field.
+func (f *FindOneAndReplaceOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndReplaceOptions {
+	f.ReturnDocument = &rd
+	return f
+}
+
+// SetSort sets the value for the Sort field.
+func (f *FindOneAndReplaceOptions) SetSort(sort interface{}) *FindOneAndReplaceOptions {
+	f.Sort = sort
+	return f
+}
+
+// SetUpsert sets the value for the Upsert field.
+func (f *FindOneAndReplaceOptions) SetUpsert(b bool) *FindOneAndReplaceOptions {
+	f.Upsert = &b
+	return f
+}
+
+// SetHint sets the value for the Hint field.
+func (f *FindOneAndReplaceOptions) SetHint(hint interface{}) *FindOneAndReplaceOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetLet sets the value for the Let field.
+func (f *FindOneAndReplaceOptions) SetLet(let interface{}) *FindOneAndReplaceOptions {
+	f.Let = let
+	return f
+}
+
+// MergeFindOneAndReplaceOptions combines the given FindOneAndReplaceOptions instances into a single
+// FindOneAndReplaceOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeFindOneAndReplaceOptions(opts ...*FindOneAndReplaceOptions) *FindOneAndReplaceOptions {
+	fo := FindOneAndReplace()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.BypassDocumentValidation != nil {
+			fo.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnDocument != nil {
+			fo.ReturnDocument = opt.ReturnDocument
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+		if opt.Upsert != nil {
+			fo.Upsert = opt.Upsert
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Let != nil {
+			fo.Let = opt.Let
+		}
+	}
+
+	return fo
+}
+
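+// Example (sketch): assuming coll is a *mongo.Collection and ctx is a
+// context.Context, replace a document and return the post-replacement state,
+// inserting it if it does not exist (document contents are illustrative):
+//
+//	opts := options.FindOneAndReplace().
+//		SetUpsert(true).
+//		SetReturnDocument(options.After)
+//	replacement := bson.D{{Key: "name", Value: "alice"}, {Key: "age", Value: 30}}
+//	var replaced bson.M
+//	err := coll.FindOneAndReplace(ctx, bson.D{{Key: "name", Value: "alice"}}, replacement, opts).Decode(&replaced)
+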
+// FindOneAndUpdateOptions represents options that can be used to configure a FindOneAndUpdate operation.
+type FindOneAndUpdateOptions struct {
+	// A set of filters specifying to which array elements an update should apply. This option is only valid for MongoDB
+	// versions >= 3.6. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the update will apply to all array elements.
+	ArrayFilters *ArrayFilters
+
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime is
+	// ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// A document describing which fields will be included in the document returned by the operation. The default value
+	// is nil, which means all fields will be included.
+	Projection interface{}
+
+	// Specifies whether the original or updated document should be returned by the operation. The default value is
+	// Before, which means the document as it appeared before the update will be returned.
+	ReturnDocument *ReturnDocument
+
+	// A document specifying which document should be updated if the filter used by the operation matches multiple
+	// documents in the collection. If set, the first document in the sorted order will be updated. The driver will
+	// return an error if the sort parameter is a multi-key map. The default value is nil.
+	Sort interface{}
+
+	// If true, a new document will be inserted if the filter does not match any documents in the collection. The
+	// default value is false.
+	Upsert *bool
+
+	// The index to use for the operation. This should either be the index name as a string or the index specification
+	// as a document. This option is only valid for MongoDB versions >= 4.4. MongoDB version 4.2 will report an error if
+	// this option is specified. For server versions < 4.2, the driver will return an error if this option is specified.
+	// The driver will return an error if this option is used during an unacknowledged write operation. The driver
+	// will return an error if the hint parameter is a multi-key map. The default value is nil, which means that no hint
+	// will be sent.
+	Hint interface{}
+
+	// Specifies parameters for the find one and update expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// FindOneAndUpdate creates a new FindOneAndUpdateOptions instance.
+func FindOneAndUpdate() *FindOneAndUpdateOptions {
+	return &FindOneAndUpdateOptions{}
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (f *FindOneAndUpdateOptions) SetBypassDocumentValidation(b bool) *FindOneAndUpdateOptions {
+	f.BypassDocumentValidation = &b
+	return f
+}
+
+// SetArrayFilters sets the value for the ArrayFilters field.
+func (f *FindOneAndUpdateOptions) SetArrayFilters(filters ArrayFilters) *FindOneAndUpdateOptions {
+	f.ArrayFilters = &filters
+	return f
+}
+
+// SetCollation sets the value for the Collation field.
+func (f *FindOneAndUpdateOptions) SetCollation(collation *Collation) *FindOneAndUpdateOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment sets the value for the Comment field.
+func (f *FindOneAndUpdateOptions) SetComment(comment interface{}) *FindOneAndUpdateOptions {
+	f.Comment = comment
+	return f
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (f *FindOneAndUpdateOptions) SetMaxTime(d time.Duration) *FindOneAndUpdateOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetProjection sets the value for the Projection field.
+func (f *FindOneAndUpdateOptions) SetProjection(projection interface{}) *FindOneAndUpdateOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnDocument sets the value for the ReturnDocument field.
+func (f *FindOneAndUpdateOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndUpdateOptions {
+	f.ReturnDocument = &rd
+	return f
+}
+
+// SetSort sets the value for the Sort field.
+func (f *FindOneAndUpdateOptions) SetSort(sort interface{}) *FindOneAndUpdateOptions {
+	f.Sort = sort
+	return f
+}
+
+// SetUpsert sets the value for the Upsert field.
+func (f *FindOneAndUpdateOptions) SetUpsert(b bool) *FindOneAndUpdateOptions {
+	f.Upsert = &b
+	return f
+}
+
+// SetHint sets the value for the Hint field.
+func (f *FindOneAndUpdateOptions) SetHint(hint interface{}) *FindOneAndUpdateOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetLet sets the value for the Let field.
+func (f *FindOneAndUpdateOptions) SetLet(let interface{}) *FindOneAndUpdateOptions {
+	f.Let = let
+	return f
+}
+
+// MergeFindOneAndUpdateOptions combines the given FindOneAndUpdateOptions instances into a single
+// FindOneAndUpdateOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeFindOneAndUpdateOptions(opts ...*FindOneAndUpdateOptions) *FindOneAndUpdateOptions {
+	fo := FindOneAndUpdate()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ArrayFilters != nil {
+			fo.ArrayFilters = opt.ArrayFilters
+		}
+		if opt.BypassDocumentValidation != nil {
+			fo.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnDocument != nil {
+			fo.ReturnDocument = opt.ReturnDocument
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+		if opt.Upsert != nil {
+			fo.Upsert = opt.Upsert
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Let != nil {
+			fo.Let = opt.Let
+		}
+	}
+
+	return fo
+}
+
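+// Example (sketch): assuming coll and ctx are in scope, update matching array
+// elements via an array filter and return the updated document (field names
+// are illustrative):
+//
+//	opts := options.FindOneAndUpdate().
+//		SetReturnDocument(options.After).
+//		SetArrayFilters(options.ArrayFilters{
+//			Filters: []interface{}{bson.D{{Key: "elem.qty", Value: bson.D{{Key: "$lt", Value: 10}}}}},
+//		})
+//	update := bson.D{{Key: "$set", Value: bson.D{{Key: "items.$[elem].restock", Value: true}}}}
+//	var doc bson.M
+//	err := coll.FindOneAndUpdate(ctx, bson.D{}, update, opts).Decode(&doc)
+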
+// FindOneAndDeleteOptions represents options that can be used to configure a FindOneAndDelete operation.
+type FindOneAndDeleteOptions struct {
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// A document describing which fields will be included in the document returned by the operation. The default value
+	// is nil, which means all fields will be included.
+	Projection interface{}
+
+	// A document specifying which document should be deleted if the filter used by the operation matches multiple
+	// documents in the collection. If set, the first document in the sorted order will be deleted.
+	// The driver will return an error if the sort parameter is a multi-key map. The default value is nil.
+	Sort interface{}
+
+	// The index to use for the operation. This should either be the index name as a string or the index specification
+	// as a document. This option is only valid for MongoDB versions >= 4.4. MongoDB version 4.2 will report an error if
+	// this option is specified. For server versions < 4.2, the driver will return an error if this option is specified.
+	// The driver will return an error if this option is used during an unacknowledged write operation. The driver
+	// will return an error if the hint parameter is a multi-key map. The default value is nil, which means that no hint
+	// will be sent.
+	Hint interface{}
+
+	// Specifies parameters for the find one and delete expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// FindOneAndDelete creates a new FindOneAndDeleteOptions instance.
+func FindOneAndDelete() *FindOneAndDeleteOptions {
+	return &FindOneAndDeleteOptions{}
+}
+
+// SetCollation sets the value for the Collation field.
+func (f *FindOneAndDeleteOptions) SetCollation(collation *Collation) *FindOneAndDeleteOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment sets the value for the Comment field.
+func (f *FindOneAndDeleteOptions) SetComment(comment interface{}) *FindOneAndDeleteOptions {
+	f.Comment = comment
+	return f
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (f *FindOneAndDeleteOptions) SetMaxTime(d time.Duration) *FindOneAndDeleteOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetProjection sets the value for the Projection field.
+func (f *FindOneAndDeleteOptions) SetProjection(projection interface{}) *FindOneAndDeleteOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetSort sets the value for the Sort field.
+func (f *FindOneAndDeleteOptions) SetSort(sort interface{}) *FindOneAndDeleteOptions {
+	f.Sort = sort
+	return f
+}
+
+// SetHint sets the value for the Hint field.
+func (f *FindOneAndDeleteOptions) SetHint(hint interface{}) *FindOneAndDeleteOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetLet sets the value for the Let field.
+func (f *FindOneAndDeleteOptions) SetLet(let interface{}) *FindOneAndDeleteOptions {
+	f.Let = let
+	return f
+}
+
+// MergeFindOneAndDeleteOptions combines the given FindOneAndDeleteOptions instances into a single
+// FindOneAndDeleteOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeFindOneAndDeleteOptions(opts ...*FindOneAndDeleteOptions) *FindOneAndDeleteOptions {
+	fo := FindOneAndDelete()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Let != nil {
+			fo.Let = opt.Let
+		}
+	}
+
+	return fo
+}
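+
+// Example (sketch): assuming coll and ctx are in scope, delete the oldest
+// matching document and read back the deleted document (field names are
+// illustrative):
+//
+//	opts := options.FindOneAndDelete().
+//		SetSort(bson.D{{Key: "createdAt", Value: 1}}).
+//		SetProjection(bson.D{{Key: "_id", Value: 1}})
+//	var deleted bson.M
+//	err := coll.FindOneAndDelete(ctx, bson.D{{Key: "processed", Value: true}}, opts).Decode(&deleted)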
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..c8d347f4e7ed2bad753bc8defdab10802d7936ac
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go
@@ -0,0 +1,341 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+)
+
+// DefaultName is the default name for a GridFS bucket.
+var DefaultName = "fs"
+
+// DefaultChunkSize is the default size of each file chunk in bytes (255 KiB).
+var DefaultChunkSize int32 = 255 * 1024
+
+// DefaultRevision is the default revision number for a download by name operation.
+var DefaultRevision int32 = -1
+
+// BucketOptions represents options that can be used to configure a GridFS bucket.
+type BucketOptions struct {
+	// The name of the bucket. The default value is "fs".
+	Name *string
+
+	// The number of bytes in each chunk in the bucket. The default value is 255 KiB.
+	ChunkSizeBytes *int32
+
+	// The write concern for the bucket. The default value is the write concern of the database from which the bucket
+	// is created.
+	WriteConcern *writeconcern.WriteConcern
+
+	// The read concern for the bucket. The default value is the read concern of the database from which the bucket
+	// is created.
+	ReadConcern *readconcern.ReadConcern
+
+	// The read preference for the bucket. The default value is the read preference of the database from which the
+	// bucket is created.
+	ReadPreference *readpref.ReadPref
+}
+
+// GridFSBucket creates a new BucketOptions instance.
+func GridFSBucket() *BucketOptions {
+	return &BucketOptions{
+		Name:           &DefaultName,
+		ChunkSizeBytes: &DefaultChunkSize,
+	}
+}
+
+// SetName sets the value for the Name field.
+func (b *BucketOptions) SetName(name string) *BucketOptions {
+	b.Name = &name
+	return b
+}
+
+// SetChunkSizeBytes sets the value for the ChunkSizeBytes field.
+func (b *BucketOptions) SetChunkSizeBytes(i int32) *BucketOptions {
+	b.ChunkSizeBytes = &i
+	return b
+}
+
+// SetWriteConcern sets the value for the WriteConcern field.
+func (b *BucketOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *BucketOptions {
+	b.WriteConcern = wc
+	return b
+}
+
+// SetReadConcern sets the value for the ReadConcern field.
+func (b *BucketOptions) SetReadConcern(rc *readconcern.ReadConcern) *BucketOptions {
+	b.ReadConcern = rc
+	return b
+}
+
+// SetReadPreference sets the value for the ReadPreference field.
+func (b *BucketOptions) SetReadPreference(rp *readpref.ReadPref) *BucketOptions {
+	b.ReadPreference = rp
+	return b
+}
+
+// MergeBucketOptions combines the given BucketOptions instances into a single BucketOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeBucketOptions(opts ...*BucketOptions) *BucketOptions {
+	b := GridFSBucket()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Name != nil {
+			b.Name = opt.Name
+		}
+		if opt.ChunkSizeBytes != nil {
+			b.ChunkSizeBytes = opt.ChunkSizeBytes
+		}
+		if opt.WriteConcern != nil {
+			b.WriteConcern = opt.WriteConcern
+		}
+		if opt.ReadConcern != nil {
+			b.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			b.ReadPreference = opt.ReadPreference
+		}
+	}
+
+	return b
+}
+
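+// Example (sketch): assuming db is a *mongo.Database and the gridfs package
+// (go.mongodb.org/mongo-driver/mongo/gridfs) is imported, open a custom-named
+// bucket with 1 MiB chunks (the name is illustrative):
+//
+//	bucket, err := gridfs.NewBucket(db, options.GridFSBucket().
+//		SetName("images").
+//		SetChunkSizeBytes(1024*1024))
+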
+// UploadOptions represents options that can be used to configure a GridFS upload operation.
+type UploadOptions struct {
+	// The number of bytes in each chunk in the bucket. The default value is DefaultChunkSize (255 KiB).
+	ChunkSizeBytes *int32
+
+	// Additional application data that will be stored in the "metadata" field of the document in the files collection.
+	// The default value is nil, which means that the document in the files collection will not contain a "metadata"
+	// field.
+	Metadata interface{}
+
+	// The BSON registry to use for converting filters to BSON documents. The default value is bson.DefaultRegistry.
+	Registry *bsoncodec.Registry
+}
+
+// GridFSUpload creates a new UploadOptions instance.
+func GridFSUpload() *UploadOptions {
+	return &UploadOptions{Registry: bson.DefaultRegistry}
+}
+
+// SetChunkSizeBytes sets the value for the ChunkSizeBytes field.
+func (u *UploadOptions) SetChunkSizeBytes(i int32) *UploadOptions {
+	u.ChunkSizeBytes = &i
+	return u
+}
+
+// SetMetadata sets the value for the Metadata field.
+func (u *UploadOptions) SetMetadata(doc interface{}) *UploadOptions {
+	u.Metadata = doc
+	return u
+}
+
+// MergeUploadOptions combines the given UploadOptions instances into a single UploadOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeUploadOptions(opts ...*UploadOptions) *UploadOptions {
+	u := GridFSUpload()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ChunkSizeBytes != nil {
+			u.ChunkSizeBytes = opt.ChunkSizeBytes
+		}
+		if opt.Metadata != nil {
+			u.Metadata = opt.Metadata
+		}
+		if opt.Registry != nil {
+			u.Registry = opt.Registry
+		}
+	}
+
+	return u
+}
+
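+// Example (sketch): assuming bucket is a *gridfs.Bucket and src is an
+// io.Reader, upload a file with application metadata (filename and metadata
+// are illustrative):
+//
+//	opts := options.GridFSUpload().
+//		SetMetadata(bson.D{{Key: "contentType", Value: "image/png"}})
+//	fileID, err := bucket.UploadFromStream("avatar.png", src, opts)
+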
+// NameOptions represents options that can be used to configure a GridFS DownloadByName operation.
+type NameOptions struct {
+	// Specifies the revision of the file to retrieve. Revision numbers are defined as follows:
+	//
+	// * 0 = the original stored file
+	// * 1 = the first revision
+	// * 2 = the second revision
+	// * etc.
+	// * -2 = the second most recent revision
+	// * -1 = the most recent revision
+	//
+	// The default value is -1.
+	Revision *int32
+}
+
+// GridFSName creates a new NameOptions instance.
+func GridFSName() *NameOptions {
+	return &NameOptions{}
+}
+
+// SetRevision sets the value for the Revision field.
+func (n *NameOptions) SetRevision(r int32) *NameOptions {
+	n.Revision = &r
+	return n
+}
+
+// MergeNameOptions combines the given NameOptions instances into a single *NameOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeNameOptions(opts ...*NameOptions) *NameOptions {
+	n := GridFSName()
+	n.Revision = &DefaultRevision
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Revision != nil {
+			n.Revision = opt.Revision
+		}
+	}
+
+	return n
+}
+
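+// Example (sketch): assuming bucket is a *gridfs.Bucket and dst is an
+// io.Writer, download the original stored revision of a file by name:
+//
+//	_, err := bucket.DownloadToStreamByName("avatar.png", dst,
+//		options.GridFSName().SetRevision(0))
+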
+// GridFSFindOptions represents options that can be used to configure a GridFS Find operation.
+type GridFSFindOptions struct {
+	// If true, the server can write temporary data to disk while executing the find operation. The default value
+	// is false. This option is only valid for MongoDB versions >= 4.4. For previous server versions, the server will
+	// return an error if this option is used.
+	AllowDiskUse *bool
+
+	// The maximum number of documents to be included in each batch returned by the server.
+	BatchSize *int32
+
+	// The maximum number of documents to return. The default value is 0, which means that all documents matching the
+	// filter will be returned. A negative limit specifies that the resulting documents should be returned in a single
+	// batch.
+	Limit *int32
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+
+	// If true, the cursor created by the operation will not timeout after a period of inactivity. The default value
+	// is false.
+	NoCursorTimeout *bool
+
+	// The number of documents to skip before adding documents to the result. The default value is 0.
+	Skip *int32
+
+	// A document specifying the order in which documents should be returned.  The driver will return an error if the
+	// sort parameter is a multi-key map.
+	Sort interface{}
+}
+
+// GridFSFind creates a new GridFSFindOptions instance.
+func GridFSFind() *GridFSFindOptions {
+	return &GridFSFindOptions{}
+}
+
+// SetAllowDiskUse sets the value for the AllowDiskUse field.
+func (f *GridFSFindOptions) SetAllowDiskUse(b bool) *GridFSFindOptions {
+	f.AllowDiskUse = &b
+	return f
+}
+
+// SetBatchSize sets the value for the BatchSize field.
+func (f *GridFSFindOptions) SetBatchSize(i int32) *GridFSFindOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetLimit sets the value for the Limit field.
+func (f *GridFSFindOptions) SetLimit(i int32) *GridFSFindOptions {
+	f.Limit = &i
+	return f
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (f *GridFSFindOptions) SetMaxTime(d time.Duration) *GridFSFindOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetNoCursorTimeout sets the value for the NoCursorTimeout field.
+func (f *GridFSFindOptions) SetNoCursorTimeout(b bool) *GridFSFindOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetSkip sets the value for the Skip field.
+func (f *GridFSFindOptions) SetSkip(i int32) *GridFSFindOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSort sets the value for the Sort field.
+func (f *GridFSFindOptions) SetSort(sort interface{}) *GridFSFindOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeGridFSFindOptions combines the given GridFSFindOptions instances into a single GridFSFindOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeGridFSFindOptions(opts ...*GridFSFindOptions) *GridFSFindOptions {
+	fo := GridFSFind()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.AllowDiskUse != nil {
+			fo.AllowDiskUse = opt.AllowDiskUse
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Limit != nil {
+			fo.Limit = opt.Limit
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
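+
+// Example (sketch): assuming bucket is a *gridfs.Bucket, list the ten most
+// recently uploaded files:
+//
+//	cursor, err := bucket.Find(bson.D{},
+//		options.GridFSFind().SetSort(bson.D{{Key: "uploadDate", Value: -1}}).SetLimit(10))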
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..ab7e2b3f6b65de07f07066dc2767fdd9ccd036ac
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go
@@ -0,0 +1,494 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+)
+
+// CreateIndexesOptions represents options that can be used to configure IndexView.CreateOne and IndexView.CreateMany
+// operations.
+type CreateIndexesOptions struct {
+	// The number of data-bearing members of a replica set, including the primary, that must complete the index builds
+	// successfully before the primary marks the indexes as ready. This should either be a string or int32 value. The
+	// semantics of the values are as follows:
+	//
+	// 1. String: specifies a tag. All members with that tag must complete the build.
+	// 2. int: the number of members that must complete the build.
+	// 3. "majority": A special value to indicate that more than half the nodes must complete the build.
+	// 4. "votingMembers": A special value to indicate that all voting data-bearing nodes must complete.
+	//
+	// This option is only available on MongoDB versions >= 4.4. A client-side error will be returned if the option
+	// is specified for MongoDB versions <= 4.2. The default value is nil, meaning that the server-side default will be
+	// used. See dochub.mongodb.org/core/index-commit-quorum for more information.
+	CommitQuorum interface{}
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+}
+
+// CreateIndexes creates a new CreateIndexesOptions instance.
+func CreateIndexes() *CreateIndexesOptions {
+	return &CreateIndexesOptions{}
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (c *CreateIndexesOptions) SetMaxTime(d time.Duration) *CreateIndexesOptions {
+	c.MaxTime = &d
+	return c
+}
+
+// SetCommitQuorumInt sets the value for the CommitQuorum field as an int32.
+func (c *CreateIndexesOptions) SetCommitQuorumInt(quorum int32) *CreateIndexesOptions {
+	c.CommitQuorum = quorum
+	return c
+}
+
+// SetCommitQuorumString sets the value for the CommitQuorum field as a string.
+func (c *CreateIndexesOptions) SetCommitQuorumString(quorum string) *CreateIndexesOptions {
+	c.CommitQuorum = quorum
+	return c
+}
+
+// SetCommitQuorumMajority sets the value for the CommitQuorum field to the special "majority" value.
+func (c *CreateIndexesOptions) SetCommitQuorumMajority() *CreateIndexesOptions {
+	c.CommitQuorum = "majority"
+	return c
+}
+
+// SetCommitQuorumVotingMembers sets the value for the CommitQuorum field to the special "votingMembers" value.
+func (c *CreateIndexesOptions) SetCommitQuorumVotingMembers() *CreateIndexesOptions {
+	c.CommitQuorum = "votingMembers"
+	return c
+}
+
+// MergeCreateIndexesOptions combines the given CreateIndexesOptions into a single CreateIndexesOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeCreateIndexesOptions(opts ...*CreateIndexesOptions) *CreateIndexesOptions {
+	c := CreateIndexes()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.MaxTime != nil {
+			c.MaxTime = opt.MaxTime
+		}
+		if opt.CommitQuorum != nil {
+			c.CommitQuorum = opt.CommitQuorum
+		}
+	}
+
+	return c
+}
+
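+// Example (sketch): assuming coll and ctx are in scope, create an index and
+// wait for a majority of data-bearing members to finish the build (the key is
+// illustrative):
+//
+//	name, err := coll.Indexes().CreateOne(ctx,
+//		mongo.IndexModel{Keys: bson.D{{Key: "email", Value: 1}}},
+//		options.CreateIndexes().SetCommitQuorumMajority())
+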
+// DropIndexesOptions represents options that can be used to configure IndexView.DropOne and IndexView.DropAll
+// operations.
+type DropIndexesOptions struct {
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+}
+
+// DropIndexes creates a new DropIndexesOptions instance.
+func DropIndexes() *DropIndexesOptions {
+	return &DropIndexesOptions{}
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (d *DropIndexesOptions) SetMaxTime(duration time.Duration) *DropIndexesOptions {
+	d.MaxTime = &duration
+	return d
+}
+
+// MergeDropIndexesOptions combines the given DropIndexesOptions into a single DropIndexesOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeDropIndexesOptions(opts ...*DropIndexesOptions) *DropIndexesOptions {
+	c := DropIndexes()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.MaxTime != nil {
+			c.MaxTime = opt.MaxTime
+		}
+	}
+
+	return c
+}
+
+// ListIndexesOptions represents options that can be used to configure an IndexView.List operation.
+type ListIndexesOptions struct {
+	// The maximum number of documents to be included in each batch returned by the server.
+	BatchSize *int32
+
+	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there
+	// is no time limit for query execution.
+	//
+	// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used
+	// in its place to control the amount of time that a single operation can run before returning an error. MaxTime
+	// is ignored if Timeout is set on the client.
+	MaxTime *time.Duration
+}
+
+// ListIndexes creates a new ListIndexesOptions instance.
+func ListIndexes() *ListIndexesOptions {
+	return &ListIndexesOptions{}
+}
+
+// SetBatchSize sets the value for the BatchSize field.
+func (l *ListIndexesOptions) SetBatchSize(i int32) *ListIndexesOptions {
+	l.BatchSize = &i
+	return l
+}
+
+// SetMaxTime sets the value for the MaxTime field.
+//
+// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can
+// run before returning an error. MaxTime is ignored if Timeout is set on the client.
+func (l *ListIndexesOptions) SetMaxTime(d time.Duration) *ListIndexesOptions {
+	l.MaxTime = &d
+	return l
+}
+
+// MergeListIndexesOptions combines the given ListIndexesOptions instances into a single *ListIndexesOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeListIndexesOptions(opts ...*ListIndexesOptions) *ListIndexesOptions {
+	c := ListIndexes()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.BatchSize != nil {
+			c.BatchSize = opt.BatchSize
+		}
+		if opt.MaxTime != nil {
+			c.MaxTime = opt.MaxTime
+		}
+	}
+
+	return c
+}
+
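+// Example (sketch): assuming coll and ctx are in scope, list the collection's
+// indexes with a custom batch size:
+//
+//	cursor, err := coll.Indexes().List(ctx, options.ListIndexes().SetBatchSize(50))
+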
+// IndexOptions represents options that can be used to configure a new index created through the IndexView.CreateOne
+// or IndexView.CreateMany operations.
+type IndexOptions struct {
+	// If true, the index will be built in the background on the server and will not block other tasks. The default
+	// value is false.
+	//
+	// Deprecated: This option has been deprecated in MongoDB version 4.2.
+	Background *bool
+
+	// The length of time, in seconds, for documents to remain in the collection. The default value is nil, which means
+	// that documents will remain in the collection until they're explicitly deleted or the collection is dropped.
+	ExpireAfterSeconds *int32
+
+	// The name of the index. The default value is "[field1]_[direction1]_[field2]_[direction2]...". For example, an
+	// index with the specification {name: 1, age: -1} will be named "name_1_age_-1".
+	Name *string
+
+	// If true, the index will only reference documents that contain the fields specified in the index. The default is
+	// false.
+	Sparse *bool
+
+	// Specifies the storage engine to use for the index. The value must be a document in the form
+	// {<storage engine name>: <options>}. The default value is nil, which means that the default storage engine
+	// will be used. This option is only applicable for MongoDB versions >= 3.0 and is ignored for previous server
+	// versions.
+	StorageEngine interface{}
+
+	// If true, the collection will not accept insertion or update of documents where the index key value matches an
+	// existing value in the index. The default is false.
+	Unique *bool
+
+	// The index version number, either 0 or 1.
+	Version *int32
+
+	// The language that determines the list of stop words and the rules for the stemmer and tokenizer. This option
+	// is only applicable for text indexes and is ignored for other index types. The default value is "english".
+	DefaultLanguage *string
+
+	// The name of the field in the collection's documents that contains the override language for the document. This
+	// option is only applicable for text indexes and is ignored for other index types. The default value is the value
+	// of the DefaultLanguage option.
+	LanguageOverride *string
+
+	// The index version number for a text index. See https://www.mongodb.com/docs/manual/core/index-text/#text-versions for
+	// information about different version numbers.
+	TextVersion *int32
+
+	// A document that contains field and weight pairs. The weight is an integer ranging from 1 to 99,999, inclusive,
+	// indicating the significance of the field relative to the other indexed fields in terms of the score. This option
+	// is only applicable for text indexes and is ignored for other index types. The default value is nil, which means
+	// that every field will have a weight of 1.
+	Weights interface{}
+
+	// The index version number for a 2D sphere index. See https://www.mongodb.com/docs/manual/core/2dsphere/#dsphere-v2 for
+	// information about different version numbers.
+	SphereVersion *int32
+
+	// The precision of the stored geohash value of the location data. This option only applies to 2D indexes and is
+	// ignored for other index types. The value must be between 1 and 32, inclusive. The default value is 26.
+	Bits *int32
+
+	// The upper inclusive boundary for longitude and latitude values. This option is only applicable to 2D indexes and
+	// is ignored for other index types. The default value is 180.0.
+	Max *float64
+
+	// The lower inclusive boundary for longitude and latitude values. This option is only applicable to 2D indexes and
+	// is ignored for other index types. The default value is -180.0.
+	Min *float64
+
+	// The number of units within which to group location values. Location values that are within BucketSize units of
+	// each other will be grouped in the same bucket. This option is only applicable to geoHaystack indexes and is
+	// ignored for other index types. The value must be greater than 0.
+	BucketSize *int32
+
+	// A document that defines which collection documents the index should reference. This option is only valid for
+	// MongoDB versions >= 3.2 and is ignored for previous server versions.
+	PartialFilterExpression interface{}
+
+	// The collation to use for string comparisons for the index. This option is only valid for MongoDB versions >= 3.4.
+	// For previous server versions, the driver will return an error if this option is used.
+	Collation *Collation
+
+	// A document that defines the wildcard projection for the index.
+	WildcardProjection interface{}
+
+	// If true, the index will exist on the target collection but will not be used by the query planner when executing
+	// operations. This option is only valid for MongoDB versions >= 4.4. The default value is false.
+	Hidden *bool
+}
+
+// Index creates a new IndexOptions instance.
+func Index() *IndexOptions {
+	return &IndexOptions{}
+}
+
+// SetBackground sets the value for the Background field.
+//
+// Deprecated: This option has been deprecated in MongoDB version 4.2.
+func (i *IndexOptions) SetBackground(background bool) *IndexOptions {
+	i.Background = &background
+	return i
+}
+
+// SetExpireAfterSeconds sets the value for the ExpireAfterSeconds field.
+func (i *IndexOptions) SetExpireAfterSeconds(seconds int32) *IndexOptions {
+	i.ExpireAfterSeconds = &seconds
+	return i
+}
+
+// SetName sets the value for the Name field.
+func (i *IndexOptions) SetName(name string) *IndexOptions {
+	i.Name = &name
+	return i
+}
+
+// SetSparse sets the value of the Sparse field.
+func (i *IndexOptions) SetSparse(sparse bool) *IndexOptions {
+	i.Sparse = &sparse
+	return i
+}
+
+// SetStorageEngine sets the value for the StorageEngine field.
+func (i *IndexOptions) SetStorageEngine(engine interface{}) *IndexOptions {
+	i.StorageEngine = engine
+	return i
+}
+
+// SetUnique sets the value for the Unique field.
+func (i *IndexOptions) SetUnique(unique bool) *IndexOptions {
+	i.Unique = &unique
+	return i
+}
+
+// SetVersion sets the value for the Version field.
+func (i *IndexOptions) SetVersion(version int32) *IndexOptions {
+	i.Version = &version
+	return i
+}
+
+// SetDefaultLanguage sets the value for the DefaultLanguage field.
+func (i *IndexOptions) SetDefaultLanguage(language string) *IndexOptions {
+	i.DefaultLanguage = &language
+	return i
+}
+
+// SetLanguageOverride sets the value of the LanguageOverride field.
+func (i *IndexOptions) SetLanguageOverride(override string) *IndexOptions {
+	i.LanguageOverride = &override
+	return i
+}
+
+// SetTextVersion sets the value for the TextVersion field.
+func (i *IndexOptions) SetTextVersion(version int32) *IndexOptions {
+	i.TextVersion = &version
+	return i
+}
+
+// SetWeights sets the value for the Weights field.
+func (i *IndexOptions) SetWeights(weights interface{}) *IndexOptions {
+	i.Weights = weights
+	return i
+}
+
+// SetSphereVersion sets the value for the SphereVersion field.
+func (i *IndexOptions) SetSphereVersion(version int32) *IndexOptions {
+	i.SphereVersion = &version
+	return i
+}
+
+// SetBits sets the value for the Bits field.
+func (i *IndexOptions) SetBits(bits int32) *IndexOptions {
+	i.Bits = &bits
+	return i
+}
+
+// SetMax sets the value for the Max field.
+func (i *IndexOptions) SetMax(max float64) *IndexOptions {
+	i.Max = &max
+	return i
+}
+
+// SetMin sets the value for the Min field.
+func (i *IndexOptions) SetMin(min float64) *IndexOptions {
+	i.Min = &min
+	return i
+}
+
+// SetBucketSize sets the value for the BucketSize field.
+func (i *IndexOptions) SetBucketSize(bucketSize int32) *IndexOptions {
+	i.BucketSize = &bucketSize
+	return i
+}
+
+// SetPartialFilterExpression sets the value for the PartialFilterExpression field.
+func (i *IndexOptions) SetPartialFilterExpression(expression interface{}) *IndexOptions {
+	i.PartialFilterExpression = expression
+	return i
+}
+
+// SetCollation sets the value for the Collation field.
+func (i *IndexOptions) SetCollation(collation *Collation) *IndexOptions {
+	i.Collation = collation
+	return i
+}
+
+// SetWildcardProjection sets the value for the WildcardProjection field.
+func (i *IndexOptions) SetWildcardProjection(wildcardProjection interface{}) *IndexOptions {
+	i.WildcardProjection = wildcardProjection
+	return i
+}
+
+// SetHidden sets the value for the Hidden field.
+func (i *IndexOptions) SetHidden(hidden bool) *IndexOptions {
+	i.Hidden = &hidden
+	return i
+}
+
+// MergeIndexOptions combines the given IndexOptions into a single IndexOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeIndexOptions(opts ...*IndexOptions) *IndexOptions {
+	i := Index()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Background != nil {
+			i.Background = opt.Background
+		}
+		if opt.ExpireAfterSeconds != nil {
+			i.ExpireAfterSeconds = opt.ExpireAfterSeconds
+		}
+		if opt.Name != nil {
+			i.Name = opt.Name
+		}
+		if opt.Sparse != nil {
+			i.Sparse = opt.Sparse
+		}
+		if opt.StorageEngine != nil {
+			i.StorageEngine = opt.StorageEngine
+		}
+		if opt.Unique != nil {
+			i.Unique = opt.Unique
+		}
+		if opt.Version != nil {
+			i.Version = opt.Version
+		}
+		if opt.DefaultLanguage != nil {
+			i.DefaultLanguage = opt.DefaultLanguage
+		}
+		if opt.LanguageOverride != nil {
+			i.LanguageOverride = opt.LanguageOverride
+		}
+		if opt.TextVersion != nil {
+			i.TextVersion = opt.TextVersion
+		}
+		if opt.Weights != nil {
+			i.Weights = opt.Weights
+		}
+		if opt.SphereVersion != nil {
+			i.SphereVersion = opt.SphereVersion
+		}
+		if opt.Bits != nil {
+			i.Bits = opt.Bits
+		}
+		if opt.Max != nil {
+			i.Max = opt.Max
+		}
+		if opt.Min != nil {
+			i.Min = opt.Min
+		}
+		if opt.BucketSize != nil {
+			i.BucketSize = opt.BucketSize
+		}
+		if opt.PartialFilterExpression != nil {
+			i.PartialFilterExpression = opt.PartialFilterExpression
+		}
+		if opt.Collation != nil {
+			i.Collation = opt.Collation
+		}
+		if opt.WildcardProjection != nil {
+			i.WildcardProjection = opt.WildcardProjection
+		}
+		if opt.Hidden != nil {
+			i.Hidden = opt.Hidden
+		}
+	}
+
+	return i
+}
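+
+// Example (sketch): assuming coll and ctx are in scope, pass IndexOptions via
+// mongo.IndexModel to create a unique index and a TTL index (field names and
+// the expiry are illustrative):
+//
+//	_, err := coll.Indexes().CreateMany(ctx, []mongo.IndexModel{
+//		{Keys: bson.D{{Key: "email", Value: 1}}, Options: options.Index().SetUnique(true)},
+//		{Keys: bson.D{{Key: "createdAt", Value: 1}}, Options: options.Index().SetExpireAfterSeconds(3600)},
+//	})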
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..82137c60a38d62548fce36ae297c67defa8eded0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go
@@ -0,0 +1,125 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// InsertOneOptions represents options that can be used to configure an InsertOne operation.
+type InsertOneOptions struct {
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+}
+
+// InsertOne creates a new InsertOneOptions instance.
+func InsertOne() *InsertOneOptions {
+	return &InsertOneOptions{}
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (ioo *InsertOneOptions) SetBypassDocumentValidation(b bool) *InsertOneOptions {
+	ioo.BypassDocumentValidation = &b
+	return ioo
+}
+
+// SetComment sets the value for the Comment field.
+func (ioo *InsertOneOptions) SetComment(comment interface{}) *InsertOneOptions {
+	ioo.Comment = comment
+	return ioo
+}
+
+// MergeInsertOneOptions combines the given InsertOneOptions instances into a single InsertOneOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeInsertOneOptions(opts ...*InsertOneOptions) *InsertOneOptions {
+	ioOpts := InsertOne()
+	for _, ioo := range opts {
+		if ioo == nil {
+			continue
+		}
+		if ioo.BypassDocumentValidation != nil {
+			ioOpts.BypassDocumentValidation = ioo.BypassDocumentValidation
+		}
+		if ioo.Comment != nil {
+			ioOpts.Comment = ioo.Comment
+		}
+	}
+
+	return ioOpts
+}
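+
+// Example (sketch, not part of the upstream file): the setters above are
+// designed to be chained. Assuming a *mongo.Collection `coll` and a context
+// `ctx`:
+//
+//	opts := options.InsertOne().SetComment("audit-import")
+//	res, err := coll.InsertOne(ctx, bson.D{{Key: "name", Value: "harmony"}}, opts)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("inserted _id:", res.InsertedID)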
+
+// InsertManyOptions represents options that can be used to configure an InsertMany operation.
+type InsertManyOptions struct {
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// If true, no writes will be executed after one fails. The default value is true.
+	Ordered *bool
+}
+
+// InsertMany creates a new InsertManyOptions instance.
+func InsertMany() *InsertManyOptions {
+	return &InsertManyOptions{
+		Ordered: &DefaultOrdered,
+	}
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (imo *InsertManyOptions) SetBypassDocumentValidation(b bool) *InsertManyOptions {
+	imo.BypassDocumentValidation = &b
+	return imo
+}
+
+// SetComment sets the value for the Comment field.
+func (imo *InsertManyOptions) SetComment(comment interface{}) *InsertManyOptions {
+	imo.Comment = comment
+	return imo
+}
+
+// SetOrdered sets the value for the Ordered field.
+func (imo *InsertManyOptions) SetOrdered(b bool) *InsertManyOptions {
+	imo.Ordered = &b
+	return imo
+}
+
+// MergeInsertManyOptions combines the given InsertManyOptions instances into a single InsertManyOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeInsertManyOptions(opts ...*InsertManyOptions) *InsertManyOptions {
+	imOpts := InsertMany()
+	for _, imo := range opts {
+		if imo == nil {
+			continue
+		}
+		if imo.BypassDocumentValidation != nil {
+			imOpts.BypassDocumentValidation = imo.BypassDocumentValidation
+		}
+		if imo.Comment != nil {
+			imOpts.Comment = imo.Comment
+		}
+		if imo.Ordered != nil {
+			imOpts.Ordered = imo.Ordered
+		}
+	}
+
+	return imOpts
+}
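+
+// Example (sketch, not part of the upstream file): disabling Ordered lets the
+// server attempt the remaining writes after one fails. `coll` and `ctx` are
+// assumed.
+//
+//	docs := []interface{}{
+//		bson.D{{Key: "n", Value: 1}},
+//		bson.D{{Key: "n", Value: 2}},
+//	}
+//	res, err := coll.InsertMany(ctx, docs, options.InsertMany().SetOrdered(false))
+//	if err != nil {
+//		log.Println("some inserts may have failed:", err)
+//	}
+//	if res != nil {
+//		fmt.Println("inserted:", len(res.InsertedIDs))
+//	}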
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..69b8c997e9a38a62886d56a9b774b03a9dab0914
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go
@@ -0,0 +1,69 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ListCollectionsOptions represents options that can be used to configure a ListCollections operation.
+type ListCollectionsOptions struct {
+	// If true, each collection document will only contain a field for the collection name. The default value is false.
+	NameOnly *bool
+
+	// The maximum number of documents to be included in each batch returned by the server.
+	BatchSize *int32
+
+	// If true, and NameOnly is true, limits the documents returned to only contain collections the user is authorized to use. The default value
+	// is false. This option is only valid for MongoDB server versions >= 4.0. Server versions < 4.0 ignore this option.
+	AuthorizedCollections *bool
+}
+
+// ListCollections creates a new ListCollectionsOptions instance.
+func ListCollections() *ListCollectionsOptions {
+	return &ListCollectionsOptions{}
+}
+
+// SetNameOnly sets the value for the NameOnly field.
+func (lc *ListCollectionsOptions) SetNameOnly(b bool) *ListCollectionsOptions {
+	lc.NameOnly = &b
+	return lc
+}
+
+// SetBatchSize sets the value for the BatchSize field.
+func (lc *ListCollectionsOptions) SetBatchSize(size int32) *ListCollectionsOptions {
+	lc.BatchSize = &size
+	return lc
+}
+
+// SetAuthorizedCollections sets the value for the AuthorizedCollections field. This option is only valid for MongoDB server versions >= 4.0. Server
+// versions < 4.0 ignore this option.
+func (lc *ListCollectionsOptions) SetAuthorizedCollections(b bool) *ListCollectionsOptions {
+	lc.AuthorizedCollections = &b
+	return lc
+}
+
+// MergeListCollectionsOptions combines the given ListCollectionsOptions instances into a single *ListCollectionsOptions
+// in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeListCollectionsOptions(opts ...*ListCollectionsOptions) *ListCollectionsOptions {
+	lc := ListCollections()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.NameOnly != nil {
+			lc.NameOnly = opt.NameOnly
+		}
+		if opt.BatchSize != nil {
+			lc.BatchSize = opt.BatchSize
+		}
+		if opt.AuthorizedCollections != nil {
+			lc.AuthorizedCollections = opt.AuthorizedCollections
+		}
+	}
+
+	return lc
+}
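+
+// Example (sketch, not part of the upstream file): NameOnly combined with
+// AuthorizedCollections lets a less-privileged user enumerate only the
+// collections it may use. `db` is an assumed *mongo.Database.
+//
+//	names, err := db.ListCollectionNames(ctx, bson.D{},
+//		options.ListCollections().SetNameOnly(true).SetAuthorizedCollections(true))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(names)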
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..fbd3df60d808266f4ba193a54ceea0736423a322
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go
@@ -0,0 +1,58 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ListDatabasesOptions represents options that can be used to configure a ListDatabases operation.
+type ListDatabasesOptions struct {
+	// If true, only the Name field of the returned DatabaseSpecification objects will be populated. The default value
+	// is false.
+	NameOnly *bool
+
+	// If true, only the databases which the user is authorized to see will be returned. For more information about
+	// the behavior of this option, see https://www.mongodb.com/docs/manual/reference/privilege-actions/#find. The default
+	// value is true.
+	AuthorizedDatabases *bool
+}
+
+// ListDatabases creates a new ListDatabasesOptions instance.
+func ListDatabases() *ListDatabasesOptions {
+	return &ListDatabasesOptions{}
+}
+
+// SetNameOnly sets the value for the NameOnly field.
+func (ld *ListDatabasesOptions) SetNameOnly(b bool) *ListDatabasesOptions {
+	ld.NameOnly = &b
+	return ld
+}
+
+// SetAuthorizedDatabases sets the value for the AuthorizedDatabases field.
+func (ld *ListDatabasesOptions) SetAuthorizedDatabases(b bool) *ListDatabasesOptions {
+	ld.AuthorizedDatabases = &b
+	return ld
+}
+
+// MergeListDatabasesOptions combines the given ListDatabasesOptions instances into a single *ListDatabasesOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeListDatabasesOptions(opts ...*ListDatabasesOptions) *ListDatabasesOptions {
+	ld := ListDatabases()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.NameOnly != nil {
+			ld.NameOnly = opt.NameOnly
+		}
+		if opt.AuthorizedDatabases != nil {
+			ld.AuthorizedDatabases = opt.AuthorizedDatabases
+		}
+	}
+
+	return ld
+}
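+
+// Example (sketch, not part of the upstream file): listing database names
+// only, for an assumed connected *mongo.Client `client`.
+//
+//	names, err := client.ListDatabaseNames(ctx, bson.D{},
+//		options.ListDatabases().SetNameOnly(true))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(names)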
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/loggeroptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/loggeroptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..b8379358127cd542fe5062fdbf60e22e3d8a654f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/loggeroptions.go
@@ -0,0 +1,115 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"go.mongodb.org/mongo-driver/internal/logger"
+)
+
+// LogLevel is an enumeration representing the supported log severity levels.
+type LogLevel int
+
+const (
+	// LogLevelInfo enables logging of informational messages. These logs
+	// are high-level information about normal driver behavior.
+	LogLevelInfo LogLevel = LogLevel(logger.LevelInfo)
+
+	// LogLevelDebug enables logging of debug messages. These logs can be
+	// voluminous and are intended for detailed information that may be
+	// helpful when debugging an application.
+	LogLevelDebug LogLevel = LogLevel(logger.LevelDebug)
+)
+
+// LogComponent is an enumeration representing the "components" which can be
+// logged against. A LogLevel can be configured on a per-component basis.
+type LogComponent int
+
+const (
+	// LogComponentAll enables logging for all components.
+	LogComponentAll LogComponent = LogComponent(logger.ComponentAll)
+
+	// LogComponentCommand enables command monitor logging.
+	LogComponentCommand LogComponent = LogComponent(logger.ComponentCommand)
+
+	// LogComponentTopology enables topology logging.
+	LogComponentTopology LogComponent = LogComponent(logger.ComponentTopology)
+
+	// LogComponentServerSelection enables server selection logging.
+	LogComponentServerSelection LogComponent = LogComponent(logger.ComponentServerSelection)
+
+	// LogComponentConnection enables connection services logging.
+	LogComponentConnection LogComponent = LogComponent(logger.ComponentConnection)
+)
+
+// LogSink is an interface that can be implemented to provide a custom sink for
+// the driver's logs.
+type LogSink interface {
+	// Info logs a non-error message with the given key/value pairs. This
+	// method will only be called if the provided level has been defined
+	// for a component in the LoggerOptions.
+	//
+	// The level mappings for V = "Verbosity" are:
+	//
+	//  - V(0): off
+	//  - V(1): informational
+	//  - V(2): debugging
+	//
+	// This level mapping is taken from the go-logr/logr library
+	// specifications, specifically:
+	//
+	// "Level V(0) is the default, and logger.V(0).Info() has the same
+	// meaning as logger.Info()."
+	Info(level int, message string, keysAndValues ...interface{})
+
+	// Error logs an error message with the given key/value pairs
+	Error(err error, message string, keysAndValues ...interface{})
+}
+
+// LoggerOptions represents options used to configure logging in the Go Driver.
+type LoggerOptions struct {
+	// ComponentLevels is a map of LogComponent to LogLevel. The LogLevel
+	// for a given LogComponent will be used to determine if a log message
+	// should be logged.
+	ComponentLevels map[LogComponent]LogLevel
+
+	// Sink is the LogSink that will be used to log messages. If this is
+	// nil, the driver will use the standard logging library.
+	Sink LogSink
+
+	// MaxDocumentLength is the maximum length of a document to be logged.
+	// If the underlying document is larger than this value, it will be
+	// truncated and suffixed with an ellipsis "...".
+	MaxDocumentLength uint
+}
+
+// Logger creates a new LoggerOptions instance.
+func Logger() *LoggerOptions {
+	return &LoggerOptions{
+		ComponentLevels: map[LogComponent]LogLevel{},
+	}
+}
+
+// SetComponentLevel sets the LogLevel value for a LogComponent.
+func (opts *LoggerOptions) SetComponentLevel(component LogComponent, level LogLevel) *LoggerOptions {
+	opts.ComponentLevels[component] = level
+
+	return opts
+}
+
+// SetMaxDocumentLength sets the maximum length of a document to be logged.
+func (opts *LoggerOptions) SetMaxDocumentLength(maxDocumentLength uint) *LoggerOptions {
+	opts.MaxDocumentLength = maxDocumentLength
+
+	return opts
+}
+
+// SetSink sets the LogSink to use for logging.
+func (opts *LoggerOptions) SetSink(sink LogSink) *LoggerOptions {
+	opts.Sink = sink
+
+	return opts
+}
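+
+// Example (sketch, not part of the upstream file): LoggerOptions are applied
+// through ClientOptions.SetLoggerOptions. With no Sink set, messages go to the
+// standard library logger. The URI below is a placeholder.
+//
+//	loggerOpts := options.Logger().
+//		SetComponentLevel(options.LogComponentCommand, options.LogLevelDebug).
+//		SetMaxDocumentLength(512)
+//	clientOpts := options.Client().
+//		ApplyURI("mongodb://localhost:27017").
+//		SetLoggerOptions(loggerOpts)
+//	client, err := mongo.Connect(context.Background(), clientOpts)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Disconnect(context.Background())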
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..36088c2fcbf05b3728ddc3e1878c207b2378fbd9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go
@@ -0,0 +1,183 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Collation allows users to specify language-specific rules for string comparison, such as
+// rules for lettercase and accent marks.
+type Collation struct {
+	Locale          string `bson:",omitempty"` // The locale
+	CaseLevel       bool   `bson:",omitempty"` // The case level
+	CaseFirst       string `bson:",omitempty"` // The case ordering
+	Strength        int    `bson:",omitempty"` // The number of comparison levels to use
+	NumericOrdering bool   `bson:",omitempty"` // Whether to order numbers based on numerical order and not collation order
+	Alternate       string `bson:",omitempty"` // Whether spaces and punctuation are considered base characters
+	MaxVariable     string `bson:",omitempty"` // Which characters are affected by alternate: "shifted"
+	Normalization   bool   `bson:",omitempty"` // Causes text to be normalized into Unicode NFD
+	Backwards       bool   `bson:",omitempty"` // Causes secondary differences to be considered in reverse order, as it is done in the French language
+}
+
+// ToDocument converts the Collation to a bson.Raw.
+//
+// Deprecated: Marshaling a Collation to BSON will not be supported in Go Driver 2.0.
+func (co *Collation) ToDocument() bson.Raw {
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	if co.Locale != "" {
+		doc = bsoncore.AppendStringElement(doc, "locale", co.Locale)
+	}
+	if co.CaseLevel {
+		doc = bsoncore.AppendBooleanElement(doc, "caseLevel", true)
+	}
+	if co.CaseFirst != "" {
+		doc = bsoncore.AppendStringElement(doc, "caseFirst", co.CaseFirst)
+	}
+	if co.Strength != 0 {
+		doc = bsoncore.AppendInt32Element(doc, "strength", int32(co.Strength))
+	}
+	if co.NumericOrdering {
+		doc = bsoncore.AppendBooleanElement(doc, "numericOrdering", true)
+	}
+	if co.Alternate != "" {
+		doc = bsoncore.AppendStringElement(doc, "alternate", co.Alternate)
+	}
+	if co.MaxVariable != "" {
+		doc = bsoncore.AppendStringElement(doc, "maxVariable", co.MaxVariable)
+	}
+	if co.Normalization {
+		doc = bsoncore.AppendBooleanElement(doc, "normalization", true)
+	}
+	if co.Backwards {
+		doc = bsoncore.AppendBooleanElement(doc, "backwards", true)
+	}
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+	return doc
+}
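+
+// Example (sketch, not part of the upstream file): a primary-strength (1)
+// collation ignores case and diacritics, so "cafe" also matches "café".
+// `coll` and `ctx` are assumed.
+//
+//	cur, err := coll.Find(ctx,
+//		bson.D{{Key: "name", Value: "cafe"}},
+//		options.Find().SetCollation(&options.Collation{Locale: "fr", Strength: 1}))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer cur.Close(ctx)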
+
+// CursorType specifies whether a cursor should close when the last data is retrieved. See
+// NonTailable, Tailable, and TailableAwait.
+type CursorType int8
+
+const (
+	// NonTailable specifies that a cursor should close after retrieving the last data.
+	NonTailable CursorType = iota
+	// Tailable specifies that a cursor should not close when the last data is retrieved and can be resumed later.
+	Tailable
+	// TailableAwait specifies that a cursor should not close when the last data is retrieved and
+	// that it should block for a certain amount of time for new data before returning no data.
+	TailableAwait
+)
+
+// ReturnDocument specifies whether a findAndUpdate operation should return the document as it was
+// before the update or as it is after the update.
+type ReturnDocument int8
+
+const (
+	// Before specifies that findAndUpdate should return the document as it was before the update.
+	Before ReturnDocument = iota
+	// After specifies that findAndUpdate should return the document as it is after the update.
+	After
+)
+
+// FullDocument specifies how a change stream should return the modified document.
+type FullDocument string
+
+const (
+	// Default does not include a document copy.
+	Default FullDocument = "default"
+	// Off is the same as sending no value for fullDocumentBeforeChange.
+	Off FullDocument = "off"
+	// Required is the same as WhenAvailable but raises a server-side error if the post-image is not available.
+	Required FullDocument = "required"
+	// UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that
+	// was changed.
+	UpdateLookup FullDocument = "updateLookup"
+	// WhenAvailable includes a post-image of the modified document for replace and update change events
+	// if the post-image for this event is available.
+	WhenAvailable FullDocument = "whenAvailable"
+)
+
+// TODO(GODRIVER-2617): Once Registry is removed, ArrayFilters doesn't need to
+// be a separate type. Remove the type and update all ArrayFilters fields to
+// be type []interface{}.
+
+// ArrayFilters is used to hold filters for the array filters CRUD option. If a registry is nil, bson.DefaultRegistry
+// will be used when converting the filter interfaces to BSON.
+type ArrayFilters struct {
+	// Registry is the registry to use for converting filters. Defaults to bson.DefaultRegistry.
+	//
+	// Deprecated: Marshaling ArrayFilters to BSON will not be supported in Go Driver 2.0.
+	Registry *bsoncodec.Registry
+
+	Filters []interface{} // The filters to apply
+}
+
+// ToArray builds a []bson.Raw from the provided ArrayFilters.
+//
+// Deprecated: Marshaling ArrayFilters to BSON will not be supported in Go Driver 2.0.
+func (af *ArrayFilters) ToArray() ([]bson.Raw, error) {
+	registry := af.Registry
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+	filters := make([]bson.Raw, 0, len(af.Filters))
+	for _, f := range af.Filters {
+		filter, err := bson.MarshalWithRegistry(registry, f)
+		if err != nil {
+			return nil, err
+		}
+		filters = append(filters, filter)
+	}
+	return filters, nil
+}
+
+// ToArrayDocument builds a BSON array for the array filters CRUD option. If the registry for af is nil,
+// bson.DefaultRegistry will be used when converting the filter interfaces to BSON.
+//
+// Deprecated: Marshaling ArrayFilters to BSON will not be supported in Go Driver 2.0.
+func (af *ArrayFilters) ToArrayDocument() (bson.Raw, error) {
+	registry := af.Registry
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+
+	idx, arr := bsoncore.AppendArrayStart(nil)
+	for i, f := range af.Filters {
+		filter, err := bson.MarshalWithRegistry(registry, f)
+		if err != nil {
+			return nil, err
+		}
+
+		arr = bsoncore.AppendDocumentElement(arr, strconv.Itoa(i), filter)
+	}
+	arr, _ = bsoncore.AppendArrayEnd(arr, idx)
+	return arr, nil
+}
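+
+// Example (sketch, not part of the upstream file): ArrayFilters bind the
+// identifiers used in filtered positional operators such as $[g]. `coll` and
+// `ctx` are assumed.
+//
+//	update := bson.D{{Key: "$set", Value: bson.D{{Key: "grades.$[g].passed", Value: true}}}}
+//	opts := options.Update().SetArrayFilters(options.ArrayFilters{
+//		Filters: []interface{}{bson.D{{Key: "g.score", Value: bson.D{{Key: "$gte", Value: 60}}}}},
+//	})
+//	res, err := coll.UpdateMany(ctx, bson.D{}, update, opts)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("modified:", res.ModifiedCount)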
+
+// MarshalError is returned when attempting to transform a value into a document
+// results in an error.
+//
+// Deprecated: MarshalError is unused and will be removed in Go Driver 2.0.
+type MarshalError struct {
+	Value interface{}
+	Err   error
+}
+
+// Error implements the error interface.
+//
+// Deprecated: MarshalError is unused and will be removed in Go Driver 2.0.
+func (me MarshalError) Error() string {
+	return fmt.Sprintf("cannot transform type %s to a bson.Raw", reflect.TypeOf(me.Value))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..f7d39601947277f347f1b5cc0ac6c4fac49440ef
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go
@@ -0,0 +1,118 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ReplaceOptions represents options that can be used to configure a ReplaceOne operation.
+type ReplaceOptions struct {
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The index to use for the operation. This should either be the index name as a string or the index specification
+	// as a document. This option is only valid for MongoDB versions >= 4.2; server versions from 3.4 up to (but not
+	// including) 4.2 will return an error if it is specified, and for server versions < 3.4 the driver will return a
+	// client-side error. The driver will also return an error if this option is specified during an unacknowledged
+	// write operation or if the hint parameter is a multi-key map. The default value is nil, which means that no hint
+	// will be sent.
+	Hint interface{}
+
+	// If true, a new document will be inserted if the filter does not match any documents in the collection. The
+	// default value is false.
+	Upsert *bool
+
+	// Specifies parameters for the aggregate expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// Replace creates a new ReplaceOptions instance.
+func Replace() *ReplaceOptions {
+	return &ReplaceOptions{}
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (ro *ReplaceOptions) SetBypassDocumentValidation(b bool) *ReplaceOptions {
+	ro.BypassDocumentValidation = &b
+	return ro
+}
+
+// SetCollation sets the value for the Collation field.
+func (ro *ReplaceOptions) SetCollation(c *Collation) *ReplaceOptions {
+	ro.Collation = c
+	return ro
+}
+
+// SetComment sets the value for the Comment field.
+func (ro *ReplaceOptions) SetComment(comment interface{}) *ReplaceOptions {
+	ro.Comment = comment
+	return ro
+}
+
+// SetHint sets the value for the Hint field.
+func (ro *ReplaceOptions) SetHint(h interface{}) *ReplaceOptions {
+	ro.Hint = h
+	return ro
+}
+
+// SetUpsert sets the value for the Upsert field.
+func (ro *ReplaceOptions) SetUpsert(b bool) *ReplaceOptions {
+	ro.Upsert = &b
+	return ro
+}
+
+// SetLet sets the value for the Let field.
+func (ro *ReplaceOptions) SetLet(l interface{}) *ReplaceOptions {
+	ro.Let = l
+	return ro
+}
+
+// MergeReplaceOptions combines the given ReplaceOptions instances into a single ReplaceOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeReplaceOptions(opts ...*ReplaceOptions) *ReplaceOptions {
+	rOpts := Replace()
+	for _, ro := range opts {
+		if ro == nil {
+			continue
+		}
+		if ro.BypassDocumentValidation != nil {
+			rOpts.BypassDocumentValidation = ro.BypassDocumentValidation
+		}
+		if ro.Collation != nil {
+			rOpts.Collation = ro.Collation
+		}
+		if ro.Comment != nil {
+			rOpts.Comment = ro.Comment
+		}
+		if ro.Hint != nil {
+			rOpts.Hint = ro.Hint
+		}
+		if ro.Upsert != nil {
+			rOpts.Upsert = ro.Upsert
+		}
+		if ro.Let != nil {
+			rOpts.Let = ro.Let
+		}
+	}
+
+	return rOpts
+}
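+
+// Example (sketch, not part of the upstream file): with Upsert enabled,
+// ReplaceOne inserts the replacement document when no document matches the
+// filter. `coll` and `ctx` are assumed.
+//
+//	filter := bson.D{{Key: "_id", Value: "app-config"}}
+//	replacement := bson.D{{Key: "_id", Value: "app-config"}, {Key: "retries", Value: 3}}
+//	res, err := coll.ReplaceOne(ctx, filter, replacement, options.Replace().SetUpsert(true))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("matched:", res.MatchedCount, "upserted:", res.UpsertedID)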
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..22ba586042b7036c79ebd2563b25397d5319ba47
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// RewrapManyDataKeyOptions represents all possible options used to decrypt and encrypt all matching data keys with a
+// possibly new masterKey.
+type RewrapManyDataKeyOptions struct {
+	// Provider identifies the new KMS provider. If omitted, encrypting uses the current KMS provider.
+	Provider *string
+
+	// MasterKey identifies the new masterKey. If omitted, rewraps with the current masterKey.
+	MasterKey interface{}
+}
+
+// RewrapManyDataKey creates a new RewrapManyDataKeyOptions instance.
+func RewrapManyDataKey() *RewrapManyDataKeyOptions {
+	return new(RewrapManyDataKeyOptions)
+}
+
+// SetProvider sets the value for the Provider field.
+func (rmdko *RewrapManyDataKeyOptions) SetProvider(provider string) *RewrapManyDataKeyOptions {
+	rmdko.Provider = &provider
+	return rmdko
+}
+
+// SetMasterKey sets the value for the MasterKey field.
+func (rmdko *RewrapManyDataKeyOptions) SetMasterKey(masterKey interface{}) *RewrapManyDataKeyOptions {
+	rmdko.MasterKey = masterKey
+	return rmdko
+}
+
+// MergeRewrapManyDataKeyOptions combines the given RewrapManyDataKeyOptions instances into a single
+// RewrapManyDataKeyOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeRewrapManyDataKeyOptions(opts ...*RewrapManyDataKeyOptions) *RewrapManyDataKeyOptions {
+	rmdkOpts := RewrapManyDataKey()
+	for _, rmdko := range opts {
+		if rmdko == nil {
+			continue
+		}
+		if provider := rmdko.Provider; provider != nil {
+			rmdkOpts.Provider = provider
+		}
+		if masterKey := rmdko.MasterKey; masterKey != nil {
+			rmdkOpts.MasterKey = masterKey
+		}
+	}
+	return rmdkOpts
+}
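+
+// Example (sketch, not part of the upstream file): rewrapping is performed
+// through an assumed, already configured *mongo.ClientEncryption handle; an
+// empty filter matches every data key in the key vault.
+//
+//	_, err := clientEncryption.RewrapManyDataKey(ctx, bson.D{},
+//		options.RewrapManyDataKey().SetProvider("local"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}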
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..b0cdec32ce0c98e44f2da11894a3658d66895ae3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go
@@ -0,0 +1,47 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+)
+
+// RunCmdOptions represents options that can be used to configure a RunCommand operation.
+type RunCmdOptions struct {
+	// The read preference to use for the operation. The default value is nil, which means that the primary read
+	// preference will be used.
+	ReadPreference *readpref.ReadPref
+}
+
+// RunCmd creates a new RunCmdOptions instance.
+func RunCmd() *RunCmdOptions {
+	return &RunCmdOptions{}
+}
+
+// SetReadPreference sets the value for the ReadPreference field.
+func (rc *RunCmdOptions) SetReadPreference(rp *readpref.ReadPref) *RunCmdOptions {
+	rc.ReadPreference = rp
+	return rc
+}
+
+// MergeRunCmdOptions combines the given RunCmdOptions instances into one *RunCmdOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeRunCmdOptions(opts ...*RunCmdOptions) *RunCmdOptions {
+	rc := RunCmd()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadPreference != nil {
+			rc.ReadPreference = opt.ReadPreference
+		}
+	}
+
+	return rc
+}
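+
+// Example (sketch, not part of the upstream file): running a command against
+// a secondary when one is available. `db` is an assumed *mongo.Database.
+//
+//	var result bson.M
+//	err := db.RunCommand(ctx, bson.D{{Key: "ping", Value: 1}},
+//		options.RunCmd().SetReadPreference(readpref.SecondaryPreferred())).Decode(&result)
+//	if err != nil {
+//		log.Fatal(err)
+//	}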
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..8cb8a08b7800f4d52ee6939d07aae7d3b773002c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go
@@ -0,0 +1,48 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// SearchIndexesOptions represents options that can be used to configure a SearchIndexView.
+type SearchIndexesOptions struct {
+	// Name is the name of the search index. If nil, the server uses the default name "default".
+	Name *string
+	// Type is the type of the search index, e.g. "search" or "vectorSearch".
+	Type *string
+}
+
+// SearchIndexes creates a new SearchIndexesOptions instance.
+func SearchIndexes() *SearchIndexesOptions {
+	return &SearchIndexesOptions{}
+}
+
+// SetName sets the value for the Name field.
+func (sio *SearchIndexesOptions) SetName(name string) *SearchIndexesOptions {
+	sio.Name = &name
+	return sio
+}
+
+// SetType sets the value for the Type field.
+func (sio *SearchIndexesOptions) SetType(typ string) *SearchIndexesOptions {
+	sio.Type = &typ
+	return sio
+}
+
+// CreateSearchIndexesOptions represents options that can be used to configure a SearchIndexView.CreateOne or
+// SearchIndexView.CreateMany operation.
+type CreateSearchIndexesOptions struct {
+}
+
+// ListSearchIndexesOptions represents options that can be used to configure a SearchIndexView.List operation.
+type ListSearchIndexesOptions struct {
+	AggregateOpts *AggregateOptions
+}
+
+// DropSearchIndexOptions represents options that can be used to configure a SearchIndexView.DropOne operation.
+type DropSearchIndexOptions struct {
+}
+
+// UpdateSearchIndexOptions represents options that can be used to configure a SearchIndexView.UpdateOne operation.
+type UpdateSearchIndexOptions struct {
+}
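+
+// Example (sketch, not part of the upstream file): search indexes are managed
+// through Collection.SearchIndexes and require an Atlas Search deployment.
+// `coll` and `ctx` are assumed.
+//
+//	model := mongo.SearchIndexModel{
+//		Definition: bson.D{{Key: "mappings", Value: bson.D{{Key: "dynamic", Value: true}}}},
+//		Options:    options.SearchIndexes().SetName("default"),
+//	}
+//	name, err := coll.SearchIndexes().CreateOne(ctx, model)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("created search index:", name)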
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/serverapioptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/serverapioptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..4eef2e19935e9fcc5627ae6297d281d851f1ebd7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/serverapioptions.go
@@ -0,0 +1,59 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"fmt"
+)
+
+// ServerAPIOptions represents options used to configure the API version sent to the server
+// when running commands.
+//
+// Sending a specified server API version causes the server to behave in a manner compatible with that
+// API version. It also causes the driver to behave in a manner compatible with the driver’s behavior as
+// of the release when the driver first started to support the specified server API version.
+//
+// The user must specify a ServerAPIVersion if including ServerAPIOptions in their client. That version
+// must also be currently supported by the driver. This version of the driver supports API version "1".
+type ServerAPIOptions struct {
+	ServerAPIVersion  ServerAPIVersion
+	Strict            *bool
+	DeprecationErrors *bool
+}
+
+// ServerAPI creates a new ServerAPIOptions configured with the provided serverAPIVersion.
+func ServerAPI(serverAPIVersion ServerAPIVersion) *ServerAPIOptions {
+	return &ServerAPIOptions{ServerAPIVersion: serverAPIVersion}
+}
+
+// SetStrict specifies whether the server should return errors for features that are not part of the API version.
+func (s *ServerAPIOptions) SetStrict(strict bool) *ServerAPIOptions {
+	s.Strict = &strict
+	return s
+}
+
+// SetDeprecationErrors specifies whether the server should return errors for deprecated features.
+func (s *ServerAPIOptions) SetDeprecationErrors(deprecationErrors bool) *ServerAPIOptions {
+	s.DeprecationErrors = &deprecationErrors
+	return s
+}
+
+// ServerAPIVersion represents an API version that can be used in ServerAPIOptions.
+type ServerAPIVersion string
+
+const (
+	// ServerAPIVersion1 is the first API version.
+	ServerAPIVersion1 ServerAPIVersion = "1"
+)
+
+// Validate determines if the provided ServerAPIVersion is currently supported by the driver.
+func (sav ServerAPIVersion) Validate() error {
+	if sav == ServerAPIVersion1 {
+		return nil
+	}
+	return fmt.Errorf("api version %q not supported; this driver version only supports API version \"1\"", sav)
+}
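+
+// Example (sketch, not part of the upstream file): pinning a client to stable
+// API version 1 in strict mode. The URI below is a placeholder.
+//
+//	serverAPI := options.ServerAPI(options.ServerAPIVersion1).SetStrict(true)
+//	client, err := mongo.Connect(context.Background(),
+//		options.Client().ApplyURI("mongodb://localhost:27017").SetServerAPIOptions(serverAPI))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Disconnect(context.Background())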
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..e1eab098be45a70ebbcb6c4d506f6a8c8dacc04b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go
@@ -0,0 +1,134 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+)
+
+// DefaultCausalConsistency is the default value for the CausalConsistency option.
+var DefaultCausalConsistency = true
+
+// SessionOptions represents options that can be used to configure a Session.
+type SessionOptions struct {
+	// If true, causal consistency will be enabled for the session. This option cannot be set to true if Snapshot is
+	// set to true. The default value is true unless Snapshot is set to true. See
+	// https://www.mongodb.com/docs/manual/core/read-isolation-consistency-recency/#sessions for more information.
+	CausalConsistency *bool
+
+	// The default read concern for transactions started in the session. The default value is nil, which means that
+	// the read concern of the client used to start the session will be used.
+	DefaultReadConcern *readconcern.ReadConcern
+
+	// The default read preference for transactions started in the session. The default value is nil, which means that
+	// the read preference of the client used to start the session will be used.
+	DefaultReadPreference *readpref.ReadPref
+
+	// The default write concern for transactions started in the session. The default value is nil, which means that
+	// the write concern of the client used to start the session will be used.
+	DefaultWriteConcern *writeconcern.WriteConcern
+
+	// The default maximum amount of time that a CommitTransaction operation executed in the session can run on the
+	// server. The default value is nil, which means that there is no time limit for execution.
+	//
+	// NOTE(benjirewis): DefaultMaxCommitTime will be deprecated in a future release. The more general Timeout option
+	// may be used in its place to control the amount of time that a single operation can run before returning an
+	// error. DefaultMaxCommitTime is ignored if Timeout is set on the client.
+	DefaultMaxCommitTime *time.Duration
+
+	// If true, all read operations performed with this session will be read from the same snapshot. This option cannot
+	// be set to true if CausalConsistency is set to true. Transactions and write operations are not allowed on
+	// snapshot sessions and will error. The default value is false.
+	Snapshot *bool
+}
+
+// Session creates a new SessionOptions instance.
+func Session() *SessionOptions {
+	return &SessionOptions{}
+}
+
+// SetCausalConsistency sets the value for the CausalConsistency field.
+func (s *SessionOptions) SetCausalConsistency(b bool) *SessionOptions {
+	s.CausalConsistency = &b
+	return s
+}
+
+// SetDefaultReadConcern sets the value for the DefaultReadConcern field.
+func (s *SessionOptions) SetDefaultReadConcern(rc *readconcern.ReadConcern) *SessionOptions {
+	s.DefaultReadConcern = rc
+	return s
+}
+
+// SetDefaultReadPreference sets the value for the DefaultReadPreference field.
+func (s *SessionOptions) SetDefaultReadPreference(rp *readpref.ReadPref) *SessionOptions {
+	s.DefaultReadPreference = rp
+	return s
+}
+
+// SetDefaultWriteConcern sets the value for the DefaultWriteConcern field.
+func (s *SessionOptions) SetDefaultWriteConcern(wc *writeconcern.WriteConcern) *SessionOptions {
+	s.DefaultWriteConcern = wc
+	return s
+}
+
+// SetDefaultMaxCommitTime sets the value for the DefaultMaxCommitTime field.
+//
+// NOTE(benjirewis): DefaultMaxCommitTime will be deprecated in a future release. The more
+// general Timeout option may be used in its place to control the amount of time that a
+// single operation can run before returning an error. DefaultMaxCommitTime is ignored if
+// Timeout is set on the client.
+func (s *SessionOptions) SetDefaultMaxCommitTime(mct *time.Duration) *SessionOptions {
+	s.DefaultMaxCommitTime = mct
+	return s
+}
+
+// SetSnapshot sets the value for the Snapshot field.
+func (s *SessionOptions) SetSnapshot(b bool) *SessionOptions {
+	s.Snapshot = &b
+	return s
+}
+
+// MergeSessionOptions combines the given SessionOptions instances into a single SessionOptions in a last-one-wins
+// fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeSessionOptions(opts ...*SessionOptions) *SessionOptions {
+	s := Session()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.CausalConsistency != nil {
+			s.CausalConsistency = opt.CausalConsistency
+		}
+		if opt.DefaultReadConcern != nil {
+			s.DefaultReadConcern = opt.DefaultReadConcern
+		}
+		if opt.DefaultReadPreference != nil {
+			s.DefaultReadPreference = opt.DefaultReadPreference
+		}
+		if opt.DefaultWriteConcern != nil {
+			s.DefaultWriteConcern = opt.DefaultWriteConcern
+		}
+		if opt.DefaultMaxCommitTime != nil {
+			s.DefaultMaxCommitTime = opt.DefaultMaxCommitTime
+		}
+		if opt.Snapshot != nil {
+			s.Snapshot = opt.Snapshot
+		}
+	}
+	if s.CausalConsistency == nil && (s.Snapshot == nil || !*s.Snapshot) {
+		s.CausalConsistency = &DefaultCausalConsistency
+	}
+
+	return s
+}
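+
+// Example (sketch, not part of the upstream file): starting a snapshot
+// session; note that Snapshot and CausalConsistency are mutually exclusive.
+// `client` and `ctx` are assumed.
+//
+//	sess, err := client.StartSession(options.Session().SetSnapshot(true))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer sess.EndSession(ctx)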
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..9270cd20d4d3177e0459ade91ec5cf4da3032d92
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go
@@ -0,0 +1,103 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+)
+
+// TransactionOptions represents options that can be used to configure a transaction.
+type TransactionOptions struct {
+	// The read concern for operations in the transaction. The default value is nil, which means that the default
+	// read concern of the session used to start the transaction will be used.
+	ReadConcern *readconcern.ReadConcern
+
+	// The read preference for operations in the transaction. The default value is nil, which means that the default
+	// read preference of the session used to start the transaction will be used.
+	ReadPreference *readpref.ReadPref
+
+	// The write concern for operations in the transaction. The default value is nil, which means that the default
+	// write concern of the session used to start the transaction will be used.
+	WriteConcern *writeconcern.WriteConcern
+
+	// The maximum amount of time that a CommitTransaction operation executed in the transaction can run on the
+	// server. The default value is nil, which means that the default maximum commit time of the session used to
+	// start the transaction will be used.
+	//
+	// NOTE(benjirewis): MaxCommitTime will be deprecated in a future release. The more general Timeout option may
+	// be used in its place to control the amount of time that a single operation can run before returning an error.
+	// MaxCommitTime is ignored if Timeout is set on the client.
+	MaxCommitTime *time.Duration
+}
+
+// Transaction creates a new TransactionOptions instance.
+func Transaction() *TransactionOptions {
+	return &TransactionOptions{}
+}
+
+// SetReadConcern sets the value for the ReadConcern field.
+func (t *TransactionOptions) SetReadConcern(rc *readconcern.ReadConcern) *TransactionOptions {
+	t.ReadConcern = rc
+	return t
+}
+
+// SetReadPreference sets the value for the ReadPreference field.
+func (t *TransactionOptions) SetReadPreference(rp *readpref.ReadPref) *TransactionOptions {
+	t.ReadPreference = rp
+	return t
+}
+
+// SetWriteConcern sets the value for the WriteConcern field.
+func (t *TransactionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *TransactionOptions {
+	t.WriteConcern = wc
+	return t
+}
+
+// SetMaxCommitTime sets the value for the MaxCommitTime field.
+//
+// NOTE(benjirewis): MaxCommitTime will be deprecated in a future release. The more general Timeout
+// option may be used in its place to control the amount of time that a single operation can run before
+// returning an error. MaxCommitTime is ignored if Timeout is set on the client.
+func (t *TransactionOptions) SetMaxCommitTime(mct *time.Duration) *TransactionOptions {
+	t.MaxCommitTime = mct
+	return t
+}
+
+// MergeTransactionOptions combines the given TransactionOptions instances into a single TransactionOptions in a
+// last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeTransactionOptions(opts ...*TransactionOptions) *TransactionOptions {
+	t := Transaction()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadConcern != nil {
+			t.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			t.ReadPreference = opt.ReadPreference
+		}
+		if opt.WriteConcern != nil {
+			t.WriteConcern = opt.WriteConcern
+		}
+		if opt.MaxCommitTime != nil {
+			t.MaxCommitTime = opt.MaxCommitTime
+		}
+	}
+
+	return t
+}
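+
+// Example (sketch, not part of the upstream file): TransactionOptions are
+// normally passed to Session.WithTransaction. `sess`, `coll`, and `ctx` are
+// assumed.
+//
+//	txnOpts := options.Transaction().
+//		SetReadConcern(readconcern.Majority()).
+//		SetWriteConcern(writeconcern.Majority())
+//	_, err := sess.WithTransaction(ctx, func(sc mongo.SessionContext) (interface{}, error) {
+//		return coll.InsertOne(sc, bson.D{{Key: "state", Value: "done"}})
+//	}, txnOpts)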
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..5206f9f01b7c35b04ec3cf8a93f3d536b70647f9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go
@@ -0,0 +1,131 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// UpdateOptions represents options that can be used to configure UpdateOne and UpdateMany operations.
+type UpdateOptions struct {
+	// A set of filters specifying to which array elements an update should apply. This option is only valid for MongoDB
+	// versions >= 3.6. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the update will apply to all array elements.
+	ArrayFilters *ArrayFilters
+
+	// If true, writes executed as part of the operation will opt out of document-level validation on the server. This
+	// option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is
+	// false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document
+	// validation.
+	BypassDocumentValidation *bool
+
+	// Specifies a collation to use for string comparisons during the operation. This option is only valid for MongoDB
+	// versions >= 3.4. For previous server versions, the driver will return an error if this option is used. The
+	// default value is nil, which means the default collation of the collection will be used.
+	Collation *Collation
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation.  The default value is nil, which means that no comment will be included in the logs.
+	Comment interface{}
+
+	// The index to use for the operation. This should either be the index name as a string or the index specification
+	// as a document. This option is only valid for MongoDB versions >= 4.2; server versions from 3.4 up to (but not
+	// including) 4.2 will return an error if it is specified, and for server versions < 3.4 the driver will return a
+	// client-side error. The driver will also return an error if this option is specified during an unacknowledged
+	// write operation or if the hint parameter is a multi-key map. The default value is nil, which means that no hint
+	// will be sent.
+	Hint interface{}
+
+	// If true, a new document will be inserted if the filter does not match any documents in the collection. The
+	// default value is false.
+	Upsert *bool
+
+	// Specifies parameters for the update expression. This option is only valid for MongoDB versions >= 5.0. Older
+	// servers will report an error for using this option. This must be a document mapping parameter names to values.
+	// Values must be constant or closed expressions that do not reference document fields. Parameters can then be
+	// accessed as variables in an aggregate expression context (e.g. "$$var").
+	Let interface{}
+}
+
+// Update creates a new UpdateOptions instance.
+func Update() *UpdateOptions {
+	return &UpdateOptions{}
+}
+
+// SetArrayFilters sets the value for the ArrayFilters field.
+func (uo *UpdateOptions) SetArrayFilters(af ArrayFilters) *UpdateOptions {
+	uo.ArrayFilters = &af
+	return uo
+}
+
+// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
+func (uo *UpdateOptions) SetBypassDocumentValidation(b bool) *UpdateOptions {
+	uo.BypassDocumentValidation = &b
+	return uo
+}
+
+// SetCollation sets the value for the Collation field.
+func (uo *UpdateOptions) SetCollation(c *Collation) *UpdateOptions {
+	uo.Collation = c
+	return uo
+}
+
+// SetComment sets the value for the Comment field.
+func (uo *UpdateOptions) SetComment(comment interface{}) *UpdateOptions {
+	uo.Comment = comment
+	return uo
+}
+
+// SetHint sets the value for the Hint field.
+func (uo *UpdateOptions) SetHint(h interface{}) *UpdateOptions {
+	uo.Hint = h
+	return uo
+}
+
+// SetUpsert sets the value for the Upsert field.
+func (uo *UpdateOptions) SetUpsert(b bool) *UpdateOptions {
+	uo.Upsert = &b
+	return uo
+}
+
+// SetLet sets the value for the Let field.
+func (uo *UpdateOptions) SetLet(l interface{}) *UpdateOptions {
+	uo.Let = l
+	return uo
+}
+
+// MergeUpdateOptions combines the given UpdateOptions instances into a single UpdateOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions {
+	uOpts := Update()
+	for _, uo := range opts {
+		if uo == nil {
+			continue
+		}
+		if uo.ArrayFilters != nil {
+			uOpts.ArrayFilters = uo.ArrayFilters
+		}
+		if uo.BypassDocumentValidation != nil {
+			uOpts.BypassDocumentValidation = uo.BypassDocumentValidation
+		}
+		if uo.Collation != nil {
+			uOpts.Collation = uo.Collation
+		}
+		if uo.Comment != nil {
+			uOpts.Comment = uo.Comment
+		}
+		if uo.Hint != nil {
+			uOpts.Hint = uo.Hint
+		}
+		if uo.Upsert != nil {
+			uOpts.Upsert = uo.Upsert
+		}
+		if uo.Let != nil {
+			uOpts.Let = uo.Let
+		}
+	}
+
+	return uOpts
+}
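+
+// Example (sketch, not part of the upstream file): an upserting UpdateOne
+// with a trace comment. `coll` and `ctx` are assumed.
+//
+//	filter := bson.D{{Key: "sku", Value: "abc-123"}}
+//	update := bson.D{{Key: "$inc", Value: bson.D{{Key: "qty", Value: -1}}}}
+//	res, err := coll.UpdateOne(ctx, filter, update,
+//		options.Update().SetUpsert(true).SetComment("checkout"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("matched:", res.MatchedCount)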
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go
new file mode 100644
index 0000000000000000000000000000000000000000..51408e142d6ce65622fcc178d0bcf9b805d3ef85
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go
@@ -0,0 +1,129 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package readconcern defines read concerns for MongoDB operations.
+//
+// For more information about MongoDB read concerns, see
+// https://www.mongodb.com/docs/manual/reference/read-concern/
+package readconcern // import "go.mongodb.org/mongo-driver/mongo/readconcern"
+
+import (
+	"errors"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// A ReadConcern defines a MongoDB read concern, which allows you to control the consistency and
+// isolation properties of the data read from replica sets and replica set shards.
+//
+// For more information about MongoDB read concerns, see
+// https://www.mongodb.com/docs/manual/reference/read-concern/
+type ReadConcern struct {
+	Level string
+}
+
+// Option is an option to provide when creating a ReadConcern.
+//
+// Deprecated: Use the ReadConcern literal declaration instead. For example:
+//
+//	&readconcern.ReadConcern{Level: "local"}
+type Option func(concern *ReadConcern)
+
+// Level creates an option that sets the level of a ReadConcern.
+//
+// Deprecated: Use the ReadConcern literal declaration instead. For example:
+//
+//	&readconcern.ReadConcern{Level: "local"}
+func Level(level string) Option {
+	return func(concern *ReadConcern) {
+		concern.Level = level
+	}
+}
+
+// Local returns a ReadConcern that requests data from the instance with no guarantee that the data
+// has been written to a majority of the replica set members (i.e. may be rolled back).
+//
+// For more information about read concern "local", see
+// https://www.mongodb.com/docs/manual/reference/read-concern-local/
+func Local() *ReadConcern {
+	return New(Level("local"))
+}
+
+// Majority returns a ReadConcern that requests data that has been acknowledged by a majority of the
+// replica set members (i.e. the documents read are durable and guaranteed not to roll back).
+//
+// For more information about read concern "majority", see
+// https://www.mongodb.com/docs/manual/reference/read-concern-majority/
+func Majority() *ReadConcern {
+	return New(Level("majority"))
+}
+
+// Linearizable returns a ReadConcern that requests data that reflects all successful
+// majority-acknowledged writes that completed prior to the start of the read operation.
+//
+// For more information about read concern "linearizable", see
+// https://www.mongodb.com/docs/manual/reference/read-concern-linearizable/
+func Linearizable() *ReadConcern {
+	return New(Level("linearizable"))
+}
+
+// Available returns a ReadConcern that requests data from an instance with no guarantee that the
+// data has been written to a majority of the replica set members (i.e. may be rolled back).
+//
+// For more information about read concern "available", see
+// https://www.mongodb.com/docs/manual/reference/read-concern-available/
+func Available() *ReadConcern {
+	return New(Level("available"))
+}
+
+// Snapshot returns a ReadConcern that requests majority-committed data as it appears across shards
+// from a specific single point in time in the recent past.
+//
+// For more information about read concern "snapshot", see
+// https://www.mongodb.com/docs/manual/reference/read-concern-snapshot/
+func Snapshot() *ReadConcern {
+	return New(Level("snapshot"))
+}
+
+// New constructs a new read concern from the given string.
+//
+// Deprecated: Use the ReadConcern literal declaration instead. For example:
+//
+//	&readconcern.ReadConcern{Level: "local"}
+func New(options ...Option) *ReadConcern {
+	concern := &ReadConcern{}
+
+	for _, option := range options {
+		option(concern)
+	}
+
+	return concern
+}
+
+// MarshalBSONValue implements the bson.ValueMarshaler interface.
+//
+// Deprecated: Marshaling a ReadConcern to BSON will not be supported in Go Driver 2.0.
+func (rc *ReadConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if rc == nil {
+		return 0, nil, errors.New("cannot marshal nil ReadConcern")
+	}
+
+	var elems []byte
+
+	if len(rc.Level) > 0 {
+		elems = bsoncore.AppendStringElement(elems, "level", rc.Level)
+	}
+
+	return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
+}
+
+// GetLevel returns the read concern level.
+//
+// Deprecated: Use the ReadConcern.Level field instead.
+func (rc *ReadConcern) GetLevel() string {
+	return rc.Level
+}
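+
+// Example (sketch, not part of the upstream file): read concerns are usually
+// set on client, database, or collection options rather than marshaled
+// directly. `client` is an assumed connected *mongo.Client and the database
+// name is a placeholder.
+//
+//	db := client.Database("harmony",
+//		options.Database().SetReadConcern(readconcern.Majority()))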
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/mode.go b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/mode.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce036504cbb2112393c36d2a7f88569b13f8a723
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/mode.go
@@ -0,0 +1,88 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Mode indicates the user's preference on reads.
+type Mode uint8
+
+// Mode constants
+const (
+	_ Mode = iota
+	// PrimaryMode indicates that only a primary is
+	// considered for reading. This is the default
+	// mode.
+	PrimaryMode
+	// PrimaryPreferredMode indicates that if a primary
+	// is available, use it; otherwise, eligible
+	// secondaries will be considered.
+	PrimaryPreferredMode
+	// SecondaryMode indicates that only secondaries
+	// should be considered.
+	SecondaryMode
+	// SecondaryPreferredMode indicates that only secondaries
+	// should be considered when one is available. If none
+	// are available, then a primary will be considered.
+	SecondaryPreferredMode
+	// NearestMode indicates that all primaries and secondaries
+	// will be considered.
+	NearestMode
+)
+
+// ModeFromString returns the Mode corresponding to the
+// given mode string.
+func ModeFromString(mode string) (Mode, error) {
+	switch strings.ToLower(mode) {
+	case "primary":
+		return PrimaryMode, nil
+	case "primarypreferred":
+		return PrimaryPreferredMode, nil
+	case "secondary":
+		return SecondaryMode, nil
+	case "secondarypreferred":
+		return SecondaryPreferredMode, nil
+	case "nearest":
+		return NearestMode, nil
+	}
+	return Mode(0), fmt.Errorf("unknown read preference %v", mode)
+}
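+
+// Example (illustrative sketch): parsing a mode string, e.g. one read from
+// configuration, into a usable read preference.
+//
+//	mode, err := readpref.ModeFromString("secondaryPreferred")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	rp, err := readpref.New(mode)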
+
+// String returns the string representation of mode.
+func (mode Mode) String() string {
+	switch mode {
+	case PrimaryMode:
+		return "primary"
+	case PrimaryPreferredMode:
+		return "primaryPreferred"
+	case SecondaryMode:
+		return "secondary"
+	case SecondaryPreferredMode:
+		return "secondaryPreferred"
+	case NearestMode:
+		return "nearest"
+	default:
+		return "unknown"
+	}
+}
+
+// IsValid checks whether the mode is valid.
+func (mode Mode) IsValid() bool {
+	switch mode {
+	case PrimaryMode,
+		PrimaryPreferredMode,
+		SecondaryMode,
+		SecondaryPreferredMode,
+		NearestMode:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..c59b0705f1f63d1d7300a253011e51ddbb04e2a1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go
@@ -0,0 +1,83 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/tag"
+)
+
+// ErrInvalidTagSet indicates that an invalid set of tags was specified.
+var ErrInvalidTagSet = errors.New("an even number of tags must be specified")
+
+// Option configures a read preference.
+type Option func(*ReadPref) error
+
+// WithMaxStaleness sets the maximum replication staleness
+// a server may have and still be considered for selection.
+func WithMaxStaleness(ms time.Duration) Option {
+	return func(rp *ReadPref) error {
+		rp.maxStaleness = ms
+		rp.maxStalenessSet = true
+		return nil
+	}
+}
+
+// WithTags specifies a single tag set used to match replica set members. If no members match the
+// tag set, read operations will return an error. To avoid errors if no members match the tag set, use
+// [WithTagSets] and include an empty tag set as the last tag set in the list.
+//
+// The last call to [WithTags] or [WithTagSets] overrides all previous calls to either method.
+//
+// For more information about read preference tags, see
+// https://www.mongodb.com/docs/manual/core/read-preference-tags/
+func WithTags(tags ...string) Option {
+	return func(rp *ReadPref) error {
+		length := len(tags)
+		if length < 2 || length%2 != 0 {
+			return ErrInvalidTagSet
+		}
+
+		tagset := make(tag.Set, 0, length/2)
+
+		for i := 1; i < length; i += 2 {
+			tagset = append(tagset, tag.Tag{Name: tags[i-1], Value: tags[i]})
+		}
+
+		return WithTagSets(tagset)(rp)
+	}
+}
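+
+// Example (illustrative sketch): a secondary read preference restricted to a
+// single tag set. Note that an odd number of strings makes the option fail.
+//
+//	rp := readpref.Secondary(readpref.WithTags("region", "east"))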
+
+// WithTagSets specifies a list of tag sets used to match replica set members. If the list contains
+// multiple tag sets, members are matched against each tag set in succession until a match is found.
+// Once a match is found, the remaining tag sets are ignored. If no members match any of the tag
+// sets, the read operation returns with an error. To avoid an error if no members match any of the
+// tag sets, include an empty tag set as the last tag set in the list.
+//
+// The last call to [WithTags] or [WithTagSets] overrides all previous calls to either method.
+//
+// For more information about read preference tags, see
+// https://www.mongodb.com/docs/manual/core/read-preference-tags/
+func WithTagSets(tagSets ...tag.Set) Option {
+	return func(rp *ReadPref) error {
+		rp.tagSets = tagSets
+		return nil
+	}
+}
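+
+// Example (illustrative sketch): tag sets with an empty fallback set, so the
+// read does not fail when no member matches the first set.
+//
+//	sets := []tag.Set{
+//		{tag.Tag{Name: "region", Value: "east"}},
+//		{}, // empty fallback set matches any member
+//	}
+//	rp := readpref.Nearest(readpref.WithTagSets(sets...))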
+
+// WithHedgeEnabled specifies whether or not hedged reads should be enabled in the server. This feature requires MongoDB
+// server version 4.4 or higher. For more information about hedged reads, see
+// https://www.mongodb.com/docs/manual/core/sharded-cluster-query-router/#mongos-hedged-reads. If not specified, the default
+// is to not send a value to the server, which will result in the server defaults being used.
+func WithHedgeEnabled(hedgeEnabled bool) Option {
+	return func(rp *ReadPref) error {
+		rp.hedgeEnabled = &hedgeEnabled
+		return nil
+	}
+}
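+
+// Example (illustrative sketch): enabling hedged reads on a "nearest" read
+// preference for use against a MongoDB 4.4+ sharded cluster.
+//
+//	rp, err := readpref.New(readpref.NearestMode, readpref.WithHedgeEnabled(true))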
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go
new file mode 100644
index 0000000000000000000000000000000000000000..e2a1d7f340f3836facc70c7b01421ec6f428d2b6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go
@@ -0,0 +1,133 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package readpref defines read preferences for MongoDB queries.
+package readpref // import "go.mongodb.org/mongo-driver/mongo/readpref"
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/tag"
+)
+
+var (
+	errInvalidReadPreference = errors.New("can not specify tags, max staleness, or hedge with mode primary")
+)
+
+// Primary constructs a read preference with a PrimaryMode.
+func Primary() *ReadPref {
+	return &ReadPref{mode: PrimaryMode}
+}
+
+// PrimaryPreferred constructs a read preference with a PrimaryPreferredMode.
+func PrimaryPreferred(opts ...Option) *ReadPref {
+	// New returns an error only for PrimaryMode or a failing option (e.g. an invalid tag set)
+	rp, _ := New(PrimaryPreferredMode, opts...)
+	return rp
+}
+
+// SecondaryPreferred constructs a read preference with a SecondaryPreferredMode.
+func SecondaryPreferred(opts ...Option) *ReadPref {
+	// New returns an error only for PrimaryMode or a failing option (e.g. an invalid tag set)
+	rp, _ := New(SecondaryPreferredMode, opts...)
+	return rp
+}
+
+// Secondary constructs a read preference with a SecondaryMode.
+func Secondary(opts ...Option) *ReadPref {
+	// New returns an error only for PrimaryMode or a failing option (e.g. an invalid tag set)
+	rp, _ := New(SecondaryMode, opts...)
+	return rp
+}
+
+// Nearest constructs a read preference with a NearestMode.
+func Nearest(opts ...Option) *ReadPref {
+	// New returns an error only for PrimaryMode or a failing option (e.g. an invalid tag set)
+	rp, _ := New(NearestMode, opts...)
+	return rp
+}
+
+// New creates a new ReadPref.
+func New(mode Mode, opts ...Option) (*ReadPref, error) {
+	rp := &ReadPref{
+		mode: mode,
+	}
+
+	if mode == PrimaryMode && len(opts) != 0 {
+		return nil, errInvalidReadPreference
+	}
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		err := opt(rp)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rp, nil
+}
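+
+// Example (illustrative sketch): constructing a read preference with options;
+// 90 seconds is the smallest max staleness value the server accepts.
+//
+//	rp, err := readpref.New(
+//		readpref.SecondaryPreferredMode,
+//		readpref.WithMaxStaleness(90*time.Second),
+//	)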
+
+// ReadPref determines which servers are considered suitable for read operations.
+type ReadPref struct {
+	maxStaleness    time.Duration
+	maxStalenessSet bool
+	mode            Mode
+	tagSets         []tag.Set
+	hedgeEnabled    *bool
+}
+
+// MaxStaleness returns the maximum replication staleness a server
+// may have and still be considered eligible for selection. The
+// second return value indicates whether this value has been set.
+func (r *ReadPref) MaxStaleness() (time.Duration, bool) {
+	return r.maxStaleness, r.maxStalenessSet
+}
+
+// Mode indicates the mode of the read preference.
+func (r *ReadPref) Mode() Mode {
+	return r.mode
+}
+
+// TagSets returns the tag sets indicating
+// which servers should be considered.
+func (r *ReadPref) TagSets() []tag.Set {
+	return r.tagSets
+}
+
+// HedgeEnabled returns whether or not hedged reads are enabled for this read preference. If this option was not
+// specified during read preference construction, nil is returned.
+func (r *ReadPref) HedgeEnabled() *bool {
+	return r.hedgeEnabled
+}
+
+// String returns a human-readable description of the read preference.
+func (r *ReadPref) String() string {
+	var b bytes.Buffer
+	b.WriteString(r.mode.String())
+	delim := "("
+	if r.maxStalenessSet {
+		fmt.Fprintf(&b, "%smaxStaleness=%v", delim, r.maxStaleness)
+		delim = " "
+	}
+	for _, tagSet := range r.tagSets {
+		fmt.Fprintf(&b, "%stagSet=%s", delim, tagSet.String())
+		delim = " "
+	}
+	if r.hedgeEnabled != nil {
+		fmt.Fprintf(&b, "%shedgeEnabled=%v", delim, *r.hedgeEnabled)
+		delim = " "
+	}
+	if delim != "(" {
+		b.WriteString(")")
+	}
+	return b.String()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/results.go b/vendor/go.mongodb.org/mongo-driver/mongo/results.go
new file mode 100644
index 0000000000000000000000000000000000000000..2dbaf2af62d0be9428032f3c640cf722c0aea8fb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/results.go
@@ -0,0 +1,286 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+)
+
+// BulkWriteResult is the result type returned by a BulkWrite operation.
+type BulkWriteResult struct {
+	// The number of documents inserted.
+	InsertedCount int64
+
+	// The number of documents matched by filters in update and replace operations.
+	MatchedCount int64
+
+	// The number of documents modified by update and replace operations.
+	ModifiedCount int64
+
+	// The number of documents deleted.
+	DeletedCount int64
+
+	// The number of documents upserted by update and replace operations.
+	UpsertedCount int64
+
+	// A map of operation index to the _id of each upserted document.
+	UpsertedIDs map[int64]interface{}
+}
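+
+// Example (illustrative sketch): a BulkWrite call whose result populates the
+// fields above; ctx and coll are assumed to be in scope.
+//
+//	models := []mongo.WriteModel{
+//		mongo.NewInsertOneModel().SetDocument(bson.D{{"x", 1}}),
+//		mongo.NewUpdateOneModel().
+//			SetFilter(bson.D{{"x", 1}}).
+//			SetUpdate(bson.D{{"$set", bson.D{{"y", 2}}}}).
+//			SetUpsert(true),
+//	}
+//	res, err := coll.BulkWrite(ctx, models)
+//	// res.UpsertedIDs maps a model's index to the _id it upserted.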
+
+// InsertOneResult is the result type returned by an InsertOne operation.
+type InsertOneResult struct {
+	// The _id of the inserted document. A value generated by the driver will be of type primitive.ObjectID.
+	InsertedID interface{}
+}
+
+// InsertManyResult is a result type returned by an InsertMany operation.
+type InsertManyResult struct {
+	// The _id values of the inserted documents. Values generated by the driver will be of type primitive.ObjectID.
+	InsertedIDs []interface{}
+}
+
+// TODO(GODRIVER-2367): Remove the BSON struct tags on DeleteResult.
+
+// DeleteResult is the result type returned by DeleteOne and DeleteMany operations.
+type DeleteResult struct {
+	DeletedCount int64 `bson:"n"` // The number of documents deleted.
+}
+
+// RewrapManyDataKeyResult is the result of the bulk write operation used to update the key vault collection with
+// rewrapped data keys.
+type RewrapManyDataKeyResult struct {
+	*BulkWriteResult
+}
+
+// ListDatabasesResult is a result of a ListDatabases operation.
+type ListDatabasesResult struct {
+	// A slice containing one DatabaseSpecification for each database matched by the operation's filter.
+	Databases []DatabaseSpecification
+
+	// The total size of the database files of the returned databases in bytes.
+	// This will be the sum of the SizeOnDisk field for each specification in Databases.
+	TotalSize int64
+}
+
+func newListDatabasesResultFromOperation(res operation.ListDatabasesResult) ListDatabasesResult {
+	var ldr ListDatabasesResult
+	ldr.Databases = make([]DatabaseSpecification, 0, len(res.Databases))
+	for _, spec := range res.Databases {
+		ldr.Databases = append(
+			ldr.Databases,
+			DatabaseSpecification{Name: spec.Name, SizeOnDisk: spec.SizeOnDisk, Empty: spec.Empty},
+		)
+	}
+	ldr.TotalSize = res.TotalSize
+	return ldr
+}
+
+// DatabaseSpecification contains information for a database. This type is returned as part of ListDatabasesResult.
+type DatabaseSpecification struct {
+	Name       string // The name of the database.
+	SizeOnDisk int64  // The total size of the database files on disk in bytes.
+	Empty      bool   // Specifies whether or not the database is empty.
+}
+
+// UpdateResult is the result type returned from UpdateOne, UpdateMany, and ReplaceOne operations.
+type UpdateResult struct {
+	MatchedCount  int64       // The number of documents matched by the filter.
+	ModifiedCount int64       // The number of documents modified by the operation.
+	UpsertedCount int64       // The number of documents upserted by the operation.
+	UpsertedID    interface{} // The _id field of the upserted document, or nil if no upsert was done.
+}
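+
+// Example (illustrative sketch): an upsert whose UpdateResult distinguishes a
+// match from an upsert; ctx, coll, and id are assumed to be in scope.
+//
+//	res, err := coll.UpdateOne(ctx,
+//		bson.D{{"_id", id}},
+//		bson.D{{"$set", bson.D{{"active", true}}}},
+//		options.Update().SetUpsert(true),
+//	)
+//	// MatchedCount == 0 with a non-nil UpsertedID means a new document was
+//	// inserted rather than an existing one updated.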
+
+// UnmarshalBSON implements the bson.Unmarshaler interface.
+//
+// Deprecated: Unmarshalling an UpdateResult directly from BSON is not supported and may produce
+// different results compared to running Update* operations directly.
+func (result *UpdateResult) UnmarshalBSON(b []byte) error {
+	// TODO(GODRIVER-2367): Remove the ability to unmarshal BSON directly to an UpdateResult.
+	elems, err := bson.Raw(b).Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "n":
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				result.MatchedCount = int64(elem.Value().Int32())
+			case bson.TypeInt64:
+				result.MatchedCount = elem.Value().Int64()
+			default:
+				return fmt.Errorf("Received invalid type for n, should be Int32 or Int64, received %s", elem.Value().Type)
+			}
+		case "nModified":
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				result.ModifiedCount = int64(elem.Value().Int32())
+			case bson.TypeInt64:
+				result.ModifiedCount = elem.Value().Int64()
+			default:
+				return fmt.Errorf("Received invalid type for nModified, should be Int32 or Int64, received %s", elem.Value().Type)
+			}
+		case "upserted":
+			switch elem.Value().Type {
+			case bson.TypeArray:
+				e, err := elem.Value().Array().IndexErr(0)
+				if err != nil {
+					break
+				}
+				if e.Value().Type != bson.TypeEmbeddedDocument {
+					break
+				}
+				var d struct {
+					ID interface{} `bson:"_id"`
+				}
+				err = bson.Unmarshal(e.Value().Document(), &d)
+				if err != nil {
+					return err
+				}
+				result.UpsertedID = d.ID
+			default:
+				return fmt.Errorf("Received invalid type for upserted, should be Array, received %s", elem.Value().Type)
+			}
+		}
+	}
+
+	return nil
+}
+
+// IndexSpecification represents an index in a database. This type is returned by the IndexView.ListSpecifications
+// function and is also used in the CollectionSpecification type.
+type IndexSpecification struct {
+	// The index name.
+	Name string
+
+	// The namespace for the index. This is a string in the format "databaseName.collectionName".
+	Namespace string
+
+	// The keys specification document for the index.
+	KeysDocument bson.Raw
+
+	// The index version.
+	Version int32
+
+	// The length of time, in seconds, for documents to remain in the collection. The default value is 0, which means
+	// that documents will remain in the collection until they're explicitly deleted or the collection is dropped.
+	ExpireAfterSeconds *int32
+
+	// If true, the index will only reference documents that contain the fields specified in the index. The default is
+	// false.
+	Sparse *bool
+
+	// If true, the collection will not accept insertion or update of documents where the index key value matches an
+	// existing value in the index. The default is false.
+	Unique *bool
+
+	// The clustered index.
+	Clustered *bool
+}
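+
+// Example (illustrative sketch): listing the index specifications of a
+// collection; ctx and coll are assumed to be in scope.
+//
+//	specs, err := coll.Indexes().ListSpecifications(ctx)
+//	for _, spec := range specs {
+//		fmt.Println(spec.Name, spec.KeysDocument)
+//	}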
+
+var _ bson.Unmarshaler = (*IndexSpecification)(nil)
+
+type unmarshalIndexSpecification struct {
+	Name               string   `bson:"name"`
+	Namespace          string   `bson:"ns"`
+	KeysDocument       bson.Raw `bson:"key"`
+	Version            int32    `bson:"v"`
+	ExpireAfterSeconds *int32   `bson:"expireAfterSeconds"`
+	Sparse             *bool    `bson:"sparse"`
+	Unique             *bool    `bson:"unique"`
+	Clustered          *bool    `bson:"clustered"`
+}
+
+// UnmarshalBSON implements the bson.Unmarshaler interface.
+//
+// Deprecated: Unmarshaling an IndexSpecification from BSON will not be supported in Go Driver 2.0.
+func (i *IndexSpecification) UnmarshalBSON(data []byte) error {
+	var temp unmarshalIndexSpecification
+	if err := bson.Unmarshal(data, &temp); err != nil {
+		return err
+	}
+
+	i.Name = temp.Name
+	i.Namespace = temp.Namespace
+	i.KeysDocument = temp.KeysDocument
+	i.Version = temp.Version
+	i.ExpireAfterSeconds = temp.ExpireAfterSeconds
+	i.Sparse = temp.Sparse
+	i.Unique = temp.Unique
+	i.Clustered = temp.Clustered
+	return nil
+}
+
+// CollectionSpecification represents a collection in a database. This type is returned by the
+// Database.ListCollectionSpecifications function.
+type CollectionSpecification struct {
+	// The collection name.
+	Name string
+
+	// The type of the collection. This will either be "collection" or "view".
+	Type string
+
+	// Whether or not the collection is readOnly. This will be false for MongoDB versions < 3.4.
+	ReadOnly bool
+
+	// The collection UUID. This field will be nil for MongoDB versions < 3.6. For versions 3.6 and higher, this will
+	// be a primitive.Binary with Subtype 4.
+	UUID *primitive.Binary
+
+	// A document containing the options used to construct the collection.
+	Options bson.Raw
+
+	// An IndexSpecification instance with details about the collection's _id index. This will be nil if the NameOnly
+	// option is used and for MongoDB versions < 3.4.
+	IDIndex *IndexSpecification
+}
+
+var _ bson.Unmarshaler = (*CollectionSpecification)(nil)
+
+// unmarshalCollectionSpecification is used to unmarshal BSON bytes from a listCollections command into a
+// CollectionSpecification.
+type unmarshalCollectionSpecification struct {
+	Name string `bson:"name"`
+	Type string `bson:"type"`
+	Info *struct {
+		ReadOnly bool              `bson:"readOnly"`
+		UUID     *primitive.Binary `bson:"uuid"`
+	} `bson:"info"`
+	Options bson.Raw            `bson:"options"`
+	IDIndex *IndexSpecification `bson:"idIndex"`
+}
+
+// UnmarshalBSON implements the bson.Unmarshaler interface.
+//
+// Deprecated: Unmarshaling a CollectionSpecification from BSON will not be supported in Go Driver
+// 2.0.
+func (cs *CollectionSpecification) UnmarshalBSON(data []byte) error {
+	var temp unmarshalCollectionSpecification
+	if err := bson.Unmarshal(data, &temp); err != nil {
+		return err
+	}
+
+	cs.Name = temp.Name
+	cs.Type = temp.Type
+	if cs.Type == "" {
+		// The "type" field is only present on 3.4+ because views were introduced in 3.4, so we implicitly set the
+		// value to "collection" if it's empty.
+		cs.Type = "collection"
+	}
+	if temp.Info != nil {
+		cs.ReadOnly = temp.Info.ReadOnly
+		cs.UUID = temp.Info.UUID
+	}
+	cs.Options = temp.Options
+	cs.IDIndex = temp.IDIndex
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go
new file mode 100644
index 0000000000000000000000000000000000000000..3253a73a2b33e19f01d4ce4afc7eb594231e04d6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go
@@ -0,0 +1,258 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// SearchIndexView is a type that can be used to create, drop, list and update search indexes on a collection. A SearchIndexView for
+// a collection can be created by a call to Collection.SearchIndexes().
+type SearchIndexView struct {
+	coll *Collection
+}
+
+// SearchIndexModel represents a new search index to be created.
+type SearchIndexModel struct {
+	// A document describing the definition for the search index. It cannot be nil.
+	// See https://www.mongodb.com/docs/atlas/atlas-search/create-index/ for reference.
+	Definition interface{}
+
+	// The search index options.
+	Options *options.SearchIndexesOptions
+}
+
+// List executes a listSearchIndexes command and returns a cursor over the search indexes in the collection.
+//
+// The searchIdxOpts parameter can be used to specify the index name; a nil value matches all indexes.
+//
+// The opts parameter can be used to specify options for this operation (see the options.ListSearchIndexesOptions
+// documentation).
+func (siv SearchIndexView) List(
+	ctx context.Context,
+	searchIdxOpts *options.SearchIndexesOptions,
+	opts ...*options.ListSearchIndexesOptions,
+) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	index := bson.D{}
+	if searchIdxOpts != nil && searchIdxOpts.Name != nil {
+		index = bson.D{{"name", *searchIdxOpts.Name}}
+	}
+
+	aggregateOpts := make([]*options.AggregateOptions, len(opts))
+	for i, opt := range opts {
+		aggregateOpts[i] = opt.AggregateOpts
+	}
+
+	return siv.coll.Aggregate(ctx, Pipeline{{{"$listSearchIndexes", index}}}, aggregateOpts...)
+}
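+
+// Example (illustrative sketch): listing all search indexes by passing a nil
+// options value; ctx and coll are assumed to be in scope.
+//
+//	cursor, err := coll.SearchIndexes().List(ctx, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	var indexes []bson.M
+//	if err := cursor.All(ctx, &indexes); err != nil {
+//		log.Fatal(err)
+//	}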
+
+// CreateOne executes a createSearchIndexes command to create a search index on the collection and returns the name of the new
+// search index. See the SearchIndexView.CreateMany documentation for more information and an example.
+func (siv SearchIndexView) CreateOne(
+	ctx context.Context,
+	model SearchIndexModel,
+	opts ...*options.CreateSearchIndexesOptions,
+) (string, error) {
+	names, err := siv.CreateMany(ctx, []SearchIndexModel{model}, opts...)
+	if err != nil {
+		return "", err
+	}
+
+	return names[0], nil
+}
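+
+// Example (illustrative sketch): creating a dynamically-mapped Atlas Search
+// index named "default"; ctx and coll are assumed to be in scope.
+//
+//	model := mongo.SearchIndexModel{
+//		Definition: bson.D{{"mappings", bson.D{{"dynamic", true}}}},
+//		Options:    options.SearchIndexes().SetName("default"),
+//	}
+//	name, err := coll.SearchIndexes().CreateOne(ctx, model)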
+
+// CreateMany executes a createSearchIndexes command to create multiple search indexes on the collection and returns
+// the names of the new search indexes.
+//
+// For each SearchIndexModel in the models parameter, an index name and type can optionally be specified
+// via the model's Options.
+//
+// The opts parameter can be used to specify options for this operation (see the options.CreateSearchIndexesOptions
+// documentation).
+func (siv SearchIndexView) CreateMany(
+	ctx context.Context,
+	models []SearchIndexModel,
+	_ ...*options.CreateSearchIndexesOptions,
+) ([]string, error) {
+	var indexes bsoncore.Document
+	aidx, indexes := bsoncore.AppendArrayStart(indexes)
+
+	for i, model := range models {
+		if model.Definition == nil {
+			return nil, fmt.Errorf("search index model definition cannot be nil")
+		}
+
+		definition, err := marshal(model.Definition, siv.coll.bsonOpts, siv.coll.registry)
+		if err != nil {
+			return nil, err
+		}
+
+		var iidx int32
+		iidx, indexes = bsoncore.AppendDocumentElementStart(indexes, strconv.Itoa(i))
+		if model.Options != nil && model.Options.Name != nil {
+			indexes = bsoncore.AppendStringElement(indexes, "name", *model.Options.Name)
+		}
+		if model.Options != nil && model.Options.Type != nil {
+			indexes = bsoncore.AppendStringElement(indexes, "type", *model.Options.Type)
+		}
+		indexes = bsoncore.AppendDocumentElement(indexes, "definition", definition)
+
+		indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	indexes, err := bsoncore.AppendArrayEnd(indexes, aidx)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	if sess == nil && siv.coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(siv.coll.client.sessionPool, siv.coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = siv.coll.client.validSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	selector := makePinnedSelector(sess, siv.coll.writeSelector)
+
+	op := operation.NewCreateSearchIndexes(indexes).
+		Session(sess).CommandMonitor(siv.coll.client.monitor).
+		ServerSelector(selector).ClusterClock(siv.coll.client.clock).
+		Collection(siv.coll.name).Database(siv.coll.db.name).
+		Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI).
+		Timeout(siv.coll.client.timeout).Authenticator(siv.coll.client.authenticator)
+
+	err = op.Execute(ctx)
+	if err != nil {
+		_, err = processWriteError(err)
+		return nil, err
+	}
+
+	indexesCreated := op.Result().IndexesCreated
+	names := make([]string, 0, len(indexesCreated))
+	for _, index := range indexesCreated {
+		names = append(names, index.Name)
+	}
+
+	return names, nil
+}
+
+// DropOne executes a dropSearchIndexes operation to drop a search index on the collection.
+//
+// The name parameter should be the name of the search index to drop. If the name is "*", ErrMultipleIndexDrop will be returned
+// without running the command because doing so would drop all search indexes.
+//
+// The opts parameter can be used to specify options for this operation (see the options.DropSearchIndexOptions
+// documentation).
+func (siv SearchIndexView) DropOne(
+	ctx context.Context,
+	name string,
+	_ ...*options.DropSearchIndexOptions,
+) error {
+	if name == "*" {
+		return ErrMultipleIndexDrop
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && siv.coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(siv.coll.client.sessionPool, siv.coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err := siv.coll.client.validSession(sess)
+	if err != nil {
+		return err
+	}
+
+	selector := makePinnedSelector(sess, siv.coll.writeSelector)
+
+	op := operation.NewDropSearchIndex(name).
+		Session(sess).CommandMonitor(siv.coll.client.monitor).
+		ServerSelector(selector).ClusterClock(siv.coll.client.clock).
+		Collection(siv.coll.name).Database(siv.coll.db.name).
+		Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI).
+		Timeout(siv.coll.client.timeout).Authenticator(siv.coll.client.authenticator)
+
+	err = op.Execute(ctx)
+	if de, ok := err.(driver.Error); ok && de.NamespaceNotFound() {
+		return nil
+	}
+	return err
+}
+
+// UpdateOne executes an updateSearchIndex operation to update a search index on the collection.
+//
+// The name parameter should be the name of the search index to update.
+//
+// The definition parameter is a document describing the definition for the search index. It cannot be nil.
+//
+// The opts parameter can be used to specify options for this operation (see the options.UpdateSearchIndexOptions
+// documentation).
+func (siv SearchIndexView) UpdateOne(
+	ctx context.Context,
+	name string,
+	definition interface{},
+	_ ...*options.UpdateSearchIndexOptions,
+) error {
+	if definition == nil {
+		return fmt.Errorf("search index definition cannot be nil")
+	}
+
+	indexDefinition, err := marshal(definition, siv.coll.bsonOpts, siv.coll.registry)
+	if err != nil {
+		return err
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	if sess == nil && siv.coll.client.sessionPool != nil {
+		sess = session.NewImplicitClientSession(siv.coll.client.sessionPool, siv.coll.client.id)
+		defer sess.EndSession()
+	}
+
+	err = siv.coll.client.validSession(sess)
+	if err != nil {
+		return err
+	}
+
+	selector := makePinnedSelector(sess, siv.coll.writeSelector)
+
+	op := operation.NewUpdateSearchIndex(name, indexDefinition).
+		Session(sess).CommandMonitor(siv.coll.client.monitor).
+		ServerSelector(selector).ClusterClock(siv.coll.client.clock).
+		Collection(siv.coll.name).Database(siv.coll.db.name).
+		Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI).
+		Timeout(siv.coll.client.timeout).Authenticator(siv.coll.client.authenticator)
+
+	return op.Execute(ctx)
+}
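+
+// Example (illustrative sketch): replacing the definition of an existing
+// search index; ctx and coll are assumed to be in scope.
+//
+//	def := bson.D{{"mappings", bson.D{{"dynamic", false}}}}
+//	err := coll.SearchIndexes().UpdateOne(ctx, "default", def)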
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/session.go b/vendor/go.mongodb.org/mongo-driver/mongo/session.go
new file mode 100644
index 0000000000000000000000000000000000000000..77be4ab6dbb4f1d49088c45d6f04d78d80c44467
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/session.go
@@ -0,0 +1,390 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// ErrWrongClient is returned when a user attempts to pass in a session created by a different client than
+// the method call is using.
+var ErrWrongClient = errors.New("session was not created by this client")
+
+var withTransactionTimeout = 120 * time.Second
+
+// SessionContext combines the context.Context and mongo.Session interfaces. It should be used as the Context arguments
+// to operations that should be executed in a session.
+//
+// Implementations of SessionContext are not safe for concurrent use by multiple goroutines.
+//
+// There are two ways to create a SessionContext and use it in a session/transaction. The first is to use one of the
+// callback-based functions such as WithSession and UseSession. These functions create a SessionContext and pass it to
+// the provided callback. The other is to use NewSessionContext to explicitly create a SessionContext.
+type SessionContext interface {
+	context.Context
+	Session
+}
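+
+// Example (illustrative sketch): the callback-based path, in which the driver
+// creates the SessionContext; ctx, client, and coll are assumed to be in scope.
+//
+//	err := client.UseSession(ctx, func(sc mongo.SessionContext) error {
+//		// sc is both a context.Context and a mongo.Session.
+//		_, err := coll.InsertOne(sc, bson.D{{"x", 1}})
+//		return err
+//	})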
+
+type sessionContext struct {
+	context.Context
+	Session
+}
+
+type sessionKey struct {
+}
+
+// NewSessionContext creates a new SessionContext associated with the given Context and Session parameters.
+func NewSessionContext(ctx context.Context, sess Session) SessionContext {
+	return &sessionContext{
+		Context: context.WithValue(ctx, sessionKey{}, sess),
+		Session: sess,
+	}
+}
+
+// SessionFromContext extracts the mongo.Session object stored in a Context. This can be used on a SessionContext that
+// was created implicitly through one of the callback-based session APIs or explicitly by calling NewSessionContext. If
+// there is no Session stored in the provided Context, nil is returned.
+func SessionFromContext(ctx context.Context) Session {
+	val := ctx.Value(sessionKey{})
+	if val == nil {
+		return nil
+	}
+
+	sess, ok := val.(Session)
+	if !ok {
+		return nil
+	}
+
+	return sess
+}
+
+// Session is an interface that represents a MongoDB logical session. Sessions can be used to enable causal consistency
+// for a group of operations or to execute operations in an ACID transaction. A new Session can be created from a Client
+// instance. A Session created from a Client must only be used to execute operations using that Client or a Database or
+// Collection created from that Client. Custom implementations of this interface should not be used in production. For
+// more information about sessions, and their use cases, see
+// https://www.mongodb.com/docs/manual/reference/server-sessions/,
+// https://www.mongodb.com/docs/manual/core/read-isolation-consistency-recency/#causal-consistency, and
+// https://www.mongodb.com/docs/manual/core/transactions/.
+//
+// Implementations of Session are not safe for concurrent use by multiple goroutines.
+type Session interface {
+	// StartTransaction starts a new transaction, configured with the given options, on this
+	// session. This method returns an error if there is already a transaction in-progress for this
+	// session.
+	StartTransaction(...*options.TransactionOptions) error
+
+	// AbortTransaction aborts the active transaction for this session. This method returns an error
+	// if there is no active transaction for this session or if the transaction has been committed
+	// or aborted.
+	AbortTransaction(context.Context) error
+
+	// CommitTransaction commits the active transaction for this session. This method returns an
+	// error if there is no active transaction for this session or if the transaction has been
+	// aborted.
+	CommitTransaction(context.Context) error
+
+	// WithTransaction starts a transaction on this session and runs the fn callback. Errors with
+	// the TransientTransactionError and UnknownTransactionCommitResult labels are retried for up to
+	// 120 seconds. Inside the callback, the SessionContext must be used as the Context parameter
+	// for any operations that should be part of the transaction. If the ctx parameter already has a
+	// Session attached to it, it will be replaced by this session. The fn callback may be run
+	// multiple times during WithTransaction due to retry attempts, so it must be idempotent.
+	// Non-retryable operation errors or any operation errors that occur after the timeout expires
+	// will be returned without retrying. If the callback fails, the driver will call
+	// AbortTransaction. Because this method must succeed to ensure that server-side resources are
+	// properly cleaned up, context deadlines and cancellations will not be respected during this
+	// call. For a usage example, see the Client.StartSession method documentation.
+	WithTransaction(ctx context.Context, fn func(ctx SessionContext) (interface{}, error),
+		opts ...*options.TransactionOptions) (interface{}, error)
+
+	// EndSession aborts any existing transactions and closes the session.
+	EndSession(context.Context)
+
+	// ClusterTime returns the current cluster time document associated with the session.
+	ClusterTime() bson.Raw
+
+	// OperationTime returns the current operation time document associated with the session.
+	OperationTime() *primitive.Timestamp
+
+	// Client returns the Client associated with the session.
+	Client() *Client
+
+	// ID returns the current ID document associated with the session. The ID document is in the
+	// form {"id": <BSON binary value>}.
+	ID() bson.Raw
+
+	// AdvanceClusterTime advances the cluster time for a session. This method returns an error if
+	// the session has ended.
+	AdvanceClusterTime(bson.Raw) error
+
+	// AdvanceOperationTime advances the operation time for a session. This method returns an error
+	// if the session has ended.
+	AdvanceOperationTime(*primitive.Timestamp) error
+
+	session()
+}
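+
+// Example (illustrative sketch): running a transaction through
+// WithTransaction; ctx, client, and coll are assumed to be in scope.
+//
+//	sess, err := client.StartSession()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer sess.EndSession(ctx)
+//
+//	result, err := sess.WithTransaction(ctx,
+//		func(sc mongo.SessionContext) (interface{}, error) {
+//			// The callback may run more than once, so it must be idempotent.
+//			return coll.InsertOne(sc, bson.D{{"x", 1}})
+//		})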
+
+// XSession is an unstable interface for internal use only.
+//
+// Deprecated: This interface is unstable because it provides access to a session.Client object, which exists in the
+// "x" package. It should not be used by applications and may be changed or removed in any release.
+type XSession interface {
+	ClientSession() *session.Client
+}
+
+// sessionImpl represents a set of sequential operations executed by an application that are related in some way.
+type sessionImpl struct {
+	clientSession       *session.Client
+	client              *Client
+	deployment          driver.Deployment
+	didCommitAfterStart bool // true if commit was called after start with no other operations
+}
+
+var _ Session = &sessionImpl{}
+var _ XSession = &sessionImpl{}
+
+// ClientSession implements the XSession interface.
+func (s *sessionImpl) ClientSession() *session.Client {
+	return s.clientSession
+}
+
+// ID implements the Session interface.
+func (s *sessionImpl) ID() bson.Raw {
+	return bson.Raw(s.clientSession.SessionID)
+}
+
+// EndSession implements the Session interface.
+func (s *sessionImpl) EndSession(ctx context.Context) {
+	if s.clientSession.TransactionInProgress() {
+		// ignore all errors aborting during an end session
+		_ = s.AbortTransaction(ctx)
+	}
+	s.clientSession.EndSession()
+}
+
+// WithTransaction implements the Session interface.
+func (s *sessionImpl) WithTransaction(ctx context.Context, fn func(ctx SessionContext) (interface{}, error),
+	opts ...*options.TransactionOptions) (interface{}, error) {
+	timeout := time.NewTimer(withTransactionTimeout)
+	defer timeout.Stop()
+	var err error
+	for {
+		err = s.StartTransaction(opts...)
+		if err != nil {
+			return nil, err
+		}
+
+		res, err := fn(NewSessionContext(ctx, s))
+		if err != nil {
+			if s.clientSession.TransactionRunning() {
+				// Wrap the user-provided Context in a new one that behaves like context.Background() for deadlines and
+				// cancellations, but forwards Value requests to the original one.
+				_ = s.AbortTransaction(newBackgroundContext(ctx))
+			}
+
+			select {
+			case <-timeout.C:
+				return nil, err
+			default:
+			}
+
+			if errorHasLabel(err, driver.TransientTransactionError) {
+				continue
+			}
+			return res, err
+		}
+
+		// Check if callback intentionally aborted and, if so, return immediately
+		// with no error.
+		err = s.clientSession.CheckAbortTransaction()
+		if err != nil {
+			return res, nil
+		}
+
+		// If context has errored, run AbortTransaction and return, as the CommitLoop
+		// has no chance of succeeding.
+		//
+		// Aborting after a failed CommitTransaction is dangerous. Failed transaction
+		// commits may unpin the session server-side, and subsequent transaction aborts
+		// may run on a new mongos which could end up with commit and abort being executed
+		// simultaneously.
+		if ctx.Err() != nil {
+			// Wrap the user-provided Context in a new one that behaves like context.Background() for deadlines and
+			// cancellations, but forwards Value requests to the original one.
+			_ = s.AbortTransaction(newBackgroundContext(ctx))
+			return nil, ctx.Err()
+		}
+
+	CommitLoop:
+		for {
+			err = s.CommitTransaction(newBackgroundContext(ctx))
+			// End when error is nil, as transaction has been committed.
+			if err == nil {
+				return res, nil
+			}
+
+			select {
+			case <-timeout.C:
+				return res, err
+			default:
+			}
+
+			if cerr, ok := err.(CommandError); ok {
+				if cerr.HasErrorLabel(driver.UnknownTransactionCommitResult) && !cerr.IsMaxTimeMSExpiredError() {
+					continue
+				}
+				if cerr.HasErrorLabel(driver.TransientTransactionError) {
+					break CommitLoop
+				}
+			}
+			return res, err
+		}
+	}
+}
+
+// StartTransaction implements the Session interface.
+func (s *sessionImpl) StartTransaction(opts ...*options.TransactionOptions) error {
+	err := s.clientSession.CheckStartTransaction()
+	if err != nil {
+		return err
+	}
+
+	s.didCommitAfterStart = false
+
+	topts := options.MergeTransactionOptions(opts...)
+	coreOpts := &session.TransactionOptions{
+		ReadConcern:    topts.ReadConcern,
+		ReadPreference: topts.ReadPreference,
+		WriteConcern:   topts.WriteConcern,
+		MaxCommitTime:  topts.MaxCommitTime,
+	}
+
+	return s.clientSession.StartTransaction(coreOpts)
+}
+
+// AbortTransaction implements the Session interface.
+func (s *sessionImpl) AbortTransaction(ctx context.Context) error {
+	err := s.clientSession.CheckAbortTransaction()
+	if err != nil {
+		return err
+	}
+
+	// Do not run the abort command if the transaction is in starting state
+	if s.clientSession.TransactionStarting() || s.didCommitAfterStart {
+		return s.clientSession.AbortTransaction()
+	}
+
+	selector := makePinnedSelector(s.clientSession, description.WriteSelector())
+
+	s.clientSession.Aborting = true
+	_ = operation.NewAbortTransaction().Session(s.clientSession).ClusterClock(s.client.clock).Database("admin").
+		Deployment(s.deployment).WriteConcern(s.clientSession.CurrentWc).ServerSelector(selector).
+		Retry(driver.RetryOncePerCommand).CommandMonitor(s.client.monitor).
+		RecoveryToken(bsoncore.Document(s.clientSession.RecoveryToken)).ServerAPI(s.client.serverAPI).
+		Authenticator(s.client.authenticator).Execute(ctx)
+
+	s.clientSession.Aborting = false
+	_ = s.clientSession.AbortTransaction()
+
+	return nil
+}
+
+// CommitTransaction implements the Session interface.
+func (s *sessionImpl) CommitTransaction(ctx context.Context) error {
+	err := s.clientSession.CheckCommitTransaction()
+	if err != nil {
+		return err
+	}
+
+	// Do not run the commit command if the transaction is in started state
+	if s.clientSession.TransactionStarting() || s.didCommitAfterStart {
+		s.didCommitAfterStart = true
+		return s.clientSession.CommitTransaction()
+	}
+
+	if s.clientSession.TransactionCommitted() {
+		s.clientSession.RetryingCommit = true
+	}
+
+	selector := makePinnedSelector(s.clientSession, description.WriteSelector())
+
+	s.clientSession.Committing = true
+	op := operation.NewCommitTransaction().
+		Session(s.clientSession).ClusterClock(s.client.clock).Database("admin").Deployment(s.deployment).
+		WriteConcern(s.clientSession.CurrentWc).ServerSelector(selector).Retry(driver.RetryOncePerCommand).
+		CommandMonitor(s.client.monitor).RecoveryToken(bsoncore.Document(s.clientSession.RecoveryToken)).
+		ServerAPI(s.client.serverAPI).MaxTime(s.clientSession.CurrentMct).Authenticator(s.client.authenticator)
+
+	err = op.Execute(ctx)
+	// Return error without updating transaction state if it is a timeout, as the transaction has not
+	// actually been committed.
+	if IsTimeout(err) {
+		return replaceErrors(err)
+	}
+	s.clientSession.Committing = false
+	commitErr := s.clientSession.CommitTransaction()
+
+	// We set the write concern to majority for subsequent calls to CommitTransaction.
+	s.clientSession.UpdateCommitTransactionWriteConcern()
+
+	if err != nil {
+		return replaceErrors(err)
+	}
+	return commitErr
+}
+
+// ClusterTime implements the Session interface.
+func (s *sessionImpl) ClusterTime() bson.Raw {
+	return s.clientSession.ClusterTime
+}
+
+// AdvanceClusterTime implements the Session interface.
+func (s *sessionImpl) AdvanceClusterTime(d bson.Raw) error {
+	return s.clientSession.AdvanceClusterTime(d)
+}
+
+// OperationTime implements the Session interface.
+func (s *sessionImpl) OperationTime() *primitive.Timestamp {
+	return s.clientSession.OperationTime
+}
+
+// AdvanceOperationTime implements the Session interface.
+func (s *sessionImpl) AdvanceOperationTime(ts *primitive.Timestamp) error {
+	return s.clientSession.AdvanceOperationTime(ts)
+}
+
+// Client implements the Session interface.
+func (s *sessionImpl) Client() *Client {
+	return s.client
+}
+
+// session implements the Session interface.
+func (*sessionImpl) session() {
+}
+
+// sessionFromContext checks for a sessionImpl in the given context and returns the underlying
+// session.Client if it exists.
+func sessionFromContext(ctx context.Context) *session.Client {
+	s := ctx.Value(sessionKey{})
+	if ses, ok := s.(*sessionImpl); ses != nil && ok {
+		return ses.clientSession
+	}
+
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go b/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go
new file mode 100644
index 0000000000000000000000000000000000000000..f6ed4dc88ecdb9d84f2fd110c69f3943490a1dd5
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go
@@ -0,0 +1,142 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// ErrNoDocuments is returned by SingleResult methods when the operation that created the SingleResult did not return
+// any documents.
+var ErrNoDocuments = errors.New("mongo: no documents in result")
+
+// SingleResult represents a single document returned from an operation. If the operation resulted in an error, all
+// SingleResult methods will return that error. If the operation did not return any documents, all SingleResult methods
+// will return ErrNoDocuments.
+type SingleResult struct {
+	ctx      context.Context
+	err      error
+	cur      *Cursor
+	rdr      bson.Raw
+	bsonOpts *options.BSONOptions
+	reg      *bsoncodec.Registry
+}
+
+// NewSingleResultFromDocument creates a SingleResult with an underlying Cursor pre-loaded with
+// the provided document, error, and registry. If no registry is provided, bson.DefaultRegistry will be used. If an error distinct
+// from the one provided occurs during creation of the SingleResult, that error will be stored on the returned SingleResult.
+//
+// The document parameter must be a non-nil document.
+func NewSingleResultFromDocument(document interface{}, err error, registry *bsoncodec.Registry) *SingleResult {
+	if document == nil {
+		return &SingleResult{err: ErrNilDocument}
+	}
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+
+	cur, createErr := NewCursorFromDocuments([]interface{}{document}, err, registry)
+	if createErr != nil {
+		return &SingleResult{err: createErr}
+	}
+
+	return &SingleResult{
+		cur: cur,
+		err: err,
+		reg: registry,
+	}
+}
+
+// Decode will unmarshal the document represented by this SingleResult into v. If there was an error from the operation
+// that created this SingleResult, that error will be returned. If the operation returned no documents, Decode will
+// return ErrNoDocuments.
+//
+// If the operation was successful and returned a document, Decode will return any errors from the unmarshalling process
+// without any modification. If v is nil or is a typed nil, an error will be returned.
+func (sr *SingleResult) Decode(v interface{}) error {
+	if sr.err != nil {
+		return sr.err
+	}
+	if sr.reg == nil {
+		return bson.ErrNilRegistry
+	}
+
+	if sr.err = sr.setRdrContents(); sr.err != nil {
+		return sr.err
+	}
+
+	dec, err := getDecoder(sr.rdr, sr.bsonOpts, sr.reg)
+	if err != nil {
+		return fmt.Errorf("error configuring BSON decoder: %w", err)
+	}
+
+	return dec.Decode(v)
+}
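+
+// Example (illustrative sketch): decoding a FindOne result and distinguishing
+// "no documents" from other errors; ctx and coll are assumed to be in scope.
+//
+//	var doc bson.M
+//	err := coll.FindOne(ctx, bson.D{{"x", 1}}).Decode(&doc)
+//	if errors.Is(err, mongo.ErrNoDocuments) {
+//		// No document matched the filter.
+//	} else if err != nil {
+//		log.Fatal(err)
+//	}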
+
+// Raw returns the document represented by this SingleResult as a bson.Raw. If
+// there was an error from the operation that created this SingleResult, both
+// the result and that error will be returned. If the operation returned no
+// documents, this will return (nil, ErrNoDocuments).
+func (sr *SingleResult) Raw() (bson.Raw, error) {
+	if sr.err != nil {
+		return sr.rdr, sr.err
+	}
+
+	if sr.err = sr.setRdrContents(); sr.err != nil {
+		return nil, sr.err
+	}
+	return sr.rdr, nil
+}
+
+// DecodeBytes will return the document represented by this SingleResult as a bson.Raw. If there was an error from the
+// operation that created this SingleResult, both the result and that error will be returned. If the operation returned
+// no documents, this will return (nil, ErrNoDocuments).
+//
+// Deprecated: Use [SingleResult.Raw] instead.
+func (sr *SingleResult) DecodeBytes() (bson.Raw, error) {
+	return sr.Raw()
+}
+
+// setRdrContents will set the contents of rdr by iterating the underlying cursor if necessary.
+func (sr *SingleResult) setRdrContents() error {
+	switch {
+	case sr.err != nil:
+		return sr.err
+	case sr.rdr != nil:
+		return nil
+	case sr.cur != nil:
+		defer sr.cur.Close(sr.ctx)
+
+		if !sr.cur.Next(sr.ctx) {
+			if err := sr.cur.Err(); err != nil {
+				return err
+			}
+
+			return ErrNoDocuments
+		}
+		sr.rdr = sr.cur.Current
+		return nil
+	}
+
+	return ErrNoDocuments
+}
+
+// Err provides a way to check for query errors without calling Decode. Err returns the error, if
+// any, that was encountered while running the operation. If the operation was successful but did
+// not return any documents, Err returns ErrNoDocuments. If this error is not nil, this error will
+// also be returned from Decode.
+func (sr *SingleResult) Err() error {
+	sr.err = sr.setRdrContents()
+
+	return sr.err
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/util.go b/vendor/go.mongodb.org/mongo-driver/mongo/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..270fa24a255f1e3a012b6d56eccc51baf4904a39
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/util.go
@@ -0,0 +1,7 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a73d8d72f9b2440fddb1cb82764dbeb488e1743
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go
@@ -0,0 +1,439 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package writeconcern defines write concerns for MongoDB operations.
+//
+// For more information about MongoDB write concerns, see
+// https://www.mongodb.com/docs/manual/reference/write-concern/
+package writeconcern // import "go.mongodb.org/mongo-driver/mongo/writeconcern"
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+const majority = "majority"
+
+// ErrInconsistent indicates that an inconsistent write concern was specified.
+//
+// Deprecated: ErrInconsistent will be removed in Go Driver 2.0.
+var ErrInconsistent = errors.New("a write concern cannot have both w=0 and j=true")
+
+// ErrEmptyWriteConcern indicates that a write concern has no fields set.
+//
+// Deprecated: ErrEmptyWriteConcern will be removed in Go Driver 2.0.
+var ErrEmptyWriteConcern = errors.New("a write concern must have at least one field set")
+
+// ErrNegativeW indicates that a negative integer `w` field was specified.
+//
+// Deprecated: ErrNegativeW will be removed in Go Driver 2.0.
+var ErrNegativeW = errors.New("write concern `w` field cannot be a negative number")
+
+// ErrNegativeWTimeout indicates that a negative WTimeout was specified.
+//
+// Deprecated: ErrNegativeWTimeout will be removed in Go Driver 2.0.
+var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be negative")
+
+// A WriteConcern defines a MongoDB write concern, which describes the level of acknowledgment
+// requested from MongoDB for write operations to a standalone mongod, to replica sets, or to
+// sharded clusters.
+//
+// For more information about MongoDB write concerns, see
+// https://www.mongodb.com/docs/manual/reference/write-concern/
+type WriteConcern struct {
+	// W requests acknowledgment that the write operation has propagated to a
+	// specified number of mongod instances or to mongod instances with
+	// specified tags. It sets the "w" option in a MongoDB write concern.
+	//
+	// W values must be a string or an int.
+	//
+	// Common values are:
+	//   - "majority": requests acknowledgment that write operations have been
+	//     durably committed to the calculated majority of the data-bearing
+	//     voting members.
+	//   - 1: requests acknowledgment that write operations have been written
+	//     to 1 node.
+	//   - 0: requests no acknowledgment of write operations
+	//
+	// For more information about the "w" option, see
+	// https://www.mongodb.com/docs/manual/reference/write-concern/#w-option
+	W interface{}
+
+	// Journal requests acknowledgment from MongoDB that the write operation has
+	// been written to the on-disk journal. It sets the "j" option in a MongoDB
+	// write concern.
+	//
+	// For more information about the "j" option, see
+	// https://www.mongodb.com/docs/manual/reference/write-concern/#j-option
+	Journal *bool
+
+	// WTimeout specifies a time limit for the write concern. It sets the
+	// "wtimeout" option in a MongoDB write concern.
+	//
+	// It is only applicable for "w" values greater than 1. Using a WTimeout and
+	// setting Timeout on the Client at the same time will result in undefined
+	// behavior.
+	//
+	// For more information about the "wtimeout" option, see
+	// https://www.mongodb.com/docs/manual/reference/write-concern/#wtimeout
+	WTimeout time.Duration
+}
+
+// Unacknowledged returns a WriteConcern that requests no acknowledgment of
+// write operations.
+//
+// For more information about write concern "w: 0", see
+// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-number-
+func Unacknowledged() *WriteConcern {
+	return &WriteConcern{W: 0}
+}
+
+// W1 returns a WriteConcern that requests acknowledgment that write operations
+// have been written to memory on one node (e.g. the standalone mongod or the
+// primary in a replica set).
+//
+// For more information about write concern "w: 1", see
+// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-number-
+func W1() *WriteConcern {
+	return &WriteConcern{W: 1}
+}
+
+// Journaled returns a WriteConcern that requests acknowledgment that write
+// operations have been written to the on-disk journal on MongoDB.
+//
+// The database's default value for "w" determines how many nodes must write to
+// their on-disk journal before the write operation is acknowledged.
+//
+// For more information about write concern "j: true", see
+// https://www.mongodb.com/docs/manual/reference/write-concern/#j-option
+func Journaled() *WriteConcern {
+	journal := true
+	return &WriteConcern{Journal: &journal}
+}
+
+// Majority returns a WriteConcern that requests acknowledgment that write
+// operations have been durably committed to the calculated majority of the
+// data-bearing voting members.
+//
+// Write concern "w: majority" typically requires write operations to be written
+// to the on-disk journal before they are acknowledged, unless journaling is
+// disabled on MongoDB or the "writeConcernMajorityJournalDefault" replica set
+// configuration is set to false.
+//
+// For more information about write concern "w: majority", see
+// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-majority-
+func Majority() *WriteConcern {
+	return &WriteConcern{W: majority}
+}
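+
+// Example (illustrative sketch): attaching a majority write concern to a
+// client; the URI below is a placeholder.
+//
+//	opts := options.Client().
+//		ApplyURI("mongodb://localhost:27017").
+//		SetWriteConcern(writeconcern.Majority())
+//	client, err := mongo.Connect(context.TODO(), opts)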
+
+// Custom returns a WriteConcern that requests acknowledgment that write
+// operations have propagated to tagged members that satisfy the custom write
+// concern defined in "settings.getLastErrorModes".
+//
+// For more information about custom write concern names, see
+// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-custom-write-concern-name-
+func Custom(tag string) *WriteConcern {
+	return &WriteConcern{W: tag}
+}
+
+// Option is an option to provide when creating a WriteConcern.
+//
+// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead.
+// For example:
+//
+//	writeconcern.Majority()
+//
+// or
+//
+//	journal := true
+//	&writeconcern.WriteConcern{
+//		W:       2,
+//		Journal: &journal,
+//	}
+type Option func(concern *WriteConcern)
+
+// New constructs a new WriteConcern.
+//
+// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead.
+// For example:
+//
+//	writeconcern.Majority()
+//
+// or
+//
+//	journal := true
+//	&writeconcern.WriteConcern{
+//		W:       2,
+//		Journal: &journal,
+//	}
+func New(options ...Option) *WriteConcern {
+	concern := &WriteConcern{}
+
+	for _, option := range options {
+		option(concern)
+	}
+
+	return concern
+}
+
+// W requests acknowledgement that write operations propagate to the specified number of mongod
+// instances.
+//
+// Deprecated: Use the Unacknowledged or W1 functions or define a struct literal instead.
+// For example:
+//
+//	writeconcern.Unacknowledged()
+//
+// or
+//
+//	journal := true
+//	&writeconcern.WriteConcern{
+//		W:       2,
+//		Journal: &journal,
+//	}
+func W(w int) Option {
+	return func(concern *WriteConcern) {
+		concern.W = w
+	}
+}
+
+// WMajority requests acknowledgement that write operations propagate to the majority of mongod
+// instances.
+//
+// Deprecated: Use [Majority] instead.
+func WMajority() Option {
+	return func(concern *WriteConcern) {
+		concern.W = majority
+	}
+}
+
+// WTagSet requests acknowledgement that write operations propagate to the specified mongod
+// instance.
+//
+// Deprecated: Use [Custom] instead.
+func WTagSet(tag string) Option {
+	return func(concern *WriteConcern) {
+		concern.W = tag
+	}
+}
+
+// J requests acknowledgement from MongoDB that write operations are written to
+// the journal.
+//
+// Deprecated: Use the Journaled function or define a struct literal instead.
+// For example:
+//
+//	writeconcern.Journaled()
+//
+// or
+//
+//	journal := true
+//	&writeconcern.WriteConcern{
+//		W:       2,
+//		Journal: &journal,
+//	}
+func J(j bool) Option {
+	return func(concern *WriteConcern) {
+		// To maintain backward compatible behavior (now that the J field is a
+		// bool pointer), only set a value for J if the input is true. If the
+		// input is false, do not set a value, which omits "j" from the
+		// marshaled write concern.
+		if j {
+			concern.Journal = &j
+		}
+	}
+}
+
+// WTimeout specifies a time limit for the write concern.
+//
+// It is only applicable for "w" values greater than 1. Using a WTimeout and setting Timeout on the
+// Client at the same time will result in undefined behavior.
+//
+// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead.
+// For example:
+//
+//	wc := writeconcern.W1()
+//	wc.WTimeout = 30 * time.Second
+//
+// or
+//
+//	journal := true
+//	&writeconcern.WriteConcern{
+//		W:        "majority",
+//		WTimeout: 30 * time.Second,
+//	}
+func WTimeout(d time.Duration) Option {
+	return func(concern *WriteConcern) {
+		concern.WTimeout = d
+	}
+}
+
+// MarshalBSONValue implements the bson.ValueMarshaler interface.
+//
+// Deprecated: Marshaling a WriteConcern to BSON will not be supported in Go
+// Driver 2.0.
+func (wc *WriteConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if wc == nil {
+		return 0, nil, ErrEmptyWriteConcern
+	}
+
+	var elems []byte
+	if wc.W != nil {
+		// Only support string or int values for W. That aligns with the
+		// documentation and the behavior of other functions, like Acknowledged.
+		switch w := wc.W.(type) {
+		case int:
+			if w < 0 {
+				return 0, nil, ErrNegativeW
+			}
+
+			// If Journal=true and W=0, return an error because that write
+			// concern is ambiguous.
+			if wc.Journal != nil && *wc.Journal && w == 0 {
+				return 0, nil, ErrInconsistent
+			}
+
+			elems = bsoncore.AppendInt32Element(elems, "w", int32(w))
+		case string:
+			elems = bsoncore.AppendStringElement(elems, "w", w)
+		default:
+			return 0,
+				nil,
+				fmt.Errorf("WriteConcern.W must be a string or int, but is a %T", wc.W)
+		}
+	}
+
+	if wc.Journal != nil {
+		elems = bsoncore.AppendBooleanElement(elems, "j", *wc.Journal)
+	}
+
+	if wc.WTimeout < 0 {
+		return 0, nil, ErrNegativeWTimeout
+	}
+
+	if wc.WTimeout != 0 {
+		elems = bsoncore.AppendInt64Element(elems, "wtimeout", int64(wc.WTimeout/time.Millisecond))
+	}
+
+	if len(elems) == 0 {
+		return 0, nil, ErrEmptyWriteConcern
+	}
+	return bson.TypeEmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
+}
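+
+// A minimal sketch of what marshaling produces, assuming a write concern of
+// {w: "majority", j: true}:
+//
+//	journal := true
+//	wc := &writeconcern.WriteConcern{W: "majority", Journal: &journal}
+//	typ, data, err := wc.MarshalBSONValue()
+//	// typ is bson.TypeEmbeddedDocument; data encodes {"w": "majority", "j": true}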
+
+// AcknowledgedValue returns true if a BSON RawValue for a write concern represents an acknowledged write concern.
+// The element's value must be a document representing a write concern.
+//
+// Deprecated: AcknowledgedValue will not be supported in Go Driver 2.0.
+func AcknowledgedValue(rawv bson.RawValue) bool {
+	doc, ok := bsoncore.Value{Type: rawv.Type, Data: rawv.Value}.DocumentOK()
+	if !ok {
+		return false
+	}
+
+	val, err := doc.LookupErr("w")
+	if err != nil {
+		// key w not found --> acknowledged
+		return true
+	}
+
+	i32, ok := val.Int32OK()
+	if !ok {
+		return false
+	}
+	return i32 != 0
+}
+
+// Acknowledged indicates whether or not a write with the given write concern will be acknowledged.
+func (wc *WriteConcern) Acknowledged() bool {
+	// Only {w: 0} and {w: 0, j: false} are unacknowledged write concerns. All
+	// other values are acknowledged.
+	return wc == nil || wc.W != 0 || (wc.Journal != nil && *wc.Journal)
+}
+
+// IsValid returns true if the WriteConcern is valid.
+func (wc *WriteConcern) IsValid() bool {
+	if wc == nil {
+		return true
+	}
+
+	switch w := wc.W.(type) {
+	case int:
+		// A write concern with {w: int} must have a non-negative value and
+		// cannot have the combination {w: 0, j: true}.
+		return w >= 0 && (w > 0 || wc.Journal == nil || !*wc.Journal)
+	case string, nil:
+		// A write concern with {w: string} or no w specified is always valid.
+		return true
+	default:
+		// A write concern with an unsupported w type is not valid.
+		return false
+	}
+}
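+
+// Illustrative values for Acknowledged and IsValid (a sketch, not an
+// exhaustive matrix):
+//
+//	(&writeconcern.WriteConcern{W: 0}).Acknowledged()          // false
+//	(&writeconcern.WriteConcern{W: 2}).Acknowledged()          // true
+//	(&writeconcern.WriteConcern{W: -1}).IsValid()              // false
+//	(&writeconcern.WriteConcern{W: "majority"}).IsValid()      // true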
+
+// GetW returns the write concern w level.
+//
+// Deprecated: Use the WriteConcern.W field instead.
+func (wc *WriteConcern) GetW() interface{} {
+	return wc.W
+}
+
+// GetJ returns the write concern journaling level.
+//
+// Deprecated: Use the WriteConcern.Journal field instead.
+func (wc *WriteConcern) GetJ() bool {
+	// Treat a nil Journal as false. That maintains backward compatibility with the existing
+	// behavior of GetJ where unset is false. If users want the real value of Journal, they can
+	// access the Journal field.
+	return wc.Journal != nil && *wc.Journal
+}
+
+// GetWTimeout returns the write concern timeout.
+//
+// Deprecated: Use the WriteConcern.WTimeout field instead.
+func (wc *WriteConcern) GetWTimeout() time.Duration {
+	return wc.WTimeout
+}
+
+// WithOptions returns a copy of this WriteConcern with the options set.
+//
+// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead.
+// For example:
+//
+//	writeconcern.Majority()
+//
+// or
+//
+//	journal := true
+//	&writeconcern.WriteConcern{
+//		W:       2,
+//		Journal: &journal,
+//	}
+func (wc *WriteConcern) WithOptions(options ...Option) *WriteConcern {
+	if wc == nil {
+		return New(options...)
+	}
+	newWC := &WriteConcern{}
+	*newWC = *wc
+
+	for _, option := range options {
+		option(newWC)
+	}
+
+	return newWC
+}
+
+// AckWrite returns true if a write concern represents an acknowledged write.
+//
+// Deprecated: Use [WriteConcern.Acknowledged] instead.
+func AckWrite(wc *WriteConcern) bool {
+	return wc == nil || wc.Acknowledged()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/tag/tag.go b/vendor/go.mongodb.org/mongo-driver/tag/tag.go
new file mode 100644
index 0000000000000000000000000000000000000000..4faff52549aa159bfabc79f6512db71d0ee6812c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/tag/tag.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package tag provides types for filtering replica set members using tags in a read preference.
+//
+// For more information about read preference tags, see
+// https://www.mongodb.com/docs/manual/core/read-preference-tags/
+package tag // import "go.mongodb.org/mongo-driver/tag"
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Tag is a name/value pair.
+type Tag struct {
+	Name  string
+	Value string
+}
+
+// String returns a human-readable description of the tag.
+func (tag Tag) String() string {
+	return fmt.Sprintf("%s=%s", tag.Name, tag.Value)
+}
+
+// NewTagSetFromMap creates a tag set from a map.
+//
+// For more information about read preference tags, see
+// https://www.mongodb.com/docs/manual/core/read-preference-tags/
+func NewTagSetFromMap(m map[string]string) Set {
+	var set Set
+	for k, v := range m {
+		set = append(set, Tag{Name: k, Value: v})
+	}
+
+	return set
+}
+
+// NewTagSetsFromMaps creates a list of tag sets from a slice of maps.
+//
+// For more information about read preference tags, see
+// https://www.mongodb.com/docs/manual/core/read-preference-tags/
+func NewTagSetsFromMaps(maps []map[string]string) []Set {
+	sets := make([]Set, 0, len(maps))
+	for _, m := range maps {
+		sets = append(sets, NewTagSetFromMap(m))
+	}
+	return sets
+}
+
+// Set is an ordered list of Tags.
+type Set []Tag
+
+// Contains indicates whether the name/value pair exists in the tagset.
+func (ts Set) Contains(name, value string) bool {
+	for _, t := range ts {
+		if t.Name == name && t.Value == value {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ContainsAll indicates whether all the name/value pairs exist in the tagset.
+func (ts Set) ContainsAll(other []Tag) bool {
+	for _, ot := range other {
+		if !ts.Contains(ot.Name, ot.Value) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// String returns a human-readable description of the tagset.
+func (ts Set) String() string {
+	var b bytes.Buffer
+	for i, tag := range ts {
+		if i > 0 {
+			b.WriteString(",")
+		}
+		b.WriteString(tag.String())
+	}
+	return b.String()
+}
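+
+// A short usage sketch (the tag names and values are illustrative):
+//
+//	set := tag.NewTagSetFromMap(map[string]string{"dc": "east", "usage": "reporting"})
+//	set.Contains("dc", "east")                              // true
+//	set.ContainsAll([]tag.Tag{{Name: "dc", Value: "east"}}) // true
+//	set.String()                                            // e.g. "dc=east,usage=reporting" (map order varies)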
diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..3dc006afde7a069e79799d8fcc1a3104cf9e8f6a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/version/version.go
@@ -0,0 +1,11 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package version defines the Go Driver version.
+package version // import "go.mongodb.org/mongo-driver/version"
+
+// Driver is the current version of the driver.
+var Driver = "1.17.1"
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bc0afa70099d7efe6b7d8be8f1c8da6d119192c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
@@ -0,0 +1,164 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// NewArrayLengthError creates and returns an error for when the length of an array exceeds the
+// bytes available.
+func NewArrayLengthError(length, rem int) error {
+	return lengthError("array", length, rem)
+}
+
+// Array is a raw bytes representation of a BSON array.
+type Array []byte
+
+// NewArrayFromReader reads an array from r. This function will only validate the length is
+// correct and that the array ends with a null byte.
+func NewArrayFromReader(r io.Reader) (Array, error) {
+	return newBufferFromReader(r)
+}
+
+// Index searches for and retrieves the value at the given index. This method will panic if
+// the array is invalid or if the index is out of bounds.
+func (a Array) Index(index uint) Value {
+	value, err := a.IndexErr(index)
+	if err != nil {
+		panic(err)
+	}
+	return value
+}
+
+// IndexErr searches for and retrieves the value at the given index.
+func (a Array) IndexErr(index uint) (Value, error) {
+	elem, err := indexErr(a, index)
+	if err != nil {
+		return Value{}, err
+	}
+	return elem.Value(), err
+}
+
+// DebugString outputs a human readable version of Array. It will attempt to stringify the
+// valid components of the array even if the entire array is not valid.
+func (a Array) DebugString() string {
+	if len(a) < 5 {
+		return "<malformed>"
+	}
+	var buf strings.Builder
+	buf.WriteString("Array")
+	length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
+	buf.WriteByte('(')
+	buf.WriteString(strconv.Itoa(int(length)))
+	length -= 4
+	buf.WriteString(")[")
+	var elem Element
+	var ok bool
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
+			break
+		}
+		buf.WriteString(elem.Value().DebugString())
+		if length != 1 {
+			buf.WriteByte(',')
+		}
+	}
+	buf.WriteByte(']')
+
+	return buf.String()
+}
+
+// String outputs an ExtendedJSON version of Array. If the Array is not valid, this method
+// returns an empty string.
+func (a Array) String() string {
+	if len(a) < 5 {
+		return ""
+	}
+	var buf strings.Builder
+	buf.WriteByte('[')
+
+	length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
+
+	length -= 4
+
+	var elem Element
+	var ok bool
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return ""
+		}
+		buf.WriteString(elem.Value().String())
+		if length > 1 {
+			buf.WriteByte(',')
+		}
+	}
+	if length != 1 { // Missing final null byte or inaccurate length
+		return ""
+	}
+
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+// Values returns this array as a slice of values. The returned slice will contain valid values.
+// If the array is not valid, the values up to the invalid point will be returned along with an
+// error.
+func (a Array) Values() ([]Value, error) {
+	return values(a)
+}
+
+// Validate validates the array and ensures the elements contained within are valid.
+func (a Array) Validate() error {
+	length, rem, ok := ReadLength(a)
+	if !ok {
+		return NewInsufficientBytesError(a, rem)
+	}
+	if int(length) > len(a) {
+		return NewArrayLengthError(int(length), len(a))
+	}
+	if a[length-1] != 0x00 {
+		return ErrMissingNull
+	}
+
+	length -= 4
+	var elem Element
+
+	var keyNum int64
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return NewInsufficientBytesError(a, rem)
+		}
+
+		// validate element
+		err := elem.Validate()
+		if err != nil {
+			return err
+		}
+
+		// validate keys increase numerically
+		if fmt.Sprint(keyNum) != elem.Key() {
+			return fmt.Errorf("array key %q is out of order or invalid", elem.Key())
+		}
+		keyNum++
+	}
+
+	if len(rem) < 1 || rem[0] != 0x00 {
+		return ErrMissingNull
+	}
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go
new file mode 100644
index 0000000000000000000000000000000000000000..7e6937d896ec674ca1fea4f0c8f34c4a272d6087
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go
@@ -0,0 +1,201 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"strconv"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ArrayBuilder builds a bson array
+type ArrayBuilder struct {
+	arr     []byte
+	indexes []int32
+	keys    []int
+}
+
+// NewArrayBuilder creates a new ArrayBuilder
+func NewArrayBuilder() *ArrayBuilder {
+	return (&ArrayBuilder{}).startArray()
+}
+
+// startArray reserves the array's length and sets the index to where the length begins
+func (a *ArrayBuilder) startArray() *ArrayBuilder {
+	var index int32
+	index, a.arr = AppendArrayStart(a.arr)
+	a.indexes = append(a.indexes, index)
+	a.keys = append(a.keys, 0)
+	return a
+}
+
+// Build writes the length of the most recently started array at the reserved
+// index, pops that index, and returns the array (BSON bytes).
+func (a *ArrayBuilder) Build() Array {
+	lastIndex := len(a.indexes) - 1
+	lastKey := len(a.keys) - 1
+	a.arr, _ = AppendArrayEnd(a.arr, a.indexes[lastIndex])
+	a.indexes = a.indexes[:lastIndex]
+	a.keys = a.keys[:lastKey]
+	return a.arr
+}
+
+// incrementKey returns the current array key as a string and increments it for
+// use by the a.Append* methods.
+func (a *ArrayBuilder) incrementKey() string {
+	idx := len(a.keys) - 1
+	key := strconv.Itoa(a.keys[idx])
+	a.keys[idx]++
+	return key
+}
+
+// AppendInt32 will append i32 to ArrayBuilder.arr
+func (a *ArrayBuilder) AppendInt32(i32 int32) *ArrayBuilder {
+	a.arr = AppendInt32Element(a.arr, a.incrementKey(), i32)
+	return a
+}
+
+// AppendDocument will append doc to ArrayBuilder.arr
+func (a *ArrayBuilder) AppendDocument(doc []byte) *ArrayBuilder {
+	a.arr = AppendDocumentElement(a.arr, a.incrementKey(), doc)
+	return a
+}
+
+// AppendArray will append arr to ArrayBuilder.arr
+func (a *ArrayBuilder) AppendArray(arr []byte) *ArrayBuilder {
+	a.arr = AppendArrayElement(a.arr, a.incrementKey(), arr)
+	return a
+}
+
+// AppendDouble will append f to ArrayBuilder.arr
+func (a *ArrayBuilder) AppendDouble(f float64) *ArrayBuilder {
+	a.arr = AppendDoubleElement(a.arr, a.incrementKey(), f)
+	return a
+}
+
+// AppendString will append str to ArrayBuilder.arr
+func (a *ArrayBuilder) AppendString(str string) *ArrayBuilder {
+	a.arr = AppendStringElement(a.arr, a.incrementKey(), str)
+	return a
+}
+
+// AppendObjectID will append oid to ArrayBuilder.arr
+func (a *ArrayBuilder) AppendObjectID(oid primitive.ObjectID) *ArrayBuilder {
+	a.arr = AppendObjectIDElement(a.arr, a.incrementKey(), oid)
+	return a
+}
+
+// AppendBinary will append a BSON binary element using subtype and b to a.arr
+func (a *ArrayBuilder) AppendBinary(subtype byte, b []byte) *ArrayBuilder {
+	a.arr = AppendBinaryElement(a.arr, a.incrementKey(), subtype, b)
+	return a
+}
+
+// AppendUndefined will append a BSON undefined element to a.arr
+func (a *ArrayBuilder) AppendUndefined() *ArrayBuilder {
+	a.arr = AppendUndefinedElement(a.arr, a.incrementKey())
+	return a
+}
+
+// AppendBoolean will append a boolean element using b to a.arr
+func (a *ArrayBuilder) AppendBoolean(b bool) *ArrayBuilder {
+	a.arr = AppendBooleanElement(a.arr, a.incrementKey(), b)
+	return a
+}
+
+// AppendDateTime will append datetime element dt to a.arr
+func (a *ArrayBuilder) AppendDateTime(dt int64) *ArrayBuilder {
+	a.arr = AppendDateTimeElement(a.arr, a.incrementKey(), dt)
+	return a
+}
+
+// AppendNull will append a null element to a.arr
+func (a *ArrayBuilder) AppendNull() *ArrayBuilder {
+	a.arr = AppendNullElement(a.arr, a.incrementKey())
+	return a
+}
+
+// AppendRegex will append pattern and options to a.arr
+func (a *ArrayBuilder) AppendRegex(pattern, options string) *ArrayBuilder {
+	a.arr = AppendRegexElement(a.arr, a.incrementKey(), pattern, options)
+	return a
+}
+
+// AppendDBPointer will append ns and oid to a.arr
+func (a *ArrayBuilder) AppendDBPointer(ns string, oid primitive.ObjectID) *ArrayBuilder {
+	a.arr = AppendDBPointerElement(a.arr, a.incrementKey(), ns, oid)
+	return a
+}
+
+// AppendJavaScript will append js to a.arr
+func (a *ArrayBuilder) AppendJavaScript(js string) *ArrayBuilder {
+	a.arr = AppendJavaScriptElement(a.arr, a.incrementKey(), js)
+	return a
+}
+
+// AppendSymbol will append symbol to a.arr
+func (a *ArrayBuilder) AppendSymbol(symbol string) *ArrayBuilder {
+	a.arr = AppendSymbolElement(a.arr, a.incrementKey(), symbol)
+	return a
+}
+
+// AppendCodeWithScope will append code and scope to a.arr
+func (a *ArrayBuilder) AppendCodeWithScope(code string, scope Document) *ArrayBuilder {
+	a.arr = AppendCodeWithScopeElement(a.arr, a.incrementKey(), code, scope)
+	return a
+}
+
+// AppendTimestamp will append t and i to a.arr
+func (a *ArrayBuilder) AppendTimestamp(t, i uint32) *ArrayBuilder {
+	a.arr = AppendTimestampElement(a.arr, a.incrementKey(), t, i)
+	return a
+}
+
+// AppendInt64 will append i64 to a.arr
+func (a *ArrayBuilder) AppendInt64(i64 int64) *ArrayBuilder {
+	a.arr = AppendInt64Element(a.arr, a.incrementKey(), i64)
+	return a
+}
+
+// AppendDecimal128 will append d128 to a.arr
+func (a *ArrayBuilder) AppendDecimal128(d128 primitive.Decimal128) *ArrayBuilder {
+	a.arr = AppendDecimal128Element(a.arr, a.incrementKey(), d128)
+	return a
+}
+
+// AppendMaxKey will append a max key element to a.arr
+func (a *ArrayBuilder) AppendMaxKey() *ArrayBuilder {
+	a.arr = AppendMaxKeyElement(a.arr, a.incrementKey())
+	return a
+}
+
+// AppendMinKey will append a min key element to a.arr
+func (a *ArrayBuilder) AppendMinKey() *ArrayBuilder {
+	a.arr = AppendMinKeyElement(a.arr, a.incrementKey())
+	return a
+}
+
+// AppendValue appends a BSON value to the array.
+func (a *ArrayBuilder) AppendValue(val Value) *ArrayBuilder {
+	a.arr = AppendValueElement(a.arr, a.incrementKey(), val)
+	return a
+}
+
+// StartArray starts building an inline Array. After this array is completed,
+// the user must call a.FinishArray.
+func (a *ArrayBuilder) StartArray() *ArrayBuilder {
+	a.arr = AppendHeader(a.arr, bsontype.Array, a.incrementKey())
+	a.startArray()
+	return a
+}
+
+// FinishArray builds the most recent array created
+func (a *ArrayBuilder) FinishArray() *ArrayBuilder {
+	a.arr = a.Build()
+	return a
+}
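+
+// A minimal usage sketch: keys are generated automatically, and StartArray/
+// FinishArray nest a sub-array.
+//
+//	arr := bsoncore.NewArrayBuilder().
+//		AppendInt32(1).
+//		AppendString("two").
+//		StartArray().AppendBoolean(true).FinishArray().
+//		Build()
+//	// arr holds the BSON bytes of [1, "two", [true]]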
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go
new file mode 100644
index 0000000000000000000000000000000000000000..52162f8aa02a43e461018c19cf8599d1db958457
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go
@@ -0,0 +1,189 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// DocumentBuilder builds a bson document
+type DocumentBuilder struct {
+	doc     []byte
+	indexes []int32
+}
+
+// startDocument reserves the document's length and sets the index to where the length begins
+func (db *DocumentBuilder) startDocument() *DocumentBuilder {
+	var index int32
+	index, db.doc = AppendDocumentStart(db.doc)
+	db.indexes = append(db.indexes, index)
+	return db
+}
+
+// NewDocumentBuilder creates a new DocumentBuilder
+func NewDocumentBuilder() *DocumentBuilder {
+	return (&DocumentBuilder{}).startDocument()
+}
+
+// Build writes the length of the document at the reserved index, pops that
+// index, and returns the document (BSON bytes).
+func (db *DocumentBuilder) Build() Document {
+	last := len(db.indexes) - 1
+	db.doc, _ = AppendDocumentEnd(db.doc, db.indexes[last])
+	db.indexes = db.indexes[:last]
+	return db.doc
+}
+
+// AppendInt32 will append an int32 element using key and i32 to DocumentBuilder.doc
+func (db *DocumentBuilder) AppendInt32(key string, i32 int32) *DocumentBuilder {
+	db.doc = AppendInt32Element(db.doc, key, i32)
+	return db
+}
+
+// AppendDocument will append a bson embedded document element using key
+// and doc to DocumentBuilder.doc
+func (db *DocumentBuilder) AppendDocument(key string, doc []byte) *DocumentBuilder {
+	db.doc = AppendDocumentElement(db.doc, key, doc)
+	return db
+}
+
+// AppendArray will append a bson array using key and arr to DocumentBuilder.doc
+func (db *DocumentBuilder) AppendArray(key string, arr []byte) *DocumentBuilder {
+	db.doc = AppendHeader(db.doc, bsontype.Array, key)
+	db.doc = AppendArray(db.doc, arr)
+	return db
+}
+
+// AppendDouble will append a double element using key and f to DocumentBuilder.doc
+func (db *DocumentBuilder) AppendDouble(key string, f float64) *DocumentBuilder {
+	db.doc = AppendDoubleElement(db.doc, key, f)
+	return db
+}
+
+// AppendString will append str to DocumentBuilder.doc with the given key
+func (db *DocumentBuilder) AppendString(key string, str string) *DocumentBuilder {
+	db.doc = AppendStringElement(db.doc, key, str)
+	return db
+}
+
+// AppendObjectID will append oid to DocumentBuilder.doc with the given key
+func (db *DocumentBuilder) AppendObjectID(key string, oid primitive.ObjectID) *DocumentBuilder {
+	db.doc = AppendObjectIDElement(db.doc, key, oid)
+	return db
+}
+
+// AppendBinary will append a BSON binary element using key, subtype, and
+// b to db.doc
+func (db *DocumentBuilder) AppendBinary(key string, subtype byte, b []byte) *DocumentBuilder {
+	db.doc = AppendBinaryElement(db.doc, key, subtype, b)
+	return db
+}
+
+// AppendUndefined will append a BSON undefined element using key to db.doc
+func (db *DocumentBuilder) AppendUndefined(key string) *DocumentBuilder {
+	db.doc = AppendUndefinedElement(db.doc, key)
+	return db
+}
+
+// AppendBoolean will append a boolean element using key and b to db.doc
+func (db *DocumentBuilder) AppendBoolean(key string, b bool) *DocumentBuilder {
+	db.doc = AppendBooleanElement(db.doc, key, b)
+	return db
+}
+
+// AppendDateTime will append a datetime element using key and dt to db.doc
+func (db *DocumentBuilder) AppendDateTime(key string, dt int64) *DocumentBuilder {
+	db.doc = AppendDateTimeElement(db.doc, key, dt)
+	return db
+}
+
+// AppendNull will append a null element using key to db.doc
+func (db *DocumentBuilder) AppendNull(key string) *DocumentBuilder {
+	db.doc = AppendNullElement(db.doc, key)
+	return db
+}
+
+// AppendRegex will append pattern and options using key to db.doc
+func (db *DocumentBuilder) AppendRegex(key, pattern, options string) *DocumentBuilder {
+	db.doc = AppendRegexElement(db.doc, key, pattern, options)
+	return db
+}
+
+// AppendDBPointer will append ns and oid using key to db.doc
+func (db *DocumentBuilder) AppendDBPointer(key string, ns string, oid primitive.ObjectID) *DocumentBuilder {
+	db.doc = AppendDBPointerElement(db.doc, key, ns, oid)
+	return db
+}
+
+// AppendJavaScript will append js using the provided key to db.doc
+func (db *DocumentBuilder) AppendJavaScript(key, js string) *DocumentBuilder {
+	db.doc = AppendJavaScriptElement(db.doc, key, js)
+	return db
+}
+
+// AppendSymbol will append a BSON symbol element using key and symbol to db.doc
+func (db *DocumentBuilder) AppendSymbol(key, symbol string) *DocumentBuilder {
+	db.doc = AppendSymbolElement(db.doc, key, symbol)
+	return db
+}
+
+// AppendCodeWithScope will append code and scope using key to db.doc
+func (db *DocumentBuilder) AppendCodeWithScope(key string, code string, scope Document) *DocumentBuilder {
+	db.doc = AppendCodeWithScopeElement(db.doc, key, code, scope)
+	return db
+}
+
+// AppendTimestamp will append t and i to db.doc using the provided key
+func (db *DocumentBuilder) AppendTimestamp(key string, t, i uint32) *DocumentBuilder {
+	db.doc = AppendTimestampElement(db.doc, key, t, i)
+	return db
+}
+
+// AppendInt64 will append i64 using key to db.doc
+func (db *DocumentBuilder) AppendInt64(key string, i64 int64) *DocumentBuilder {
+	db.doc = AppendInt64Element(db.doc, key, i64)
+	return db
+}
+
+// AppendDecimal128 will append d128 to db.doc using the provided key
+func (db *DocumentBuilder) AppendDecimal128(key string, d128 primitive.Decimal128) *DocumentBuilder {
+	db.doc = AppendDecimal128Element(db.doc, key, d128)
+	return db
+}
+
+// AppendMaxKey will append a max key element using key to db.doc
+func (db *DocumentBuilder) AppendMaxKey(key string) *DocumentBuilder {
+	db.doc = AppendMaxKeyElement(db.doc, key)
+	return db
+}
+
+// AppendMinKey will append a min key element using key to db.doc
+func (db *DocumentBuilder) AppendMinKey(key string) *DocumentBuilder {
+	db.doc = AppendMinKeyElement(db.doc, key)
+	return db
+}
+
+// AppendValue will append a BSON element with the provided key and value to the document.
+func (db *DocumentBuilder) AppendValue(key string, val Value) *DocumentBuilder {
+	db.doc = AppendValueElement(db.doc, key, val)
+	return db
+}
+
+// StartDocument starts building an inline document element with the provided
+// key. After this document is completed, the user must call FinishDocument.
+func (db *DocumentBuilder) StartDocument(key string) *DocumentBuilder {
+	db.doc = AppendHeader(db.doc, bsontype.EmbeddedDocument, key)
+	db = db.startDocument()
+	return db
+}
+
+// FinishDocument builds the most recent document created
+func (db *DocumentBuilder) FinishDocument() *DocumentBuilder {
+	db.doc = db.Build()
+	return db
+}
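+
+// A minimal usage sketch (key names and values are illustrative):
+//
+//	doc := bsoncore.NewDocumentBuilder().
+//		AppendString("name", "example").
+//		StartDocument("meta").AppendInt32("version", 1).FinishDocument().
+//		Build()
+//	// doc holds the BSON bytes of {"name": "example", "meta": {"version": 1}}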
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
new file mode 100644
index 0000000000000000000000000000000000000000..03925d7ada13fcb6110a25a7a5e73008b379802b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
@@ -0,0 +1,842 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+const (
+	// EmptyDocumentLength is the length of a document that has been started/ended but has no elements.
+	EmptyDocumentLength = 5
+	// nullTerminator is a string version of the 0 byte that is appended at the end of cstrings.
+	nullTerminator       = string(byte(0))
+	invalidKeyPanicMsg   = "BSON element keys cannot contain null bytes"
+	invalidRegexPanicMsg = "BSON regex values cannot contain null bytes"
+)
+
+// AppendType will append t to dst and return the extended buffer.
+func AppendType(dst []byte, t bsontype.Type) []byte { return append(dst, byte(t)) }
+
+// AppendKey will append key to dst and return the extended buffer.
+func AppendKey(dst []byte, key string) []byte { return append(dst, key+nullTerminator...) }
+
+// AppendHeader will append Type t and key to dst and return the extended
+// buffer.
+func AppendHeader(dst []byte, t bsontype.Type, key string) []byte {
+	if !isValidCString(key) {
+		panic(invalidKeyPanicMsg)
+	}
+
+	dst = AppendType(dst, t)
+	dst = append(dst, key...)
+	return append(dst, 0x00)
+	// return append(AppendType(dst, t), key+string(0x00)...)
+}
+
+// TODO(skriptble): All of the Read* functions should return src resliced to start just after what was read.
+
+// ReadType will return the first byte of the provided []byte as a type. If
+// there is no available byte, false is returned.
+func ReadType(src []byte) (bsontype.Type, []byte, bool) {
+	if len(src) < 1 {
+		return 0, src, false
+	}
+	return bsontype.Type(src[0]), src[1:], true
+}
+
+// ReadKey will read a key from src. The 0x00 byte will not be present
+// in the returned string. If there are not enough bytes available, false is
+// returned.
+func ReadKey(src []byte) (string, []byte, bool) { return readcstring(src) }
+
+// ReadKeyBytes will read a key from src as bytes. The 0x00 byte will
+// not be present in the returned string. If there are not enough bytes
+// available, false is returned.
+func ReadKeyBytes(src []byte) ([]byte, []byte, bool) { return readcstringbytes(src) }
+
+// ReadHeader will read a type byte and a key from src. If both of these
+// values cannot be read, false is returned.
+func ReadHeader(src []byte) (t bsontype.Type, key string, rem []byte, ok bool) {
+	t, rem, ok = ReadType(src)
+	if !ok {
+		return 0, "", src, false
+	}
+	key, rem, ok = ReadKey(rem)
+	if !ok {
+		return 0, "", src, false
+	}
+
+	return t, key, rem, true
+}
+
+// ReadHeaderBytes will read a type and a key from src, and the remainder of the bytes
+// are returned as rem. If either the type or key cannot be read, ok will be false.
+func ReadHeaderBytes(src []byte) (header []byte, rem []byte, ok bool) {
+	if len(src) < 1 {
+		return nil, src, false
+	}
+	idx := bytes.IndexByte(src[1:], 0x00)
+	if idx == -1 {
+		return nil, src, false
+	}
+	// idx is relative to src[1:], so the header (type byte, key, and the key's
+	// trailing null byte) spans src[:idx+2].
+	return src[:idx+2], src[idx+2:], true
+}
+
+// ReadElement reads the next full element from src. It returns the element, the remaining bytes in
+// the slice, and a boolean indicating if the read was successful.
+func ReadElement(src []byte) (Element, []byte, bool) {
+	if len(src) < 1 {
+		return nil, src, false
+	}
+	t := bsontype.Type(src[0])
+	idx := bytes.IndexByte(src[1:], 0x00)
+	if idx == -1 {
+		return nil, src, false
+	}
+	length, ok := valueLength(src[idx+2:], t) // We add 2 here because we called IndexByte with src[1:]
+	if !ok {
+		return nil, src, false
+	}
+	elemLength := 1 + idx + 1 + int(length)
+	if elemLength > len(src) {
+		return nil, src, false
+	}
+	if elemLength < 0 {
+		return nil, src, false
+	}
+	return src[:elemLength], src[elemLength:], true
+}
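+
+// A sketch of walking a document's elements with ReadElement, assuming doc
+// holds valid BSON document bytes:
+//
+//	_, rem, _ := bsoncore.ReadLength(doc)
+//	for {
+//		elem, next, ok := bsoncore.ReadElement(rem)
+//		if !ok {
+//			break // trailing null byte or truncated input
+//		}
+//		fmt.Println(elem.Key(), elem.Value().String())
+//		rem = next
+//	}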
+
+// AppendValueElement appends value to dst as an element using key as the element's key.
+func AppendValueElement(dst []byte, key string, value Value) []byte {
+	dst = AppendHeader(dst, value.Type, key)
+	dst = append(dst, value.Data...)
+	return dst
+}
+
+// ReadValue reads the next value as the provided types and returns a Value, the remaining bytes,
+// and a boolean indicating if the read was successful.
+func ReadValue(src []byte, t bsontype.Type) (Value, []byte, bool) {
+	data, rem, ok := readValue(src, t)
+	if !ok {
+		return Value{}, src, false
+	}
+	return Value{Type: t, Data: data}, rem, true
+}
+
+// AppendDouble will append f to dst and return the extended buffer.
+func AppendDouble(dst []byte, f float64) []byte {
+	return appendu64(dst, math.Float64bits(f))
+}
+
+// AppendDoubleElement will append a BSON double element using key and f to dst
+// and return the extended buffer.
+func AppendDoubleElement(dst []byte, key string, f float64) []byte {
+	return AppendDouble(AppendHeader(dst, bsontype.Double, key), f)
+}
+
+// ReadDouble will read a float64 from src. If there are not enough bytes it
+// will return false.
+func ReadDouble(src []byte) (float64, []byte, bool) {
+	bits, src, ok := readu64(src)
+	if !ok {
+		return 0, src, false
+	}
+	return math.Float64frombits(bits), src, true
+}
+
+// AppendString will append s to dst and return the extended buffer.
+func AppendString(dst []byte, s string) []byte {
+	return appendstring(dst, s)
+}
+
+// AppendStringElement will append a BSON string element using key and val to dst
+// and return the extended buffer.
+func AppendStringElement(dst []byte, key, val string) []byte {
+	return AppendString(AppendHeader(dst, bsontype.String, key), val)
+}
+
+// ReadString will read a string from src. If there are not enough bytes it
+// will return false.
+func ReadString(src []byte) (string, []byte, bool) {
+	return readstring(src)
+}
+
+// AppendDocumentStart reserves a document's length and returns the index where the length begins.
+// This index can later be used to write the length of the document.
+func AppendDocumentStart(dst []byte) (index int32, b []byte) {
+	// TODO(skriptble): We really need AppendDocumentStart and AppendDocumentEnd.  AppendDocumentStart would handle calling
+	// TODO ReserveLength and providing the index of the start of the document. AppendDocumentEnd would handle taking that
+	// TODO start index, adding the null byte, calculating the length, and filling in the length at the start of the
+	// TODO document.
+	return ReserveLength(dst)
+}
+
+// AppendDocumentStartInline functions the same as AppendDocumentStart but takes a pointer to the
+// index int32 which allows this function to be used inline.
+func AppendDocumentStartInline(dst []byte, index *int32) []byte {
+	idx, doc := AppendDocumentStart(dst)
+	*index = idx
+	return doc
+}
+
+// AppendDocumentElementStart writes a document element header and then reserves the length bytes.
+func AppendDocumentElementStart(dst []byte, key string) (index int32, b []byte) {
+	return AppendDocumentStart(AppendHeader(dst, bsontype.EmbeddedDocument, key))
+}
+
+// AppendDocumentEnd writes the null byte for a document and updates the length of the document.
+// The index should be the beginning of the document's length bytes.
+func AppendDocumentEnd(dst []byte, index int32) ([]byte, error) {
+	if int(index) > len(dst)-4 {
+		return dst, fmt.Errorf("not enough bytes available after index to write length")
+	}
+	dst = append(dst, 0x00)
+	dst = UpdateLength(dst, index, int32(len(dst[index:])))
+	return dst, nil
+}
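+
+// The start/end pair implements a reserve-then-patch pattern. A minimal
+// sketch:
+//
+//	idx, doc := bsoncore.AppendDocumentStart(nil) // reserves 4 length bytes
+//	doc = bsoncore.AppendStringElement(doc, "hello", "world")
+//	doc, _ = bsoncore.AppendDocumentEnd(doc, idx) // appends null byte, patches length
+//	// doc now holds the complete bytes of {"hello": "world"}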
+
+// AppendDocument will append doc to dst and return the extended buffer.
+func AppendDocument(dst []byte, doc []byte) []byte { return append(dst, doc...) }
+
+// AppendDocumentElement will append a BSON embedded document element using key
+// and doc to dst and return the extended buffer.
+func AppendDocumentElement(dst []byte, key string, doc []byte) []byte {
+	return AppendDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), doc)
+}
+
+// BuildDocument will create a document with the given slice of elements and will append
+// it to dst and return the extended buffer.
+func BuildDocument(dst []byte, elems ...[]byte) []byte {
+	idx, dst := ReserveLength(dst)
+	for _, elem := range elems {
+		dst = append(dst, elem...)
+	}
+	dst = append(dst, 0x00)
+	dst = UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return dst
+}
+
+// BuildDocumentValue creates an Embedded Document value from the given elements.
+func BuildDocumentValue(elems ...[]byte) Value {
+	return Value{Type: bsontype.EmbeddedDocument, Data: BuildDocument(nil, elems...)}
+}
+
+// BuildDocumentElement will append a BSON embedded document element using key and the provided
+// elements and return the extended buffer.
+func BuildDocumentElement(dst []byte, key string, elems ...[]byte) []byte {
+	return BuildDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), elems...)
+}
+
+// BuildDocumentFromElements is an alias for the BuildDocument function.
+var BuildDocumentFromElements = BuildDocument
+
+// ReadDocument will read a document from src. If there are not enough bytes it
+// will return false.
+func ReadDocument(src []byte) (doc Document, rem []byte, ok bool) { return readLengthBytes(src) }
+
+// AppendArrayStart appends the length bytes to an array and then returns the index of the start
+// of those length bytes.
+func AppendArrayStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) }
+
+// AppendArrayElementStart appends an array element header and then the length bytes for an array,
+// returning the index where the length starts.
+func AppendArrayElementStart(dst []byte, key string) (index int32, b []byte) {
+	return AppendArrayStart(AppendHeader(dst, bsontype.Array, key))
+}
+
+// AppendArrayEnd appends the null byte to an array and calculates the length, inserting that
+// calculated length starting at index.
+func AppendArrayEnd(dst []byte, index int32) ([]byte, error) { return AppendDocumentEnd(dst, index) }
+
+// AppendArray will append arr to dst and return the extended buffer.
+func AppendArray(dst []byte, arr []byte) []byte { return append(dst, arr...) }
+
+// AppendArrayElement will append a BSON array element using key and arr to dst
+// and return the extended buffer.
+func AppendArrayElement(dst []byte, key string, arr []byte) []byte {
+	return AppendArray(AppendHeader(dst, bsontype.Array, key), arr)
+}
+
+// BuildArray will append a BSON array to dst built from values.
+func BuildArray(dst []byte, values ...Value) []byte {
+	idx, dst := ReserveLength(dst)
+	for pos, val := range values {
+		dst = AppendValueElement(dst, strconv.Itoa(pos), val)
+	}
+	dst = append(dst, 0x00)
+	dst = UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return dst
+}
+
+// BuildArrayElement will create an array element using the provided values.
+func BuildArrayElement(dst []byte, key string, values ...Value) []byte {
+	return BuildArray(AppendHeader(dst, bsontype.Array, key), values...)
+}
+
+// ReadArray will read an array from src. If there are not enough bytes it
+// will return false.
+func ReadArray(src []byte) (arr Array, rem []byte, ok bool) { return readLengthBytes(src) }
+
+// AppendBinary will append subtype and b to dst and return the extended buffer.
+func AppendBinary(dst []byte, subtype byte, b []byte) []byte {
+	if subtype == 0x02 {
+		return appendBinarySubtype2(dst, subtype, b)
+	}
+	dst = append(appendLength(dst, int32(len(b))), subtype)
+	return append(dst, b...)
+}
+
+// AppendBinaryElement will append a BSON binary element using key, subtype, and
+// b to dst and return the extended buffer.
+func AppendBinaryElement(dst []byte, key string, subtype byte, b []byte) []byte {
+	return AppendBinary(AppendHeader(dst, bsontype.Binary, key), subtype, b)
+}
+
+// ReadBinary will read a subtype and bin from src. If there are not enough bytes it
+// will return false.
+func ReadBinary(src []byte) (subtype byte, bin []byte, rem []byte, ok bool) {
+	length, rem, ok := ReadLength(src)
+	if !ok {
+		return 0x00, nil, src, false
+	}
+	if len(rem) < 1 { // subtype
+		return 0x00, nil, src, false
+	}
+	subtype, rem = rem[0], rem[1:]
+
+	if len(rem) < int(length) {
+		return 0x00, nil, src, false
+	}
+
+	if subtype == 0x02 {
+		length, rem, ok = ReadLength(rem)
+		if !ok || len(rem) < int(length) {
+			return 0x00, nil, src, false
+		}
+	}
+
+	return subtype, rem[:length], rem[length:], true
+}
+
+// AppendUndefinedElement will append a BSON undefined element using key to dst
+// and return the extended buffer.
+func AppendUndefinedElement(dst []byte, key string) []byte {
+	return AppendHeader(dst, bsontype.Undefined, key)
+}
+
+// AppendObjectID will append oid to dst and return the extended buffer.
+func AppendObjectID(dst []byte, oid primitive.ObjectID) []byte { return append(dst, oid[:]...) }
+
+// AppendObjectIDElement will append a BSON ObjectID element using key and oid to dst
+// and return the extended buffer.
+func AppendObjectIDElement(dst []byte, key string, oid primitive.ObjectID) []byte {
+	return AppendObjectID(AppendHeader(dst, bsontype.ObjectID, key), oid)
+}
+
+// ReadObjectID will read an ObjectID from src. If there are not enough bytes it
+// will return false.
+func ReadObjectID(src []byte) (primitive.ObjectID, []byte, bool) {
+	if len(src) < 12 {
+		return primitive.ObjectID{}, src, false
+	}
+	var oid primitive.ObjectID
+	copy(oid[:], src[0:12])
+	return oid, src[12:], true
+}
+
+// AppendBoolean will append b to dst and return the extended buffer.
+func AppendBoolean(dst []byte, b bool) []byte {
+	if b {
+		return append(dst, 0x01)
+	}
+	return append(dst, 0x00)
+}
+
+// AppendBooleanElement will append a BSON boolean element using key and b to dst
+// and return the extended buffer.
+func AppendBooleanElement(dst []byte, key string, b bool) []byte {
+	return AppendBoolean(AppendHeader(dst, bsontype.Boolean, key), b)
+}
+
+// ReadBoolean will read a bool from src. If there are not enough bytes it
+// will return false.
+func ReadBoolean(src []byte) (bool, []byte, bool) {
+	if len(src) < 1 {
+		return false, src, false
+	}
+
+	return src[0] == 0x01, src[1:], true
+}
+
+// AppendDateTime will append dt to dst and return the extended buffer.
+func AppendDateTime(dst []byte, dt int64) []byte { return appendi64(dst, dt) }
+
+// AppendDateTimeElement will append a BSON datetime element using key and dt to dst
+// and return the extended buffer.
+func AppendDateTimeElement(dst []byte, key string, dt int64) []byte {
+	return AppendDateTime(AppendHeader(dst, bsontype.DateTime, key), dt)
+}
+
+// ReadDateTime will read an int64 datetime from src. If there are not enough bytes it
+// will return false.
+func ReadDateTime(src []byte) (int64, []byte, bool) { return readi64(src) }
+
+// AppendTime will append time as a BSON DateTime to dst and return the extended buffer.
+func AppendTime(dst []byte, t time.Time) []byte {
+	return AppendDateTime(dst, t.Unix()*1000+int64(t.Nanosecond()/1e6))
+}
+
+// AppendTimeElement will append a BSON datetime element using key and dt to dst
+// and return the extended buffer.
+func AppendTimeElement(dst []byte, key string, t time.Time) []byte {
+	return AppendTime(AppendHeader(dst, bsontype.DateTime, key), t)
+}
+
+// ReadTime will read a time.Time datetime from src. If there are not enough bytes it
+// will return false.
+func ReadTime(src []byte) (time.Time, []byte, bool) {
+	dt, rem, ok := readi64(src)
+	return time.Unix(dt/1e3, dt%1e3*1e6), rem, ok
+}
+
+// AppendNullElement will append a BSON null element using key to dst
+// and return the extended buffer.
+func AppendNullElement(dst []byte, key string) []byte { return AppendHeader(dst, bsontype.Null, key) }
+
+// AppendRegex will append pattern and options to dst and return the extended buffer.
+func AppendRegex(dst []byte, pattern, options string) []byte {
+	if !isValidCString(pattern) || !isValidCString(options) {
+		panic(invalidRegexPanicMsg)
+	}
+
+	return append(dst, pattern+nullTerminator+options+nullTerminator...)
+}
+
+// AppendRegexElement will append a BSON regex element using key, pattern, and
+// options to dst and return the extended buffer.
+func AppendRegexElement(dst []byte, key, pattern, options string) []byte {
+	return AppendRegex(AppendHeader(dst, bsontype.Regex, key), pattern, options)
+}
+
+// ReadRegex will read a pattern and options from src. If there are not enough bytes it
+// will return false.
+func ReadRegex(src []byte) (pattern, options string, rem []byte, ok bool) {
+	pattern, rem, ok = readcstring(src)
+	if !ok {
+		return "", "", src, false
+	}
+	options, rem, ok = readcstring(rem)
+	if !ok {
+		return "", "", src, false
+	}
+	return pattern, options, rem, true
+}
+
+// AppendDBPointer will append ns and oid to dst and return the extended buffer.
+func AppendDBPointer(dst []byte, ns string, oid primitive.ObjectID) []byte {
+	return append(appendstring(dst, ns), oid[:]...)
+}
+
+// AppendDBPointerElement will append a BSON DBPointer element using key, ns,
+// and oid to dst and return the extended buffer.
+func AppendDBPointerElement(dst []byte, key, ns string, oid primitive.ObjectID) []byte {
+	return AppendDBPointer(AppendHeader(dst, bsontype.DBPointer, key), ns, oid)
+}
+
+// ReadDBPointer will read a ns and oid from src. If there are not enough bytes it
+// will return false.
+func ReadDBPointer(src []byte) (ns string, oid primitive.ObjectID, rem []byte, ok bool) {
+	ns, rem, ok = readstring(src)
+	if !ok {
+		return "", primitive.ObjectID{}, src, false
+	}
+	oid, rem, ok = ReadObjectID(rem)
+	if !ok {
+		return "", primitive.ObjectID{}, src, false
+	}
+	return ns, oid, rem, true
+}
+
+// AppendJavaScript will append js to dst and return the extended buffer.
+func AppendJavaScript(dst []byte, js string) []byte { return appendstring(dst, js) }
+
+// AppendJavaScriptElement will append a BSON JavaScript element using key and
+// js to dst and return the extended buffer.
+func AppendJavaScriptElement(dst []byte, key, js string) []byte {
+	return AppendJavaScript(AppendHeader(dst, bsontype.JavaScript, key), js)
+}
+
+// ReadJavaScript will read a js string from src. If there are not enough bytes it
+// will return false.
+func ReadJavaScript(src []byte) (js string, rem []byte, ok bool) { return readstring(src) }
+
+// AppendSymbol will append symbol to dst and return the extended buffer.
+func AppendSymbol(dst []byte, symbol string) []byte { return appendstring(dst, symbol) }
+
+// AppendSymbolElement will append a BSON symbol element using key and symbol to dst
+// and return the extended buffer.
+func AppendSymbolElement(dst []byte, key, symbol string) []byte {
+	return AppendSymbol(AppendHeader(dst, bsontype.Symbol, key), symbol)
+}
+
+// ReadSymbol will read a symbol string from src. If there are not enough bytes it
+// will return false.
+func ReadSymbol(src []byte) (symbol string, rem []byte, ok bool) { return readstring(src) }
+
+// AppendCodeWithScope will append code and scope to dst and return the extended buffer.
+func AppendCodeWithScope(dst []byte, code string, scope []byte) []byte {
+	length := int32(4 + 4 + len(code) + 1 + len(scope)) // length of cws, length of code, code, 0x00, scope
+	dst = appendLength(dst, length)
+
+	return append(appendstring(dst, code), scope...)
+}
+
+// AppendCodeWithScopeElement will append a BSON code with scope element using
+// key, code, and scope to dst
+// and return the extended buffer.
+func AppendCodeWithScopeElement(dst []byte, key, code string, scope []byte) []byte {
+	return AppendCodeWithScope(AppendHeader(dst, bsontype.CodeWithScope, key), code, scope)
+}
+
+// ReadCodeWithScope will read code and scope from src. If there are not enough bytes it
+// will return false.
+func ReadCodeWithScope(src []byte) (code string, scope []byte, rem []byte, ok bool) {
+	length, rem, ok := ReadLength(src)
+	if !ok || len(src) < int(length) {
+		return "", nil, src, false
+	}
+
+	code, rem, ok = readstring(rem)
+	if !ok {
+		return "", nil, src, false
+	}
+
+	scope, rem, ok = ReadDocument(rem)
+	if !ok {
+		return "", nil, src, false
+	}
+	return code, scope, rem, true
+}
+
+// AppendInt32 will append i32 to dst and return the extended buffer.
+func AppendInt32(dst []byte, i32 int32) []byte { return appendi32(dst, i32) }
+
+// AppendInt32Element will append a BSON int32 element using key and i32 to dst
+// and return the extended buffer.
+func AppendInt32Element(dst []byte, key string, i32 int32) []byte {
+	return AppendInt32(AppendHeader(dst, bsontype.Int32, key), i32)
+}
+
+// ReadInt32 will read an int32 from src. If there are not enough bytes it
+// will return false.
+func ReadInt32(src []byte) (int32, []byte, bool) { return readi32(src) }
+
+// AppendTimestamp will append t and i to dst and return the extended buffer.
+func AppendTimestamp(dst []byte, t, i uint32) []byte {
+	return appendu32(appendu32(dst, i), t) // i is the lower 4 bytes, t is the higher 4 bytes
+}
+
+// AppendTimestampElement will append a BSON timestamp element using key, t, and
+// i to dst and return the extended buffer.
+func AppendTimestampElement(dst []byte, key string, t, i uint32) []byte {
+	return AppendTimestamp(AppendHeader(dst, bsontype.Timestamp, key), t, i)
+}
+
+// ReadTimestamp will read t and i from src. If there are not enough bytes it
+// will return false.
+func ReadTimestamp(src []byte) (t, i uint32, rem []byte, ok bool) {
+	i, rem, ok = readu32(src)
+	if !ok {
+		return 0, 0, src, false
+	}
+	t, rem, ok = readu32(rem)
+	if !ok {
+		return 0, 0, src, false
+	}
+	return t, i, rem, true
+}
+
+// AppendInt64 will append i64 to dst and return the extended buffer.
+func AppendInt64(dst []byte, i64 int64) []byte { return appendi64(dst, i64) }
+
+// AppendInt64Element will append a BSON int64 element using key and i64 to dst
+// and return the extended buffer.
+func AppendInt64Element(dst []byte, key string, i64 int64) []byte {
+	return AppendInt64(AppendHeader(dst, bsontype.Int64, key), i64)
+}
+
+// ReadInt64 will read an int64 from src. If there are not enough bytes it
+// will return false.
+func ReadInt64(src []byte) (int64, []byte, bool) { return readi64(src) }
+
+// AppendDecimal128 will append d128 to dst and return the extended buffer.
+func AppendDecimal128(dst []byte, d128 primitive.Decimal128) []byte {
+	high, low := d128.GetBytes()
+	return appendu64(appendu64(dst, low), high)
+}
+
+// AppendDecimal128Element will append a BSON primitive.Decimal128 element using key and
+// d128 to dst and return the extended buffer.
+func AppendDecimal128Element(dst []byte, key string, d128 primitive.Decimal128) []byte {
+	return AppendDecimal128(AppendHeader(dst, bsontype.Decimal128, key), d128)
+}
+
+// ReadDecimal128 will read a primitive.Decimal128 from src. If there are not enough bytes it
+// will return false.
+func ReadDecimal128(src []byte) (primitive.Decimal128, []byte, bool) {
+	l, rem, ok := readu64(src)
+	if !ok {
+		return primitive.Decimal128{}, src, false
+	}
+
+	h, rem, ok := readu64(rem)
+	if !ok {
+		return primitive.Decimal128{}, src, false
+	}
+
+	return primitive.NewDecimal128(h, l), rem, true
+}
+
+// AppendMaxKeyElement will append a BSON max key element using key to dst
+// and return the extended buffer.
+func AppendMaxKeyElement(dst []byte, key string) []byte {
+	return AppendHeader(dst, bsontype.MaxKey, key)
+}
+
+// AppendMinKeyElement will append a BSON min key element using key to dst
+// and return the extended buffer.
+func AppendMinKeyElement(dst []byte, key string) []byte {
+	return AppendHeader(dst, bsontype.MinKey, key)
+}
+
+// EqualValue will return true if the two values are equal.
+func EqualValue(t1, t2 bsontype.Type, v1, v2 []byte) bool {
+	if t1 != t2 {
+		return false
+	}
+	v1, _, ok := readValue(v1, t1)
+	if !ok {
+		return false
+	}
+	v2, _, ok = readValue(v2, t2)
+	if !ok {
+		return false
+	}
+	return bytes.Equal(v1, v2)
+}
+
+// valueLength will determine the length of the next value contained in src as if it
+// is type t. The returned bool will be false if there are not enough bytes in src for
+// a value of type t.
+func valueLength(src []byte, t bsontype.Type) (int32, bool) {
+	var length int32
+	ok := true
+	switch t {
+	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+		length, _, ok = ReadLength(src)
+	case bsontype.Binary:
+		length, _, ok = ReadLength(src)
+		length += 4 + 1 // binary length + subtype byte
+	case bsontype.Boolean:
+		length = 1
+	case bsontype.DBPointer:
+		length, _, ok = ReadLength(src)
+		length += 4 + 12 // string length + ObjectID length
+	case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
+		length = 8
+	case bsontype.Decimal128:
+		length = 16
+	case bsontype.Int32:
+		length = 4
+	case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
+		length, _, ok = ReadLength(src)
+		length += 4
+	case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
+		length = 0
+	case bsontype.ObjectID:
+		length = 12
+	case bsontype.Regex:
+		regex := bytes.IndexByte(src, 0x00)
+		if regex < 0 {
+			ok = false
+			break
+		}
+		pattern := bytes.IndexByte(src[regex+1:], 0x00)
+		if pattern < 0 {
+			ok = false
+			break
+		}
+		length = int32(int64(regex) + 1 + int64(pattern) + 1)
+	default:
+		ok = false
+	}
+
+	return length, ok
+}
+
+func readValue(src []byte, t bsontype.Type) ([]byte, []byte, bool) {
+	length, ok := valueLength(src, t)
+	if !ok || int(length) > len(src) {
+		return nil, src, false
+	}
+
+	return src[:length], src[length:], true
+}
+
+// ReserveLength reserves the space required for the length and returns the
+// index at which to write the length and the []byte with reserved space.
+func ReserveLength(dst []byte) (int32, []byte) {
+	index := len(dst)
+	return int32(index), append(dst, 0x00, 0x00, 0x00, 0x00)
+}
+
+// UpdateLength updates the length at index with length and returns the []byte.
+func UpdateLength(dst []byte, index, length int32) []byte {
+	binary.LittleEndian.PutUint32(dst[index:], uint32(length))
+	return dst
+}
+
+func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) }
+
+func appendi32(dst []byte, i32 int32) []byte {
+	b := []byte{0, 0, 0, 0}
+	binary.LittleEndian.PutUint32(b, uint32(i32))
+	return append(dst, b...)
+}
+
+// ReadLength reads an int32 length from src and returns the length and the remaining bytes. If
+// there aren't enough bytes to read a valid length, src is returned unmodified and the returned
+// bool will be false.
+func ReadLength(src []byte) (int32, []byte, bool) {
+	ln, src, ok := readi32(src)
+	if ln < 0 {
+		return ln, src, false
+	}
+	return ln, src, ok
+}
+
+func readi32(src []byte) (int32, []byte, bool) {
+	if len(src) < 4 {
+		return 0, src, false
+	}
+	return int32(binary.LittleEndian.Uint32(src)), src[4:], true
+}
+
+func appendi64(dst []byte, i64 int64) []byte {
+	b := []byte{0, 0, 0, 0, 0, 0, 0, 0}
+	binary.LittleEndian.PutUint64(b, uint64(i64))
+	return append(dst, b...)
+}
+
+func readi64(src []byte) (int64, []byte, bool) {
+	if len(src) < 8 {
+		return 0, src, false
+	}
+	return int64(binary.LittleEndian.Uint64(src)), src[8:], true
+}
+
+func appendu32(dst []byte, u32 uint32) []byte {
+	b := []byte{0, 0, 0, 0}
+	binary.LittleEndian.PutUint32(b, u32)
+	return append(dst, b...)
+}
+
+func readu32(src []byte) (uint32, []byte, bool) {
+	if len(src) < 4 {
+		return 0, src, false
+	}
+
+	return binary.LittleEndian.Uint32(src), src[4:], true
+}
+
+func appendu64(dst []byte, u64 uint64) []byte {
+	b := []byte{0, 0, 0, 0, 0, 0, 0, 0}
+	binary.LittleEndian.PutUint64(b, u64)
+	return append(dst, b...)
+}
+
+func readu64(src []byte) (uint64, []byte, bool) {
+	if len(src) < 8 {
+		return 0, src, false
+	}
+	return binary.LittleEndian.Uint64(src), src[8:], true
+}
+
+// keep in sync with readcstringbytes
+func readcstring(src []byte) (string, []byte, bool) {
+	idx := bytes.IndexByte(src, 0x00)
+	if idx < 0 {
+		return "", src, false
+	}
+	return string(src[:idx]), src[idx+1:], true
+}
+
+// keep in sync with readcstring
+func readcstringbytes(src []byte) ([]byte, []byte, bool) {
+	idx := bytes.IndexByte(src, 0x00)
+	if idx < 0 {
+		return nil, src, false
+	}
+	return src[:idx], src[idx+1:], true
+}
+
+func appendstring(dst []byte, s string) []byte {
+	l := int32(len(s) + 1)
+	dst = appendLength(dst, l)
+	dst = append(dst, s...)
+	return append(dst, 0x00)
+}
+
+func readstring(src []byte) (string, []byte, bool) {
+	l, rem, ok := ReadLength(src)
+	if !ok {
+		return "", src, false
+	}
+	if len(src[4:]) < int(l) || l == 0 {
+		return "", src, false
+	}
+
+	return string(rem[:l-1]), rem[l:], true
+}
+
+// readLengthBytes attempts to read a length and that number of bytes. This
+// function requires that the length include the four bytes for itself.
+func readLengthBytes(src []byte) ([]byte, []byte, bool) {
+	l, _, ok := ReadLength(src)
+	if !ok {
+		return nil, src, false
+	}
+	if l < 4 {
+		return nil, src, false
+	}
+	if len(src) < int(l) {
+		return nil, src, false
+	}
+	return src[:l], src[l:], true
+}
+
+func appendBinarySubtype2(dst []byte, subtype byte, b []byte) []byte {
+	dst = appendLength(dst, int32(len(b)+4)) // The bytes we'll encode need to be 4 larger for the length bytes
+	dst = append(dst, subtype)
+	dst = appendLength(dst, int32(len(b)))
+	return append(dst, b...)
+}
+
+func isValidCString(cs string) bool {
+	return !strings.ContainsRune(cs, '\x00')
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..f68e1da1a09d999b2b82ce2f5bedfad7618961ee
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go
@@ -0,0 +1,34 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsoncore is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+//
+// Package bsoncore contains functions that can be used to encode and decode
+// BSON elements and values to or from a slice of bytes. These functions are
+// aimed at allowing low level manipulation of BSON and can be used to build a
+// higher level BSON library.
+//
+// The Read* functions within this package return the values of the element and
+// a boolean indicating if the values are valid. A boolean was used instead of
+// an error because any error that would be returned would be the same: not
+// enough bytes. This library attempts to do no validation; it will only return
+// false if there are not enough bytes for an item to be read. For example, the
+// ReadDocument function checks the length: if that length is larger than the
+// number of bytes available, it returns false; if there are enough bytes, it
+// returns those bytes and true. It is the consumer's responsibility to
+// validate those bytes.
+//
+// The Append* functions within this package will append the type value to the
+// given dst slice. If the slice has enough capacity, it will not grow the
+// slice. The Append*Element functions within this package operate in the same
+// way, but additionally append the BSON type and the key before the value.
+package bsoncore
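+
+// For illustration only: a sketch of the Read*/Append* convention described above, assuming the
+// BuildDocument and AppendInt32Element helpers defined elsewhere in this package:
+//
+//	doc := BuildDocument(nil, AppendInt32Element(nil, "x", 1))
+//	d, rem, ok := ReadDocument(doc) // ok is false only on insufficient bytes
+//	_, _, _ = d, rem, ok            // no further validation is performed here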
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f360f1ae158be33c49756ecc3379f3833f5c6ae
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
@@ -0,0 +1,386 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ValidationError is an error type returned when attempting to validate a document or array.
+type ValidationError string
+
+func (ve ValidationError) Error() string { return string(ve) }
+
+// NewDocumentLengthError creates and returns an error for when the length of a document exceeds the
+// bytes available.
+func NewDocumentLengthError(length, rem int) error {
+	return lengthError("document", length, rem)
+}
+
+func lengthError(bufferType string, length, rem int) error {
+	return ValidationError(fmt.Sprintf("%v length exceeds available bytes. length=%d remainingBytes=%d",
+		bufferType, length, rem))
+}
+
+// InsufficientBytesError indicates that there were not enough bytes to read the next component.
+type InsufficientBytesError struct {
+	Source    []byte
+	Remaining []byte
+}
+
+// NewInsufficientBytesError creates a new InsufficientBytesError with the given Document and
+// remaining bytes.
+func NewInsufficientBytesError(src, rem []byte) InsufficientBytesError {
+	return InsufficientBytesError{Source: src, Remaining: rem}
+}
+
+// Error implements the error interface.
+func (ibe InsufficientBytesError) Error() string {
+	return "too few bytes to read next component"
+}
+
+// Equal checks that err2 is also an InsufficientBytesError.
+func (ibe InsufficientBytesError) Equal(err2 error) bool {
+	switch err2.(type) {
+	case InsufficientBytesError:
+		return true
+	default:
+		return false
+	}
+}
+
+// InvalidDepthTraversalError is returned when attempting a recursive Lookup when one component of
+// the path is neither an embedded document nor an array.
+type InvalidDepthTraversalError struct {
+	Key  string
+	Type bsontype.Type
+}
+
+func (idte InvalidDepthTraversalError) Error() string {
+	return fmt.Sprintf(
+		"attempt to traverse into %s, but its type is %s, not %s nor %s",
+		idte.Key, idte.Type, bsontype.EmbeddedDocument, bsontype.Array,
+	)
+}
+
+// ErrMissingNull is returned when a document or array's last byte is not null.
+const ErrMissingNull ValidationError = "document or array end is missing null byte"
+
+// ErrInvalidLength indicates that a length in a binary representation of a BSON document or array
+// is invalid.
+const ErrInvalidLength ValidationError = "document or array length is invalid"
+
+// ErrNilReader indicates that an operation was attempted on a nil io.Reader.
+var ErrNilReader = errors.New("nil reader")
+
+// ErrEmptyKey indicates that no key was provided to a Lookup method.
+var ErrEmptyKey = errors.New("empty key provided")
+
+// ErrElementNotFound indicates that an Element matching a certain condition does not exist.
+var ErrElementNotFound = errors.New("element not found")
+
+// ErrOutOfBounds indicates that an index provided to access something was invalid.
+var ErrOutOfBounds = errors.New("out of bounds")
+
+// Document is a raw bytes representation of a BSON document.
+type Document []byte
+
+// NewDocumentFromReader reads a document from r. This function will only validate the length is
+// correct and that the document ends with a null byte.
+func NewDocumentFromReader(r io.Reader) (Document, error) {
+	return newBufferFromReader(r)
+}
+
+func newBufferFromReader(r io.Reader) ([]byte, error) {
+	if r == nil {
+		return nil, ErrNilReader
+	}
+
+	var lengthBytes [4]byte
+
+	// ReadFull guarantees that we will have read at least len(lengthBytes) if err == nil
+	_, err := io.ReadFull(r, lengthBytes[:])
+	if err != nil {
+		return nil, err
+	}
+
+	length, _, _ := readi32(lengthBytes[:]) // ignore ok since we always have enough bytes to read a length
+	if length < 0 {
+		return nil, ErrInvalidLength
+	}
+	buffer := make([]byte, length)
+
+	copy(buffer, lengthBytes[:])
+
+	_, err = io.ReadFull(r, buffer[4:])
+	if err != nil {
+		return nil, err
+	}
+
+	if buffer[length-1] != 0x00 {
+		return nil, ErrMissingNull
+	}
+
+	return buffer, nil
+}
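+
+// For illustration only: a sketch of reading a document from any io.Reader, where raw is an
+// assumed []byte holding a marshaled BSON document:
+//
+//	doc, err := NewDocumentFromReader(bytes.NewReader(raw))
+//	if err != nil {
+//		// io.ErrUnexpectedEOF, ErrInvalidLength or ErrMissingNull, among others
+//	}
+//	_ = doc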
+
+// Lookup searches the document, potentially recursively, for the given key. If there are multiple
+// keys provided, this method will recurse down, as long as the top and intermediate nodes are
+// either documents or arrays. If an error occurs or if the value doesn't exist, an empty Value is
+// returned.
+func (d Document) Lookup(key ...string) Value {
+	val, _ := d.LookupErr(key...)
+	return val
+}
+
+// LookupErr is the same as Lookup, except it returns an error in addition to an empty Value.
+func (d Document) LookupErr(key ...string) (Value, error) {
+	if len(key) < 1 {
+		return Value{}, ErrEmptyKey
+	}
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return Value{}, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4
+
+	var elem Element
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return Value{}, NewInsufficientBytesError(d, rem)
+		}
+		// We use `KeyBytes` rather than `Key` to avoid a needless string alloc.
+		if string(elem.KeyBytes()) != key[0] {
+			continue
+		}
+		if len(key) > 1 {
+			tt := bsontype.Type(elem[0])
+			switch tt {
+			case bsontype.EmbeddedDocument:
+				val, err := elem.Value().Document().LookupErr(key[1:]...)
+				if err != nil {
+					return Value{}, err
+				}
+				return val, nil
+			case bsontype.Array:
+				// Convert to Document to continue Lookup recursion.
+				val, err := Document(elem.Value().Array()).LookupErr(key[1:]...)
+				if err != nil {
+					return Value{}, err
+				}
+				return val, nil
+			default:
+				return Value{}, InvalidDepthTraversalError{Key: elem.Key(), Type: tt}
+			}
+		}
+		return elem.ValueErr()
+	}
+	return Value{}, ErrElementNotFound
+}
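+
+// For illustration only: a sketch of a recursive lookup, assuming the BuildDocument,
+// AppendInt32Element and AppendDocumentElement helpers defined elsewhere in this package:
+//
+//	inner := BuildDocument(nil, AppendInt32Element(nil, "b", 1))
+//	doc := Document(BuildDocument(nil, AppendDocumentElement(nil, "a", inner)))
+//	val, err := doc.LookupErr("a", "b") // val.Int32() == 1 when err == nil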
+
+// Index searches for and retrieves the element at the given index. This method will panic if
+// the document is invalid or if the index is out of bounds.
+func (d Document) Index(index uint) Element {
+	elem, err := d.IndexErr(index)
+	if err != nil {
+		panic(err)
+	}
+	return elem
+}
+
+// IndexErr searches for and retrieves the element at the given index.
+func (d Document) IndexErr(index uint) (Element, error) {
+	return indexErr(d, index)
+}
+
+func indexErr(b []byte, index uint) (Element, error) {
+	length, rem, ok := ReadLength(b)
+	if !ok {
+		return nil, NewInsufficientBytesError(b, rem)
+	}
+
+	length -= 4
+
+	var current uint
+	var elem Element
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return nil, NewInsufficientBytesError(b, rem)
+		}
+		if current != index {
+			current++
+			continue
+		}
+		return elem, nil
+	}
+	return nil, ErrOutOfBounds
+}
+
+// DebugString outputs a human readable version of Document. It will attempt to stringify the
+// valid components of the document even if the entire document is not valid.
+func (d Document) DebugString() string {
+	if len(d) < 5 {
+		return "<malformed>"
+	}
+	var buf strings.Builder
+	buf.WriteString("Document")
+	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
+	buf.WriteByte('(')
+	buf.WriteString(strconv.Itoa(int(length)))
+	length -= 4
+	buf.WriteString("){")
+	var elem Element
+	var ok bool
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
+			break
+		}
+		buf.WriteString(elem.DebugString())
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+// String outputs an ExtendedJSON version of Document. If the document is not valid, this method
+// returns an empty string.
+func (d Document) String() string {
+	if len(d) < 5 {
+		return ""
+	}
+	var buf strings.Builder
+	buf.WriteByte('{')
+
+	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
+
+	length -= 4
+
+	var elem Element
+	var ok bool
+	first := true
+	for length > 1 {
+		if !first {
+			buf.WriteByte(',')
+		}
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return ""
+		}
+		buf.WriteString(elem.String())
+		first = false
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+// Elements returns this document as a slice of elements. The returned slice will contain valid
+// elements. If the document is not valid, the elements up to the invalid point will be returned
+// along with an error.
+func (d Document) Elements() ([]Element, error) {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return nil, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4
+
+	var elem Element
+	var elems []Element
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return elems, NewInsufficientBytesError(d, rem)
+		}
+		if err := elem.Validate(); err != nil {
+			return elems, err
+		}
+		elems = append(elems, elem)
+	}
+	return elems, nil
+}
+
+// Values returns this document as a slice of values. The returned slice will contain valid values.
+// If the document is not valid, the values up to the invalid point will be returned along with an
+// error.
+func (d Document) Values() ([]Value, error) {
+	return values(d)
+}
+
+func values(b []byte) ([]Value, error) {
+	length, rem, ok := ReadLength(b)
+	if !ok {
+		return nil, NewInsufficientBytesError(b, rem)
+	}
+
+	length -= 4
+
+	var elem Element
+	var vals []Value
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return vals, NewInsufficientBytesError(b, rem)
+		}
+		if err := elem.Value().Validate(); err != nil {
+			return vals, err
+		}
+		vals = append(vals, elem.Value())
+	}
+	return vals, nil
+}
+
+// Validate validates the document and ensures the elements contained within are valid.
+func (d Document) Validate() error {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return NewInsufficientBytesError(d, rem)
+	}
+	if int(length) > len(d) {
+		return NewDocumentLengthError(int(length), len(d))
+	}
+	if d[length-1] != 0x00 {
+		return ErrMissingNull
+	}
+
+	length -= 4
+	var elem Element
+
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return NewInsufficientBytesError(d, rem)
+		}
+		err := elem.Validate()
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(rem) < 1 || rem[0] != 0x00 {
+		return ErrMissingNull
+	}
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go
new file mode 100644
index 0000000000000000000000000000000000000000..e35bd0cd9ad3eb6e713f0bff0b0259731e5c6ca6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go
@@ -0,0 +1,189 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"errors"
+	"io"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// DocumentSequenceStyle is used to represent how a document sequence is laid out in a slice of
+// bytes.
+type DocumentSequenceStyle uint32
+
+// These constants are the valid styles for a DocumentSequence.
+const (
+	_ DocumentSequenceStyle = iota
+	SequenceStyle
+	ArrayStyle
+)
+
+// DocumentSequence represents a sequence of documents. The Style field indicates how the documents
+// are laid out inside of the Data field.
+type DocumentSequence struct {
+	Style DocumentSequenceStyle
+	Data  []byte
+	Pos   int
+}
+
+// ErrCorruptedDocument is returned when a full document couldn't be read from the sequence.
+var ErrCorruptedDocument = errors.New("invalid DocumentSequence: corrupted document")
+
+// ErrNonDocument is returned when a DocumentSequence contains a non-document BSON value.
+var ErrNonDocument = errors.New("invalid DocumentSequence: a non-document value was found in sequence")
+
+// ErrInvalidDocumentSequenceStyle is returned when an unknown DocumentSequenceStyle is set on a
+// DocumentSequence.
+var ErrInvalidDocumentSequenceStyle = errors.New("invalid DocumentSequenceStyle")
+
+// DocumentCount returns the number of documents in the sequence.
+func (ds *DocumentSequence) DocumentCount() int {
+	if ds == nil {
+		return 0
+	}
+	switch ds.Style {
+	case SequenceStyle:
+		var count int
+		var ok bool
+		rem := ds.Data
+		for len(rem) > 0 {
+			_, rem, ok = ReadDocument(rem)
+			if !ok {
+				return 0
+			}
+			count++
+		}
+		return count
+	case ArrayStyle:
+		_, rem, ok := ReadLength(ds.Data)
+		if !ok {
+			return 0
+		}
+
+		var count int
+		for len(rem) > 1 {
+			_, rem, ok = ReadElement(rem)
+			if !ok {
+				return 0
+			}
+			count++
+		}
+		return count
+	default:
+		return 0
+	}
+}
+
+// Empty returns true if the sequence is empty. It always returns true for unknown sequence styles.
+func (ds *DocumentSequence) Empty() bool {
+	if ds == nil {
+		return true
+	}
+
+	switch ds.Style {
+	case SequenceStyle:
+		return len(ds.Data) == 0
+	case ArrayStyle:
+		return len(ds.Data) <= 5
+	default:
+		return true
+	}
+}
+
+// ResetIterator resets the iteration point for the Next method to the beginning of the document
+// sequence.
+func (ds *DocumentSequence) ResetIterator() {
+	if ds == nil {
+		return
+	}
+	ds.Pos = 0
+}
+
+// Documents returns a slice of the documents. If the returned slice is nil, either the Data field
+// is also nil or the documents could not be properly read.
+func (ds *DocumentSequence) Documents() ([]Document, error) {
+	if ds == nil {
+		return nil, nil
+	}
+	switch ds.Style {
+	case SequenceStyle:
+		rem := ds.Data
+		var docs []Document
+		var doc Document
+		var ok bool
+		for {
+			doc, rem, ok = ReadDocument(rem)
+			if !ok {
+				if len(rem) == 0 {
+					break
+				}
+				return nil, ErrCorruptedDocument
+			}
+			docs = append(docs, doc)
+		}
+		return docs, nil
+	case ArrayStyle:
+		if len(ds.Data) == 0 {
+			return nil, nil
+		}
+		vals, err := Document(ds.Data).Values()
+		if err != nil {
+			return nil, ErrCorruptedDocument
+		}
+		docs := make([]Document, 0, len(vals))
+		for _, v := range vals {
+			if v.Type != bsontype.EmbeddedDocument {
+				return nil, ErrNonDocument
+			}
+			docs = append(docs, v.Data)
+		}
+		return docs, nil
+	default:
+		return nil, ErrInvalidDocumentSequenceStyle
+	}
+}
+
+// Next retrieves the next document from this sequence and returns it. This method will return
+// io.EOF when it has reached the end of the sequence.
+func (ds *DocumentSequence) Next() (Document, error) {
+	if ds == nil || ds.Pos >= len(ds.Data) {
+		return nil, io.EOF
+	}
+	switch ds.Style {
+	case SequenceStyle:
+		doc, _, ok := ReadDocument(ds.Data[ds.Pos:])
+		if !ok {
+			return nil, ErrCorruptedDocument
+		}
+		ds.Pos += len(doc)
+		return doc, nil
+	case ArrayStyle:
+		if ds.Pos < 4 {
+			if len(ds.Data) < 4 {
+				return nil, ErrCorruptedDocument
+			}
+			ds.Pos = 4 // Skip the length of the document
+		}
+		if len(ds.Data[ds.Pos:]) == 1 && ds.Data[ds.Pos] == 0x00 {
+			return nil, io.EOF // At the end of the document
+		}
+		elem, _, ok := ReadElement(ds.Data[ds.Pos:])
+		if !ok {
+			return nil, ErrCorruptedDocument
+		}
+		ds.Pos += len(elem)
+		val := elem.Value()
+		if val.Type != bsontype.EmbeddedDocument {
+			return nil, ErrNonDocument
+		}
+		return val.Data, nil
+	default:
+		return nil, ErrInvalidDocumentSequenceStyle
+	}
+}
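+
+// For illustration only: a sketch of draining a SequenceStyle sequence, where raw is an assumed
+// []byte containing back-to-back BSON documents:
+//
+//	ds := &DocumentSequence{Style: SequenceStyle, Data: raw}
+//	for {
+//		doc, err := ds.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err // ErrCorruptedDocument, ErrNonDocument, ...
+//		}
+//		_ = doc
+//	}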
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c1ab2ae964c7a030cff38c82e797791aca14f83
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
@@ -0,0 +1,152 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// MalformedElementError represents a class of errors that Element methods return.
+type MalformedElementError string
+
+func (mee MalformedElementError) Error() string { return string(mee) }
+
+// ErrElementMissingKey is returned when an Element is missing a key.
+const ErrElementMissingKey MalformedElementError = "element is missing key"
+
+// ErrElementMissingType is returned when an Element is missing a type.
+const ErrElementMissingType MalformedElementError = "element is missing type"
+
+// Element is a raw bytes representation of a BSON element.
+type Element []byte
+
+// Key returns the key for this element. If the element is not valid, this method returns an empty
+// string. If you need to know whether the element is valid, use KeyErr.
+func (e Element) Key() string {
+	key, _ := e.KeyErr()
+	return key
+}
+
+// KeyBytes returns the key for this element as a []byte. If the element is not valid, this method
+// returns nil. If you need to know whether the element is valid, use KeyBytesErr. This method
+// will not include the null byte at the end of the key in the slice of bytes.
+func (e Element) KeyBytes() []byte {
+	key, _ := e.KeyBytesErr()
+	return key
+}
+
+// KeyErr returns the key for this element, returning an error if the element is not valid.
+func (e Element) KeyErr() (string, error) {
+	key, err := e.KeyBytesErr()
+	return string(key), err
+}
+
+// KeyBytesErr returns the key for this element as a []byte, returning an error if the element is
+// not valid.
+func (e Element) KeyBytesErr() ([]byte, error) {
+	if len(e) == 0 {
+		return nil, ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return nil, ErrElementMissingKey
+	}
+	return e[1 : idx+1], nil
+}
+
+// Validate ensures the element is a valid BSON element.
+func (e Element) Validate() error {
+	if len(e) < 1 {
+		return ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return ErrElementMissingKey
+	}
+	return Value{Type: bsontype.Type(e[0]), Data: e[idx+2:]}.Validate()
+}
+
+// CompareKey will compare this element's key to key. This method makes it easy to compare keys
+// without needing to allocate a string. The key may be null terminated. If a valid key cannot be
+// read this method will return false.
+func (e Element) CompareKey(key []byte) bool {
+	if len(e) < 2 {
+		return false
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return false
+	}
+	if index := bytes.IndexByte(key, 0x00); index > -1 {
+		key = key[:index]
+	}
+	return bytes.Equal(e[1:idx+1], key)
+}
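+
+// For illustration only: CompareKey lets a caller match keys without the string allocation that a
+// comparison like elem.Key() == "_id" would incur:
+//
+//	if elem.CompareKey([]byte("_id")) {
+//		// handle the _id element
+//	}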
+
+// Value returns the value of this element. If the element is not valid, this method returns an
+// empty Value. If you need to know whether the element is valid, use ValueErr.
+func (e Element) Value() Value {
+	val, _ := e.ValueErr()
+	return val
+}
+
+// ValueErr returns the value for this element, returning an error if the element is not valid.
+func (e Element) ValueErr() (Value, error) {
+	if len(e) == 0 {
+		return Value{}, ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return Value{}, ErrElementMissingKey
+	}
+
+	val, rem, exists := ReadValue(e[idx+2:], bsontype.Type(e[0]))
+	if !exists {
+		return Value{}, NewInsufficientBytesError(e, rem)
+	}
+	return val, nil
+}
+
+// String implements the fmt.Stringer interface. The output will be in extended JSON format.
+func (e Element) String() string {
+	if len(e) == 0 {
+		return ""
+	}
+	t := bsontype.Type(e[0])
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return ""
+	}
+	key, valBytes := []byte(e[1:idx+1]), []byte(e[idx+2:])
+	val, _, valid := ReadValue(valBytes, t)
+	if !valid {
+		return ""
+	}
+	return "\"" + string(key) + "\": " + val.String()
+}
+
+// DebugString outputs a human readable version of Element. It will attempt to stringify the
+// valid components of the element even if the entire element is not valid.
+func (e Element) DebugString() string {
+	if len(e) == 0 {
+		return "<malformed>"
+	}
+	t := bsontype.Type(e[0])
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return fmt.Sprintf(`bson.Element{[%s]<malformed>}`, t)
+	}
+	key, valBytes := []byte(e[1:idx+1]), []byte(e[idx+2:])
+	val, _, valid := ReadValue(valBytes, t)
+	if !valid {
+		return fmt.Sprintf(`bson.Element{[%s]"%s": <malformed>}`, t, key)
+	}
+	return fmt.Sprintf(`bson.Element{[%s]"%s": %v}`, t, key, val)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go
new file mode 100644
index 0000000000000000000000000000000000000000..9fd903fd2b6779207912fb2cc7df4e4d320e000f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsoncore
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
new file mode 100644
index 0000000000000000000000000000000000000000..fcb0428bbd30a3fd14f0d72b2712f9b3db041925
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
@@ -0,0 +1,964 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ElementTypeError specifies that a method to obtain a BSON value was called on a bson.Value of an incorrect type.
+type ElementTypeError struct {
+	Method string
+	Type   bsontype.Type
+}
+
+// Error implements the error interface.
+func (ete ElementTypeError) Error() string {
+	return "Call of " + ete.Method + " on " + ete.Type.String() + " type"
+}
+
+// Value represents a BSON value with a type and raw bytes.
+type Value struct {
+	Type bsontype.Type
+	Data []byte
+}
+
+// Validate ensures the value is a valid BSON value.
+func (v Value) Validate() error {
+	_, _, valid := readValue(v.Data, v.Type)
+	if !valid {
+		return NewInsufficientBytesError(v.Data, v.Data)
+	}
+	return nil
+}
+
+// IsNumber returns true if the type of v is a numeric BSON type.
+func (v Value) IsNumber() bool {
+	switch v.Type {
+	case bsontype.Double, bsontype.Int32, bsontype.Int64, bsontype.Decimal128:
+		return true
+	default:
+		return false
+	}
+}
+
+// AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
+// will panic.
+func (v Value) AsInt32() int32 {
+	if !v.IsNumber() {
+		panic(ElementTypeError{"bsoncore.Value.AsInt32", v.Type})
+	}
+	var i32 int32
+	switch v.Type {
+	case bsontype.Double:
+		f64, _, ok := ReadDouble(v.Data)
+		if !ok {
+			panic(NewInsufficientBytesError(v.Data, v.Data))
+		}
+		i32 = int32(f64)
+	case bsontype.Int32:
+		var ok bool
+		i32, _, ok = ReadInt32(v.Data)
+		if !ok {
+			panic(NewInsufficientBytesError(v.Data, v.Data))
+		}
+	case bsontype.Int64:
+		i64, _, ok := ReadInt64(v.Data)
+		if !ok {
+			panic(NewInsufficientBytesError(v.Data, v.Data))
+		}
+		i32 = int32(i64)
+	case bsontype.Decimal128:
+		panic(ElementTypeError{"bsoncore.Value.AsInt32", v.Type})
+	}
+	return i32
+}
+
+// AsInt32OK functions the same as AsInt32 but returns a boolean instead of panicking. False
+// indicates an error.
+func (v Value) AsInt32OK() (int32, bool) {
+	if !v.IsNumber() {
+		return 0, false
+	}
+	var i32 int32
+	switch v.Type {
+	case bsontype.Double:
+		f64, _, ok := ReadDouble(v.Data)
+		if !ok {
+			return 0, false
+		}
+		i32 = int32(f64)
+	case bsontype.Int32:
+		var ok bool
+		i32, _, ok = ReadInt32(v.Data)
+		if !ok {
+			return 0, false
+		}
+	case bsontype.Int64:
+		i64, _, ok := ReadInt64(v.Data)
+		if !ok {
+			return 0, false
+		}
+		i32 = int32(i64)
+	case bsontype.Decimal128:
+		return 0, false
+	}
+	return i32, true
+}
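+
+// For illustration only: the OK variant is the non-panicking counterpart, so callers that cannot
+// guarantee a numeric type should prefer it:
+//
+//	if i32, ok := val.AsInt32OK(); ok {
+//		// val held a Double, Int32 or Int64
+//	}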
+
+// AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method
+// will panic.
+func (v Value) AsInt64() int64 {
+	if !v.IsNumber() {
+		panic(ElementTypeError{"bsoncore.Value.AsInt64", v.Type})
+	}
+	var i64 int64
+	switch v.Type {
+	case bsontype.Double:
+		f64, _, ok := ReadDouble(v.Data)
+		if !ok {
+			panic(NewInsufficientBytesError(v.Data, v.Data))
+		}
+		i64 = int64(f64)
+	case bsontype.Int32:
+		var ok bool
+		i32, _, ok := ReadInt32(v.Data)
+		if !ok {
+			panic(NewInsufficientBytesError(v.Data, v.Data))
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		var ok bool
+		i64, _, ok = ReadInt64(v.Data)
+		if !ok {
+			panic(NewInsufficientBytesError(v.Data, v.Data))
+		}
+	case bsontype.Decimal128:
+		panic(ElementTypeError{"bsoncore.Value.AsInt64", v.Type})
+	}
+	return i64
+}
+
+// AsInt64OK functions the same as AsInt64 but returns a boolean instead of panicking. False
+// indicates an error.
+func (v Value) AsInt64OK() (int64, bool) {
+	if !v.IsNumber() {
+		return 0, false
+	}
+	var i64 int64
+	switch v.Type {
+	case bsontype.Double:
+		f64, _, ok := ReadDouble(v.Data)
+		if !ok {
+			return 0, false
+		}
+		i64 = int64(f64)
+	case bsontype.Int32:
+		var ok bool
+		i32, _, ok := ReadInt32(v.Data)
+		if !ok {
+			return 0, false
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		var ok bool
+		i64, _, ok = ReadInt64(v.Data)
+		if !ok {
+			return 0, false
+		}
+	case bsontype.Decimal128:
+		return 0, false
+	}
+	return i64, true
+}
+
+// AsFloat64 returns a BSON number as a float64. If the BSON type is not a numeric one, this method
+// will panic.
+//
+// TODO(GODRIVER-2751): Implement AsFloat64.
+// func (v Value) AsFloat64() float64
+
+// AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False
+// indicates an error.
+//
+// TODO(GODRIVER-2751): Implement AsFloat64OK.
+// func (v Value) AsFloat64OK() (float64, bool)
+
+// Equal compares v to v2 and returns true if they are equal.
+func (v Value) Equal(v2 Value) bool {
+	if v.Type != v2.Type {
+		return false
+	}
+
+	return bytes.Equal(v.Data, v2.Data)
+}
+
+// String implements the fmt.Stringer interface. This method will return values in extended JSON
+// format. If the value is not valid, this returns an empty string.
+func (v Value) String() string {
+	switch v.Type {
+	case bsontype.Double:
+		f64, ok := v.DoubleOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberDouble":"%s"}`, formatDouble(f64))
+	case bsontype.String:
+		str, ok := v.StringValueOK()
+		if !ok {
+			return ""
+		}
+		return escapeString(str)
+	case bsontype.EmbeddedDocument:
+		doc, ok := v.DocumentOK()
+		if !ok {
+			return ""
+		}
+		return doc.String()
+	case bsontype.Array:
+		arr, ok := v.ArrayOK()
+		if !ok {
+			return ""
+		}
+		return arr.String()
+	case bsontype.Binary:
+		subtype, data, ok := v.BinaryOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$binary":{"base64":"%s","subType":"%02x"}}`, base64.StdEncoding.EncodeToString(data), subtype)
+	case bsontype.Undefined:
+		return `{"$undefined":true}`
+	case bsontype.ObjectID:
+		oid, ok := v.ObjectIDOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$oid":"%s"}`, oid.Hex())
+	case bsontype.Boolean:
+		b, ok := v.BooleanOK()
+		if !ok {
+			return ""
+		}
+		return strconv.FormatBool(b)
+	case bsontype.DateTime:
+		dt, ok := v.DateTimeOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$date":{"$numberLong":"%d"}}`, dt)
+	case bsontype.Null:
+		return "null"
+	case bsontype.Regex:
+		pattern, options, ok := v.RegexOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(
+			`{"$regularExpression":{"pattern":%s,"options":"%s"}}`,
+			escapeString(pattern), sortStringAlphebeticAscending(options),
+		)
+	case bsontype.DBPointer:
+		ns, pointer, ok := v.DBPointerOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$dbPointer":{"$ref":%s,"$id":{"$oid":"%s"}}}`, escapeString(ns), pointer.Hex())
+	case bsontype.JavaScript:
+		js, ok := v.JavaScriptOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s}`, escapeString(js))
+	case bsontype.Symbol:
+		symbol, ok := v.SymbolOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$symbol":%s}`, escapeString(symbol))
+	case bsontype.CodeWithScope:
+		code, scope, ok := v.CodeWithScopeOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s,"$scope":%s}`, code, scope)
+	case bsontype.Int32:
+		i32, ok := v.Int32OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberInt":"%d"}`, i32)
+	case bsontype.Timestamp:
+		t, i, ok := v.TimestampOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$timestamp":{"t":%v,"i":%v}}`, t, i)
+	case bsontype.Int64:
+		i64, ok := v.Int64OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberLong":"%d"}`, i64)
+	case bsontype.Decimal128:
+		d128, ok := v.Decimal128OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberDecimal":"%s"}`, d128.String())
+	case bsontype.MinKey:
+		return `{"$minKey":1}`
+	case bsontype.MaxKey:
+		return `{"$maxKey":1}`
+	default:
+		return ""
+	}
+}
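+
+// For illustration only: String produces canonical-style extended JSON fragments, e.g. a
+// Value{Type: bsontype.Int32, Data: []byte{0x01, 0x00, 0x00, 0x00}} prints {"$numberInt":"1"}.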
+
+// DebugString outputs a human readable version of Value. It will attempt to stringify the
+// valid components of the value even if the entire value is not valid.
+func (v Value) DebugString() string {
+	switch v.Type {
+	case bsontype.String:
+		str, ok := v.StringValueOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return escapeString(str)
+	case bsontype.EmbeddedDocument:
+		doc, ok := v.DocumentOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return doc.DebugString()
+	case bsontype.Array:
+		arr, ok := v.ArrayOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return arr.DebugString()
+	case bsontype.CodeWithScope:
+		code, scope, ok := v.CodeWithScopeOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s,"$scope":%s}`, code, scope.DebugString())
+	default:
+		str := v.String()
+		if str == "" {
+			return "<malformed>"
+		}
+		return str
+	}
+}
+
+// Double returns the float64 value for this element.
+// It panics if e's BSON type is not bsontype.Double.
+func (v Value) Double() float64 {
+	if v.Type != bsontype.Double {
+		panic(ElementTypeError{"bsoncore.Value.Double", v.Type})
+	}
+	f64, _, ok := ReadDouble(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return f64
+}
+
+// DoubleOK is the same as Double, but returns a boolean instead of panicking.
+func (v Value) DoubleOK() (float64, bool) {
+	if v.Type != bsontype.Double {
+		return 0, false
+	}
+	f64, _, ok := ReadDouble(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return f64, true
+}
+
+// StringValue returns the string value for this element.
+// It panics if e's BSON type is not bsontype.String.
+//
+// NOTE: This method is called StringValue to avoid a collision with the String method which
+// implements the fmt.Stringer interface.
+func (v Value) StringValue() string {
+	if v.Type != bsontype.String {
+		panic(ElementTypeError{"bsoncore.Value.StringValue", v.Type})
+	}
+	str, _, ok := ReadString(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return str
+}
+
+// StringValueOK is the same as StringValue, but returns a boolean instead of
+// panicking.
+func (v Value) StringValueOK() (string, bool) {
+	if v.Type != bsontype.String {
+		return "", false
+	}
+	str, _, ok := ReadString(v.Data)
+	if !ok {
+		return "", false
+	}
+	return str, true
+}
+
+// Document returns the BSON document the Value represents as a Document. It panics if the
+// value is a BSON type other than document.
+func (v Value) Document() Document {
+	if v.Type != bsontype.EmbeddedDocument {
+		panic(ElementTypeError{"bsoncore.Value.Document", v.Type})
+	}
+	doc, _, ok := ReadDocument(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return doc
+}
+
+// DocumentOK is the same as Document, except it returns a boolean
+// instead of panicking.
+func (v Value) DocumentOK() (Document, bool) {
+	if v.Type != bsontype.EmbeddedDocument {
+		return nil, false
+	}
+	doc, _, ok := ReadDocument(v.Data)
+	if !ok {
+		return nil, false
+	}
+	return doc, true
+}
+
+// Array returns the BSON array the Value represents as an Array. It panics if the
+// value is a BSON type other than array.
+func (v Value) Array() Array {
+	if v.Type != bsontype.Array {
+		panic(ElementTypeError{"bsoncore.Value.Array", v.Type})
+	}
+	arr, _, ok := ReadArray(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return arr
+}
+
+// ArrayOK is the same as Array, except it returns a boolean instead
+// of panicking.
+func (v Value) ArrayOK() (Array, bool) {
+	if v.Type != bsontype.Array {
+		return nil, false
+	}
+	arr, _, ok := ReadArray(v.Data)
+	if !ok {
+		return nil, false
+	}
+	return arr, true
+}
+
+// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
+// other than binary.
+func (v Value) Binary() (subtype byte, data []byte) {
+	if v.Type != bsontype.Binary {
+		panic(ElementTypeError{"bsoncore.Value.Binary", v.Type})
+	}
+	subtype, data, _, ok := ReadBinary(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return subtype, data
+}
+
+// BinaryOK is the same as Binary, except it returns a boolean instead of
+// panicking.
+func (v Value) BinaryOK() (subtype byte, data []byte, ok bool) {
+	if v.Type != bsontype.Binary {
+		return 0x00, nil, false
+	}
+	subtype, data, _, ok = ReadBinary(v.Data)
+	if !ok {
+		return 0x00, nil, false
+	}
+	return subtype, data, true
+}
+
+// ObjectID returns the BSON objectid value the Value represents. It panics if the value is a BSON
+// type other than objectid.
+func (v Value) ObjectID() primitive.ObjectID {
+	if v.Type != bsontype.ObjectID {
+		panic(ElementTypeError{"bsoncore.Value.ObjectID", v.Type})
+	}
+	oid, _, ok := ReadObjectID(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return oid
+}
+
+// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
+// panicking.
+func (v Value) ObjectIDOK() (primitive.ObjectID, bool) {
+	if v.Type != bsontype.ObjectID {
+		return primitive.ObjectID{}, false
+	}
+	oid, _, ok := ReadObjectID(v.Data)
+	if !ok {
+		return primitive.ObjectID{}, false
+	}
+	return oid, true
+}
+
+// Boolean returns the boolean value the Value represents. It panics if the
+// value is a BSON type other than boolean.
+func (v Value) Boolean() bool {
+	if v.Type != bsontype.Boolean {
+		panic(ElementTypeError{"bsoncore.Value.Boolean", v.Type})
+	}
+	b, _, ok := ReadBoolean(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return b
+}
+
+// BooleanOK is the same as Boolean, except it returns a boolean instead of
+// panicking.
+func (v Value) BooleanOK() (bool, bool) {
+	if v.Type != bsontype.Boolean {
+		return false, false
+	}
+	b, _, ok := ReadBoolean(v.Data)
+	if !ok {
+		return false, false
+	}
+	return b, true
+}
+
+// DateTime returns the BSON datetime value the Value represents as the number of
+// milliseconds since the Unix epoch. It panics if the value is a BSON type other than datetime.
+func (v Value) DateTime() int64 {
+	if v.Type != bsontype.DateTime {
+		panic(ElementTypeError{"bsoncore.Value.DateTime", v.Type})
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return dt
+}
+
+// DateTimeOK is the same as DateTime, except it returns a boolean instead of
+// panicking.
+func (v Value) DateTimeOK() (int64, bool) {
+	if v.Type != bsontype.DateTime {
+		return 0, false
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return dt, true
+}
+
+// Time returns the BSON datetime value the Value represents. It panics if the value is a BSON
+// type other than datetime.
+func (v Value) Time() time.Time {
+	if v.Type != bsontype.DateTime {
+		panic(ElementTypeError{"bsoncore.Value.Time", v.Type})
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return time.Unix(dt/1000, dt%1000*1000000)
+}
+
+// TimeOK is the same as Time, except it returns a boolean instead of
+// panicking.
+func (v Value) TimeOK() (time.Time, bool) {
+	if v.Type != bsontype.DateTime {
+		return time.Time{}, false
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		return time.Time{}, false
+	}
+	return time.Unix(dt/1000, dt%1000*1000000), true
+}
+
+// Regex returns the BSON regex value the Value represents. It panics if the value is a BSON
+// type other than regex.
+func (v Value) Regex() (pattern, options string) {
+	if v.Type != bsontype.Regex {
+		panic(ElementTypeError{"bsoncore.Value.Regex", v.Type})
+	}
+	pattern, options, _, ok := ReadRegex(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return pattern, options
+}
+
+// RegexOK is the same as Regex, except it returns a boolean instead of
+// panicking.
+func (v Value) RegexOK() (pattern, options string, ok bool) {
+	if v.Type != bsontype.Regex {
+		return "", "", false
+	}
+	pattern, options, _, ok = ReadRegex(v.Data)
+	if !ok {
+		return "", "", false
+	}
+	return pattern, options, true
+}
+
+// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
+// type other than DBPointer.
+func (v Value) DBPointer() (string, primitive.ObjectID) {
+	if v.Type != bsontype.DBPointer {
+		panic(ElementTypeError{"bsoncore.Value.DBPointer", v.Type})
+	}
+	ns, pointer, _, ok := ReadDBPointer(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return ns, pointer
+}
+
+// DBPointerOK is the same as DBPointer, except that it returns a boolean
+// instead of panicking.
+func (v Value) DBPointerOK() (string, primitive.ObjectID, bool) {
+	if v.Type != bsontype.DBPointer {
+		return "", primitive.ObjectID{}, false
+	}
+	ns, pointer, _, ok := ReadDBPointer(v.Data)
+	if !ok {
+		return "", primitive.ObjectID{}, false
+	}
+	return ns, pointer, true
+}
+
+// JavaScript returns the BSON JavaScript code value the Value represents. It panics if the value is
+// a BSON type other than JavaScript code.
+func (v Value) JavaScript() string {
+	if v.Type != bsontype.JavaScript {
+		panic(ElementTypeError{"bsoncore.Value.JavaScript", v.Type})
+	}
+	js, _, ok := ReadJavaScript(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return js
+}
+
+// JavaScriptOK is the same as JavaScript, except that it returns a boolean
+// instead of panicking.
+func (v Value) JavaScriptOK() (string, bool) {
+	if v.Type != bsontype.JavaScript {
+		return "", false
+	}
+	js, _, ok := ReadJavaScript(v.Data)
+	if !ok {
+		return "", false
+	}
+	return js, true
+}
+
+// Symbol returns the BSON symbol value the Value represents. It panics if the value is a BSON
+// type other than symbol.
+func (v Value) Symbol() string {
+	if v.Type != bsontype.Symbol {
+		panic(ElementTypeError{"bsoncore.Value.Symbol", v.Type})
+	}
+	symbol, _, ok := ReadSymbol(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return symbol
+}
+
+// SymbolOK is the same as Symbol, except that it returns a boolean
+// instead of panicking.
+func (v Value) SymbolOK() (string, bool) {
+	if v.Type != bsontype.Symbol {
+		return "", false
+	}
+	symbol, _, ok := ReadSymbol(v.Data)
+	if !ok {
+		return "", false
+	}
+	return symbol, true
+}
+
+// CodeWithScope returns the BSON JavaScript code with scope the Value represents.
+// It panics if the value is a BSON type other than JavaScript code with scope.
+func (v Value) CodeWithScope() (string, Document) {
+	if v.Type != bsontype.CodeWithScope {
+		panic(ElementTypeError{"bsoncore.Value.CodeWithScope", v.Type})
+	}
+	code, scope, _, ok := ReadCodeWithScope(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return code, scope
+}
+
+// CodeWithScopeOK is the same as CodeWithScope, except that it returns a boolean instead of
+// panicking.
+func (v Value) CodeWithScopeOK() (string, Document, bool) {
+	if v.Type != bsontype.CodeWithScope {
+		return "", nil, false
+	}
+	code, scope, _, ok := ReadCodeWithScope(v.Data)
+	if !ok {
+		return "", nil, false
+	}
+	return code, scope, true
+}
+
+// Int32 returns the int32 the Value represents. It panics if the value is a BSON type other than
+// int32.
+func (v Value) Int32() int32 {
+	if v.Type != bsontype.Int32 {
+		panic(ElementTypeError{"bsoncore.Value.Int32", v.Type})
+	}
+	i32, _, ok := ReadInt32(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return i32
+}
+
+// Int32OK is the same as Int32, except that it returns a boolean instead of
+// panicking.
+func (v Value) Int32OK() (int32, bool) {
+	if v.Type != bsontype.Int32 {
+		return 0, false
+	}
+	i32, _, ok := ReadInt32(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return i32, true
+}
+
+// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
+// BSON type other than timestamp.
+func (v Value) Timestamp() (t, i uint32) {
+	if v.Type != bsontype.Timestamp {
+		panic(ElementTypeError{"bsoncore.Value.Timestamp", v.Type})
+	}
+	t, i, _, ok := ReadTimestamp(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return t, i
+}
+
+// TimestampOK is the same as Timestamp, except that it returns a boolean
+// instead of panicking.
+func (v Value) TimestampOK() (t, i uint32, ok bool) {
+	if v.Type != bsontype.Timestamp {
+		return 0, 0, false
+	}
+	t, i, _, ok = ReadTimestamp(v.Data)
+	if !ok {
+		return 0, 0, false
+	}
+	return t, i, true
+}
+
+// Int64 returns the int64 the Value represents. It panics if the value is a BSON type other than
+// int64.
+func (v Value) Int64() int64 {
+	if v.Type != bsontype.Int64 {
+		panic(ElementTypeError{"bsoncore.Value.Int64", v.Type})
+	}
+	i64, _, ok := ReadInt64(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return i64
+}
+
+// Int64OK is the same as Int64, except that it returns a boolean instead of
+// panicking.
+func (v Value) Int64OK() (int64, bool) {
+	if v.Type != bsontype.Int64 {
+		return 0, false
+	}
+	i64, _, ok := ReadInt64(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return i64, true
+}
+
+// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
+// decimal.
+func (v Value) Decimal128() primitive.Decimal128 {
+	if v.Type != bsontype.Decimal128 {
+		panic(ElementTypeError{"bsoncore.Value.Decimal128", v.Type})
+	}
+	d128, _, ok := ReadDecimal128(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return d128
+}
+
+// Decimal128OK is the same as Decimal128, except that it returns a boolean
+// instead of panicking.
+func (v Value) Decimal128OK() (primitive.Decimal128, bool) {
+	if v.Type != bsontype.Decimal128 {
+		return primitive.Decimal128{}, false
+	}
+	d128, _, ok := ReadDecimal128(v.Data)
+	if !ok {
+		return primitive.Decimal128{}, false
+	}
+	return d128, true
+}
+
+var hexChars = "0123456789abcdef"
+
+func escapeString(s string) string {
+	escapeHTML := true
+	var buf bytes.Buffer
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			case '\t':
+				buf.WriteByte('\\')
+				buf.WriteByte('t')
+			case '\b':
+				buf.WriteByte('\\')
+				buf.WriteByte('b')
+			case '\f':
+				buf.WriteByte('\\')
+				buf.WriteByte('f')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hexChars[b>>4])
+				buf.WriteByte(hexChars[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hexChars[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+	return buf.String()
+}
+
+func formatDouble(f float64) string {
+	var s string
+	switch {
+	case math.IsInf(f, 1):
+		s = "Infinity"
+	case math.IsInf(f, -1):
+		s = "-Infinity"
+	case math.IsNaN(f):
+		s = "NaN"
+	default:
+		// Print exactly one decimal place for integers; otherwise, print as many as are necessary to
+		// perfectly represent it.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
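+
+// For illustration only: formatDouble keeps integral doubles distinguishable from BSON
+// int32/int64 in extended JSON, e.g. (example values assumed):
+//
+//	formatDouble(3)            // "3.0"
+//	formatDouble(math.Inf(-1)) // "-Infinity"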
+
+type sortableString []rune
+
+func (ss sortableString) Len() int {
+	return len(ss)
+}
+
+func (ss sortableString) Less(i, j int) bool {
+	return ss[i] < ss[j]
+}
+
+func (ss sortableString) Swap(i, j int) {
+	ss[i], ss[j] = ss[j], ss[i]
+}
+
+func sortStringAlphebeticAscending(s string) string {
+	ss := sortableString([]rune(s))
+	sort.Sort(ss)
+	return string([]rune(ss))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go
new file mode 100644
index 0000000000000000000000000000000000000000..34a4a68f597c4294c55e5707e35904d97b2d7b79
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go
@@ -0,0 +1,225 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+const sourceExternal = "$external"
+
+// Config contains the configuration for an Authenticator.
+type Config = driver.AuthConfig
+
+// AuthenticatorFactory constructs an authenticator.
+type AuthenticatorFactory func(*Cred, *http.Client) (Authenticator, error)
+
+var authFactories = make(map[string]AuthenticatorFactory)
+
+func init() {
+	RegisterAuthenticatorFactory("", newDefaultAuthenticator)
+	RegisterAuthenticatorFactory(SCRAMSHA1, newScramSHA1Authenticator)
+	RegisterAuthenticatorFactory(SCRAMSHA256, newScramSHA256Authenticator)
+	RegisterAuthenticatorFactory(MONGODBCR, newMongoDBCRAuthenticator)
+	RegisterAuthenticatorFactory(PLAIN, newPlainAuthenticator)
+	RegisterAuthenticatorFactory(GSSAPI, newGSSAPIAuthenticator)
+	RegisterAuthenticatorFactory(MongoDBX509, newMongoDBX509Authenticator)
+	RegisterAuthenticatorFactory(MongoDBAWS, newMongoDBAWSAuthenticator)
+	RegisterAuthenticatorFactory(MongoDBOIDC, newOIDCAuthenticator)
+}
+
+// CreateAuthenticator creates an authenticator.
+func CreateAuthenticator(name string, cred *Cred, httpClient *http.Client) (Authenticator, error) {
+	if f, ok := authFactories[name]; ok {
+		return f(cred, httpClient)
+	}
+
+	return nil, newAuthError(fmt.Sprintf("unknown authenticator: %s", name), nil)
+}
+
+// RegisterAuthenticatorFactory registers the authenticator factory.
+func RegisterAuthenticatorFactory(name string, factory AuthenticatorFactory) {
+	authFactories[name] = factory
+}
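+
+// For illustration only: a sketch of constructing an authenticator through the factory registry.
+// The Cred literal below is an assumption; see the Cred type for the actual fields:
+//
+//	authenticator, err := CreateAuthenticator(SCRAMSHA256, &Cred{
+//		Source:   "admin",
+//		Username: "user",
+//		Password: "pass",
+//	}, http.DefaultClient)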
+
+// HandshakeOptions packages options that can be passed to the Handshaker()
+// function. DBUser is optional but must be of the form <dbname>.<username>;
+// if non-empty, then the connection will do SASL mechanism negotiation.
+type HandshakeOptions struct {
+	AppName               string
+	Authenticator         Authenticator
+	Compressors           []string
+	DBUser                string
+	PerformAuthentication func(description.Server) bool
+	ClusterClock          *session.ClusterClock
+	ServerAPI             *driver.ServerAPIOptions
+	LoadBalanced          bool
+}
+
+type authHandshaker struct {
+	wrapped driver.Handshaker
+	options *HandshakeOptions
+
+	handshakeInfo driver.HandshakeInformation
+	conversation  SpeculativeConversation
+}
+
+var _ driver.Handshaker = (*authHandshaker)(nil)
+
+// GetHandshakeInformation performs the initial MongoDB handshake to retrieve the required information for the provided
+// connection.
+func (ah *authHandshaker) GetHandshakeInformation(ctx context.Context, addr address.Address, conn driver.Connection) (driver.HandshakeInformation, error) {
+	if ah.wrapped != nil {
+		return ah.wrapped.GetHandshakeInformation(ctx, addr, conn)
+	}
+
+	op := operation.NewHello().
+		AppName(ah.options.AppName).
+		Compressors(ah.options.Compressors).
+		SASLSupportedMechs(ah.options.DBUser).
+		ClusterClock(ah.options.ClusterClock).
+		ServerAPI(ah.options.ServerAPI).
+		LoadBalanced(ah.options.LoadBalanced)
+
+	if ah.options.Authenticator != nil {
+		if speculativeAuth, ok := ah.options.Authenticator.(SpeculativeAuthenticator); ok {
+			var err error
+			ah.conversation, err = speculativeAuth.CreateSpeculativeConversation()
+			if err != nil {
+				return driver.HandshakeInformation{}, newAuthError("failed to create conversation", err)
+			}
+
+			// It is possible for the speculative conversation to be nil even without error if the authenticator
+			// cannot perform speculative authentication. An example of this is MONGODB-OIDC when there is
+			// no AccessToken in the cache.
+			if ah.conversation != nil {
+				firstMsg, err := ah.conversation.FirstMessage()
+				if err != nil {
+					return driver.HandshakeInformation{}, newAuthError("failed to create speculative authentication message", err)
+				}
+
+				op = op.SpeculativeAuthenticate(firstMsg)
+			}
+		}
+	}
+
+	var err error
+	ah.handshakeInfo, err = op.GetHandshakeInformation(ctx, addr, conn)
+	if err != nil {
+		return driver.HandshakeInformation{}, newAuthError("handshake failure", err)
+	}
+	return ah.handshakeInfo, nil
+}
+
+// FinishHandshake performs authentication for conn if necessary.
+func (ah *authHandshaker) FinishHandshake(ctx context.Context, conn driver.Connection) error {
+	performAuth := ah.options.PerformAuthentication
+	if performAuth == nil {
+		performAuth = func(serv description.Server) bool {
+			// Authentication is possible against all server types except arbiters
+			return serv.Kind != description.RSArbiter
+		}
+	}
+
+	desc := conn.Description()
+	if performAuth(desc) && ah.options.Authenticator != nil {
+		cfg := &Config{
+			Description:   desc,
+			Connection:    conn,
+			ClusterClock:  ah.options.ClusterClock,
+			HandshakeInfo: ah.handshakeInfo,
+			ServerAPI:     ah.options.ServerAPI,
+		}
+
+		if err := ah.authenticate(ctx, cfg); err != nil {
+			return newAuthError("auth error", err)
+		}
+	}
+
+	if ah.wrapped == nil {
+		return nil
+	}
+	return ah.wrapped.FinishHandshake(ctx, conn)
+}
+
+func (ah *authHandshaker) authenticate(ctx context.Context, cfg *Config) error {
+	// If the initial hello reply included a response to the speculative authentication attempt, we only need to
+	// conduct the remainder of the conversation.
+	if speculativeResponse := ah.handshakeInfo.SpeculativeAuthenticate; speculativeResponse != nil {
+		// Defensively ensure that the server did not include a response if speculative auth was not attempted.
+		if ah.conversation == nil {
+			return errors.New("speculative auth was not attempted but the server included a response")
+		}
+		return ah.conversation.Finish(ctx, cfg, speculativeResponse)
+	}
+
+	// If the server does not support speculative authentication or the first attempt was not successful, we need to
+	// perform authentication from scratch.
+	return ah.options.Authenticator.Auth(ctx, cfg)
+}
+
+// Handshaker creates a connection handshaker for the given authenticator.
+func Handshaker(h driver.Handshaker, options *HandshakeOptions) driver.Handshaker {
+	return &authHandshaker{
+		wrapped: h,
+		options: options,
+	}
+}
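+
+// Editor's note: a hedged wiring sketch, assuming base is an existing
+// driver.Handshaker (it may be nil) and authenticator was produced by
+// CreateAuthenticator:
+//
+//	h := Handshaker(base, &HandshakeOptions{
+//		AppName:       "myApp",
+//		Authenticator: authenticator,
+//	})
+//	// h performs the hello exchange and then authenticates the connection.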
+
+// Authenticator handles authenticating a connection.
+type Authenticator = driver.Authenticator
+
+func newAuthError(msg string, inner error) error {
+	return &Error{
+		message: msg,
+		inner:   inner,
+	}
+}
+
+func newError(err error, mech string) error {
+	return &Error{
+		message: fmt.Sprintf("unable to authenticate using mechanism \"%s\"", mech),
+		inner:   err,
+	}
+}
+
+// Error is an error that occurred during authentication.
+type Error struct {
+	message string
+	inner   error
+}
+
+func (e *Error) Error() string {
+	if e.inner == nil {
+		return e.message
+	}
+	return fmt.Sprintf("%s: %s", e.message, e.inner)
+}
+
+// Inner returns the wrapped error.
+func (e *Error) Inner() error {
+	return e.inner
+}
+
+// Unwrap returns the underlying error.
+func (e *Error) Unwrap() error {
+	return e.inner
+}
+
+// Message returns the message.
+func (e *Error) Message() string {
+	return e.message
+}
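+
+// Editor's note: because Error implements Unwrap, callers can inspect
+// authentication failures with the standard errors package:
+//
+//	var authErr *Error
+//	if errors.As(err, &authErr) {
+//		fmt.Println(authErr.Message(), authErr.Unwrap())
+//	}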
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go
new file mode 100644
index 0000000000000000000000000000000000000000..616182d9cfb81356d3512d77d29a7a1353098ec4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go
@@ -0,0 +1,189 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+	v4signer "go.mongodb.org/mongo-driver/internal/aws/signer/v4"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+type clientState int
+
+const (
+	clientStarting clientState = iota
+	clientFirst
+	clientFinal
+	clientDone
+)
+
+type awsConversation struct {
+	state       clientState
+	valid       bool
+	nonce       []byte
+	credentials *credentials.Credentials
+}
+
+type serverMessage struct {
+	Nonce primitive.Binary `bson:"s"`
+	Host  string           `bson:"h"`
+}
+
+const (
+	amzDateFormat       = "20060102T150405Z"
+	defaultRegion       = "us-east-1"
+	maxHostLength       = 255
+	responseNonceLength = 64
+)
+
+// Step takes a string provided from a server (or just an empty string for the
+// very first conversation step) and attempts to move the authentication
+// conversation forward.  It returns a string to be sent to the server or an
+// error if the server message is invalid.  Calling Step after a conversation
+// completes is also an error.
+func (ac *awsConversation) Step(challenge []byte) (response []byte, err error) {
+	switch ac.state {
+	case clientStarting:
+		ac.state = clientFirst
+		response = ac.firstMsg()
+	case clientFirst:
+		ac.state = clientFinal
+		response, err = ac.finalMsg(challenge)
+	case clientFinal:
+		ac.state = clientDone
+		ac.valid = true
+	default:
+		response, err = nil, errors.New("conversation already completed")
+	}
+	return
+}
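+
+// Editor's note: a hedged sketch of the exchange Step drives. The client-first
+// message carries a fresh 32-byte nonce, the server extends the nonce and names
+// an STS host, and the client-final message answers with a signed
+// GetCallerIdentity request (see firstMsg and finalMsg below):
+//
+//	conv := &awsConversation{credentials: creds} // creds is a *credentials.Credentials
+//	clientFirstMsg, _ := conv.Step(nil)
+//	clientFinalMsg, _ := conv.Step(serverChallenge)
+//	_, _ = conv.Step(serverDone) // marks the conversation valid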
+
+// Done returns true if the conversation is completed or has errored.
+func (ac *awsConversation) Done() bool {
+	return ac.state == clientDone
+}
+
+// Valid returns true if the conversation successfully authenticated with the
+// server, including counter-validation that the server actually has the
+// user's stored credentials.
+func (ac *awsConversation) Valid() bool {
+	return ac.valid
+}
+
+func getRegion(host string) (string, error) {
+	region := defaultRegion
+
+	if len(host) == 0 {
+		return "", errors.New("invalid STS host: empty")
+	}
+	if len(host) > maxHostLength {
+		return "", errors.New("invalid STS host: too large")
+	}
+	// The implicit region for sts.amazonaws.com is us-east-1
+	if host == "sts.amazonaws.com" {
+		return region, nil
+	}
+	if strings.HasPrefix(host, ".") || strings.HasSuffix(host, ".") || strings.Contains(host, "..") {
+		return "", errors.New("invalid STS host: empty part")
+	}
+
+	// If the host has multiple parts, the second part is the region
+	parts := strings.Split(host, ".")
+	if len(parts) >= 2 {
+		region = parts[1]
+	}
+
+	return region, nil
+}
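+
+// Editor's note: examples derived from the rules above:
+//
+//	getRegion("sts.amazonaws.com")           // "us-east-1" (implicit region)
+//	getRegion("sts.us-west-2.amazonaws.com") // "us-west-2" (second label)
+//	getRegion("example")                     // "us-east-1" (single label, default region)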
+
+func (ac *awsConversation) firstMsg() []byte {
+	// Values are cached for use in final message parameters
+	ac.nonce = make([]byte, 32)
+	_, _ = rand.Read(ac.nonce)
+
+	idx, msg := bsoncore.AppendDocumentStart(nil)
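+	// "p" is the gs2-cb-flag; 110 is the byte value of 'n' (no channel
+	// binding), matching the X-MongoDB-GS2-CB-Flag header sent later.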
+	msg = bsoncore.AppendInt32Element(msg, "p", 110)
+	msg = bsoncore.AppendBinaryElement(msg, "r", 0x00, ac.nonce)
+	msg, _ = bsoncore.AppendDocumentEnd(msg, idx)
+	return msg
+}
+
+func (ac *awsConversation) finalMsg(s1 []byte) ([]byte, error) {
+	var sm serverMessage
+	err := bson.Unmarshal(s1, &sm)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check nonce prefix
+	if sm.Nonce.Subtype != 0x00 {
+		return nil, errors.New("server reply contained unexpected binary subtype")
+	}
+	if len(sm.Nonce.Data) != responseNonceLength {
+		return nil, fmt.Errorf("server reply nonce was not %v bytes", responseNonceLength)
+	}
+	if !bytes.HasPrefix(sm.Nonce.Data, ac.nonce) {
+		return nil, errors.New("server nonce did not extend client nonce")
+	}
+
+	region, err := getRegion(sm.Host)
+	if err != nil {
+		return nil, err
+	}
+
+	creds, err := ac.credentials.GetWithContext(context.Background())
+	if err != nil {
+		return nil, err
+	}
+
+	currentTime := time.Now().UTC()
+	body := "Action=GetCallerIdentity&Version=2011-06-15"
+
+	// Create http.Request
+	req, _ := http.NewRequest("POST", "/", strings.NewReader(body))
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	req.Header.Set("Content-Length", "43")
+	req.Host = sm.Host
+	req.Header.Set("X-Amz-Date", currentTime.Format(amzDateFormat))
+	if len(creds.SessionToken) > 0 {
+		req.Header.Set("X-Amz-Security-Token", creds.SessionToken)
+	}
+	req.Header.Set("X-MongoDB-Server-Nonce", base64.StdEncoding.EncodeToString(sm.Nonce.Data))
+	req.Header.Set("X-MongoDB-GS2-CB-Flag", "n")
+
+	// Create signer with credentials
+	signer := v4signer.NewSigner(ac.credentials)
+
+	// Get signed header
+	_, err = signer.Sign(req, strings.NewReader(body), "sts", region, currentTime)
+	if err != nil {
+		return nil, err
+	}
+
+	// create message
+	idx, msg := bsoncore.AppendDocumentStart(nil)
+	msg = bsoncore.AppendStringElement(msg, "a", req.Header.Get("Authorization"))
+	msg = bsoncore.AppendStringElement(msg, "d", req.Header.Get("X-Amz-Date"))
+	if len(creds.SessionToken) > 0 {
+		msg = bsoncore.AppendStringElement(msg, "t", creds.SessionToken)
+	}
+	msg, _ = bsoncore.AppendDocumentEnd(msg, idx)
+
+	return msg, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe8f472c9612cf809cb1ea609c28f4eec661b185
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go
@@ -0,0 +1,31 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// SpeculativeConversation represents an authentication conversation that can be merged with the initial connection
+// handshake.
+//
+// FirstMessage method returns the first message to be sent to the server. This message will be included in the initial
+// hello command.
+//
+// Finish takes the server response to the initial message and conducts the remainder of the conversation to
+// authenticate the provided connection.
+type SpeculativeConversation interface {
+	FirstMessage() (bsoncore.Document, error)
+	Finish(ctx context.Context, cfg *Config, firstResponse bsoncore.Document) error
+}
+
+// SpeculativeAuthenticator represents an authenticator that supports speculative authentication.
+type SpeculativeAuthenticator interface {
+	CreateSpeculativeConversation() (SpeculativeConversation, error)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/cred.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/cred.go
new file mode 100644
index 0000000000000000000000000000000000000000..a9685f6ed89854f517501bdb4a8599c1e34e8442
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/cred.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+// Cred is the type of user credential
+type Cred = driver.Cred
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/awscreds.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/awscreds.go
new file mode 100644
index 0000000000000000000000000000000000000000..06bba4534342c61043976b13933a333a4733ad04
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/awscreds.go
@@ -0,0 +1,58 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package creds
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+	"go.mongodb.org/mongo-driver/internal/credproviders"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+const (
+	// expiryWindow allows the credentials to trigger a refresh shortly before they
+	// actually expire, so that requests do not fail unexpectedly with expired credentials.
+	//
+	// Set an early expiration of 5 minutes before the credentials are actually expired.
+	expiryWindow = 5 * time.Minute
+)
+
+// AWSCredentialProvider wraps AWS credentials.
+type AWSCredentialProvider struct {
+	Cred *credentials.Credentials
+}
+
+// NewAWSCredentialProvider creates a new AWSCredentialProvider.
+func NewAWSCredentialProvider(httpClient *http.Client, providers ...credentials.Provider) AWSCredentialProvider {
+	providers = append(
+		providers,
+		credproviders.NewEnvProvider(),
+		credproviders.NewAssumeRoleProvider(httpClient, expiryWindow),
+		credproviders.NewECSProvider(httpClient, expiryWindow),
+		credproviders.NewEC2Provider(httpClient, expiryWindow),
+	)
+
+	return AWSCredentialProvider{credentials.NewChainCredentials(providers)}
+}
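+
+// Editor's note: a hedged usage sketch. Any caller-supplied providers are
+// consulted first, then the environment, assume-role, ECS, and EC2 metadata
+// providers appended above:
+//
+//	p := NewAWSCredentialProvider(http.DefaultClient)
+//	doc, err := p.GetCredentialsDoc(context.TODO())
+//	if err != nil {
+//		// no provider in the chain could supply credentials
+//	}
+//	_ = doc // BSON document: accessKeyId, secretAccessKey, optional sessionToken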
+
+// GetCredentialsDoc generates AWS credentials.
+func (p AWSCredentialProvider) GetCredentialsDoc(ctx context.Context) (bsoncore.Document, error) {
+	creds, err := p.Cred.GetWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	builder := bsoncore.NewDocumentBuilder().
+		AppendString("accessKeyId", creds.AccessKeyID).
+		AppendString("secretAccessKey", creds.SecretAccessKey)
+	if token := creds.SessionToken; len(token) > 0 {
+		builder.AppendString("sessionToken", token)
+	}
+	return builder.Build(), nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/azurecreds.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/azurecreds.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8f105a9d9a4d093ccbf44a37961b74d6858e894
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/azurecreds.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package creds
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+	"go.mongodb.org/mongo-driver/internal/credproviders"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// AzureCredentialProvider provides Azure credentials.
+type AzureCredentialProvider struct {
+	cred *credentials.Credentials
+}
+
+// NewAzureCredentialProvider creates a new AzureCredentialProvider.
+func NewAzureCredentialProvider(httpClient *http.Client) AzureCredentialProvider {
+	return AzureCredentialProvider{
+		credentials.NewCredentials(credproviders.NewAzureProvider(httpClient, 1*time.Minute)),
+	}
+}
+
+// GetCredentialsDoc generates Azure credentials.
+func (p AzureCredentialProvider) GetCredentialsDoc(ctx context.Context) (bsoncore.Document, error) {
+	creds, err := p.cred.GetWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	builder := bsoncore.NewDocumentBuilder().
+		AppendString("accessToken", creds.SessionToken)
+	return builder.Build(), nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..99c4c3470f2497245c340a5fbd29017d57a3041f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2024-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package creds is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package creds
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/gcpcreds.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/gcpcreds.go
new file mode 100644
index 0000000000000000000000000000000000000000..74f352e36e32d308f4d017433d0a74780ec395ec
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/gcpcreds.go
@@ -0,0 +1,74 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package creds
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// GCPCredentialProvider provides GCP credentials.
+type GCPCredentialProvider struct {
+	httpClient *http.Client
+}
+
+// NewGCPCredentialProvider creates a new GCPCredentialProvider.
+func NewGCPCredentialProvider(httpClient *http.Client) GCPCredentialProvider {
+	return GCPCredentialProvider{httpClient}
+}
+
+// GetCredentialsDoc generates GCP credentials.
+func (p GCPCredentialProvider) GetCredentialsDoc(ctx context.Context) (bsoncore.Document, error) {
+	metadataHost := "metadata.google.internal"
+	if envhost := os.Getenv("GCE_METADATA_HOST"); envhost != "" {
+		metadataHost = envhost
+	}
+	url := fmt.Sprintf("http://%s/computeMetadata/v1/instance/service-accounts/default/token", metadataHost)
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return nil, fmt.Errorf("unable to retrieve GCP credentials: %w", err)
+	}
+	req.Header.Set("Metadata-Flavor", "Google")
+	resp, err := p.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, fmt.Errorf("unable to retrieve GCP credentials: %w", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("unable to retrieve GCP credentials: error reading response body: %w", err)
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf(
+			"unable to retrieve GCP credentials: expected StatusCode 200, got StatusCode: %v. Response body: %s",
+			resp.StatusCode,
+			body)
+	}
+	var tokenResponse struct {
+		AccessToken string `json:"access_token"`
+	}
+	// Attempt to read body as JSON
+	err = json.Unmarshal(body, &tokenResponse)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"unable to retrieve GCP credentials: error reading body JSON: %w (response body: %s)",
+			err,
+			body)
+	}
+	if tokenResponse.AccessToken == "" {
+		return nil, fmt.Errorf("unable to retrieve GCP credentials: got unexpected empty accessToken from GCP Metadata Server. Response body: %s", body)
+	}
+
+	builder := bsoncore.NewDocumentBuilder().AppendString("accessToken", tokenResponse.AccessToken)
+	return builder.Build(), nil
+}
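+
+// Editor's note: the metadata server's success response is JSON of the shape
+// {"access_token": "..."}; only access_token is read above. For local testing,
+// GCE_METADATA_HOST can point the provider at a stub server (hypothetical address):
+//
+//	os.Setenv("GCE_METADATA_HOST", "127.0.0.1:8080")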
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go
new file mode 100644
index 0000000000000000000000000000000000000000..785a41951d5b393871fd9fd96e49b36fc289cd91
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go
@@ -0,0 +1,93 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+func newDefaultAuthenticator(cred *Cred, httpClient *http.Client) (Authenticator, error) {
+	scram, err := newScramSHA256Authenticator(cred, httpClient)
+	if err != nil {
+		return nil, newAuthError("failed to create internal authenticator", err)
+	}
+	speculative, ok := scram.(SpeculativeAuthenticator)
+	if !ok {
+		typeErr := fmt.Errorf("expected SCRAM authenticator to be SpeculativeAuthenticator but got %T", scram)
+		return nil, newAuthError("failed to create internal authenticator", typeErr)
+	}
+
+	return &DefaultAuthenticator{
+		Cred:                     cred,
+		speculativeAuthenticator: speculative,
+		httpClient:               httpClient,
+	}, nil
+}
+
+// DefaultAuthenticator uses SCRAM-SHA-256, SCRAM-SHA-1, or MONGODB-CR depending
+// on the mechanisms the server advertises.
+type DefaultAuthenticator struct {
+	Cred *Cred
+
+	// The authenticator to use for speculative authentication. Because the correct auth mechanism is unknown when doing
+	// the initial hello, SCRAM-SHA-256 is used for the speculative attempt.
+	speculativeAuthenticator SpeculativeAuthenticator
+
+	httpClient *http.Client
+}
+
+var _ SpeculativeAuthenticator = (*DefaultAuthenticator)(nil)
+
+// CreateSpeculativeConversation creates a speculative conversation for SCRAM authentication.
+func (a *DefaultAuthenticator) CreateSpeculativeConversation() (SpeculativeConversation, error) {
+	return a.speculativeAuthenticator.CreateSpeculativeConversation()
+}
+
+// Auth authenticates the connection.
+func (a *DefaultAuthenticator) Auth(ctx context.Context, cfg *Config) error {
+	var actual Authenticator
+	var err error
+
+	switch chooseAuthMechanism(cfg) {
+	case SCRAMSHA256:
+		actual, err = newScramSHA256Authenticator(a.Cred, a.httpClient)
+	case SCRAMSHA1:
+		actual, err = newScramSHA1Authenticator(a.Cred, a.httpClient)
+	default:
+		actual, err = newMongoDBCRAuthenticator(a.Cred, a.httpClient)
+	}
+
+	if err != nil {
+		return newAuthError("error creating authenticator", err)
+	}
+
+	return actual.Auth(ctx, cfg)
+}
+
+// Reauth reauthenticates the connection.
+func (a *DefaultAuthenticator) Reauth(_ context.Context, _ *driver.AuthConfig) error {
+	return newAuthError("DefaultAuthenticator does not support reauthentication", nil)
+}
+
+// If the server provides a list of supported mechanisms, we choose
+// SCRAM-SHA-256 if it is present and otherwise MUST use SCRAM-SHA-1.
+// If no list is provided, we fall back to SCRAM-SHA-1.
+func chooseAuthMechanism(cfg *Config) string {
+	if saslSupportedMechs := cfg.HandshakeInfo.SaslSupportedMechs; saslSupportedMechs != nil {
+		for _, v := range saslSupportedMechs {
+			if v == SCRAMSHA256 {
+				return v
+			}
+		}
+	}
+
+	return SCRAMSHA1
+}
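+
+// Editor's note: for example, a hello reply advertising
+// saslSupportedMechs = ["SCRAM-SHA-1", "SCRAM-SHA-256"] selects SCRAM-SHA-256,
+// while an absent or empty list falls back to SCRAM-SHA-1.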
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..5f9f1f57430069918fa2231d763594d743a200f0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package auth is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package auth
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go
new file mode 100644
index 0000000000000000000000000000000000000000..b342e9a7de45ea105486af5cf27c7d7908c6abf6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go
@@ -0,0 +1,66 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build gssapi && (windows || linux || darwin)
+// +build gssapi
+// +build windows linux darwin
+
+package auth
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi"
+)
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(cred *Cred, _ *http.Client) (Authenticator, error) {
+	if cred.Source != "" && cred.Source != sourceExternal {
+		return nil, newAuthError("GSSAPI source must be empty or $external", nil)
+	}
+
+	return &GSSAPIAuthenticator{
+		Username:    cred.Username,
+		Password:    cred.Password,
+		PasswordSet: cred.PasswordSet,
+		Props:       cred.Props,
+	}, nil
+}
+
+// GSSAPIAuthenticator uses the GSSAPI algorithm over SASL to authenticate a connection.
+type GSSAPIAuthenticator struct {
+	Username    string
+	Password    string
+	PasswordSet bool
+	Props       map[string]string
+}
+
+// Auth authenticates the connection.
+func (a *GSSAPIAuthenticator) Auth(ctx context.Context, cfg *Config) error {
+	target := cfg.Description.Addr.String()
+	hostname, _, err := net.SplitHostPort(target)
+	if err != nil {
+		return newAuthError(fmt.Sprintf("invalid endpoint (%s) specified: %s", target, err), nil)
+	}
+
+	client, err := gssapi.New(hostname, a.Username, a.Password, a.PasswordSet, a.Props)
+
+	if err != nil {
+		return newAuthError("error creating gssapi", err)
+	}
+	return ConductSaslConversation(ctx, cfg, sourceExternal, client)
+}
+
+// Reauth reauthenticates the connection.
+func (a *GSSAPIAuthenticator) Reauth(_ context.Context, _ *driver.AuthConfig) error {
+	return newAuthError("GSSAPI does not support reauthentication", nil)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go
new file mode 100644
index 0000000000000000000000000000000000000000..e50553c7a1bde9e98e98ad4daea19100d5f0d386
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go
@@ -0,0 +1,19 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build !gssapi
+// +build !gssapi
+
+package auth
+
+import "net/http"
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(*Cred, *http.Client) (Authenticator, error) {
+	return nil, newAuthError("GSSAPI support not enabled during build (-tags gssapi)", nil)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go
new file mode 100644
index 0000000000000000000000000000000000000000..12046ff67c2001ccc180d64d55d58c63881c592f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go
@@ -0,0 +1,23 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build gssapi && !windows && !linux && !darwin
+// +build gssapi,!windows,!linux,!darwin
+
+package auth
+
+import (
+	"fmt"
+	"net/http"
+	"runtime"
+)
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(*Cred, *http.Client) (Authenticator, error) {
+	return nil, newAuthError(fmt.Sprintf("GSSAPI is not supported on %s", runtime.GOOS), nil)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go
new file mode 100644
index 0000000000000000000000000000000000000000..496057882da77cb6a090c48df3dcf04d3bc2b963
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go
@@ -0,0 +1,168 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build gssapi && (linux || darwin)
+// +build gssapi
+// +build linux darwin
+
+package gssapi
+
+/*
+#cgo linux CFLAGS: -DGOOS_linux
+#cgo linux LDFLAGS: -lgssapi_krb5 -lkrb5
+#cgo darwin CFLAGS: -DGOOS_darwin
+#cgo darwin LDFLAGS: -framework GSS
+#include "gss_wrapper.h"
+*/
+import "C"
+import (
+	"context"
+	"fmt"
+	"runtime"
+	"strings"
+	"unsafe"
+)
+
+// New creates a new SaslClient. The target parameter should be a hostname with no port.
+func New(target, username, password string, passwordSet bool, props map[string]string) (*SaslClient, error) {
+	serviceName := "mongodb"
+
+	for key, value := range props {
+		switch strings.ToUpper(key) {
+		case "CANONICALIZE_HOST_NAME":
+			return nil, fmt.Errorf("CANONICALIZE_HOST_NAME is not supported when using gssapi on %s", runtime.GOOS)
+		case "SERVICE_REALM":
+			return nil, fmt.Errorf("SERVICE_REALM is not supported when using gssapi on %s", runtime.GOOS)
+		case "SERVICE_NAME":
+			serviceName = value
+		case "SERVICE_HOST":
+			target = value
+		default:
+			return nil, fmt.Errorf("unknown mechanism property %s", key)
+		}
+	}
+
+	servicePrincipalName := fmt.Sprintf("%s@%s", serviceName, target)
+
+	return &SaslClient{
+		servicePrincipalName: servicePrincipalName,
+		username:             username,
+		password:             password,
+		passwordSet:          passwordSet,
+	}, nil
+}
+
+type SaslClient struct {
+	servicePrincipalName string
+	username             string
+	password             string
+	passwordSet          bool
+
+	// state
+	state           C.gssapi_client_state
+	contextComplete bool
+	done            bool
+}
+
+func (sc *SaslClient) Close() {
+	C.gssapi_client_destroy(&sc.state)
+}
+
+func (sc *SaslClient) Start() (string, []byte, error) {
+	const mechName = "GSSAPI"
+
+	cservicePrincipalName := C.CString(sc.servicePrincipalName)
+	defer C.free(unsafe.Pointer(cservicePrincipalName))
+	var cusername *C.char
+	var cpassword *C.char
+	if sc.username != "" {
+		cusername = C.CString(sc.username)
+		defer C.free(unsafe.Pointer(cusername))
+		if sc.passwordSet {
+			cpassword = C.CString(sc.password)
+			defer C.free(unsafe.Pointer(cpassword))
+		}
+	}
+	status := C.gssapi_client_init(&sc.state, cservicePrincipalName, cusername, cpassword)
+
+	if status != C.GSSAPI_OK {
+		return mechName, nil, sc.getError("unable to initialize client")
+	}
+
+	payload, err := sc.Next(nil, nil)
+
+	return mechName, payload, err
+}
+
+func (sc *SaslClient) Next(_ context.Context, challenge []byte) ([]byte, error) {
+
+	var buf unsafe.Pointer
+	var bufLen C.size_t
+	var outBuf unsafe.Pointer
+	var outBufLen C.size_t
+
+	if sc.contextComplete {
+		if sc.username == "" {
+			var cusername *C.char
+			status := C.gssapi_client_username(&sc.state, &cusername)
+			if status != C.GSSAPI_OK {
+				return nil, sc.getError("unable to acquire username")
+			}
+			defer C.free(unsafe.Pointer(cusername))
+			sc.username = C.GoString((*C.char)(unsafe.Pointer(cusername)))
+		}
+
+		bytes := append([]byte{1, 0, 0, 0}, []byte(sc.username)...)
+		buf = unsafe.Pointer(&bytes[0])
+		bufLen = C.size_t(len(bytes))
+		status := C.gssapi_client_wrap_msg(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		if status != C.GSSAPI_OK {
+			return nil, sc.getError("unable to wrap authz")
+		}
+
+		sc.done = true
+	} else {
+		if len(challenge) > 0 {
+			buf = unsafe.Pointer(&challenge[0])
+			bufLen = C.size_t(len(challenge))
+		}
+
+		status := C.gssapi_client_negotiate(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		switch status {
+		case C.GSSAPI_OK:
+			sc.contextComplete = true
+		case C.GSSAPI_CONTINUE:
+		default:
+			return nil, sc.getError("unable to negotiate with server")
+		}
+	}
+
+	if outBuf != nil {
+		defer C.free(outBuf)
+	}
+
+	return C.GoBytes(outBuf, C.int(outBufLen)), nil
+}
+
+func (sc *SaslClient) Completed() bool {
+	return sc.done
+}
+
+func (sc *SaslClient) getError(prefix string) error {
+	var desc *C.char
+
+	status := C.gssapi_error_desc(sc.state.maj_stat, sc.state.min_stat, &desc)
+	if status != C.GSSAPI_OK {
+		if desc != nil {
+			C.free(unsafe.Pointer(desc))
+		}
+
+		return fmt.Errorf("%s: (%v, %v)", prefix, sc.state.maj_stat, sc.state.min_stat)
+	}
+	defer C.free(unsafe.Pointer(desc))
+
+	return fmt.Errorf("%s: %v(%v,%v)", prefix, C.GoString(desc), int32(sc.state.maj_stat), int32(sc.state.min_stat))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c
new file mode 100644
index 0000000000000000000000000000000000000000..68b725414906dd8f0fe2258ac73f859fe46402b6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c
@@ -0,0 +1,254 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi
+//+build linux darwin
+
+#include <string.h>
+#include <stdio.h>
+#include "gss_wrapper.h"
+
+OM_uint32 gssapi_canonicalize_name(
+    OM_uint32* minor_status,
+    char *input_name,
+    gss_OID input_name_type,
+    gss_name_t *output_name
+)
+{
+    OM_uint32 major_status;
+    gss_name_t imported_name = GSS_C_NO_NAME;
+    gss_buffer_desc buffer = GSS_C_EMPTY_BUFFER;
+
+    buffer.value = input_name;
+    buffer.length = strlen(input_name);
+    major_status = gss_import_name(minor_status, &buffer, input_name_type, &imported_name);
+    if (GSS_ERROR(major_status)) {
+        return major_status;
+    }
+
+    major_status = gss_canonicalize_name(minor_status, imported_name, (gss_OID)gss_mech_krb5, output_name);
+    if (imported_name != GSS_C_NO_NAME) {
+        OM_uint32 ignored;
+        gss_release_name(&ignored, &imported_name);
+    }
+
+    return major_status;
+}
+
+int gssapi_error_desc(
+    OM_uint32 maj_stat,
+    OM_uint32 min_stat,
+    char **desc
+)
+{
+    OM_uint32 stat = maj_stat;
+    int stat_type = GSS_C_GSS_CODE;
+    if (min_stat != 0) {
+        stat = min_stat;
+        stat_type = GSS_C_MECH_CODE;
+    }
+
+    OM_uint32 local_maj_stat, local_min_stat;
+    OM_uint32 msg_ctx = 0;
+    gss_buffer_desc desc_buffer;
+    do
+    {
+        local_maj_stat = gss_display_status(
+            &local_min_stat,
+            stat,
+            stat_type,
+            GSS_C_NO_OID,
+            &msg_ctx,
+            &desc_buffer
+        );
+        if (GSS_ERROR(local_maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        if (*desc) {
+            free(*desc);
+        }
+
+        *desc = malloc(desc_buffer.length+1);
+        memcpy(*desc, desc_buffer.value, desc_buffer.length+1);
+
+        gss_release_buffer(&local_min_stat, &desc_buffer);
+    }
+    while(msg_ctx != 0);
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_init(
+    gssapi_client_state *client,
+    char* spn,
+    char* username,
+    char* password
+)
+{
+    client->cred = GSS_C_NO_CREDENTIAL;
+    client->ctx = GSS_C_NO_CONTEXT;
+
+    client->maj_stat = gssapi_canonicalize_name(&client->min_stat, spn, GSS_C_NT_HOSTBASED_SERVICE, &client->spn);
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    if (username) {
+        gss_name_t name;
+        client->maj_stat = gssapi_canonicalize_name(&client->min_stat, username, GSS_C_NT_USER_NAME, &name);
+        if (GSS_ERROR(client->maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        if (password) {
+            gss_buffer_desc password_buffer;
+            password_buffer.value = password;
+            password_buffer.length = strlen(password);
+            client->maj_stat = gss_acquire_cred_with_password(&client->min_stat, name, &password_buffer, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_INITIATE, &client->cred, NULL, NULL);
+        } else {
+            client->maj_stat = gss_acquire_cred(&client->min_stat, name, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_INITIATE, &client->cred, NULL, NULL);
+        }
+
+        if (GSS_ERROR(client->maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        OM_uint32 ignored;
+        gss_release_name(&ignored, &name);
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_username(
+    gssapi_client_state *client,
+    char** username
+)
+{
+    OM_uint32 ignored;
+    gss_name_t name = GSS_C_NO_NAME;
+
+    client->maj_stat = gss_inquire_context(&client->min_stat, client->ctx, &name, NULL, NULL, NULL, NULL, NULL, NULL);
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    gss_buffer_desc name_buffer;
+    client->maj_stat = gss_display_name(&client->min_stat, name, &name_buffer, NULL);
+    if (GSS_ERROR(client->maj_stat)) {
+        gss_release_name(&ignored, &name);
+        return GSSAPI_ERROR;
+    }
+
+    *username = malloc(name_buffer.length+1);
+    memcpy(*username, name_buffer.value, name_buffer.length+1);
+
+    gss_release_buffer(&ignored, &name_buffer);
+    gss_release_name(&ignored, &name);
+    return GSSAPI_OK;
+}
+
+int gssapi_client_negotiate(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+)
+{
+    gss_buffer_desc input_buffer = GSS_C_EMPTY_BUFFER;
+    gss_buffer_desc output_buffer = GSS_C_EMPTY_BUFFER;
+
+    if (input) {
+        input_buffer.value = input;
+        input_buffer.length = input_length;
+    }
+
+    client->maj_stat = gss_init_sec_context(
+        &client->min_stat,
+        client->cred,
+        &client->ctx,
+        client->spn,
+        GSS_C_NO_OID,
+        GSS_C_MUTUAL_FLAG | GSS_C_SEQUENCE_FLAG,
+        0,
+        GSS_C_NO_CHANNEL_BINDINGS,
+        &input_buffer,
+        NULL,
+        &output_buffer,
+        NULL,
+        NULL
+    );
+
+    if (output_buffer.length) {
+        *output = malloc(output_buffer.length);
+        *output_length = output_buffer.length;
+        memcpy(*output, output_buffer.value, output_buffer.length);
+
+        OM_uint32 ignored;
+        gss_release_buffer(&ignored, &output_buffer);
+    }
+
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    } else if (client->maj_stat == GSS_S_CONTINUE_NEEDED) {
+        return GSSAPI_CONTINUE;
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_wrap_msg(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+)
+{
+    gss_buffer_desc input_buffer = GSS_C_EMPTY_BUFFER;
+    gss_buffer_desc output_buffer = GSS_C_EMPTY_BUFFER;
+
+    input_buffer.value = input;
+    input_buffer.length = input_length;
+
+    client->maj_stat = gss_wrap(&client->min_stat, client->ctx, 0, GSS_C_QOP_DEFAULT, &input_buffer, NULL, &output_buffer);
+
+    if (output_buffer.length) {
+        *output = malloc(output_buffer.length);
+        *output_length = output_buffer.length;
+        memcpy(*output, output_buffer.value, output_buffer.length);
+
+        gss_release_buffer(&client->min_stat, &output_buffer);
+    }
+
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_destroy(
+    gssapi_client_state *client
+)
+{
+    OM_uint32 ignored;
+    if (client->ctx != GSS_C_NO_CONTEXT) {
+        gss_delete_sec_context(&ignored, &client->ctx, GSS_C_NO_BUFFER);
+    }
+
+    if (client->spn != GSS_C_NO_NAME) {
+        gss_release_name(&ignored, &client->spn);
+    }
+
+    if (client->cred != GSS_C_NO_CREDENTIAL) {
+        gss_release_cred(&ignored, &client->cred);
+    }
+
+    return GSSAPI_OK;
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h
new file mode 100644
index 0000000000000000000000000000000000000000..a105ba58b77125562bc2f6e2ec3836415a49087c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h
@@ -0,0 +1,72 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi
+//+build linux darwin
+#ifndef GSS_WRAPPER_H
+#define GSS_WRAPPER_H
+
+#include <stdlib.h>
+#ifdef GOOS_linux
+#include <gssapi/gssapi.h>
+#include <gssapi/gssapi_krb5.h>
+#endif
+#ifdef GOOS_darwin
+#include <GSS/GSS.h>
+#endif
+
+#define GSSAPI_OK 0
+#define GSSAPI_CONTINUE 1
+#define GSSAPI_ERROR 2
+
+typedef struct {
+    gss_name_t spn;
+    gss_cred_id_t cred;
+    gss_ctx_id_t ctx;
+
+    OM_uint32 maj_stat;
+    OM_uint32 min_stat;
+} gssapi_client_state;
+
+int gssapi_error_desc(
+    OM_uint32 maj_stat,
+    OM_uint32 min_stat,
+    char **desc
+);
+
+int gssapi_client_init(
+    gssapi_client_state *client,
+    char* spn,
+    char* username,
+    char* password
+);
+
+int gssapi_client_username(
+    gssapi_client_state *client,
+    char** username
+);
+
+int gssapi_client_negotiate(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+);
+
+int gssapi_client_wrap_msg(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+);
+
+int gssapi_client_destroy(
+    gssapi_client_state *client
+);
+
+#endif
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go
new file mode 100644
index 0000000000000000000000000000000000000000..f1da5a85215fe84d4400eff6c36bfc1485e83bc4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go
@@ -0,0 +1,354 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build gssapi && windows
+// +build gssapi,windows
+
+package gssapi
+
+// #include "sspi_wrapper.h"
+import "C"
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+)
+
+// New creates a new SaslClient. The target parameter should be a hostname with no port.
+func New(target, username, password string, passwordSet bool, props map[string]string) (*SaslClient, error) {
+	initOnce.Do(initSSPI)
+	if initError != nil {
+		return nil, initError
+	}
+
+	var err error
+	serviceName := "mongodb"
+	serviceRealm := ""
+	canonicalizeHostName := false
+	var serviceHostSet bool
+
+	for key, value := range props {
+		switch strings.ToUpper(key) {
+		case "CANONICALIZE_HOST_NAME":
+			canonicalizeHostName, err = strconv.ParseBool(value)
+			if err != nil {
+				return nil, fmt.Errorf("%s must be a boolean (true, false, 0, 1) but got '%s'", key, value)
+			}
+
+		case "SERVICE_REALM":
+			serviceRealm = value
+		case "SERVICE_NAME":
+			serviceName = value
+		case "SERVICE_HOST":
+			serviceHostSet = true
+			target = value
+		}
+	}
+
+	if canonicalizeHostName {
+		// The SERVICE_HOST must not be canonicalized
+		if serviceHostSet {
+			return nil, fmt.Errorf("CANONICALIZE_HOST_NAME and SERVICE_HOST cannot both be specified")
+		}
+
+		names, err := net.LookupAddr(target)
+		if err != nil || len(names) == 0 {
+			return nil, fmt.Errorf("unable to canonicalize hostname: %s", err)
+		}
+		target = names[0]
+		if target[len(target)-1] == '.' {
+			target = target[:len(target)-1]
+		}
+	}
+
+	servicePrincipalName := fmt.Sprintf("%s/%s", serviceName, target)
+	if serviceRealm != "" {
+		servicePrincipalName += "@" + serviceRealm
+	}
+
+	return &SaslClient{
+		servicePrincipalName: servicePrincipalName,
+		username:             username,
+		password:             password,
+		passwordSet:          passwordSet,
+	}, nil
+}
+
+type SaslClient struct {
+	servicePrincipalName string
+	username             string
+	password             string
+	passwordSet          bool
+
+	// state
+	state           C.sspi_client_state
+	contextComplete bool
+	done            bool
+}
+
+func (sc *SaslClient) Close() {
+	C.sspi_client_destroy(&sc.state)
+}
+
+func (sc *SaslClient) Start() (string, []byte, error) {
+	const mechName = "GSSAPI"
+
+	var cusername *C.char
+	var cpassword *C.char
+	if sc.username != "" {
+		cusername = C.CString(sc.username)
+		defer C.free(unsafe.Pointer(cusername))
+		if sc.passwordSet {
+			cpassword = C.CString(sc.password)
+			defer C.free(unsafe.Pointer(cpassword))
+		}
+	}
+	status := C.sspi_client_init(&sc.state, cusername, cpassword)
+
+	if status != C.SSPI_OK {
+		return mechName, nil, sc.getError("unable to initialize client")
+	}
+
+	payload, err := sc.Next(nil, nil)
+
+	return mechName, payload, err
+}
+
+func (sc *SaslClient) Next(_ context.Context, challenge []byte) ([]byte, error) {
+
+	var outBuf C.PVOID
+	var outBufLen C.ULONG
+
+	if sc.contextComplete {
+		if sc.username == "" {
+			var cusername *C.char
+			status := C.sspi_client_username(&sc.state, &cusername)
+			if status != C.SSPI_OK {
+				return nil, sc.getError("unable to acquire username")
+			}
+			defer C.free(unsafe.Pointer(cusername))
+			sc.username = C.GoString((*C.char)(unsafe.Pointer(cusername)))
+		}
+
+		bytes := append([]byte{1, 0, 0, 0}, []byte(sc.username)...)
+		buf := (C.PVOID)(unsafe.Pointer(&bytes[0]))
+		bufLen := C.ULONG(len(bytes))
+		status := C.sspi_client_wrap_msg(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		if status != C.SSPI_OK {
+			return nil, sc.getError("unable to wrap authz")
+		}
+
+		sc.done = true
+	} else {
+		var buf C.PVOID
+		var bufLen C.ULONG
+		if len(challenge) > 0 {
+			buf = (C.PVOID)(unsafe.Pointer(&challenge[0]))
+			bufLen = C.ULONG(len(challenge))
+		}
+		cservicePrincipalName := C.CString(sc.servicePrincipalName)
+		defer C.free(unsafe.Pointer(cservicePrincipalName))
+
+		status := C.sspi_client_negotiate(&sc.state, cservicePrincipalName, buf, bufLen, &outBuf, &outBufLen)
+		switch status {
+		case C.SSPI_OK:
+			sc.contextComplete = true
+		case C.SSPI_CONTINUE:
+		default:
+			return nil, sc.getError("unable to negotiate with server")
+		}
+	}
+
+	if outBuf != C.PVOID(nil) {
+		defer C.free(unsafe.Pointer(outBuf))
+	}
+
+	return C.GoBytes(unsafe.Pointer(outBuf), C.int(outBufLen)), nil
+}
+
+func (sc *SaslClient) Completed() bool {
+	return sc.done
+}
+
+func (sc *SaslClient) getError(prefix string) error {
+	return getError(prefix, sc.state.status)
+}
+
+var initOnce sync.Once
+var initError error
+
+func initSSPI() {
+	rc := C.sspi_init()
+	if rc != 0 {
+		initError = fmt.Errorf("error initializing sspi: %v", rc)
+	}
+}
+
+func getError(prefix string, status C.SECURITY_STATUS) error {
+	var s string
+	switch status {
+	case C.SEC_E_ALGORITHM_MISMATCH:
+		s = "The client and server cannot communicate because they do not possess a common algorithm."
+	case C.SEC_E_BAD_BINDINGS:
+		s = "The SSPI channel bindings supplied by the client are incorrect."
+	case C.SEC_E_BAD_PKGID:
+		s = "The requested package identifier does not exist."
+	case C.SEC_E_BUFFER_TOO_SMALL:
+		s = "The buffers supplied to the function are not large enough to contain the information."
+	case C.SEC_E_CANNOT_INSTALL:
+		s = "The security package cannot initialize successfully and should not be installed."
+	case C.SEC_E_CANNOT_PACK:
+		s = "The package is unable to pack the context."
+	case C.SEC_E_CERT_EXPIRED:
+		s = "The received certificate has expired."
+	case C.SEC_E_CERT_UNKNOWN:
+		s = "An unknown error occurred while processing the certificate."
+	case C.SEC_E_CERT_WRONG_USAGE:
+		s = "The certificate is not valid for the requested usage."
+	case C.SEC_E_CONTEXT_EXPIRED:
+		s = "The application is referencing a context that has already been closed. A properly written application should not receive this error."
+	case C.SEC_E_CROSSREALM_DELEGATION_FAILURE:
+		s = "The server attempted to make a Kerberos-constrained delegation request for a target outside the server's realm."
+	case C.SEC_E_CRYPTO_SYSTEM_INVALID:
+		s = "The cryptographic system or checksum function is not valid because a required function is unavailable."
+	case C.SEC_E_DECRYPT_FAILURE:
+		s = "The specified data could not be decrypted."
+	case C.SEC_E_DELEGATION_REQUIRED:
+		s = "The requested operation cannot be completed. The computer must be trusted for delegation, and the current user account must be configured to allow it."
+	case C.SEC_E_DOWNGRADE_DETECTED:
+		s = "The system detected a possible attempt to compromise security. Verify that the server that authenticated you can be contacted."
+	case C.SEC_E_ENCRYPT_FAILURE:
+		s = "The specified data could not be encrypted."
+	case C.SEC_E_ILLEGAL_MESSAGE:
+		s = "The message received was unexpected or badly formatted."
+	case C.SEC_E_INCOMPLETE_CREDENTIALS:
+		s = "The credentials supplied were not complete and could not be verified. The context could not be initialized."
+	case C.SEC_E_INCOMPLETE_MESSAGE:
+		s = "The message supplied was incomplete. The signature was not verified."
+	case C.SEC_E_INSUFFICIENT_MEMORY:
+		s = "Not enough memory is available to complete the request."
+	case C.SEC_E_INTERNAL_ERROR:
+		s = "An error occurred that did not map to an SSPI error code."
+	case C.SEC_E_INVALID_HANDLE:
+		s = "The handle passed to the function is not valid."
+	case C.SEC_E_INVALID_TOKEN:
+		s = "The token passed to the function is not valid."
+	case C.SEC_E_ISSUING_CA_UNTRUSTED:
+		s = "An untrusted certification authority (CA) was detected while processing the smart card certificate used for authentication."
+	case C.SEC_E_ISSUING_CA_UNTRUSTED_KDC:
+		s = "An untrusted CA was detected while processing the domain controller certificate used for authentication. The system event log contains additional information."
+	case C.SEC_E_KDC_CERT_EXPIRED:
+		s = "The domain controller certificate used for smart card logon has expired."
+	case C.SEC_E_KDC_CERT_REVOKED:
+		s = "The domain controller certificate used for smart card logon has been revoked."
+	case C.SEC_E_KDC_INVALID_REQUEST:
+		s = "A request that is not valid was sent to the KDC."
+	case C.SEC_E_KDC_UNABLE_TO_REFER:
+		s = "The KDC was unable to generate a referral for the service requested."
+	case C.SEC_E_KDC_UNKNOWN_ETYPE:
+		s = "The requested encryption type is not supported by the KDC."
+	case C.SEC_E_LOGON_DENIED:
+		s = "The logon has been denied."
+	case C.SEC_E_MAX_REFERRALS_EXCEEDED:
+		s = "The number of maximum ticket referrals has been exceeded."
+	case C.SEC_E_MESSAGE_ALTERED:
+		s = "The message supplied for verification has been altered."
+	case C.SEC_E_MULTIPLE_ACCOUNTS:
+		s = "The received certificate was mapped to multiple accounts."
+	case C.SEC_E_MUST_BE_KDC:
+		s = "The local computer must be a Kerberos domain controller (KDC)"
+	case C.SEC_E_NO_AUTHENTICATING_AUTHORITY:
+		s = "No authority could be contacted for authentication."
+	case C.SEC_E_NO_CREDENTIALS:
+		s = "No credentials are available."
+	case C.SEC_E_NO_IMPERSONATION:
+		s = "No impersonation is allowed for this context."
+	case C.SEC_E_NO_IP_ADDRESSES:
+		s = "Unable to accomplish the requested task because the local computer does not have any IP addresses."
+	case C.SEC_E_NO_KERB_KEY:
+		s = "No Kerberos key was found."
+	case C.SEC_E_NO_PA_DATA:
+		s = "Policy administrator (PA) data is needed to determine the encryption type."
+	case C.SEC_E_NO_S4U_PROT_SUPPORT:
+		s = "The Kerberos subsystem encountered an error. A service for user protocol request was made against a domain controller which does not support service for a user."
+	case C.SEC_E_NO_TGT_REPLY:
+		s = "The client is trying to negotiate a context and the server requires a user-to-user connection."
+	case C.SEC_E_NOT_OWNER:
+		s = "The caller of the function does not own the credentials."
+	case C.SEC_E_OK:
+		s = "The operation completed successfully."
+	case C.SEC_E_OUT_OF_SEQUENCE:
+		s = "The message supplied for verification is out of sequence."
+	case C.SEC_E_PKINIT_CLIENT_FAILURE:
+		s = "The smart card certificate used for authentication is not trusted."
+	case C.SEC_E_PKINIT_NAME_MISMATCH:
+		s = "The client certificate does not contain a valid UPN or does not match the client name in the logon request."
+	case C.SEC_E_QOP_NOT_SUPPORTED:
+		s = "The quality of protection attribute is not supported by this package."
+	case C.SEC_E_REVOCATION_OFFLINE_C:
+		s = "The revocation status of the smart card certificate used for authentication could not be determined."
+	case C.SEC_E_REVOCATION_OFFLINE_KDC:
+		s = "The revocation status of the domain controller certificate used for smart card authentication could not be determined. The system event log contains additional information."
+	case C.SEC_E_SECPKG_NOT_FOUND:
+		s = "The security package was not recognized."
+	case C.SEC_E_SECURITY_QOS_FAILED:
+		s = "The security context could not be established due to a failure in the requested quality of service (for example, mutual authentication or delegation)."
+	case C.SEC_E_SHUTDOWN_IN_PROGRESS:
+		s = "A system shutdown is in progress."
+	case C.SEC_E_SMARTCARD_CERT_EXPIRED:
+		s = "The smart card certificate used for authentication has expired."
+	case C.SEC_E_SMARTCARD_CERT_REVOKED:
+		s = "The smart card certificate used for authentication has been revoked. Additional information may exist in the event log."
+	case C.SEC_E_SMARTCARD_LOGON_REQUIRED:
+		s = "Smart card logon is required and was not used."
+	case C.SEC_E_STRONG_CRYPTO_NOT_SUPPORTED:
+		s = "The other end of the security negotiation requires strong cryptography, but it is not supported on the local machine."
+	case C.SEC_E_TARGET_UNKNOWN:
+		s = "The target was not recognized."
+	case C.SEC_E_TIME_SKEW:
+		s = "The clocks on the client and server computers do not match."
+	case C.SEC_E_TOO_MANY_PRINCIPALS:
+		s = "The KDC reply contained more than one principal name."
+	case C.SEC_E_UNFINISHED_CONTEXT_DELETED:
+		s = "A security context was deleted before the context was completed. This is considered a logon failure."
+	case C.SEC_E_UNKNOWN_CREDENTIALS:
+		s = "The credentials provided were not recognized."
+	case C.SEC_E_UNSUPPORTED_FUNCTION:
+		s = "The requested function is not supported."
+	case C.SEC_E_UNSUPPORTED_PREAUTH:
+		s = "An unsupported preauthentication mechanism was presented to the Kerberos package."
+	case C.SEC_E_UNTRUSTED_ROOT:
+		s = "The certificate chain was issued by an authority that is not trusted."
+	case C.SEC_E_WRONG_CREDENTIAL_HANDLE:
+		s = "The supplied credential handle does not match the credential associated with the security context."
+	case C.SEC_E_WRONG_PRINCIPAL:
+		s = "The target principal name is incorrect."
+	case C.SEC_I_COMPLETE_AND_CONTINUE:
+		s = "The function completed successfully"
+	case C.SEC_I_COMPLETE_NEEDED:
+		s = "The function completed successfully"
+	case C.SEC_I_CONTEXT_EXPIRED:
+		s = "The message sender has finished using the connection and has initiated a shutdown. For information about initiating or recognizing a shutdown"
+	case C.SEC_I_CONTINUE_NEEDED:
+		s = "The function completed successfully"
+	case C.SEC_I_INCOMPLETE_CREDENTIALS:
+		s = "The credentials supplied were not complete and could not be verified. Additional information can be returned from the context."
+	case C.SEC_I_LOCAL_LOGON:
+		s = "The logon was completed"
+	case C.SEC_I_NO_LSA_CONTEXT:
+		s = "There is no LSA mode context associated with this context."
+	case C.SEC_I_RENEGOTIATE:
+		s = "The context data must be renegotiated with the peer."
+	default:
+		return fmt.Errorf("%s: 0x%x", prefix, uint32(status))
+	}
+
+	return fmt.Errorf("%s: %s(0x%x)", prefix, s, uint32(status))
+}
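The mapping above flattens the Windows SSPI status table into Go errors with a fixed `prefix: message(0xcode)` shape, falling back to the raw hex value for unknown codes. A minimal self-contained sketch of the same pattern, with an assumed two-entry status table standing in for the cgo constants:

```go
package main

import "fmt"

type secStatus uint32

// Illustrative values only; the real constants come from the Windows SDK via cgo.
const (
	secEOK       secStatus = 0x00000000
	secETimeSkew secStatus = 0x80090324
)

func statusError(prefix string, status secStatus) error {
	var s string
	switch status {
	case secEOK:
		s = "The operation completed successfully."
	case secETimeSkew:
		s = "The clocks on the client and server computers do not match."
	default:
		// Unknown status codes fall back to the raw hex value, as in the
		// default case of the vendored switch.
		return fmt.Errorf("%s: 0x%x", prefix, uint32(status))
	}
	return fmt.Errorf("%s: %s(0x%x)", prefix, s, uint32(status))
}

func main() {
	fmt.Println(statusError("sspi(AcquireCredentialsHandle)", secETimeSkew))
}
```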
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c
new file mode 100644
index 0000000000000000000000000000000000000000..bc73723e83c214c70dead913af7264522950e384
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c
@@ -0,0 +1,249 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi,windows
+
+#include "sspi_wrapper.h"
+
+static HINSTANCE sspi_secur32_dll = NULL;
+static PSecurityFunctionTable sspi_functions = NULL;
+static const LPSTR SSPI_PACKAGE_NAME = "kerberos";
+
+int sspi_init(
+)
+{
+	// Load the secur32.dll library using its exact path. Passing the exact DLL path rather than allowing LoadLibrary to
+	// search in different locations removes the possibility of DLL preloading attacks. We use GetSystemDirectoryA and
+	// LoadLibraryA rather than the GetSystemDirectory/LoadLibrary aliases to ensure the ANSI versions are used so we
+	// don't have to account for variations in char sizes if UNICODE is enabled.
+
+	// Passing a 0 size will return the required buffer length to hold the path, including the null terminator.
+	int requiredLen = GetSystemDirectoryA(NULL, 0);
+	if (!requiredLen) {
+		return GetLastError();
+	}
+
+	// Allocate a buffer to hold the system directory + "\secur32.dll" (length 12, not including null terminator).
+	int actualLen = requiredLen + 12;
+	char *directoryBuffer = (char *) calloc(1, actualLen);
+	int directoryLen = GetSystemDirectoryA(directoryBuffer, actualLen);
+	if (!directoryLen) {
+		free(directoryBuffer);
+		return GetLastError();
+	}
+
+	// Append the DLL name to the buffer.
+	char *dllName = "\\secur32.dll";
+	strcpy_s(&(directoryBuffer[directoryLen]), actualLen - directoryLen, dllName);
+
+	sspi_secur32_dll = LoadLibraryA(directoryBuffer);
+	free(directoryBuffer);
+	if (!sspi_secur32_dll) {
+		return GetLastError();
+	}
+
+	INIT_SECURITY_INTERFACE init_security_interface = (INIT_SECURITY_INTERFACE)GetProcAddress(sspi_secur32_dll, SECURITY_ENTRYPOINT);
+	if (!init_security_interface) {
+		return -1;
+	}
+
+	sspi_functions = (*init_security_interface)();
+	if (!sspi_functions) {
+		return -2;
+	}
+
+	return SSPI_OK;
+}
+
+int sspi_client_init(
+    sspi_client_state *client,
+    char* username,
+    char* password
+)
+{
+	TimeStamp timestamp;
+
+    if (username) {
+        if (password) {
+            SEC_WINNT_AUTH_IDENTITY auth_identity;
+
+        #ifdef _UNICODE
+            auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;
+        #else
+            auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
+        #endif
+            auth_identity.User = (LPSTR) username;
+            auth_identity.UserLength = strlen(username);
+            auth_identity.Password = (LPSTR) password;
+            auth_identity.PasswordLength = strlen(password);
+            auth_identity.Domain = NULL;
+            auth_identity.DomainLength = 0;
+            client->status = sspi_functions->AcquireCredentialsHandle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, &client->cred, &timestamp);
+        } else {
+            client->status = sspi_functions->AcquireCredentialsHandle(username, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, &client->cred, &timestamp);
+        }
+    } else {
+        client->status = sspi_functions->AcquireCredentialsHandle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, &client->cred, &timestamp);
+    }
+
+    if (client->status != SEC_E_OK) {
+        return SSPI_ERROR;
+    }
+
+    return SSPI_OK;
+}
+
+int sspi_client_username(
+    sspi_client_state *client,
+    char** username
+)
+{
+    SecPkgCredentials_Names names;
+	client->status = sspi_functions->QueryCredentialsAttributes(&client->cred, SECPKG_CRED_ATTR_NAMES, &names);
+
+	if (client->status != SEC_E_OK) {
+		return SSPI_ERROR;
+	}
+
+	int len = strlen(names.sUserName) + 1;
+	*username = malloc(len);
+	memcpy(*username, names.sUserName, len);
+
+	sspi_functions->FreeContextBuffer(names.sUserName);
+
+    return SSPI_OK;
+}
+
+int sspi_client_negotiate(
+    sspi_client_state *client,
+    char* spn,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+)
+{
+    SecBufferDesc inbuf;
+	SecBuffer in_bufs[1];
+	SecBufferDesc outbuf;
+	SecBuffer out_bufs[1];
+
+	if (client->has_ctx > 0) {
+		inbuf.ulVersion = SECBUFFER_VERSION;
+		inbuf.cBuffers = 1;
+		inbuf.pBuffers = in_bufs;
+		in_bufs[0].pvBuffer = input;
+		in_bufs[0].cbBuffer = input_length;
+		in_bufs[0].BufferType = SECBUFFER_TOKEN;
+	}
+
+	outbuf.ulVersion = SECBUFFER_VERSION;
+	outbuf.cBuffers = 1;
+	outbuf.pBuffers = out_bufs;
+	out_bufs[0].pvBuffer = NULL;
+	out_bufs[0].cbBuffer = 0;
+	out_bufs[0].BufferType = SECBUFFER_TOKEN;
+
+	ULONG context_attr = 0;
+
+	client->status = sspi_functions->InitializeSecurityContext(
+        &client->cred,
+        client->has_ctx > 0 ? &client->ctx : NULL,
+        (LPSTR) spn,
+        ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
+        0,
+        SECURITY_NETWORK_DREP,
+        client->has_ctx > 0 ? &inbuf : NULL,
+        0,
+        &client->ctx,
+        &outbuf,
+        &context_attr,
+        NULL);
+
+    if (client->status != SEC_E_OK && client->status != SEC_I_CONTINUE_NEEDED) {
+        return SSPI_ERROR;
+    }
+
+    client->has_ctx = 1;
+
+	*output = malloc(out_bufs[0].cbBuffer);
+	*output_length = out_bufs[0].cbBuffer;
+	memcpy(*output, out_bufs[0].pvBuffer, *output_length);
+    sspi_functions->FreeContextBuffer(out_bufs[0].pvBuffer);
+
+    if (client->status == SEC_I_CONTINUE_NEEDED) {
+        return SSPI_CONTINUE;
+    }
+
+    return SSPI_OK;
+}
+
+int sspi_client_wrap_msg(
+    sspi_client_state *client,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+)
+{
+    SecPkgContext_Sizes sizes;
+
+	client->status = sspi_functions->QueryContextAttributes(&client->ctx, SECPKG_ATTR_SIZES, &sizes);
+	if (client->status != SEC_E_OK) {
+		return SSPI_ERROR;
+	}
+
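+	// Assemble the message as [security trailer][plaintext][block padding];
+	// EncryptMessage signs in place, filling the trailer and padding buffers.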
+	char *msg = malloc((sizes.cbSecurityTrailer + input_length + sizes.cbBlockSize) * sizeof(char));
+	memcpy(&msg[sizes.cbSecurityTrailer], input, input_length);
+
+	SecBuffer wrap_bufs[3];
+	SecBufferDesc wrap_buf_desc;
+	wrap_buf_desc.cBuffers = 3;
+	wrap_buf_desc.pBuffers = wrap_bufs;
+	wrap_buf_desc.ulVersion = SECBUFFER_VERSION;
+
+	wrap_bufs[0].cbBuffer = sizes.cbSecurityTrailer;
+	wrap_bufs[0].BufferType = SECBUFFER_TOKEN;
+	wrap_bufs[0].pvBuffer = msg;
+
+	wrap_bufs[1].cbBuffer = input_length;
+	wrap_bufs[1].BufferType = SECBUFFER_DATA;
+	wrap_bufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;
+
+	wrap_bufs[2].cbBuffer = sizes.cbBlockSize;
+	wrap_bufs[2].BufferType = SECBUFFER_PADDING;
+	wrap_bufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + input_length;
+
+	client->status = sspi_functions->EncryptMessage(&client->ctx, SECQOP_WRAP_NO_ENCRYPT, &wrap_buf_desc, 0);
+	if (client->status != SEC_E_OK) {
+		free(msg);
+		return SSPI_ERROR;
+	}
+
+	*output_length = wrap_bufs[0].cbBuffer + wrap_bufs[1].cbBuffer + wrap_bufs[2].cbBuffer;
+	*output = malloc(*output_length);
+
+	memcpy(*output, wrap_bufs[0].pvBuffer, wrap_bufs[0].cbBuffer);
+	memcpy(*output + wrap_bufs[0].cbBuffer, wrap_bufs[1].pvBuffer, wrap_bufs[1].cbBuffer);
+	memcpy(*output + wrap_bufs[0].cbBuffer + wrap_bufs[1].cbBuffer, wrap_bufs[2].pvBuffer, wrap_bufs[2].cbBuffer);
+
+	free(msg);
+
+	return SSPI_OK;
+}
+
+int sspi_client_destroy(
+    sspi_client_state *client
+)
+{
+    if (client->has_ctx > 0) {
+        sspi_functions->DeleteSecurityContext(&client->ctx);
+    }
+
+    sspi_functions->FreeCredentialsHandle(&client->cred);
+
+    return SSPI_OK;
+}
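sspi_client_negotiate is designed to be called in a loop: each output token is sent to the server, and the server's reply becomes the input for the next call, until the status is SSPI_OK rather than SSPI_CONTINUE. A hedged Go-side sketch of that loop; `clientNegotiate` and `sendReceive` are hypothetical stand-ins for the cgo wrapper and the SASL transport:

```go
package gssapi

// Status values mirroring SSPI_OK and SSPI_CONTINUE in the C header.
const (
	sspiOK       = 0
	sspiContinue = 1
)

func authLoop(
	spn string,
	clientNegotiate func(spn string, in []byte) (out []byte, status int, err error),
	sendReceive func(out []byte) (in []byte, err error),
) error {
	var inToken []byte
	for {
		outToken, status, err := clientNegotiate(spn, inToken)
		if err != nil {
			return err
		}
		// Every output token, including the final one, is forwarded to the
		// server; its reply seeds the next InitializeSecurityContext call.
		inToken, err = sendReceive(outToken)
		if err != nil {
			return err
		}
		if status != sspiContinue {
			return nil // sspiOK: the security context is established.
		}
	}
}
```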
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h
new file mode 100644
index 0000000000000000000000000000000000000000..e59e55c696c55e174b9f9cb5ba48c32b566b6990
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h
@@ -0,0 +1,64 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi,windows
+
+#ifndef SSPI_WRAPPER_H
+#define SSPI_WRAPPER_H
+
+#define SECURITY_WIN32 1  /* Required for SSPI */
+
+#include <windows.h>
+#include <sspi.h>
+
+#define SSPI_OK 0
+#define SSPI_CONTINUE 1
+#define SSPI_ERROR 2
+
+typedef struct {
+    CredHandle cred;
+    CtxtHandle ctx;
+
+    int has_ctx;
+
+    SECURITY_STATUS status;
+} sspi_client_state;
+
+int sspi_init();
+
+int sspi_client_init(
+    sspi_client_state *client,
+    char* username,
+    char* password
+);
+
+int sspi_client_username(
+    sspi_client_state *client,
+    char** username
+);
+
+int sspi_client_negotiate(
+    sspi_client_state *client,
+    char* spn,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+);
+
+int sspi_client_wrap_msg(
+    sspi_client_state *client,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+);
+
+int sspi_client_destroy(
+    sspi_client_state *client
+);
+
+#endif
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go
new file mode 100644
index 0000000000000000000000000000000000000000..679c54e9feb45d3da5b006f36a5b204e28eb5e91
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go
@@ -0,0 +1,92 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/internal/aws/credentials"
+	"go.mongodb.org/mongo-driver/internal/credproviders"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds"
+)
+
+// MongoDBAWS is the mechanism name for MongoDBAWS.
+const MongoDBAWS = "MONGODB-AWS"
+
+func newMongoDBAWSAuthenticator(cred *Cred, httpClient *http.Client) (Authenticator, error) {
+	if cred.Source != "" && cred.Source != sourceExternal {
+		return nil, newAuthError("MONGODB-AWS source must be empty or $external", nil)
+	}
+	if httpClient == nil {
+		return nil, errors.New("httpClient must not be nil")
+	}
+	return &MongoDBAWSAuthenticator{
+		credentials: &credproviders.StaticProvider{
+			Value: credentials.Value{
+				AccessKeyID:     cred.Username,
+				SecretAccessKey: cred.Password,
+				SessionToken:    cred.Props["AWS_SESSION_TOKEN"],
+			},
+		},
+		httpClient: httpClient,
+	}, nil
+}
+
+// MongoDBAWSAuthenticator uses AWS-IAM credentials over SASL to authenticate a connection.
+type MongoDBAWSAuthenticator struct {
+	credentials *credproviders.StaticProvider
+	httpClient  *http.Client
+}
+
+// Auth authenticates the connection.
+func (a *MongoDBAWSAuthenticator) Auth(ctx context.Context, cfg *Config) error {
+	providers := creds.NewAWSCredentialProvider(a.httpClient, a.credentials)
+	adapter := &awsSaslAdapter{
+		conversation: &awsConversation{
+			credentials: providers.Cred,
+		},
+	}
+	err := ConductSaslConversation(ctx, cfg, sourceExternal, adapter)
+	if err != nil {
+		return newAuthError("sasl conversation error", err)
+	}
+	return nil
+}
+
+// Reauth reauthenticates the connection.
+func (a *MongoDBAWSAuthenticator) Reauth(_ context.Context, _ *driver.AuthConfig) error {
+	return newAuthError("AWS authentication does not support reauthentication", nil)
+}
+
+type awsSaslAdapter struct {
+	conversation *awsConversation
+}
+
+var _ SaslClient = (*awsSaslAdapter)(nil)
+
+func (a *awsSaslAdapter) Start() (string, []byte, error) {
+	step, err := a.conversation.Step(nil)
+	if err != nil {
+		return MongoDBAWS, nil, err
+	}
+	return MongoDBAWS, step, nil
+}
+
+func (a *awsSaslAdapter) Next(_ context.Context, challenge []byte) ([]byte, error) {
+	step, err := a.conversation.Step(challenge)
+	if err != nil {
+		return nil, err
+	}
+	return step, nil
+}
+
+func (a *awsSaslAdapter) Completed() bool {
+	return a.conversation.Done()
+}
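For orientation, a minimal sketch of selecting this mechanism through the driver's public options API; the URI and key material are placeholders:

```go
package main

import (
	"context"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// MONGODB-AWS maps Username/Password to the AWS access key ID and secret
	// access key; a session token, if any, travels in AuthMechanismProperties.
	cred := options.Credential{
		AuthMechanism: "MONGODB-AWS",
		AuthSource:    "$external",
		Username:      "<accessKeyID>",     // placeholder
		Password:      "<secretAccessKey>", // placeholder
		AuthMechanismProperties: map[string]string{
			"AWS_SESSION_TOKEN": "<sessionToken>", // optional, placeholder
		},
	}
	client, err := mongo.Connect(context.Background(),
		options.Client().ApplyURI("mongodb://localhost:27017").SetAuth(cred))
	if err != nil {
		panic(err)
	}
	defer func() { _ = client.Disconnect(context.Background()) }()
}
```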
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go
new file mode 100644
index 0000000000000000000000000000000000000000..1861956b74970d4cc38764b4112949caecd70713
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go
@@ -0,0 +1,120 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+
+	// Ignore gosec warning "Blocklisted import crypto/md5: weak cryptographic primitive". We need
+	// to use MD5 here to implement the MONGODB-CR specification.
+	/* #nosec G501 */
+	"crypto/md5"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+)
+
+// MONGODBCR is the mechanism name for MONGODB-CR.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 3.6 and removed in
+// MongoDB 4.0.
+const MONGODBCR = "MONGODB-CR"
+
+func newMongoDBCRAuthenticator(cred *Cred, _ *http.Client) (Authenticator, error) {
+	source := cred.Source
+	if source == "" {
+		source = "admin"
+	}
+	return &MongoDBCRAuthenticator{
+		DB:       source,
+		Username: cred.Username,
+		Password: cred.Password,
+	}, nil
+}
+
+// MongoDBCRAuthenticator uses the MONGODB-CR algorithm to authenticate a connection.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 3.6 and removed in
+// MongoDB 4.0.
+type MongoDBCRAuthenticator struct {
+	DB       string
+	Username string
+	Password string
+}
+
+// Auth authenticates the connection.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 3.6 and removed in
+// MongoDB 4.0.
+func (a *MongoDBCRAuthenticator) Auth(ctx context.Context, cfg *Config) error {
+
+	db := a.DB
+	if db == "" {
+		db = defaultAuthDB
+	}
+
+	doc := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendInt32Element(nil, "getnonce", 1))
+	cmd := operation.NewCommand(doc).
+		Database(db).
+		Deployment(driver.SingleConnectionDeployment{cfg.Connection}).
+		ClusterClock(cfg.ClusterClock).
+		ServerAPI(cfg.ServerAPI)
+	err := cmd.Execute(ctx)
+	if err != nil {
+		return newError(err, MONGODBCR)
+	}
+	rdr := cmd.Result()
+
+	var getNonceResult struct {
+		Nonce string `bson:"nonce"`
+	}
+
+	err = bson.Unmarshal(rdr, &getNonceResult)
+	if err != nil {
+		return newAuthError("unmarshal error", err)
+	}
+
+	doc = bsoncore.BuildDocumentFromElements(nil,
+		bsoncore.AppendInt32Element(nil, "authenticate", 1),
+		bsoncore.AppendStringElement(nil, "user", a.Username),
+		bsoncore.AppendStringElement(nil, "nonce", getNonceResult.Nonce),
+		bsoncore.AppendStringElement(nil, "key", a.createKey(getNonceResult.Nonce)),
+	)
+	cmd = operation.NewCommand(doc).
+		Database(db).
+		Deployment(driver.SingleConnectionDeployment{cfg.Connection}).
+		ClusterClock(cfg.ClusterClock).
+		ServerAPI(cfg.ServerAPI)
+	err = cmd.Execute(ctx)
+	if err != nil {
+		return newError(err, MONGODBCR)
+	}
+
+	return nil
+}
+
+// Reauth reauthenticates the connection.
+func (a *MongoDBCRAuthenticator) Reauth(_ context.Context, _ *driver.AuthConfig) error {
+	return newAuthError("MONGODB-CR does not support reauthentication", nil)
+}
+
+func (a *MongoDBCRAuthenticator) createKey(nonce string) string {
+	// Ignore gosec warning "Use of weak cryptographic primitive". We need to use MD5 here to
+	// implement the MONGODB-CR specification.
+	/* #nosec G401 */
+	h := md5.New()
+
+	_, _ = io.WriteString(h, nonce)
+	_, _ = io.WriteString(h, a.Username)
+	_, _ = io.WriteString(h, mongoPasswordDigest(a.Username, a.Password))
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
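The derivation in createKey can be reproduced standalone: key = md5hex(nonce + username + md5hex(username + ":mongo:" + password)). A small sketch with illustrative values:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

// md5hex mirrors the digest helpers above; MD5 is mandated by the
// (deprecated) MONGODB-CR specification.
func md5hex(parts ...string) string {
	h := md5.New()
	for _, p := range parts {
		_, _ = io.WriteString(h, p)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	nonce, user, pass := "2375531c32080ae8", "alice", "secret" // illustrative values
	key := md5hex(nonce, user, md5hex(user, ":mongo:", pass))
	fmt.Println(key) // sent as the "key" field of the authenticate command
}
```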
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/oidc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/oidc.go
new file mode 100644
index 0000000000000000000000000000000000000000..13fd10ec3d382f4ef51f22e8a58656e5ab62e76d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/oidc.go
@@ -0,0 +1,556 @@
+// Copyright (C) MongoDB, Inc. 2024-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+// MongoDBOIDC is the string constant for the MONGODB-OIDC authentication mechanism.
+const MongoDBOIDC = "MONGODB-OIDC"
+
+// EnvironmentProp is the property key name that specifies the environment for the OIDC authenticator.
+const EnvironmentProp = "ENVIRONMENT"
+
+// ResourceProp is the property key name that specifies the token resource for GCP and AZURE OIDC auth.
+const ResourceProp = "TOKEN_RESOURCE"
+
+// AllowedHostsProp is the property key name that specifies the allowed hosts for the OIDC authenticator.
+const AllowedHostsProp = "ALLOWED_HOSTS"
+
+// AzureEnvironmentValue is the value for the Azure environment.
+const AzureEnvironmentValue = "azure"
+
+// GCPEnvironmentValue is the value for the GCP environment.
+const GCPEnvironmentValue = "gcp"
+
+// TestEnvironmentValue is the value for the test environment.
+const TestEnvironmentValue = "test"
+
+const apiVersion = 1
+const invalidateSleepTimeout = 100 * time.Millisecond
+
+// The CSOT specification says to apply a 1-minute timeout if "CSOT is not applied". That's
+// ambiguous for the v1.x Go Driver because it could mean either "no timeout provided" or "CSOT not
+// enabled". Always use a maximum timeout duration of 1 minute, allowing us to ignore the ambiguity.
+// Contexts with a shorter timeout are unaffected.
+const machineCallbackTimeout = time.Minute
+const humanCallbackTimeout = 5 * time.Minute
+
+var defaultAllowedHosts = []*regexp.Regexp{
+	regexp.MustCompile(`^.*[.]mongodb[.]net(:\d+)?$`),
+	regexp.MustCompile(`^.*[.]mongodb-qa[.]net(:\d+)?$`),
+	regexp.MustCompile(`^.*[.]mongodb-dev[.]net(:\d+)?$`),
+	regexp.MustCompile(`^.*[.]mongodbgov[.]net(:\d+)?$`),
+	regexp.MustCompile(`^localhost(:\d+)?$`),
+	regexp.MustCompile(`^127[.]0[.]0[.]1(:\d+)?$`),
+	regexp.MustCompile(`^::1(:\d+)?$`),
+}
+
+// OIDCCallback is a function that takes a context and OIDCArgs and returns an OIDCCredential.
+type OIDCCallback = driver.OIDCCallback
+
+// OIDCArgs contains the arguments for the OIDC callback.
+type OIDCArgs = driver.OIDCArgs
+
+// OIDCCredential contains the access token and refresh token.
+type OIDCCredential = driver.OIDCCredential
+
+// IDPInfo contains the information needed to perform OIDC authentication with an Identity Provider.
+type IDPInfo = driver.IDPInfo
+
+var _ driver.Authenticator = (*OIDCAuthenticator)(nil)
+var _ SpeculativeAuthenticator = (*OIDCAuthenticator)(nil)
+var _ SaslClient = (*oidcOneStep)(nil)
+var _ SaslClient = (*oidcTwoStep)(nil)
+
+// OIDCAuthenticator is synchronized and handles caching of the access token, refreshToken,
+// and IDPInfo. It also provides a mechanism to refresh the access token, but this functionality
+// is only for the OIDC Human flow.
+type OIDCAuthenticator struct {
+	mu sync.Mutex // Guards all of the info in the OIDCAuthenticator struct.
+
+	AuthMechanismProperties map[string]string
+	OIDCMachineCallback     OIDCCallback
+	OIDCHumanCallback       OIDCCallback
+
+	allowedHosts *[]*regexp.Regexp
+	userName     string
+	httpClient   *http.Client
+	accessToken  string
+	refreshToken *string
+	idpInfo      *IDPInfo
+	tokenGenID   uint64
+}
+
+// SetAccessToken allows for manually setting the access token for the OIDCAuthenticator. It is
+// intended for testing purposes only.
+func (oa *OIDCAuthenticator) SetAccessToken(accessToken string) {
+	oa.mu.Lock()
+	defer oa.mu.Unlock()
+	oa.accessToken = accessToken
+}
+
+func newOIDCAuthenticator(cred *Cred, httpClient *http.Client) (Authenticator, error) {
+	if cred.Source != "" && cred.Source != sourceExternal {
+		return nil, newAuthError("MONGODB-OIDC source must be empty or $external", nil)
+	}
+	if cred.Password != "" {
+		return nil, fmt.Errorf("password cannot be specified for %q", MongoDBOIDC)
+	}
+	if cred.Props != nil {
+		if env, ok := cred.Props[EnvironmentProp]; ok {
+			switch strings.ToLower(env) {
+			case AzureEnvironmentValue:
+				fallthrough
+			case GCPEnvironmentValue:
+				if _, ok := cred.Props[ResourceProp]; !ok {
+					return nil, fmt.Errorf("%q must be specified for %q %q", ResourceProp, env, EnvironmentProp)
+				}
+				fallthrough
+			case TestEnvironmentValue:
+				if cred.OIDCMachineCallback != nil || cred.OIDCHumanCallback != nil {
+					return nil, fmt.Errorf("OIDC callbacks are not allowed for %q %q", env, EnvironmentProp)
+				}
+			}
+		}
+	}
+	oa := &OIDCAuthenticator{
+		userName:                cred.Username,
+		httpClient:              httpClient,
+		AuthMechanismProperties: cred.Props,
+		OIDCMachineCallback:     cred.OIDCMachineCallback,
+		OIDCHumanCallback:       cred.OIDCHumanCallback,
+	}
+	err := oa.setAllowedHosts()
+	return oa, err
+}
+
+func createPatternsForGlobs(hosts []string) ([]*regexp.Regexp, error) {
+	var err error
+	ret := make([]*regexp.Regexp, len(hosts))
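+	// e.g. the glob "*.mongodb.net" becomes the pattern ^.*[.]mongodb[.]net(:\d+)?$,
+	// matching any subdomain with an optional port.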
+	for i := range hosts {
+		hosts[i] = strings.ReplaceAll(hosts[i], ".", "[.]")
+		hosts[i] = strings.ReplaceAll(hosts[i], "*", ".*")
+		hosts[i] = "^" + hosts[i] + "(:\\d+)?$"
+		ret[i], err = regexp.Compile(hosts[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ret, nil
+}
+
+func (oa *OIDCAuthenticator) setAllowedHosts() error {
+	if oa.AuthMechanismProperties == nil {
+		oa.allowedHosts = &defaultAllowedHosts
+		return nil
+	}
+
+	allowedHosts, ok := oa.AuthMechanismProperties[AllowedHostsProp]
+	if !ok {
+		oa.allowedHosts = &defaultAllowedHosts
+		return nil
+	}
+	globs := strings.Split(allowedHosts, ",")
+	ret, err := createPatternsForGlobs(globs)
+	if err != nil {
+		return err
+	}
+	oa.allowedHosts = &ret
+	return nil
+}
+
+func (oa *OIDCAuthenticator) validateConnectionAddressWithAllowedHosts(conn driver.Connection) error {
+	if oa.allowedHosts == nil {
+		// should be unreachable, but this is a safety check.
+		return newAuthError(fmt.Sprintf("%q missing", AllowedHostsProp), nil)
+	}
+	allowedHosts := *oa.allowedHosts
+	if len(allowedHosts) == 0 {
+		return newAuthError(fmt.Sprintf("empty %q specified", AllowedHostsProp), nil)
+	}
+	for _, pattern := range allowedHosts {
+		if pattern.MatchString(string(conn.Address())) {
+			return nil
+		}
+	}
+	return newAuthError(fmt.Sprintf("address %q not allowed by %q: %v", conn.Address(), AllowedHostsProp, allowedHosts), nil)
+}
+
+type oidcOneStep struct {
+	userName    string
+	accessToken string
+}
+
+type oidcTwoStep struct {
+	conn driver.Connection
+	oa   *OIDCAuthenticator
+}
+
+func jwtStepRequest(accessToken string) []byte {
+	return bsoncore.NewDocumentBuilder().
+		AppendString("jwt", accessToken).
+		Build()
+}
+
+func principalStepRequest(principal string) []byte {
+	doc := bsoncore.NewDocumentBuilder()
+	if principal != "" {
+		doc.AppendString("n", principal)
+	}
+	return doc.Build()
+}
+
+func (oos *oidcOneStep) Start() (string, []byte, error) {
+	return MongoDBOIDC, jwtStepRequest(oos.accessToken), nil
+}
+
+func (oos *oidcOneStep) Next(context.Context, []byte) ([]byte, error) {
+	return nil, newAuthError("unexpected step in OIDC authentication", nil)
+}
+
+func (*oidcOneStep) Completed() bool {
+	return true
+}
+
+func (ots *oidcTwoStep) Start() (string, []byte, error) {
+	return MongoDBOIDC, principalStepRequest(ots.oa.userName), nil
+}
+
+func (ots *oidcTwoStep) Next(ctx context.Context, msg []byte) ([]byte, error) {
+	var idpInfo IDPInfo
+	err := bson.Unmarshal(msg, &idpInfo)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshaling BSON document: %w", err)
+	}
+
+	accessToken, err := ots.oa.getAccessToken(ctx,
+		ots.conn,
+		&OIDCArgs{
+			Version: apiVersion,
+			// The two-step flow is human-only, so the IDPInfo received from the server is always passed through.
+			IDPInfo: &idpInfo,
+			// there is no way there could be a refresh token when there is no IDPInfo.
+			RefreshToken: nil,
+		},
+		// two-step callbacks are always human callbacks.
+		ots.oa.OIDCHumanCallback)
+
+	return jwtStepRequest(accessToken), err
+}
+
+func (*oidcTwoStep) Completed() bool {
+	return true
+}
+
+func (oa *OIDCAuthenticator) providerCallback() (OIDCCallback, error) {
+	env, ok := oa.AuthMechanismProperties[EnvironmentProp]
+	if !ok {
+		return nil, nil
+	}
+
+	switch env {
+	case AzureEnvironmentValue:
+		resource, ok := oa.AuthMechanismProperties[ResourceProp]
+		if !ok {
+			return nil, newAuthError(fmt.Sprintf("%q must be specified for Azure OIDC", ResourceProp), nil)
+		}
+		return getAzureOIDCCallback(oa.userName, resource, oa.httpClient), nil
+	case GCPEnvironmentValue:
+		resource, ok := oa.AuthMechanismProperties[ResourceProp]
+		if !ok {
+			return nil, newAuthError(fmt.Sprintf("%q must be specified for GCP OIDC", ResourceProp), nil)
+		}
+		return getGCPOIDCCallback(resource, oa.httpClient), nil
+	}
+
+	return nil, fmt.Errorf("%q %q not supported for MONGODB-OIDC", EnvironmentProp, env)
+}
+
+// getAzureOIDCCallback returns the callback for the Azure Identity Provider.
+func getAzureOIDCCallback(clientID string, resource string, httpClient *http.Client) OIDCCallback {
+	// return the callback parameterized by the clientID and resource, also passing in the user
+	// configured httpClient.
+	return func(ctx context.Context, _ *OIDCArgs) (*OIDCCredential, error) {
+		resource = url.QueryEscape(resource)
+		var uri string
+		if clientID != "" {
+			uri = fmt.Sprintf("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=%s&client_id=%s", resource, clientID)
+		} else {
+			uri = fmt.Sprintf("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=%s", resource)
+		}
+		req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil)
+		if err != nil {
+			return nil, newAuthError("error creating http request to Azure Identity Provider", err)
+		}
+		req.Header.Add("Metadata", "true")
+		req.Header.Add("Accept", "application/json")
+		resp, err := httpClient.Do(req)
+		if err != nil {
+			return nil, newAuthError("error getting access token from Azure Identity Provider", err)
+		}
+		defer resp.Body.Close()
+		var azureResp struct {
+			AccessToken string `json:"access_token"`
+			ExpiresOn   int64  `json:"expires_on,string"`
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			return nil, newAuthError(fmt.Sprintf("failed to get a valid response from Azure Identity Provider, http code: %d", resp.StatusCode), nil)
+		}
+		err = json.NewDecoder(resp.Body).Decode(&azureResp)
+		if err != nil {
+			return nil, newAuthError("failed parsing result from Azure Identity Provider", err)
+		}
+		expireTime := time.Unix(azureResp.ExpiresOn, 0)
+		return &OIDCCredential{
+			AccessToken: azureResp.AccessToken,
+			ExpiresAt:   &expireTime,
+		}, nil
+	}
+}
+
+// getGCPOIDCCallback returns the callback for the GCP Identity Provider.
+func getGCPOIDCCallback(resource string, httpClient *http.Client) OIDCCallback {
+	// return the callback parameterized by the clientID and resource, also passing in the user
+	// configured httpClient.
+	return func(ctx context.Context, _ *OIDCArgs) (*OIDCCredential, error) {
+		resource = url.QueryEscape(resource)
+		uri := fmt.Sprintf("http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience=%s", resource)
+		req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil)
+		if err != nil {
+			return nil, newAuthError("error creating http request to GCP Identity Provider", err)
+		}
+		req.Header.Add("Metadata-Flavor", "Google")
+		resp, err := httpClient.Do(req)
+		if err != nil {
+			return nil, newAuthError("error getting access token from GCP Identity Provider", err)
+		}
+		defer resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			return nil, newAuthError(fmt.Sprintf("failed to get a valid response from GCP Identity Provider, http code: %d", resp.StatusCode), nil)
+		}
+		accessToken, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return nil, newAuthError("failed parsing reading response from GCP Identity Provider", err)
+		}
+		return &OIDCCredential{
+			AccessToken: string(accessToken),
+			ExpiresAt:   nil,
+		}, nil
+	}
+}
+
+func (oa *OIDCAuthenticator) getAccessToken(
+	ctx context.Context,
+	conn driver.Connection,
+	args *OIDCArgs,
+	callback OIDCCallback,
+) (string, error) {
+	oa.mu.Lock()
+	defer oa.mu.Unlock()
+
+	if oa.accessToken != "" {
+		return oa.accessToken, nil
+	}
+
+	// Attempt to refresh the access token if a refresh token is available.
+	if args.RefreshToken != nil {
+		cred, err := callback(ctx, args)
+		if err == nil && cred != nil {
+			oa.accessToken = cred.AccessToken
+			oa.tokenGenID++
+			conn.SetOIDCTokenGenID(oa.tokenGenID)
+			oa.refreshToken = cred.RefreshToken
+			return cred.AccessToken, nil
+		}
+		oa.refreshToken = nil
+		args.RefreshToken = nil
+	}
+	// If we get here, either there was no refresh token or the refresh attempt failed.
+	cred, err := callback(ctx, args)
+	if err != nil {
+		return "", err
+	}
+	// This case should never occur if Go conventions are followed, but it is a safety check so
+	// that we do not return nil pointer errors to our users if they misuse the API.
+	if cred == nil {
+		return "", newAuthError("OIDC callback returned nil credential with no specified error", nil)
+	}
+
+	oa.accessToken = cred.AccessToken
+	oa.tokenGenID++
+	conn.SetOIDCTokenGenID(oa.tokenGenID)
+	oa.refreshToken = cred.RefreshToken
+	// Always set the IDPInfo; in most cases this just recopies the same pointer, or nil
+	// in the machine flow.
+	oa.idpInfo = args.IDPInfo
+
+	return cred.AccessToken, nil
+}
+
+// invalidateAccessToken invalidates the cached access token if the connection's tokenGenID is
+// zero (a new connection) or greater than or equal to the tokenGenID of the OIDCAuthenticator.
+// It should never actually be greater, only equal, but this is a safety check, since extra
+// invalidation is only a performance impact, not a correctness impact.
+func (oa *OIDCAuthenticator) invalidateAccessToken(conn driver.Connection) {
+	oa.mu.Lock()
+	defer oa.mu.Unlock()
+	tokenGenID := conn.OIDCTokenGenID()
+	// If the connection used in a Reauth is a new connection it will not have a correct tokenGenID,
+	// it will instead be set to 0. In the absence of information, the only safe thing to do is to
+	// invalidate the cached accessToken.
+	if tokenGenID == 0 || tokenGenID >= oa.tokenGenID {
+		oa.accessToken = ""
+		conn.SetOIDCTokenGenID(0)
+	}
+}
+
+// Reauth reauthenticates the connection when the server returns a 391 code. Reauth is part of the
+// driver.Authenticator interface.
+func (oa *OIDCAuthenticator) Reauth(ctx context.Context, cfg *Config) error {
+	oa.invalidateAccessToken(cfg.Connection)
+	return oa.Auth(ctx, cfg)
+}
+
+// Auth authenticates the connection.
+func (oa *OIDCAuthenticator) Auth(ctx context.Context, cfg *Config) error {
+	var err error
+
+	if cfg == nil {
+		return newAuthError(fmt.Sprintf("config must be set for %q authentication", MongoDBOIDC), nil)
+	}
+	conn := cfg.Connection
+
+	oa.mu.Lock()
+	cachedAccessToken := oa.accessToken
+	cachedRefreshToken := oa.refreshToken
+	cachedIDPInfo := oa.idpInfo
+	oa.mu.Unlock()
+
+	if cachedAccessToken != "" {
+		err = ConductSaslConversation(ctx, cfg, sourceExternal, &oidcOneStep{
+			userName:    oa.userName,
+			accessToken: cachedAccessToken,
+		})
+		if err == nil {
+			return nil
+		}
+		// This seems like it could be incorrect, since we could be invalidating an access token that
+		// has already been replaced by a different auth attempt, but the tokenGenID will prevent
+		// that from happening.
+		oa.invalidateAccessToken(conn)
+		time.Sleep(invalidateSleepTimeout)
+	}
+
+	if oa.OIDCHumanCallback != nil {
+		return oa.doAuthHuman(ctx, cfg, oa.OIDCHumanCallback, cachedIDPInfo, cachedRefreshToken)
+	}
+
+	// Handle user provided or automatic provider machine callback.
+	var machineCallback OIDCCallback
+	if oa.OIDCMachineCallback != nil {
+		machineCallback = oa.OIDCMachineCallback
+	} else {
+		machineCallback, err = oa.providerCallback()
+		if err != nil {
+			return fmt.Errorf("error getting built-in OIDC provider: %w", err)
+		}
+	}
+
+	if machineCallback != nil {
+		return oa.doAuthMachine(ctx, cfg, machineCallback)
+	}
+	return newAuthError("no OIDC callback provided", nil)
+}
+
+func (oa *OIDCAuthenticator) doAuthHuman(ctx context.Context, cfg *Config, humanCallback OIDCCallback, idpInfo *IDPInfo, refreshToken *string) error {
+	// Ensure that the connection address is allowed by the allowed hosts.
+	err := oa.validateConnectionAddressWithAllowedHosts(cfg.Connection)
+	if err != nil {
+		return err
+	}
+	subCtx, cancel := context.WithTimeout(ctx, humanCallbackTimeout)
+	defer cancel()
+	// If the idpInfo exists, we can just do the one-step conversation.
+	if idpInfo != nil {
+		accessToken, err := oa.getAccessToken(subCtx,
+			cfg.Connection,
+			&OIDCArgs{
+				Version: apiVersion,
+				// Reuse the cached IDPInfo and refresh token from a previous authentication, if any.
+				IDPInfo:      idpInfo,
+				RefreshToken: refreshToken,
+			},
+			humanCallback)
+		if err != nil {
+			return err
+		}
+		return ConductSaslConversation(
+			subCtx,
+			cfg,
+			sourceExternal,
+			&oidcOneStep{accessToken: accessToken},
+		)
+	}
+	// Otherwise, we need the two-step conversation, where we first ask the server for the IDPInfo.
+	ots := &oidcTwoStep{
+		conn: cfg.Connection,
+		oa:   oa,
+	}
+	return ConductSaslConversation(subCtx, cfg, sourceExternal, ots)
+}
+
+func (oa *OIDCAuthenticator) doAuthMachine(ctx context.Context, cfg *Config, machineCallback OIDCCallback) error {
+	subCtx, cancel := context.WithTimeout(ctx, machineCallbackTimeout)
+	accessToken, err := oa.getAccessToken(subCtx,
+		cfg.Connection,
+		&OIDCArgs{
+			Version: apiVersion,
+			// idpInfo is nil for machine callbacks in the current spec.
+			IDPInfo:      nil,
+			RefreshToken: nil,
+		},
+		machineCallback)
+	cancel()
+	if err != nil {
+		return err
+	}
+	return ConductSaslConversation(
+		ctx,
+		cfg,
+		sourceExternal,
+		&oidcOneStep{accessToken: accessToken},
+	)
+}
+
+// CreateSpeculativeConversation creates a speculative conversation for OIDC authentication.
+func (oa *OIDCAuthenticator) CreateSpeculativeConversation() (SpeculativeConversation, error) {
+	oa.mu.Lock()
+	defer oa.mu.Unlock()
+	accessToken := oa.accessToken
+	if accessToken == "" {
+		return nil, nil // Skip speculative auth.
+	}
+
+	return newSaslConversation(&oidcOneStep{accessToken: accessToken}, sourceExternal, true), nil
+}
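A sketch of wiring a custom machine callback through the public options API, assuming the options surface exposed by this driver version (OIDCMachineCallback, OIDCArgs, OIDCCredential); the token value is a placeholder:

```go
package main

import (
	"context"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// The machine callback supplies an access token; this stub returns a
	// placeholder JWT and a one-hour expiry.
	cb := func(_ context.Context, _ *options.OIDCArgs) (*options.OIDCCredential, error) {
		exp := time.Now().Add(time.Hour)
		return &options.OIDCCredential{AccessToken: "<jwt>", ExpiresAt: &exp}, nil
	}
	cred := options.Credential{
		AuthMechanism:       "MONGODB-OIDC",
		AuthSource:          "$external",
		OIDCMachineCallback: cb,
	}
	client, err := mongo.Connect(context.Background(),
		options.Client().ApplyURI("mongodb://localhost:27017").SetAuth(cred))
	if err != nil {
		panic(err)
	}
	defer func() { _ = client.Disconnect(context.Background()) }()
}
```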
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb0645aaac13ff416b1259605934059949b07711
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go
@@ -0,0 +1,78 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+// PLAIN is the mechanism name for PLAIN.
+const PLAIN = "PLAIN"
+
+func newPlainAuthenticator(cred *Cred, _ *http.Client) (Authenticator, error) {
+	// TODO(GODRIVER-3317): The PLAIN specification says about auth source:
+	//
+	// "MUST be specified. Defaults to the database name if supplied on the
+	// connection string or $external."
+	//
+	// We should actually pass through the auth source, not always pass
+	// $external. If it's empty, we should default to $external.
+	//
+	// For example:
+	//
+	//  source := cred.Source
+	//  if source == "" {
+	//      source = "$external"
+	//  }
+	//
+	return &PlainAuthenticator{
+		Username: cred.Username,
+		Password: cred.Password,
+	}, nil
+}
+
+// PlainAuthenticator uses the PLAIN algorithm over SASL to authenticate a connection.
+type PlainAuthenticator struct {
+	Username string
+	Password string
+}
+
+// Auth authenticates the connection.
+func (a *PlainAuthenticator) Auth(ctx context.Context, cfg *Config) error {
+	return ConductSaslConversation(ctx, cfg, sourceExternal, &plainSaslClient{
+		username: a.Username,
+		password: a.Password,
+	})
+}
+
+// Reauth reauthenticates the connection.
+func (a *PlainAuthenticator) Reauth(_ context.Context, _ *driver.AuthConfig) error {
+	return newAuthError("Plain authentication does not support reauthentication", nil)
+}
+
+type plainSaslClient struct {
+	username string
+	password string
+}
+
+var _ SaslClient = (*plainSaslClient)(nil)
+
+func (c *plainSaslClient) Start() (string, []byte, error) {
+	b := []byte("\x00" + c.username + "\x00" + c.password)
+	return PLAIN, b, nil
+}
+
+func (c *plainSaslClient) Next(context.Context, []byte) ([]byte, error) {
+	return nil, newAuthError("unexpected server challenge", nil)
+}
+
+func (c *plainSaslClient) Completed() bool {
+	return true
+}
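The PLAIN initial response built in Start is the RFC 4616 message: an empty authorization identity, then the username and password, NUL-separated. A tiny standalone sketch:

```go
package main

import "fmt"

// plainPayload builds the RFC 4616 initial response used by Start above.
func plainPayload(username, password string) []byte {
	return []byte("\x00" + username + "\x00" + password)
}

func main() {
	fmt.Printf("%q\n", plainPayload("alice", "secret")) // "\x00alice\x00secret"
}
```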
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ef67f02b0b3bda1ae627771d78ef4a507891288
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go
@@ -0,0 +1,173 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+)
+
+// SaslClient is the client piece of a sasl conversation.
+type SaslClient interface {
+	Start() (string, []byte, error)
+	Next(ctx context.Context, challenge []byte) ([]byte, error)
+	Completed() bool
+}
+
+// SaslClientCloser is a SaslClient that has resources to clean up.
+type SaslClientCloser interface {
+	SaslClient
+	Close()
+}
+
+// ExtraOptionsSaslClient is a SaslClient that appends options to the saslStart command.
+type ExtraOptionsSaslClient interface {
+	StartCommandOptions() bsoncore.Document
+}
+
+// saslConversation represents a SASL conversation. This type implements the SpeculativeConversation interface so the
+// conversation can be executed in multi-step speculative fashion.
+type saslConversation struct {
+	client      SaslClient
+	source      string
+	mechanism   string
+	speculative bool
+}
+
+var _ SpeculativeConversation = (*saslConversation)(nil)
+
+func newSaslConversation(client SaslClient, source string, speculative bool) *saslConversation {
+	authSource := source
+	if authSource == "" {
+		authSource = defaultAuthDB
+	}
+	return &saslConversation{
+		client:      client,
+		source:      authSource,
+		speculative: speculative,
+	}
+}
+
+// FirstMessage returns the first message to be sent to the server. This message contains a "db" field so it can be used
+// for speculative authentication.
+func (sc *saslConversation) FirstMessage() (bsoncore.Document, error) {
+	var payload []byte
+	var err error
+	sc.mechanism, payload, err = sc.client.Start()
+	if err != nil {
+		return nil, err
+	}
+
+	saslCmdElements := [][]byte{
+		bsoncore.AppendInt32Element(nil, "saslStart", 1),
+		bsoncore.AppendStringElement(nil, "mechanism", sc.mechanism),
+		bsoncore.AppendBinaryElement(nil, "payload", 0x00, payload),
+	}
+	if sc.speculative {
+		// The "db" field is only appended for speculative auth because the hello command is executed against admin
+		// so this is needed to tell the server the user's auth source. For a non-speculative attempt, the SASL commands
+		// will be executed against the auth source.
+		saslCmdElements = append(saslCmdElements, bsoncore.AppendStringElement(nil, "db", sc.source))
+	}
+	if extraOptionsClient, ok := sc.client.(ExtraOptionsSaslClient); ok {
+		optionsDoc := extraOptionsClient.StartCommandOptions()
+		saslCmdElements = append(saslCmdElements, bsoncore.AppendDocumentElement(nil, "options", optionsDoc))
+	}
+
+	return bsoncore.BuildDocumentFromElements(nil, saslCmdElements...), nil
+}
+
+type saslResponse struct {
+	ConversationID int    `bson:"conversationId"`
+	Code           int    `bson:"code"`
+	Done           bool   `bson:"done"`
+	Payload        []byte `bson:"payload"`
+}
+
+// Finish completes the conversation based on the first server response to authenticate the given connection.
+func (sc *saslConversation) Finish(ctx context.Context, cfg *Config, firstResponse bsoncore.Document) error {
+	if closer, ok := sc.client.(SaslClientCloser); ok {
+		defer closer.Close()
+	}
+
+	var saslResp saslResponse
+	err := bson.Unmarshal(firstResponse, &saslResp)
+	if err != nil {
+		fullErr := fmt.Errorf("unmarshal error: %w", err)
+		return newError(fullErr, sc.mechanism)
+	}
+
+	cid := saslResp.ConversationID
+	var payload []byte
+	var rdr bsoncore.Document
+	for {
+		if saslResp.Code != 0 {
+			// A nonzero code means the server rejected the conversation. err is
+			// nil at this point, so report the code itself instead of wrapping it.
+			return newError(fmt.Errorf("server returned error code %d", saslResp.Code), sc.mechanism)
+		}
+
+		if saslResp.Done && sc.client.Completed() {
+			return nil
+		}
+
+		payload, err = sc.client.Next(ctx, saslResp.Payload)
+		if err != nil {
+			return newError(err, sc.mechanism)
+		}
+
+		if saslResp.Done && sc.client.Completed() {
+			return nil
+		}
+
+		doc := bsoncore.BuildDocumentFromElements(nil,
+			bsoncore.AppendInt32Element(nil, "saslContinue", 1),
+			bsoncore.AppendInt32Element(nil, "conversationId", int32(cid)),
+			bsoncore.AppendBinaryElement(nil, "payload", 0x00, payload),
+		)
+		saslContinueCmd := operation.NewCommand(doc).
+			Database(sc.source).
+			Deployment(driver.SingleConnectionDeployment{cfg.Connection}).
+			ClusterClock(cfg.ClusterClock).
+			ServerAPI(cfg.ServerAPI)
+
+		err = saslContinueCmd.Execute(ctx)
+		if err != nil {
+			return newError(err, sc.mechanism)
+		}
+		rdr = saslContinueCmd.Result()
+
+		err = bson.Unmarshal(rdr, &saslResp)
+		if err != nil {
+			fullErr := fmt.Errorf("unmarshal error: %w", err)
+			return newError(fullErr, sc.mechanism)
+		}
+	}
+}
+
+// ConductSaslConversation runs a full SASL conversation to authenticate the given connection.
+func ConductSaslConversation(ctx context.Context, cfg *Config, authSource string, client SaslClient) error {
+	// Create a non-speculative SASL conversation.
+	conversation := newSaslConversation(client, authSource, false)
+	saslStartDoc, err := conversation.FirstMessage()
+	if err != nil {
+		return newError(err, conversation.mechanism)
+	}
+	saslStartCmd := operation.NewCommand(saslStartDoc).
+		Database(authSource).
+		Deployment(driver.SingleConnectionDeployment{cfg.Connection}).
+		ClusterClock(cfg.ClusterClock).
+		ServerAPI(cfg.ServerAPI)
+	if err := saslStartCmd.Execute(ctx); err != nil {
+		return newError(err, conversation.mechanism)
+	}
+
+	return conversation.Finish(ctx, cfg, saslStartCmd.Result())
+}
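Any mechanism can plug into ConductSaslConversation by implementing SaslClient: Start feeds saslStart, Next answers each saslContinue challenge, and Completed gates the done handshake. A hypothetical one-step client illustrating the contract, written as if it lived in this package; "TOY" is not a mechanism any real server accepts:

```go
package auth

import "context"

// toySaslClient is a hypothetical one-step mechanism used only to illustrate
// the SaslClient contract.
type toySaslClient struct {
	token []byte
	done  bool
}

var _ SaslClient = (*toySaslClient)(nil)

func (c *toySaslClient) Start() (string, []byte, error) {
	// The mechanism name and first payload go into the saslStart command.
	return "TOY", c.token, nil
}

func (c *toySaslClient) Next(_ context.Context, _ []byte) ([]byte, error) {
	// Each server challenge arrives here via saslContinue; a one-step
	// mechanism has nothing further to say.
	c.done = true
	return nil, nil
}

func (c *toySaslClient) Completed() bool { return c.done }
```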
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go
new file mode 100644
index 0000000000000000000000000000000000000000..0d7deaee0e15f4bcfc5a7dcbb402a4c0f45df4de
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go
@@ -0,0 +1,144 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/xdg-go/scram"
+	"github.com/xdg-go/stringprep"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+const (
+	// SCRAMSHA1 holds the mechanism name "SCRAM-SHA-1"
+	SCRAMSHA1 = "SCRAM-SHA-1"
+
+	// SCRAMSHA256 holds the mechanism name "SCRAM-SHA-256"
+	SCRAMSHA256 = "SCRAM-SHA-256"
+)
+
+var (
+	// Additional options for the saslStart command to enable a shorter SCRAM conversation
+	scramStartOptions bsoncore.Document = bsoncore.BuildDocumentFromElements(nil,
+		bsoncore.AppendBooleanElement(nil, "skipEmptyExchange", true),
+	)
+)
+
+func newScramSHA1Authenticator(cred *Cred, _ *http.Client) (Authenticator, error) {
+	source := cred.Source
+	if source == "" {
+		source = "admin"
+	}
+	passdigest := mongoPasswordDigest(cred.Username, cred.Password)
+	client, err := scram.SHA1.NewClientUnprepped(cred.Username, passdigest, "")
+	if err != nil {
+		return nil, newAuthError("error initializing SCRAM-SHA-1 client", err)
+	}
+	client.WithMinIterations(4096)
+	return &ScramAuthenticator{
+		mechanism: SCRAMSHA1,
+		source:    source,
+		client:    client,
+	}, nil
+}
+
+func newScramSHA256Authenticator(cred *Cred, _ *http.Client) (Authenticator, error) {
+	source := cred.Source
+	if source == "" {
+		source = "admin"
+	}
+	passprep, err := stringprep.SASLprep.Prepare(cred.Password)
+	if err != nil {
+		return nil, newAuthError("error SASLprepping password", err)
+	}
+	client, err := scram.SHA256.NewClientUnprepped(cred.Username, passprep, "")
+	if err != nil {
+		return nil, newAuthError("error initializing SCRAM-SHA-256 client", err)
+	}
+	client.WithMinIterations(4096)
+	return &ScramAuthenticator{
+		mechanism: SCRAMSHA256,
+		source:    source,
+		client:    client,
+	}, nil
+}
+
+// ScramAuthenticator uses the SCRAM algorithm over SASL to authenticate a connection.
+type ScramAuthenticator struct {
+	mechanism string
+	source    string
+	client    *scram.Client
+}
+
+var _ SpeculativeAuthenticator = (*ScramAuthenticator)(nil)
+
+// Auth authenticates the provided connection by conducting a full SASL conversation.
+func (a *ScramAuthenticator) Auth(ctx context.Context, cfg *Config) error {
+	err := ConductSaslConversation(ctx, cfg, a.source, a.createSaslClient())
+	if err != nil {
+		return newAuthError("sasl conversation error", err)
+	}
+	return nil
+}
+
+// Reauth reauthenticates the connection.
+func (a *ScramAuthenticator) Reauth(_ context.Context, _ *driver.AuthConfig) error {
+	return newAuthError("SCRAM does not support reauthentication", nil)
+}
+
+// CreateSpeculativeConversation creates a speculative conversation for SCRAM authentication.
+func (a *ScramAuthenticator) CreateSpeculativeConversation() (SpeculativeConversation, error) {
+	return newSaslConversation(a.createSaslClient(), a.source, true), nil
+}
+
+func (a *ScramAuthenticator) createSaslClient() SaslClient {
+	return &scramSaslAdapter{
+		conversation: a.client.NewConversation(),
+		mechanism:    a.mechanism,
+	}
+}
+
+type scramSaslAdapter struct {
+	mechanism    string
+	conversation *scram.ClientConversation
+}
+
+var _ SaslClient = (*scramSaslAdapter)(nil)
+var _ ExtraOptionsSaslClient = (*scramSaslAdapter)(nil)
+
+func (a *scramSaslAdapter) Start() (string, []byte, error) {
+	step, err := a.conversation.Step("")
+	if err != nil {
+		return a.mechanism, nil, err
+	}
+	return a.mechanism, []byte(step), nil
+}
+
+func (a *scramSaslAdapter) Next(_ context.Context, challenge []byte) ([]byte, error) {
+	step, err := a.conversation.Step(string(challenge))
+	if err != nil {
+		return nil, err
+	}
+	return []byte(step), nil
+}
+
+func (a *scramSaslAdapter) Completed() bool {
+	return a.conversation.Done()
+}
+
+func (*scramSaslAdapter) StartCommandOptions() bsoncore.Document {
+	return scramStartOptions
+}
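The adapter above drives an xdg-go/scram ClientConversation one step per server challenge; note the authenticators use NewClientUnprepped because the password is pre-digested (SHA-1) or pre-SASLprepped (SHA-256), whereas NewClient does the SASLprep itself. A sketch of the underlying conversation in isolation:

```go
package main

import (
	"fmt"

	"github.com/xdg-go/scram"
)

func main() {
	client, err := scram.SHA256.NewClient("alice", "secret", "")
	if err != nil {
		panic(err)
	}
	conv := client.NewConversation()
	first, err := conv.Step("") // client-first message, e.g. "n,,n=alice,r=<nonce>"
	if err != nil {
		panic(err)
	}
	fmt.Println(first)
	// Subsequent Step calls consume the server-first and server-final
	// messages until conv.Done() reports completion.
}
```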
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..a75a006d561fe05d6f2cc2481ad91458abab14c1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go
@@ -0,0 +1,30 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"fmt"
+	"io"
+
+	// Ignore gosec warning "Blocklisted import crypto/md5: weak cryptographic primitive". We need
+	// to use MD5 here to implement the SCRAM specification.
+	/* #nosec G501 */
+	"crypto/md5"
+)
+
+const defaultAuthDB = "admin"
+
+func mongoPasswordDigest(username, password string) string {
+	// Ignore gosec warning "Use of weak cryptographic primitive". We need to use MD5 here to
+	// implement the SCRAM specification.
+	/* #nosec G401 */
+	h := md5.New()
+	_, _ = io.WriteString(h, username)
+	_, _ = io.WriteString(h, ":mongo:")
+	_, _ = io.WriteString(h, password)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b84e00b48ee952870ded7cf5c550e8ebc886d31
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+)
+
+// MongoDBX509 is the mechanism name for MongoDBX509.
+const MongoDBX509 = "MONGODB-X509"
+
+func newMongoDBX509Authenticator(cred *Cred, _ *http.Client) (Authenticator, error) {
+	// TODO(GODRIVER-3309): Validate that cred.Source is either empty or
+	// "$external" to make validation uniform with other auth mechanisms that
+	// require Source to be "$external" (e.g. MONGODB-AWS, MONGODB-OIDC, etc).
+	return &MongoDBX509Authenticator{User: cred.Username}, nil
+}
+
+// MongoDBX509Authenticator uses X.509 certificates over TLS to authenticate a connection.
+type MongoDBX509Authenticator struct {
+	User string
+}
+
+var _ SpeculativeAuthenticator = (*MongoDBX509Authenticator)(nil)
+
+// x509Conversation represents an X.509 authentication conversation. This type implements the SpeculativeConversation
+// interface so the conversation can be executed in speculative fashion.
+type x509Conversation struct{}
+
+var _ SpeculativeConversation = (*x509Conversation)(nil)
+
+// FirstMessage returns the first message to be sent to the server.
+func (c *x509Conversation) FirstMessage() (bsoncore.Document, error) {
+	return createFirstX509Message(), nil
+}
+
+// createFirstX509Message creates the first message for the X509 conversation.
+func createFirstX509Message() bsoncore.Document {
+	elements := [][]byte{
+		bsoncore.AppendInt32Element(nil, "authenticate", 1),
+		bsoncore.AppendStringElement(nil, "mechanism", MongoDBX509),
+	}
+
+	return bsoncore.BuildDocument(nil, elements...)
+}
+
+// Finish implements the SpeculativeConversation interface and is a no-op because an X509 conversation only has one
+// step.
+func (c *x509Conversation) Finish(context.Context, *Config, bsoncore.Document) error {
+	return nil
+}
+
+// CreateSpeculativeConversation creates a speculative conversation for X509 authentication.
+func (a *MongoDBX509Authenticator) CreateSpeculativeConversation() (SpeculativeConversation, error) {
+	return &x509Conversation{}, nil
+}
+
+// Auth authenticates the provided connection by conducting an X509 authentication conversation.
+func (a *MongoDBX509Authenticator) Auth(ctx context.Context, cfg *Config) error {
+	requestDoc := createFirstX509Message()
+	authCmd := operation.
+		NewCommand(requestDoc).
+		Database(sourceExternal).
+		Deployment(driver.SingleConnectionDeployment{cfg.Connection}).
+		ClusterClock(cfg.ClusterClock).
+		ServerAPI(cfg.ServerAPI)
+	err := authCmd.Execute(ctx)
+	if err != nil {
+		return newAuthError("round trip error", err)
+	}
+
+	return nil
+}
+
+// Reauth reauthenticates the connection.
+func (a *MongoDBX509Authenticator) Reauth(_ context.Context, _ *driver.AuthConfig) error {
+	return newAuthError("X509 does not support reauthentication", nil)
+}
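MONGODB-X509 carries no password; the identity comes from the client TLS certificate, so configuration is typically done entirely in the connection string. A sketch with placeholder host and certificate path (the path is URL-encoded in the URI):

```go
package main

import (
	"context"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// Host and certificate path are placeholders; the username is optional on
	// modern servers, which derive it from the certificate subject.
	uri := "mongodb://db.example.com:27017/?authMechanism=MONGODB-X509" +
		"&tls=true&tlsCertificateKeyFile=%2Fetc%2Fssl%2Fclient.pem"
	client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(uri))
	if err != nil {
		panic(err)
	}
	defer func() { _ = client.Disconnect(context.Background()) }()
}
```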
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go
new file mode 100644
index 0000000000000000000000000000000000000000..2aa0aca69475dcb05780cfef33d1160fe0f4c3e0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go
@@ -0,0 +1,534 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/codecutil"
+	"go.mongodb.org/mongo-driver/internal/csot"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// ErrNoCursor is returned by NewCursorResponse when the database response does
+// not contain a cursor.
+var ErrNoCursor = errors.New("database response does not contain a cursor")
+
+// BatchCursor is a batch implementation of a cursor. It returns documents in entire batches instead
+// of one at a time. An individual document cursor can be built on top of this batch cursor.
+type BatchCursor struct {
+	clientSession        *session.Client
+	clock                *session.ClusterClock
+	comment              interface{}
+	encoderFn            codecutil.EncoderFn
+	database             string
+	collection           string
+	id                   int64
+	err                  error
+	server               Server
+	serverDescription    description.Server
+	errorProcessor       ErrorProcessor // This will only be set when pinning to a connection.
+	connection           PinnedConnection
+	batchSize            int32
+	maxTimeMS            int64
+	currentBatch         *bsoncore.DocumentSequence
+	firstBatch           bool
+	cmdMonitor           *event.CommandMonitor
+	postBatchResumeToken bsoncore.Document
+	crypt                Crypt
+	serverAPI            *ServerAPIOptions
+
+	// legacy server (< 3.2) fields
+	limit       int32
+	numReturned int32 // number of docs returned by server
+}
+
+// CursorResponse represents the response from a command that results in a cursor. A BatchCursor can
+// be constructed from a CursorResponse.
+type CursorResponse struct {
+	Server               Server
+	ErrorProcessor       ErrorProcessor // This will only be set when pinning to a connection.
+	Connection           PinnedConnection
+	Desc                 description.Server
+	FirstBatch           *bsoncore.DocumentSequence
+	Database             string
+	Collection           string
+	ID                   int64
+	postBatchResumeToken bsoncore.Document
+}
+
+// NewCursorResponse constructs a cursor response from the given ResponseInfo,
+// which carries the server response and connection metadata. If the provided
+// database response does not contain a cursor, it returns ErrNoCursor.
+//
+// NewCursorResponse can be used within the ProcessResponse method for an operation.
+func NewCursorResponse(info ResponseInfo) (CursorResponse, error) {
+	response := info.ServerResponse
+	cur, err := response.LookupErr("cursor")
+	if errors.Is(err, bsoncore.ErrElementNotFound) {
+		return CursorResponse{}, ErrNoCursor
+	}
+	if err != nil {
+		return CursorResponse{}, fmt.Errorf("error getting cursor from database response: %w", err)
+	}
+	curDoc, ok := cur.DocumentOK()
+	if !ok {
+		return CursorResponse{}, fmt.Errorf("cursor should be an embedded document but is BSON type %s", cur.Type)
+	}
+	elems, err := curDoc.Elements()
+	if err != nil {
+		return CursorResponse{}, fmt.Errorf("error getting elements from cursor: %w", err)
+	}
+	curresp := CursorResponse{Server: info.Server, Desc: info.ConnectionDescription}
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "firstBatch":
+			arr, ok := elem.Value().ArrayOK()
+			if !ok {
+				return CursorResponse{}, fmt.Errorf("firstBatch should be an array but is a BSON %s", elem.Value().Type)
+			}
+			curresp.FirstBatch = &bsoncore.DocumentSequence{Style: bsoncore.ArrayStyle, Data: arr}
+		case "ns":
+			ns, ok := elem.Value().StringValueOK()
+			if !ok {
+				return CursorResponse{}, fmt.Errorf("ns should be a string but is a BSON %s", elem.Value().Type)
+			}
+			database, collection, ok := strings.Cut(ns, ".")
+			if !ok {
+				return CursorResponse{}, errors.New("ns field must contain a valid namespace, but is missing '.'")
+			}
+			curresp.Database = database
+			curresp.Collection = collection
+		case "id":
+			curresp.ID, ok = elem.Value().Int64OK()
+			if !ok {
+				return CursorResponse{}, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type)
+			}
+		case "postBatchResumeToken":
+			curresp.postBatchResumeToken, ok = elem.Value().DocumentOK()
+			if !ok {
+				return CursorResponse{}, fmt.Errorf("post batch resume token should be a document but it is a BSON %s", elem.Value().Type)
+			}
+		}
+	}
+
+	// If the deployment is behind a load balancer and the cursor has a non-zero ID, pin the cursor to a connection and
+	// use the same connection to execute getMore and killCursors commands.
+	if curresp.Desc.LoadBalanced() && curresp.ID != 0 {
+		// Cache the server as an ErrorProcessor to use when constructing deployments for cursor commands.
+		ep, ok := curresp.Server.(ErrorProcessor)
+		if !ok {
+			return CursorResponse{}, fmt.Errorf("expected Server used to establish a cursor to implement ErrorProcessor, but got %T", curresp.Server)
+		}
+		curresp.ErrorProcessor = ep
+
+		refConn, ok := info.Connection.(PinnedConnection)
+		if !ok {
+			return CursorResponse{}, fmt.Errorf("expected Connection used to establish a cursor to implement PinnedConnection, but got %T", info.Connection)
+		}
+		if err := refConn.PinToCursor(); err != nil {
+			return CursorResponse{}, fmt.Errorf("error incrementing connection reference count when creating a cursor: %w", err)
+		}
+		curresp.Connection = refConn
+	}
+
+	return curresp, nil
+}
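+
+// As a sketch of what NewCursorResponse consumes, a typical cursor-returning
+// reply (rendered as relaxed extended JSON; the values are hypothetical)
+// looks like:
+//
+//	{
+//		"cursor": {
+//			"firstBatch": [ ... ],
+//			"id": 4103818994958521961,
+//			"ns": "db.coll"
+//		},
+//		"ok": 1
+//	}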
+
+// CursorOptions are extra options that are required to construct a BatchCursor.
+type CursorOptions struct {
+	BatchSize             int32
+	Comment               bsoncore.Value
+	MaxTimeMS             int64
+	Limit                 int32
+	CommandMonitor        *event.CommandMonitor
+	Crypt                 Crypt
+	ServerAPI             *ServerAPIOptions
+	MarshalValueEncoderFn func(io.Writer) (*bson.Encoder, error)
+}
+
+// NewBatchCursor creates a new BatchCursor from the provided parameters.
+func NewBatchCursor(cr CursorResponse, clientSession *session.Client, clock *session.ClusterClock, opts CursorOptions) (*BatchCursor, error) {
+	ds := cr.FirstBatch
+	bc := &BatchCursor{
+		clientSession:        clientSession,
+		clock:                clock,
+		comment:              opts.Comment,
+		database:             cr.Database,
+		collection:           cr.Collection,
+		id:                   cr.ID,
+		server:               cr.Server,
+		connection:           cr.Connection,
+		errorProcessor:       cr.ErrorProcessor,
+		batchSize:            opts.BatchSize,
+		maxTimeMS:            opts.MaxTimeMS,
+		cmdMonitor:           opts.CommandMonitor,
+		firstBatch:           true,
+		postBatchResumeToken: cr.postBatchResumeToken,
+		crypt:                opts.Crypt,
+		serverAPI:            opts.ServerAPI,
+		serverDescription:    cr.Desc,
+		encoderFn:            opts.MarshalValueEncoderFn,
+	}
+
+	if ds != nil {
+		bc.numReturned = int32(ds.DocumentCount())
+	}
+	if cr.Desc.WireVersion == nil {
+		bc.limit = opts.Limit
+
+		// Take as many documents from the batch as needed.
+		if bc.limit != 0 && bc.limit < bc.numReturned {
+			for i := int32(0); i < bc.limit; i++ {
+				_, err := ds.Next()
+				if err != nil {
+					return nil, err
+				}
+			}
+			ds.Data = ds.Data[:ds.Pos]
+			ds.ResetIterator()
+		}
+	}
+
+	bc.currentBatch = ds
+	return bc, nil
+}
+
+// NewEmptyBatchCursor returns a batch cursor that is empty.
+func NewEmptyBatchCursor() *BatchCursor {
+	return &BatchCursor{currentBatch: new(bsoncore.DocumentSequence)}
+}
+
+// NewBatchCursorFromDocuments returns a batch cursor with current batch set to a sequence-style
+// DocumentSequence containing the provided documents.
+func NewBatchCursorFromDocuments(documents []byte) *BatchCursor {
+	return &BatchCursor{
+		currentBatch: &bsoncore.DocumentSequence{
+			Data:  documents,
+			Style: bsoncore.SequenceStyle,
+		},
+		// BatchCursors created with this function have no associated ID nor server, so no getMore
+		// calls will be made.
+		id:     0,
+		server: nil,
+	}
+}
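+
+// A minimal usage sketch (doc1 and doc2 stand in for two pre-encoded BSON
+// documents and are hypothetical):
+//
+//	data := append(append([]byte{}, doc1...), doc2...)
+//	bc := NewBatchCursorFromDocuments(data)
+//	for bc.Next(context.Background()) {
+//		docs, _ := bc.Batch().Documents()
+//		_ = docs // process the current batch
+//	}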
+
+// ID returns the cursor ID for this batch cursor.
+func (bc *BatchCursor) ID() int64 {
+	return bc.id
+}
+
+// Next indicates if there is another batch available. Returning false does not necessarily indicate
+// that the cursor is closed. This method will return false when an empty batch is returned.
+//
+// If Next returns true, there is a valid batch of documents available. If Next returns false, there
+// is not a valid batch of documents available.
+func (bc *BatchCursor) Next(ctx context.Context) bool {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if bc.firstBatch {
+		bc.firstBatch = false
+		return !bc.currentBatch.Empty()
+	}
+
+	if bc.id == 0 || bc.server == nil {
+		return false
+	}
+
+	bc.getMore(ctx)
+
+	return !bc.currentBatch.Empty()
+}
+
+// Batch will return a DocumentSequence for the current batch of documents. The returned
+// DocumentSequence is only valid until the next call to Next or Close.
+func (bc *BatchCursor) Batch() *bsoncore.DocumentSequence { return bc.currentBatch }
+
+// Err returns the latest error encountered.
+func (bc *BatchCursor) Err() error { return bc.err }
+
+// Close closes this batch cursor.
+func (bc *BatchCursor) Close(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	err := bc.KillCursor(ctx)
+	bc.id = 0
+	bc.currentBatch.Data = nil
+	bc.currentBatch.Style = 0
+	bc.currentBatch.ResetIterator()
+
+	connErr := bc.unpinConnection()
+	if err == nil {
+		err = connErr
+	}
+	return err
+}
+
+func (bc *BatchCursor) unpinConnection() error {
+	if bc.connection == nil {
+		return nil
+	}
+
+	err := bc.connection.UnpinFromCursor()
+	closeErr := bc.connection.Close()
+	if err == nil && closeErr != nil {
+		err = closeErr
+	}
+	bc.connection = nil
+	return err
+}
+
+// Server returns the server for this cursor.
+func (bc *BatchCursor) Server() Server {
+	return bc.server
+}
+
+func (bc *BatchCursor) clearBatch() {
+	bc.currentBatch.Data = bc.currentBatch.Data[:0]
+}
+
+// KillCursor kills the cursor on the server without closing the batch cursor.
+func (bc *BatchCursor) KillCursor(ctx context.Context) error {
+	if bc.server == nil || bc.id == 0 {
+		return nil
+	}
+
+	return Operation{
+		CommandFn: func(dst []byte, _ description.SelectedServer) ([]byte, error) {
+			dst = bsoncore.AppendStringElement(dst, "killCursors", bc.collection)
+			dst = bsoncore.BuildArrayElement(dst, "cursors", bsoncore.Value{Type: bsontype.Int64, Data: bsoncore.AppendInt64(nil, bc.id)})
+			return dst, nil
+		},
+		Database:       bc.database,
+		Deployment:     bc.getOperationDeployment(),
+		Client:         bc.clientSession,
+		Clock:          bc.clock,
+		Legacy:         LegacyKillCursors,
+		CommandMonitor: bc.cmdMonitor,
+		ServerAPI:      bc.serverAPI,
+
+		// No read preference is passed to the killCursor command,
+		// resulting in the default read preference: "primaryPreferred".
+		// Since this could be confusing, and there is no requirement
+		// to use a read preference here, we omit it.
+		omitReadPreference: true,
+	}.Execute(ctx)
+}
+
+// calcGetMoreBatchSize calculates the number of documents to return in the
+// response of a "getMore" operation based on the given limit, batchSize, and
+// number of documents already returned. Returns false if a non-trivial limit is
+// lower than or equal to the number of documents already returned.
+func calcGetMoreBatchSize(bc BatchCursor) (int32, bool) {
+	gmBatchSize := bc.batchSize
+
+	// Account for legacy operations that don't support setting a limit.
+	if bc.limit != 0 && bc.numReturned+bc.batchSize >= bc.limit {
+		gmBatchSize = bc.limit - bc.numReturned
+		if gmBatchSize <= 0 {
+			return gmBatchSize, false
+		}
+	}
+
+	return gmBatchSize, true
+}
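+
+// A worked example: with limit=5, numReturned=4, and batchSize=3, the guard
+// 4+3 >= 5 trips, so the adjusted batch size is 5-4 = 1 and (1, true) is
+// returned; with numReturned=5 the adjusted size is 0 and (0, false) signals
+// that the cursor should be closed.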
+
+func (bc *BatchCursor) getMore(ctx context.Context) {
+	bc.clearBatch()
+	if bc.id == 0 {
+		return
+	}
+
+	numToReturn, ok := calcGetMoreBatchSize(*bc)
+	if !ok {
+		if err := bc.Close(ctx); err != nil {
+			bc.err = err
+		}
+
+		return
+	}
+
+	bc.err = Operation{
+		CommandFn: func(dst []byte, _ description.SelectedServer) ([]byte, error) {
+			dst = bsoncore.AppendInt64Element(dst, "getMore", bc.id)
+			dst = bsoncore.AppendStringElement(dst, "collection", bc.collection)
+			if numToReturn > 0 {
+				dst = bsoncore.AppendInt32Element(dst, "batchSize", numToReturn)
+			}
+			if bc.maxTimeMS > 0 {
+				dst = bsoncore.AppendInt64Element(dst, "maxTimeMS", bc.maxTimeMS)
+			}
+
+			comment, err := codecutil.MarshalValue(bc.comment, bc.encoderFn)
+			if err != nil {
+				return nil, fmt.Errorf("error marshaling comment as a BSON value: %w", err)
+			}
+
+			// The getMore command does not support commenting pre-4.4.
+			if comment.Type != bsontype.Type(0) && bc.serverDescription.WireVersion.Max >= 9 {
+				dst = bsoncore.AppendValueElement(dst, "comment", comment)
+			}
+
+			return dst, nil
+		},
+		Database:   bc.database,
+		Deployment: bc.getOperationDeployment(),
+		ProcessResponseFn: func(info ResponseInfo) error {
+			response := info.ServerResponse
+			id, ok := response.Lookup("cursor", "id").Int64OK()
+			if !ok {
+				return fmt.Errorf("cursor.id should be an int64 but is a BSON %s", response.Lookup("cursor", "id").Type)
+			}
+			bc.id = id
+
+			batch, ok := response.Lookup("cursor", "nextBatch").ArrayOK()
+			if !ok {
+				return fmt.Errorf("cursor.nextBatch should be an array but is a BSON %s", response.Lookup("cursor", "nextBatch").Type)
+			}
+			bc.currentBatch.Style = bsoncore.ArrayStyle
+			bc.currentBatch.Data = batch
+			bc.currentBatch.ResetIterator()
+			bc.numReturned += int32(bc.currentBatch.DocumentCount()) // Required for legacy operations which don't support limit.
+
+			pbrt, err := response.LookupErr("cursor", "postBatchResumeToken")
+			if err != nil {
+				// postBatchResumeToken is optional, so its absence is not treated as an error.
+				return nil
+			}
+
+			pbrtDoc, ok := pbrt.DocumentOK()
+			if !ok {
+				bc.err = fmt.Errorf("expected BSON type for post batch resume token to be EmbeddedDocument but got %s", pbrt.Type)
+				return nil
+			}
+
+			bc.postBatchResumeToken = pbrtDoc
+
+			return nil
+		},
+		Client:         bc.clientSession,
+		Clock:          bc.clock,
+		Legacy:         LegacyGetMore,
+		CommandMonitor: bc.cmdMonitor,
+		Crypt:          bc.crypt,
+		ServerAPI:      bc.serverAPI,
+
+		// No read preference is passed to the getMore command,
+		// resulting in the default read preference: "primaryPreferred".
+		// Since this could be confusing, and there is no requirement
+		// to use a read preference here, we omit it.
+		omitReadPreference: true,
+	}.Execute(ctx)
+
+	// Once the cursor has been drained, we can unpin the connection if one is currently pinned.
+	if bc.id == 0 {
+		err := bc.unpinConnection()
+		if err != nil && bc.err == nil {
+			bc.err = err
+		}
+	}
+
+	// If we're in load balanced mode and the pinned connection encounters a network error, we should not use it for
+	// future commands. Per the spec, the connection will not be unpinned until the cursor is actually closed, but
+	// we set the cursor ID to 0 to ensure the Close() call will not execute a killCursors command.
+	if driverErr, ok := bc.err.(Error); ok && driverErr.NetworkError() && bc.connection != nil {
+		bc.id = 0
+	}
+
+	// Required for legacy operations which don't support limit.
+	if bc.limit != 0 && bc.numReturned >= bc.limit {
+		// call KillCursor instead of Close because Close will clear out the data for the current batch.
+		err := bc.KillCursor(ctx)
+		if err != nil && bc.err == nil {
+			bc.err = err
+		}
+	}
+}
+
+// PostBatchResumeToken returns the latest seen post batch resume token.
+func (bc *BatchCursor) PostBatchResumeToken() bsoncore.Document {
+	return bc.postBatchResumeToken
+}
+
+// SetBatchSize sets the batchSize for future getMore operations.
+func (bc *BatchCursor) SetBatchSize(size int32) {
+	bc.batchSize = size
+}
+
+// SetMaxTime will set the maximum amount of time the server will allow the
+// operations to execute. The server will error if this field is set but the
+// cursor is not configured with awaitData=true.
+//
+// The time.Duration value passed by this setter will be converted and rounded
+// down to the nearest millisecond.
+func (bc *BatchCursor) SetMaxTime(dur time.Duration) {
+	bc.maxTimeMS = int64(dur / time.Millisecond)
+}
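+
+// For example, SetMaxTime(1500*time.Microsecond) truncates to 1ms, and any
+// duration under one millisecond truncates to 0, which omits maxTimeMS from
+// subsequent getMore commands.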
+
+// SetComment sets the comment for future getMore operations.
+func (bc *BatchCursor) SetComment(comment interface{}) {
+	bc.comment = comment
+}
+
+func (bc *BatchCursor) getOperationDeployment() Deployment {
+	if bc.connection != nil {
+		return &loadBalancedCursorDeployment{
+			errorProcessor: bc.errorProcessor,
+			conn:           bc.connection,
+		}
+	}
+	return SingleServerDeployment{bc.server}
+}
+
+// loadBalancedCursorDeployment is used as a Deployment for getMore and killCursors commands when pinning to a
+// connection in load balanced mode. This type also functions as an ErrorProcessor to ensure that SDAM errors are
+// handled for these commands in this mode.
+type loadBalancedCursorDeployment struct {
+	errorProcessor ErrorProcessor
+	conn           PinnedConnection
+}
+
+var _ Deployment = (*loadBalancedCursorDeployment)(nil)
+var _ Server = (*loadBalancedCursorDeployment)(nil)
+var _ ErrorProcessor = (*loadBalancedCursorDeployment)(nil)
+
+func (lbcd *loadBalancedCursorDeployment) SelectServer(_ context.Context, _ description.ServerSelector) (Server, error) {
+	return lbcd, nil
+}
+
+func (lbcd *loadBalancedCursorDeployment) Kind() description.TopologyKind {
+	return description.LoadBalanced
+}
+
+func (lbcd *loadBalancedCursorDeployment) Connection(_ context.Context) (Connection, error) {
+	return lbcd.conn, nil
+}
+
+// RTTMonitor implements the driver.Server interface.
+func (lbcd *loadBalancedCursorDeployment) RTTMonitor() RTTMonitor {
+	return &csot.ZeroRTTMonitor{}
+}
+
+func (lbcd *loadBalancedCursorDeployment) ProcessError(err error, conn Connection) ProcessErrorResult {
+	return lbcd.errorProcessor.ProcessError(err, conn)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go
new file mode 100644
index 0000000000000000000000000000000000000000..be430afa15a0d2c4fcccbd361d61b20bbebf9cff
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go
@@ -0,0 +1,76 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"errors"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// ErrDocumentTooLarge occurs when a document that is larger than the maximum size accepted by a
+// server is passed to an insert command.
+var ErrDocumentTooLarge = errors.New("an inserted document is too large")
+
+// Batches contains the necessary information to batch split an operation. This is only used for write
+// operations.
+type Batches struct {
+	Identifier string
+	Documents  []bsoncore.Document
+	Current    []bsoncore.Document
+	Ordered    *bool
+}
+
+// Valid returns true if Batches is non-nil, has a non-empty identifier, and
+// contains at least one document.
+func (b *Batches) Valid() bool { return b != nil && b.Identifier != "" && len(b.Documents) > 0 }
+
+// ClearBatch clears the Current batch. It must be called before AdvanceBatch
+// can advance to the next batch.
+func (b *Batches) ClearBatch() { b.Current = b.Current[:0] }
+
+// AdvanceBatch splits the next batch using maxCount and targetBatchSize. This method does nothing if
+// the current batch has not been cleared, so it can be called during execute without first checking
+// whether a batch already exists, which keeps the code simpler and makes retrying easier.
+// The maxDocSize parameter is used to check that no single document is too large. If the first document is bigger
+// than targetBatchSize but smaller than maxDocSize, a batch of size 1 containing that document is created.
+func (b *Batches) AdvanceBatch(maxCount, targetBatchSize, maxDocSize int) error {
+	if len(b.Current) > 0 {
+		return nil
+	}
+
+	if maxCount <= 0 {
+		maxCount = 1
+	}
+
+	splitAfter := 0
+	size := 0
+	for i, doc := range b.Documents {
+		if i == maxCount {
+			break
+		}
+		if len(doc) > maxDocSize {
+			return ErrDocumentTooLarge
+		}
+		if size+len(doc) > targetBatchSize {
+			break
+		}
+
+		size += len(doc)
+		splitAfter++
+	}
+
+	// if there are no documents, take the first one.
+	// this can happen if there is a document that is smaller than maxDocSize but greater than targetBatchSize.
+	if splitAfter == 0 {
+		splitAfter = 1
+	}
+
+	b.Current, b.Documents = b.Documents[:splitAfter], b.Documents[splitAfter:]
+	return nil
+}
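+
+// A worked example: with document sizes [40, 40, 40] bytes, maxCount=10,
+// targetBatchSize=100, and maxDocSize=1000, the loop stops before the third
+// document (40+40+40 > 100), so Current holds the first two documents and
+// Documents retains the remainder for the next call.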
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9a6c68feed073bc390f06525c47eb7004a17d8e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go
@@ -0,0 +1,194 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"bytes"
+	"compress/zlib"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/golang/snappy"
+	"github.com/klauspost/compress/zstd"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage"
+)
+
+// CompressionOpts holds settings for how to compress a payload
+type CompressionOpts struct {
+	Compressor       wiremessage.CompressorID
+	ZlibLevel        int
+	ZstdLevel        int
+	UncompressedSize int32
+}
+
+// mustZstdNewWriter creates a zstd.Encoder with the given level and a nil
+// destination writer. It panics on any errors and should only be used at
+// package initialization time.
+func mustZstdNewWriter(lvl zstd.EncoderLevel) *zstd.Encoder {
+	enc, err := zstd.NewWriter(
+		nil,
+		zstd.WithWindowSize(8<<20), // Set window size to 8MB.
+		zstd.WithEncoderLevel(lvl),
+	)
+	if err != nil {
+		panic(err)
+	}
+	return enc
+}
+
+var zstdEncoders = [zstd.SpeedBestCompression + 1]*zstd.Encoder{
+	0:                           nil, // zstd.speedNotSet
+	zstd.SpeedFastest:           mustZstdNewWriter(zstd.SpeedFastest),
+	zstd.SpeedDefault:           mustZstdNewWriter(zstd.SpeedDefault),
+	zstd.SpeedBetterCompression: mustZstdNewWriter(zstd.SpeedBetterCompression),
+	zstd.SpeedBestCompression:   mustZstdNewWriter(zstd.SpeedBestCompression),
+}
+
+func getZstdEncoder(level zstd.EncoderLevel) (*zstd.Encoder, error) {
+	if zstd.SpeedFastest <= level && level <= zstd.SpeedBestCompression {
+		return zstdEncoders[level], nil
+	}
+	// The level is outside the expected range, return an error.
+	return nil, fmt.Errorf("invalid zstd compression level: %d", level)
+}
+
+// zlibEncodersOffset is the offset into the zlibEncoders array for a given
+// compression level.
+const zlibEncodersOffset = -zlib.HuffmanOnly // HuffmanOnly == -2
+
+var zlibEncoders [zlib.BestCompression + zlibEncodersOffset + 1]sync.Pool
+
+func getZlibEncoder(level int) (*zlibEncoder, error) {
+	if zlib.HuffmanOnly <= level && level <= zlib.BestCompression {
+		if enc, _ := zlibEncoders[level+zlibEncodersOffset].Get().(*zlibEncoder); enc != nil {
+			return enc, nil
+		}
+		writer, err := zlib.NewWriterLevel(nil, level)
+		if err != nil {
+			return nil, err
+		}
+		enc := &zlibEncoder{writer: writer, level: level}
+		return enc, nil
+	}
+	// The level is outside the expected range, return an error.
+	return nil, fmt.Errorf("invalid zlib compression level: %d", level)
+}
+
+func putZlibEncoder(enc *zlibEncoder) {
+	if enc != nil {
+		zlibEncoders[enc.level+zlibEncodersOffset].Put(enc)
+	}
+}
+
+type zlibEncoder struct {
+	writer *zlib.Writer
+	buf    bytes.Buffer
+	level  int
+}
+
+func (e *zlibEncoder) Encode(dst, src []byte) ([]byte, error) {
+	defer putZlibEncoder(e)
+
+	e.buf.Reset()
+	e.writer.Reset(&e.buf)
+
+	_, err := e.writer.Write(src)
+	if err != nil {
+		return nil, err
+	}
+	err = e.writer.Close()
+	if err != nil {
+		return nil, err
+	}
+	dst = append(dst[:0], e.buf.Bytes()...)
+	return dst, nil
+}
+
+var zstdBufPool = sync.Pool{
+	New: func() interface{} {
+		s := make([]byte, 0)
+		return &s
+	},
+}
+
+// CompressPayload takes a byte slice and compresses it according to the options passed
+func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) {
+	switch opts.Compressor {
+	case wiremessage.CompressorNoOp:
+		return in, nil
+	case wiremessage.CompressorSnappy:
+		return snappy.Encode(nil, in), nil
+	case wiremessage.CompressorZLib:
+		encoder, err := getZlibEncoder(opts.ZlibLevel)
+		if err != nil {
+			return nil, err
+		}
+		return encoder.Encode(nil, in)
+	case wiremessage.CompressorZstd:
+		encoder, err := getZstdEncoder(zstd.EncoderLevelFromZstd(opts.ZstdLevel))
+		if err != nil {
+			return nil, err
+		}
+		ptr := zstdBufPool.Get().(*[]byte)
+		b := encoder.EncodeAll(in, *ptr)
+		dst := make([]byte, len(b))
+		copy(dst, b)
+		*ptr = b[:0]
+		zstdBufPool.Put(ptr)
+		return dst, nil
+	default:
+		return nil, fmt.Errorf("unknown compressor ID %v", opts.Compressor)
+	}
+}
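+
+// A minimal round-trip sketch (the compressor choice and payload are
+// arbitrary; UncompressedSize must match the original payload length):
+//
+//	opts := CompressionOpts{Compressor: wiremessage.CompressorSnappy}
+//	compressed, _ := CompressPayload([]byte("hello"), opts)
+//	opts.UncompressedSize = 5
+//	original, _ := DecompressPayload(compressed, opts)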
+
+var zstdReaderPool = sync.Pool{
+	New: func() interface{} {
+		r, _ := zstd.NewReader(nil)
+		return r
+	},
+}
+
+// DecompressPayload takes a byte slice that has been compressed and undoes it according to the options passed
+func DecompressPayload(in []byte, opts CompressionOpts) ([]byte, error) {
+	switch opts.Compressor {
+	case wiremessage.CompressorNoOp:
+		return in, nil
+	case wiremessage.CompressorSnappy:
+		l, err := snappy.DecodedLen(in)
+		if err != nil {
+			return nil, fmt.Errorf("decoding compressed length %w", err)
+		} else if int32(l) != opts.UncompressedSize {
+			return nil, fmt.Errorf("unexpected decompression size, expected %v but got %v", opts.UncompressedSize, l)
+		}
+		out := make([]byte, opts.UncompressedSize)
+		return snappy.Decode(out, in)
+	case wiremessage.CompressorZLib:
+		r, err := zlib.NewReader(bytes.NewReader(in))
+		if err != nil {
+			return nil, err
+		}
+		out := make([]byte, opts.UncompressedSize)
+		if _, err := io.ReadFull(r, out); err != nil {
+			return nil, err
+		}
+		if err := r.Close(); err != nil {
+			return nil, err
+		}
+		return out, nil
+	case wiremessage.CompressorZstd:
+		buf := make([]byte, 0, opts.UncompressedSize)
+		// Using a pool of decoders here is roughly 20% faster
+		// than sharing a single global zstd.Reader.
+		r := zstdReaderPool.Get().(*zstd.Decoder)
+		out, err := r.DecodeAll(in, buf)
+		zstdReaderPool.Put(r)
+		return out, err
+	default:
+		return nil, fmt.Errorf("unknown compressor ID %v", opts.Compressor)
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go
new file mode 100644
index 0000000000000000000000000000000000000000..fd69eb4904efc2fae4294bb2889521f5780405d0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go
@@ -0,0 +1,1123 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package connstring is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package connstring // import "go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/randutil"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/auth"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/dns"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage"
+)
+
+const (
+	// ServerMonitoringModeAuto indicates that the client will behave like "poll"
+	// mode when running on a FaaS (Function as a Service) platform, or like
+	// "stream" mode otherwise. The client detects its execution environment by
+	// following the rules for generating the "client.env" handshake metadata field
+	// as specified in the MongoDB Handshake specification. This is the default
+	// mode.
+	ServerMonitoringModeAuto = "auto"
+
+	// ServerMonitoringModePoll indicates that the client will periodically check
+	// the server using a hello or legacy hello command and then sleep for
+	// heartbeatFrequencyMS milliseconds before running another check.
+	ServerMonitoringModePoll = "poll"
+
+	// ServerMonitoringModeStream indicates that the client will use a streaming
+	// protocol when the server supports it. The streaming protocol optimally
+	// reduces the time it takes for a client to discover server state changes.
+	ServerMonitoringModeStream = "stream"
+)
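+
+// For example, the monitoring mode can be selected through the URI (handled
+// by the "servermonitoringmode" case in ConnString.addOptions below):
+//
+//	mongodb://localhost:27017/?serverMonitoringMode=poll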
+
+var (
+	// ErrLoadBalancedWithMultipleHosts is returned when loadBalanced=true is
+	// specified in a URI with multiple hosts.
+	ErrLoadBalancedWithMultipleHosts = errors.New(
+		"loadBalanced cannot be set to true if multiple hosts are specified")
+
+	// ErrLoadBalancedWithReplicaSet is returned when loadBalanced=true is
+	// specified in a URI with the replicaSet option.
+	ErrLoadBalancedWithReplicaSet = errors.New(
+		"loadBalanced cannot be set to true if a replica set name is specified")
+
+	// ErrLoadBalancedWithDirectConnection is returned when loadBalanced=true is
+	// specified in a URI with the directConnection option.
+	ErrLoadBalancedWithDirectConnection = errors.New(
+		"loadBalanced cannot be set to true if the direct connection option is specified")
+
+	// ErrSRVMaxHostsWithReplicaSet is returned when srvMaxHosts > 0 is
+	// specified in a URI with the replicaSet option.
+	ErrSRVMaxHostsWithReplicaSet = errors.New(
+		"srvMaxHosts cannot be a positive value if a replica set name is specified")
+
+	// ErrSRVMaxHostsWithLoadBalanced is returned when srvMaxHosts > 0 is
+	// specified in a URI with loadBalanced=true.
+	ErrSRVMaxHostsWithLoadBalanced = errors.New(
+		"srvMaxHosts cannot be a positive value if loadBalanced is set to true")
+)
+
+// random is a package-global pseudo-random number generator.
+var random = randutil.NewLockedRand()
+
+// ParseAndValidate parses the provided URI into a ConnString object and
+// checks that all values are valid.
+func ParseAndValidate(s string) (*ConnString, error) {
+	connStr, err := Parse(s)
+	if err != nil {
+		return nil, err
+	}
+	err = connStr.Validate()
+	if err != nil {
+		return nil, fmt.Errorf("error validating uri: %w", err)
+	}
+	return connStr, nil
+}
+
+// Parse parses the provided URI into a ConnString object
+// but does not check that all values are valid. Use `ConnString.Validate()`
+// to run the validation checks separately.
+func Parse(s string) (*ConnString, error) {
+	p := parser{dnsResolver: dns.DefaultResolver}
+	connStr, err := p.parse(s)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing uri: %w", err)
+	}
+	return connStr, err
+}
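+
+// A minimal usage sketch (the host, database, and option are placeholders):
+//
+//	cs, err := ParseAndValidate("mongodb://localhost:27017/admin?maxPoolSize=10")
+//	if err != nil {
++//		// handle the parse/validation error
+//	}
+//	_ = cs.Hosts // e.g. ["localhost:27017"], with cs.MaxPoolSize == 10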
+
+// ConnString represents a connection string to mongodb.
+type ConnString struct {
+	Original                           string
+	AppName                            string
+	AuthMechanism                      string
+	AuthMechanismProperties            map[string]string
+	AuthMechanismPropertiesSet         bool
+	AuthSource                         string
+	AuthSourceSet                      bool
+	Compressors                        []string
+	Connect                            ConnectMode
+	ConnectSet                         bool
+	DirectConnection                   bool
+	DirectConnectionSet                bool
+	ConnectTimeout                     time.Duration
+	ConnectTimeoutSet                  bool
+	Database                           string
+	HeartbeatInterval                  time.Duration
+	HeartbeatIntervalSet               bool
+	Hosts                              []string
+	J                                  bool
+	JSet                               bool
+	LoadBalanced                       bool
+	LoadBalancedSet                    bool
+	LocalThreshold                     time.Duration
+	LocalThresholdSet                  bool
+	MaxConnIdleTime                    time.Duration
+	MaxConnIdleTimeSet                 bool
+	MaxPoolSize                        uint64
+	MaxPoolSizeSet                     bool
+	MinPoolSize                        uint64
+	MinPoolSizeSet                     bool
+	MaxConnecting                      uint64
+	MaxConnectingSet                   bool
+	Password                           string
+	PasswordSet                        bool
+	RawHosts                           []string
+	ReadConcernLevel                   string
+	ReadPreference                     string
+	ReadPreferenceTagSets              []map[string]string
+	RetryWrites                        bool
+	RetryWritesSet                     bool
+	RetryReads                         bool
+	RetryReadsSet                      bool
+	MaxStaleness                       time.Duration
+	MaxStalenessSet                    bool
+	ReplicaSet                         string
+	Scheme                             string
+	ServerMonitoringMode               string
+	ServerSelectionTimeout             time.Duration
+	ServerSelectionTimeoutSet          bool
+	SocketTimeout                      time.Duration
+	SocketTimeoutSet                   bool
+	SRVMaxHosts                        int
+	SRVServiceName                     string
+	SSL                                bool
+	SSLSet                             bool
+	SSLClientCertificateKeyFile        string
+	SSLClientCertificateKeyFileSet     bool
+	SSLClientCertificateKeyPassword    func() string
+	SSLClientCertificateKeyPasswordSet bool
+	SSLCertificateFile                 string
+	SSLCertificateFileSet              bool
+	SSLPrivateKeyFile                  string
+	SSLPrivateKeyFileSet               bool
+	SSLInsecure                        bool
+	SSLInsecureSet                     bool
+	SSLCaFile                          string
+	SSLCaFileSet                       bool
+	SSLDisableOCSPEndpointCheck        bool
+	SSLDisableOCSPEndpointCheckSet     bool
+	Timeout                            time.Duration
+	TimeoutSet                         bool
+	WString                            string
+	WNumber                            int
+	WNumberSet                         bool
+	Username                           string
+	UsernameSet                        bool
+	ZlibLevel                          int
+	ZlibLevelSet                       bool
+	ZstdLevel                          int
+	ZstdLevelSet                       bool
+
+	WTimeout              time.Duration
+	WTimeoutSet           bool
+	WTimeoutSetFromOption bool
+
+	Options        map[string][]string
+	UnknownOptions map[string][]string
+}
+
+func (u *ConnString) String() string {
+	return u.Original
+}
+
+// HasAuthParameters returns true if this ConnString has any authentication parameters set and therefore represents
+// a request for authentication.
+func (u *ConnString) HasAuthParameters() bool {
+	// Check all auth parameters except for AuthSource because an auth source without other credentials is semantically
+	// valid and must not be interpreted as a request for authentication.
+	return u.AuthMechanism != "" || u.AuthMechanismProperties != nil || u.UsernameSet || u.PasswordSet
+}
+
+// Validate checks that the Auth and SSL parameters are valid values.
+func (u *ConnString) Validate() error {
+	var err error
+
+	if err = u.validateAuth(); err != nil {
+		return err
+	}
+
+	if err = u.validateSSL(); err != nil {
+		return err
+	}
+
+	// Check for invalid write concern (i.e. w=0 and j=true)
+	if u.WNumberSet && u.WNumber == 0 && u.JSet && u.J {
+		return writeconcern.ErrInconsistent
+	}
+
+	// Check for invalid use of direct connections.
+	if (u.ConnectSet && u.Connect == SingleConnect) ||
+		(u.DirectConnectionSet && u.DirectConnection) {
+		if len(u.Hosts) > 1 {
+			return errors.New("a direct connection cannot be made if multiple hosts are specified")
+		}
+		if u.Scheme == SchemeMongoDBSRV {
+			return errors.New("a direct connection cannot be made if an SRV URI is used")
+		}
+		if u.LoadBalancedSet && u.LoadBalanced {
+			return ErrLoadBalancedWithDirectConnection
+		}
+	}
+
+	// Validation for load-balanced mode.
+	if u.LoadBalancedSet && u.LoadBalanced {
+		if len(u.Hosts) > 1 {
+			return ErrLoadBalancedWithMultipleHosts
+		}
+		if u.ReplicaSet != "" {
+			return ErrLoadBalancedWithReplicaSet
+		}
+	}
+
+	// Check for invalid use of SRVMaxHosts.
+	if u.SRVMaxHosts > 0 {
+		if u.ReplicaSet != "" {
+			return ErrSRVMaxHostsWithReplicaSet
+		}
+		if u.LoadBalanced {
+			return ErrSRVMaxHostsWithLoadBalanced
+		}
+	}
+
+	// Check for OIDC auth mechanism properties that cannot be set in the ConnString.
+	if u.AuthMechanism == auth.MongoDBOIDC {
+		if _, ok := u.AuthMechanismProperties[auth.AllowedHostsProp]; ok {
+			return fmt.Errorf(
+				"ALLOWED_HOSTS cannot be specified in the URI connection string for the %q auth mechanism, it must be specified through the ClientOptions directly",
+				auth.MongoDBOIDC,
+			)
+		}
+	}
+
+	return nil
+}
+
+func (u *ConnString) setDefaultAuthParams(dbName string) error {
+	// We do this check here rather than in validateAuth because this function is called as part of parsing and sets
+	// the value of AuthSource if authentication is enabled.
+	if u.AuthSourceSet && u.AuthSource == "" {
+		return errors.New("authSource must be non-empty when supplied in a URI")
+	}
+
+	switch strings.ToLower(u.AuthMechanism) {
+	case "plain":
+		if u.AuthSource == "" {
+			u.AuthSource = dbName
+			if u.AuthSource == "" {
+				u.AuthSource = "$external"
+			}
+		}
+	case "gssapi":
+		if u.AuthMechanismProperties == nil {
+			u.AuthMechanismProperties = map[string]string{
+				"SERVICE_NAME": "mongodb",
+			}
+		} else if v, ok := u.AuthMechanismProperties["SERVICE_NAME"]; !ok || v == "" {
+			u.AuthMechanismProperties["SERVICE_NAME"] = "mongodb"
+		}
+		fallthrough
+	case "mongodb-aws", "mongodb-x509", "mongodb-oidc":
+		if u.AuthSource == "" {
+			u.AuthSource = "$external"
+		} else if u.AuthSource != "$external" {
+			return fmt.Errorf("auth source must be $external")
+		}
+	case "mongodb-cr":
+		fallthrough
+	case "scram-sha-1":
+		fallthrough
+	case "scram-sha-256":
+		if u.AuthSource == "" {
+			u.AuthSource = dbName
+			if u.AuthSource == "" {
+				u.AuthSource = "admin"
+			}
+		}
+	case "":
+		// Only set auth source if there is a request for authentication via non-empty credentials.
+		if u.AuthSource == "" && (u.AuthMechanismProperties != nil || u.Username != "" || u.PasswordSet) {
+			u.AuthSource = dbName
+			if u.AuthSource == "" {
+				u.AuthSource = "admin"
+			}
+		}
+	default:
+		return fmt.Errorf("invalid auth mechanism")
+	}
+	return nil
+}
+
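+// For example, with no explicit authSource, the SCRAM mechanisms default to
+// the URI database (or "admin" when none is given), while the $external-backed
+// mechanisms such as MONGODB-X509 default to "$external".
+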
+func (u *ConnString) addOptions(connectionArgPairs []string) error {
+	var tlsssl *bool // used to determine if tls and ssl options are both specified and set differently.
+	for _, pair := range connectionArgPairs {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 || kv[0] == "" {
+			return fmt.Errorf("invalid option")
+		}
+
+		key, err := url.QueryUnescape(kv[0])
+		if err != nil {
+			return fmt.Errorf("invalid option key %q: %w", kv[0], err)
+		}
+
+		value, err := url.QueryUnescape(kv[1])
+		if err != nil {
+			return fmt.Errorf("invalid option value %q: %w", kv[1], err)
+		}
+
+		lowerKey := strings.ToLower(key)
+		switch lowerKey {
+		case "appname":
+			u.AppName = value
+		case "authmechanism":
+			u.AuthMechanism = value
+		case "authmechanismproperties":
+			u.AuthMechanismProperties = make(map[string]string)
+			pairs := strings.Split(value, ",")
+			for _, pair := range pairs {
+				kv := strings.SplitN(pair, ":", 2)
+				if len(kv) != 2 || kv[0] == "" {
+					return fmt.Errorf("invalid authMechanism property")
+				}
+				u.AuthMechanismProperties[kv[0]] = kv[1]
+			}
+			u.AuthMechanismPropertiesSet = true
+		case "authsource":
+			u.AuthSource = value
+			u.AuthSourceSet = true
+		case "compressors":
+			compressors := strings.Split(value, ",")
+			if len(compressors) < 1 {
+				return fmt.Errorf("must have at least 1 compressor")
+			}
+			u.Compressors = compressors
+		case "connect":
+			switch strings.ToLower(value) {
+			case "automatic":
+			case "direct":
+				u.Connect = SingleConnect
+			default:
+				return fmt.Errorf("invalid 'connect' value: %q", value)
+			}
+			if u.DirectConnectionSet {
+				expectedValue := u.Connect == SingleConnect // directConnection should be true if connect=direct
+				if u.DirectConnection != expectedValue {
+					return fmt.Errorf("options connect=%q and directConnection=%v conflict", value, u.DirectConnection)
+				}
+			}
+
+			u.ConnectSet = true
+		case "directconnection":
+			switch strings.ToLower(value) {
+			case "true":
+				u.DirectConnection = true
+			case "false":
+			default:
+				return fmt.Errorf("invalid 'directConnection' value: %q", value)
+			}
+
+			if u.ConnectSet {
+				expectedValue := AutoConnect
+				if u.DirectConnection {
+					expectedValue = SingleConnect
+				}
+
+				if u.Connect != expectedValue {
+					return fmt.Errorf("options connect=%q and directConnection=%q conflict", u.Connect, value)
+				}
+			}
+			u.DirectConnectionSet = true
+		case "connecttimeoutms":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.ConnectTimeout = time.Duration(n) * time.Millisecond
+			u.ConnectTimeoutSet = true
+		case "heartbeatintervalms", "heartbeatfrequencyms":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.HeartbeatInterval = time.Duration(n) * time.Millisecond
+			u.HeartbeatIntervalSet = true
+		case "journal":
+			switch value {
+			case "true":
+				u.J = true
+			case "false":
+				u.J = false
+			default:
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			u.JSet = true
+		case "loadbalanced":
+			switch value {
+			case "true":
+				u.LoadBalanced = true
+			case "false":
+				u.LoadBalanced = false
+			default:
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			u.LoadBalancedSet = true
+		case "localthresholdms":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.LocalThreshold = time.Duration(n) * time.Millisecond
+			u.LocalThresholdSet = true
+		case "maxidletimems":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.MaxConnIdleTime = time.Duration(n) * time.Millisecond
+			u.MaxConnIdleTimeSet = true
+		case "maxpoolsize":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.MaxPoolSize = uint64(n)
+			u.MaxPoolSizeSet = true
+		case "minpoolsize":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.MinPoolSize = uint64(n)
+			u.MinPoolSizeSet = true
+		case "maxconnecting":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.MaxConnecting = uint64(n)
+			u.MaxConnectingSet = true
+		case "readconcernlevel":
+			u.ReadConcernLevel = value
+		case "readpreference":
+			u.ReadPreference = value
+		case "readpreferencetags":
+			if value == "" {
+				// If "readPreferenceTags=" is supplied, append an empty map to tag sets to
+				// represent a wild-card.
+				u.ReadPreferenceTagSets = append(u.ReadPreferenceTagSets, map[string]string{})
+				break
+			}
+
+			tags := make(map[string]string)
+			items := strings.Split(value, ",")
+			for _, item := range items {
+				parts := strings.Split(item, ":")
+				if len(parts) != 2 {
+					return fmt.Errorf("invalid value for %q: %q", key, value)
+				}
+				tags[parts[0]] = parts[1]
+			}
+			u.ReadPreferenceTagSets = append(u.ReadPreferenceTagSets, tags)
+		case "maxstaleness", "maxstalenessseconds":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.MaxStaleness = time.Duration(n) * time.Second
+			u.MaxStalenessSet = true
+		case "replicaset":
+			u.ReplicaSet = value
+		case "retrywrites":
+			switch value {
+			case "true":
+				u.RetryWrites = true
+			case "false":
+				u.RetryWrites = false
+			default:
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			u.RetryWritesSet = true
+		case "retryreads":
+			switch value {
+			case "true":
+				u.RetryReads = true
+			case "false":
+				u.RetryReads = false
+			default:
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			u.RetryReadsSet = true
+		case "servermonitoringmode":
+			if !IsValidServerMonitoringMode(value) {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			u.ServerMonitoringMode = value
+		case "serverselectiontimeoutms":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.ServerSelectionTimeout = time.Duration(n) * time.Millisecond
+			u.ServerSelectionTimeoutSet = true
+		case "sockettimeoutms":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.SocketTimeout = time.Duration(n) * time.Millisecond
+			u.SocketTimeoutSet = true
+		case "srvmaxhosts":
+			// srvMaxHosts can only be set on URIs with the "mongodb+srv" scheme
+			if u.Scheme != SchemeMongoDBSRV {
+				return fmt.Errorf("cannot specify srvMaxHosts on non-SRV URI")
+			}
+
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.SRVMaxHosts = n
+		case "srvservicename":
+			// srvServiceName can only be set on URIs with the "mongodb+srv" scheme
+			if u.Scheme != SchemeMongoDBSRV {
+				return fmt.Errorf("cannot specify srvServiceName on non-SRV URI")
+			}
+
+			// srvServiceName must be between 1 and 62 characters according to
+			// our specification. Empty service names are not valid, and the service
+			// name (including prepended underscore) should not exceed the 63 character
+			// limit for DNS query subdomains.
+			if len(value) < 1 || len(value) > 62 {
+				return fmt.Errorf("srvServiceName value must be between 1 and 62 characters")
+			}
+			u.SRVServiceName = value
+		case "ssl", "tls":
+			switch value {
+			case "true":
+				u.SSL = true
+			case "false":
+				u.SSL = false
+			default:
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			if tlsssl == nil {
+				tlsssl = new(bool)
+				*tlsssl = u.SSL
+			} else if *tlsssl != u.SSL {
+				return errors.New("tls and ssl options, when both specified, must be equivalent")
+			}
+
+			u.SSLSet = true
+		case "sslclientcertificatekeyfile", "tlscertificatekeyfile":
+			u.SSL = true
+			u.SSLSet = true
+			u.SSLClientCertificateKeyFile = value
+			u.SSLClientCertificateKeyFileSet = true
+		case "sslclientcertificatekeypassword", "tlscertificatekeyfilepassword":
+			u.SSLClientCertificateKeyPassword = func() string { return value }
+			u.SSLClientCertificateKeyPasswordSet = true
+		case "tlscertificatefile":
+			u.SSL = true
+			u.SSLSet = true
+			u.SSLCertificateFile = value
+			u.SSLCertificateFileSet = true
+		case "tlsprivatekeyfile":
+			u.SSL = true
+			u.SSLSet = true
+			u.SSLPrivateKeyFile = value
+			u.SSLPrivateKeyFileSet = true
+		case "sslinsecure", "tlsinsecure":
+			switch value {
+			case "true":
+				u.SSLInsecure = true
+			case "false":
+				u.SSLInsecure = false
+			default:
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			u.SSLInsecureSet = true
+		case "sslcertificateauthorityfile", "tlscafile":
+			u.SSL = true
+			u.SSLSet = true
+			u.SSLCaFile = value
+			u.SSLCaFileSet = true
+		case "timeoutms":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.Timeout = time.Duration(n) * time.Millisecond
+			u.TimeoutSet = true
+		case "tlsdisableocspendpointcheck":
+			u.SSL = true
+			u.SSLSet = true
+
+			switch value {
+			case "true":
+				u.SSLDisableOCSPEndpointCheck = true
+			case "false":
+				u.SSLDisableOCSPEndpointCheck = false
+			default:
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.SSLDisableOCSPEndpointCheckSet = true
+		case "w":
+			if w, err := strconv.Atoi(value); err == nil {
+				if w < 0 {
+					return fmt.Errorf("invalid value for %q: %q", key, value)
+				}
+
+				u.WNumber = w
+				u.WNumberSet = true
+				u.WString = ""
+				break
+			}
+
+			u.WString = value
+			u.WNumberSet = false
+
+		case "wtimeoutms":
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.WTimeout = time.Duration(n) * time.Millisecond
+			u.WTimeoutSet = true
+		case "wtimeout":
+			// Defer to wtimeoutms, but not to a manually-set option.
+			if u.WTimeoutSet {
+				break
+			}
+			n, err := strconv.Atoi(value)
+			if err != nil || n < 0 {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+			u.WTimeout = time.Duration(n) * time.Millisecond
+		case "zlibcompressionlevel":
+			level, err := strconv.Atoi(value)
+			if err != nil || (level < -1 || level > 9) {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			if level == -1 {
+				level = wiremessage.DefaultZlibLevel
+			}
+			u.ZlibLevel = level
+			u.ZlibLevelSet = true
+		case "zstdcompressionlevel":
+			const maxZstdLevel = 22 // https://github.com/facebook/zstd/blob/a880ca239b447968493dd2fed3850e766d6305cc/contrib/linux-kernel/lib/zstd/compress.c#L3291
+			level, err := strconv.Atoi(value)
+			if err != nil || (level < -1 || level > maxZstdLevel) {
+				return fmt.Errorf("invalid value for %q: %q", key, value)
+			}
+
+			if level == -1 {
+				level = wiremessage.DefaultZstdLevel
+			}
+			u.ZstdLevel = level
+			u.ZstdLevelSet = true
+		default:
+			if u.UnknownOptions == nil {
+				u.UnknownOptions = make(map[string][]string)
+			}
+			u.UnknownOptions[lowerKey] = append(u.UnknownOptions[lowerKey], value)
+		}
+
+		if u.Options == nil {
+			u.Options = make(map[string][]string)
+		}
+		u.Options[lowerKey] = append(u.Options[lowerKey], value)
+	}
+	return nil
+}
+
+func (u *ConnString) validateAuth() error {
+	switch strings.ToLower(u.AuthMechanism) {
+	case "mongodb-cr":
+		if u.Username == "" {
+			return fmt.Errorf("username required for MONGO-CR")
+		}
+		if u.Password == "" {
+			return fmt.Errorf("password required for MONGO-CR")
+		}
+		if u.AuthMechanismProperties != nil {
+			return fmt.Errorf("MONGO-CR cannot have mechanism properties")
+		}
+	case "mongodb-x509":
+		if u.Password != "" {
+			return fmt.Errorf("password cannot be specified for MONGO-X509")
+		}
+		if u.AuthMechanismProperties != nil {
+			return fmt.Errorf("MONGO-X509 cannot have mechanism properties")
+		}
+	case "mongodb-aws":
+		if u.Username != "" && u.Password == "" {
+			return fmt.Errorf("username without password is invalid for MONGODB-AWS")
+		}
+		if u.Username == "" && u.Password != "" {
+			return fmt.Errorf("password without username is invalid for MONGODB-AWS")
+		}
+		var token bool
+		for k := range u.AuthMechanismProperties {
+			if k != "AWS_SESSION_TOKEN" {
+				return fmt.Errorf("invalid auth property for MONGODB-AWS")
+			}
+			token = true
+		}
+		if token && u.Username == "" && u.Password == "" {
+			return fmt.Errorf("token without username and password is invalid for MONGODB-AWS")
+		}
+	case "gssapi":
+		if u.Username == "" {
+			return fmt.Errorf("username required for GSSAPI")
+		}
+		for k := range u.AuthMechanismProperties {
+			if k != "SERVICE_NAME" && k != "CANONICALIZE_HOST_NAME" && k != "SERVICE_REALM" && k != "SERVICE_HOST" {
+				return fmt.Errorf("invalid auth property for GSSAPI")
+			}
+		}
+	case "plain":
+		if u.Username == "" {
+			return fmt.Errorf("username required for PLAIN")
+		}
+		if u.Password == "" {
+			return fmt.Errorf("password required for PLAIN")
+		}
+		if u.AuthMechanismProperties != nil {
+			return fmt.Errorf("PLAIN cannot have mechanism properties")
+		}
+	case "scram-sha-1":
+		if u.Username == "" {
+			return fmt.Errorf("username required for SCRAM-SHA-1")
+		}
+		if u.Password == "" {
+			return fmt.Errorf("password required for SCRAM-SHA-1")
+		}
+		if u.AuthMechanismProperties != nil {
+			return fmt.Errorf("SCRAM-SHA-1 cannot have mechanism properties")
+		}
+	case "scram-sha-256":
+		if u.Username == "" {
+			return fmt.Errorf("username required for SCRAM-SHA-256")
+		}
+		if u.Password == "" {
+			return fmt.Errorf("password required for SCRAM-SHA-256")
+		}
+		if u.AuthMechanismProperties != nil {
+			return fmt.Errorf("SCRAM-SHA-256 cannot have mechanism properties")
+		}
+	case "mongodb-oidc":
+		if u.Password != "" {
+			return fmt.Errorf("password cannot be specified for MONGODB-OIDC")
+		}
+	case "":
+		if u.UsernameSet && u.Username == "" {
+			return fmt.Errorf("username required if URI contains user info")
+		}
+	default:
+		return fmt.Errorf("invalid auth mechanism")
+	}
+	return nil
+}
+
+func (u *ConnString) validateSSL() error {
+	if !u.SSL {
+		return nil
+	}
+
+	if u.SSLClientCertificateKeyFileSet {
+		if u.SSLCertificateFileSet || u.SSLPrivateKeyFileSet {
+			return errors.New("the sslClientCertificateKeyFile/tlsCertificateKeyFile URI option cannot be provided " +
+				"along with tlsCertificateFile or tlsPrivateKeyFile")
+		}
+		return nil
+	}
+	if u.SSLCertificateFileSet && !u.SSLPrivateKeyFileSet {
+		return errors.New("the tlsPrivateKeyFile URI option must be provided if the tlsCertificateFile option is specified")
+	}
+	if u.SSLPrivateKeyFileSet && !u.SSLCertificateFileSet {
+		return errors.New("the tlsCertificateFile URI option must be provided if the tlsPrivateKeyFile option is specified")
+	}
+
+	if u.SSLInsecureSet && u.SSLDisableOCSPEndpointCheckSet {
+		return errors.New("the sslInsecure/tlsInsecure URI option cannot be provided along with " +
+			"tlsDisableOCSPEndpointCheck ")
+	}
+	return nil
+}
+
+func sanitizeHost(host string) (string, error) {
+	if host == "" {
+		return host, nil
+	}
+	unescaped, err := url.QueryUnescape(host)
+	if err != nil {
+		return "", fmt.Errorf("invalid host %q: %w", host, err)
+	}
+
+	_, port, err := net.SplitHostPort(unescaped)
+	// Unfortunately, net.SplitHostPort requires a port to exist, so a
+	// "missing port in address" error is tolerated here.
+	if err != nil {
+		if addrError, ok := err.(*net.AddrError); !ok || addrError.Err != "missing port in address" {
+			return "", err
+		}
+	}
+
+	if port != "" {
+		d, err := strconv.Atoi(port)
+		if err != nil {
+			return "", fmt.Errorf("port must be an integer: %w", err)
+		}
+		if d <= 0 || d >= 65536 {
+			return "", fmt.Errorf("port must be in the range [1, 65535]")
+		}
+	}
+	return unescaped, nil
+}
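+
+// For illustration: sanitizeHost("localhost") returns "localhost" (a missing
+// port is tolerated), sanitizeHost("localhost:27017") validates the port
+// range, and sanitizeHost("localhost:0") fails because the port must be in
+// [1, 65535].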
+
+// ConnectMode informs the driver on how to connect
+// to the server.
+type ConnectMode uint8
+
+var _ fmt.Stringer = ConnectMode(0)
+
+// ConnectMode constants.
+const (
+	AutoConnect ConnectMode = iota
+	SingleConnect
+)
+
+// String implements the fmt.Stringer interface.
+func (c ConnectMode) String() string {
+	switch c {
+	case AutoConnect:
+		return "automatic"
+	case SingleConnect:
+		return "direct"
+	default:
+		return "unknown"
+	}
+}
+
+// Scheme constants
+const (
+	SchemeMongoDB    = "mongodb"
+	SchemeMongoDBSRV = "mongodb+srv"
+)
+
+type parser struct {
+	dnsResolver *dns.Resolver
+}
+
+func (p *parser) parse(original string) (*ConnString, error) {
+	connStr := &ConnString{}
+	connStr.Original = original
+	uri := original
+
+	var err error
+	switch {
+	case strings.HasPrefix(uri, SchemeMongoDBSRV+"://"):
+		connStr.Scheme = SchemeMongoDBSRV
+		// remove the scheme
+		uri = uri[len(SchemeMongoDBSRV)+3:]
+	case strings.HasPrefix(uri, SchemeMongoDB+"://"):
+		connStr.Scheme = SchemeMongoDB
+		// remove the scheme
+		uri = uri[len(SchemeMongoDB)+3:]
+	default:
+		return nil, errors.New(`scheme must be "mongodb" or "mongodb+srv"`)
+	}
+
+	if idx := strings.Index(uri, "@"); idx != -1 {
+		userInfo := uri[:idx]
+		uri = uri[idx+1:]
+
+		username := userInfo
+		var password string
+
+		if u, p, ok := strings.Cut(userInfo, ":"); ok {
+			username = u
+			password = p
+			connStr.PasswordSet = true
+		}
+
+		// Validate and process the username.
+		if strings.Contains(username, "/") {
+			return nil, fmt.Errorf("unescaped slash in username")
+		}
+		connStr.Username, err = url.PathUnescape(username)
+		if err != nil {
+			return nil, fmt.Errorf("invalid username: %w", err)
+		}
+		connStr.UsernameSet = true
+
+		// Validate and process the password.
+		if strings.Contains(password, ":") {
+			return nil, fmt.Errorf("unescaped colon in password")
+		}
+		if strings.Contains(password, "/") {
+			return nil, fmt.Errorf("unescaped slash in password")
+		}
+		connStr.Password, err = url.PathUnescape(password)
+		if err != nil {
+			return nil, fmt.Errorf("invalid password: %w", err)
+		}
+	}
+
+	// fetch the hosts field
+	hosts := uri
+	if idx := strings.IndexAny(uri, "/?@"); idx != -1 {
+		if uri[idx] == '@' {
+			return nil, fmt.Errorf("unescaped @ sign in user info")
+		}
+		if uri[idx] == '?' {
+			return nil, fmt.Errorf("must have a / before the query ?")
+		}
+		hosts = uri[:idx]
+	}
+
+	for _, rawHost := range strings.Split(hosts, ",") {
+		host, err := sanitizeHost(rawHost)
+		if err != nil {
+			return nil, fmt.Errorf("invalid host %q: %w", rawHost, err)
+		}
+		if host != "" {
+			connStr.RawHosts = append(connStr.RawHosts, host)
+		}
+	}
+	connStr.Hosts = connStr.RawHosts
+	uri = uri[len(hosts):]
+	extractedDatabase, err := extractDatabaseFromURI(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	uri = extractedDatabase.uri
+	connStr.Database = extractedDatabase.db
+
+	// grab connection arguments from URI
+	connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	// grab connection arguments from TXT record and enable SSL if "mongodb+srv://"
+	var connectionArgsFromTXT []string
+	if connStr.Scheme == SchemeMongoDBSRV && p.dnsResolver != nil {
+		connectionArgsFromTXT, err = p.dnsResolver.GetConnectionArgsFromTXT(hosts)
+		if err != nil {
+			return nil, err
+		}
+
+		// SSL is enabled by default for SRV, but can be manually disabled with "ssl=false".
+		connStr.SSL = true
+		connStr.SSLSet = true
+	}
+
+	// add connection arguments from URI and TXT records to connstring
+	connectionArgPairs := make([]string, 0, len(connectionArgsFromTXT)+len(connectionArgsFromQueryString))
+	connectionArgPairs = append(connectionArgPairs, connectionArgsFromTXT...)
+	connectionArgPairs = append(connectionArgPairs, connectionArgsFromQueryString...)
+
+	err = connStr.addOptions(connectionArgPairs)
+	if err != nil {
+		return nil, err
+	}
+
+	// do SRV lookup if "mongodb+srv://"
+	if connStr.Scheme == SchemeMongoDBSRV && p.dnsResolver != nil {
+		parsedHosts, err := p.dnsResolver.ParseHosts(hosts, connStr.SRVServiceName, true)
+		if err != nil {
+			return connStr, err
+		}
+
+		// If connStr.SRVMaxHosts is non-zero and is less than the number of hosts,
+		// randomly select SRVMaxHosts hosts from parsedHosts.
+		if connStr.SRVMaxHosts > 0 && connStr.SRVMaxHosts < len(parsedHosts) {
+			random.Shuffle(len(parsedHosts), func(i, j int) {
+				parsedHosts[i], parsedHosts[j] = parsedHosts[j], parsedHosts[i]
+			})
+			parsedHosts = parsedHosts[:connStr.SRVMaxHosts]
+		}
+
+		var hosts []string
+		for _, parsedHost := range parsedHosts {
+			host, err := sanitizeHost(parsedHost)
+			if err != nil {
+				return connStr, fmt.Errorf("invalid host %q: %w", parsedHost, err)
+			}
+			if host != "" {
+				hosts = append(hosts, host)
+			}
+		}
+		connStr.Hosts = hosts
+	}
+	if len(connStr.Hosts) == 0 {
+		return nil, fmt.Errorf("must have at least 1 host")
+	}
+
+	err = connStr.setDefaultAuthParams(extractedDatabase.db)
+	if err != nil {
+		return nil, err
+	}
+
+	// If WTimeout was set from manual options passed in, set WTimeoutSet to true.
+	if connStr.WTimeoutSetFromOption {
+		connStr.WTimeoutSet = true
+	}
+
+	return connStr, nil
+}
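+
+// As an illustration (hypothetical URI), parsing
+//
+//	mongodb://user:pass@host1:27017,host2:27018/admin?replicaSet=rs0
+//
+// yields Scheme "mongodb", Username "user", Password "pass", Hosts
+// ["host1:27017", "host2:27018"], Database "admin", and the single query
+// pair "replicaSet=rs0", which is handed to addOptions.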
+
+// IsValidServerMonitoringMode will return true if the given string matches a
+// valid server monitoring mode.
+func IsValidServerMonitoringMode(mode string) bool {
+	return mode == ServerMonitoringModeAuto ||
+		mode == ServerMonitoringModeStream ||
+		mode == ServerMonitoringModePoll
+}
+
+func extractQueryArgsFromURI(uri string) ([]string, error) {
+	if len(uri) == 0 {
+		return nil, nil
+	}
+
+	if uri[0] != '?' {
+		return nil, errors.New("must have a ? separator between path and query")
+	}
+
+	uri = uri[1:]
+	if len(uri) == 0 {
+		return nil, nil
+	}
+	return strings.FieldsFunc(uri, func(r rune) bool { return r == ';' || r == '&' }), nil
+}
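+
+// A short sketch of the separator handling (input is hypothetical):
+//
+//	args, _ := extractQueryArgsFromURI("?w=majority;retryWrites=true&appName=demo")
+//	// args == []string{"w=majority", "retryWrites=true", "appName=demo"}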
+
+type extractedDatabase struct {
+	uri string
+	db  string
+}
+
+// extractDatabaseFromURI is a helper function to retrieve information about
+// the database from the passed in URI. It accepts as an argument the currently
+// parsed URI and returns the remainder of the uri, the database it found,
+// and any error it encounters while parsing.
+func extractDatabaseFromURI(uri string) (extractedDatabase, error) {
+	if len(uri) == 0 {
+		return extractedDatabase{}, nil
+	}
+
+	if uri[0] != '/' {
+		return extractedDatabase{}, errors.New("must have a / separator between hosts and path")
+	}
+
+	uri = uri[1:]
+	if len(uri) == 0 {
+		return extractedDatabase{}, nil
+	}
+
+	database := uri
+	if idx := strings.IndexRune(uri, '?'); idx != -1 {
+		database = uri[:idx]
+	}
+
+	unescapedDatabase, err := url.QueryUnescape(database)
+	if err != nil {
+		return extractedDatabase{}, fmt.Errorf("invalid database %q: %w", database, err)
+	}
+
+	uri = uri[len(database):]
+
+	return extractedDatabase{
+		uri: uri,
+		db:  unescapedDatabase,
+	}, nil
+}
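+
+// A minimal sketch (hypothetical input): the leading '/' is consumed and the
+// database name is query-unescaped.
+//
+//	res, _ := extractDatabaseFromURI("/my%20db?w=1")
+//	// res.db == "my db", res.uri == "?w=1"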
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go
new file mode 100644
index 0000000000000000000000000000000000000000..576c007d67fcc07de1e9c56c5d4f21caabf4a495
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go
@@ -0,0 +1,419 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options"
+)
+
+const (
+	defaultKmsPort    = 443
+	defaultKmsTimeout = 10 * time.Second
+)
+
+// CollectionInfoFn is a callback used to retrieve collection information.
+type CollectionInfoFn func(ctx context.Context, db string, filter bsoncore.Document) (bsoncore.Document, error)
+
+// KeyRetrieverFn is a callback used to retrieve keys from the key vault.
+type KeyRetrieverFn func(ctx context.Context, filter bsoncore.Document) ([]bsoncore.Document, error)
+
+// MarkCommandFn is a callback used to add encryption markings to a command.
+type MarkCommandFn func(ctx context.Context, db string, cmd bsoncore.Document) (bsoncore.Document, error)
+
+// CryptOptions specifies options to configure a Crypt instance.
+type CryptOptions struct {
+	MongoCrypt           *mongocrypt.MongoCrypt
+	CollInfoFn           CollectionInfoFn
+	KeyFn                KeyRetrieverFn
+	MarkFn               MarkCommandFn
+	TLSConfig            map[string]*tls.Config
+	BypassAutoEncryption bool
+	BypassQueryAnalysis  bool
+}
+
+// Crypt is an interface implemented by types that can encrypt and decrypt instances of
+// bsoncore.Document.
+//
+// Users should rely on the driver's crypt type (used by default) for encryption and decryption
+// unless they are perfectly confident in another implementation of Crypt.
+type Crypt interface {
+	// Encrypt encrypts the given command.
+	Encrypt(ctx context.Context, db string, cmd bsoncore.Document) (bsoncore.Document, error)
+	// Decrypt decrypts the given command response.
+	Decrypt(ctx context.Context, cmdResponse bsoncore.Document) (bsoncore.Document, error)
+	// CreateDataKey creates a data key using the given KMS provider and options.
+	CreateDataKey(ctx context.Context, kmsProvider string, opts *options.DataKeyOptions) (bsoncore.Document, error)
+	// EncryptExplicit encrypts the given value with the given options.
+	EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *options.ExplicitEncryptionOptions) (byte, []byte, error)
+	// EncryptExplicitExpression encrypts the given expression with the given options.
+	EncryptExplicitExpression(ctx context.Context, val bsoncore.Document, opts *options.ExplicitEncryptionOptions) (bsoncore.Document, error)
+	// DecryptExplicit decrypts the given encrypted value.
+	DecryptExplicit(ctx context.Context, subtype byte, data []byte) (bsoncore.Value, error)
+	// Close cleans up any resources associated with the Crypt instance.
+	Close()
+	// BypassAutoEncryption returns true if auto-encryption should be bypassed.
+	BypassAutoEncryption() bool
+	// RewrapDataKey attempts to rewrap the document data keys matching the filter, preparing the re-wrapped documents
+	// to be returned as a slice of bsoncore.Document.
+	RewrapDataKey(ctx context.Context, filter []byte, opts *options.RewrapManyDataKeyOptions) ([]bsoncore.Document, error)
+}
+
+// crypt consumes the mongocrypt.MongoCrypt type to iterate the libmongocrypt state machine and perform encryption
+// and decryption.
+type crypt struct {
+	mongoCrypt *mongocrypt.MongoCrypt
+	collInfoFn CollectionInfoFn
+	keyFn      KeyRetrieverFn
+	markFn     MarkCommandFn
+	tlsConfig  map[string]*tls.Config
+
+	bypassAutoEncryption bool
+}
+
+// NewCrypt creates a new Crypt instance configured with the given AutoEncryptionOptions.
+func NewCrypt(opts *CryptOptions) Crypt {
+	c := &crypt{
+		mongoCrypt:           opts.MongoCrypt,
+		collInfoFn:           opts.CollInfoFn,
+		keyFn:                opts.KeyFn,
+		markFn:               opts.MarkFn,
+		tlsConfig:            opts.TLSConfig,
+		bypassAutoEncryption: opts.BypassAutoEncryption,
+	}
+	return c
+}
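+
+// A hedged usage sketch: the callbacks below are placeholders supplied by the
+// caller, not defaults of this package.
+//
+//	c := NewCrypt(&CryptOptions{
+//		MongoCrypt: mc,         // an initialized *mongocrypt.MongoCrypt
+//		CollInfoFn: collInfoFn, // fetches collection info for auto encryption
+//		KeyFn:      keyFn,      // reads data keys from the key vault
+//		MarkFn:     markFn,     // runs query analysis to mark the command
+//	})
+//	defer c.Close()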
+
+// Encrypt encrypts the given command.
+func (c *crypt) Encrypt(ctx context.Context, db string, cmd bsoncore.Document) (bsoncore.Document, error) {
+	if c.bypassAutoEncryption {
+		return cmd, nil
+	}
+
+	cryptCtx, err := c.mongoCrypt.CreateEncryptionContext(db, cmd)
+	if err != nil {
+		return nil, err
+	}
+	defer cryptCtx.Close()
+
+	return c.executeStateMachine(ctx, cryptCtx, db)
+}
+
+// Decrypt decrypts the given command response.
+func (c *crypt) Decrypt(ctx context.Context, cmdResponse bsoncore.Document) (bsoncore.Document, error) {
+	cryptCtx, err := c.mongoCrypt.CreateDecryptionContext(cmdResponse)
+	if err != nil {
+		return nil, err
+	}
+	defer cryptCtx.Close()
+
+	return c.executeStateMachine(ctx, cryptCtx, "")
+}
+
+// CreateDataKey creates a data key using the given KMS provider and options.
+func (c *crypt) CreateDataKey(ctx context.Context, kmsProvider string, opts *options.DataKeyOptions) (bsoncore.Document, error) {
+	cryptCtx, err := c.mongoCrypt.CreateDataKeyContext(kmsProvider, opts)
+	if err != nil {
+		return nil, err
+	}
+	defer cryptCtx.Close()
+
+	return c.executeStateMachine(ctx, cryptCtx, "")
+}
+
+// RewrapDataKey attempts to rewrap the document data keys matching the filter, preparing the re-wrapped documents to
+// be returned as a slice of bsoncore.Document.
+func (c *crypt) RewrapDataKey(ctx context.Context, filter []byte,
+	opts *options.RewrapManyDataKeyOptions) ([]bsoncore.Document, error) {
+
+	cryptCtx, err := c.mongoCrypt.RewrapDataKeyContext(filter, opts)
+	if err != nil {
+		return nil, err
+	}
+	defer cryptCtx.Close()
+
+	rewrappedBSON, err := c.executeStateMachine(ctx, cryptCtx, "")
+	if err != nil {
+		return nil, err
+	}
+	if rewrappedBSON == nil {
+		return nil, nil
+	}
+
+	// mongocrypt_ctx_rewrap_many_datakey_init wraps the documents in a BSON of the form { "v": [(BSON document), ...] }
+	// where each BSON document in the slice is a document containing a rewrapped datakey.
+	rewrappedDocumentBytes, err := rewrappedBSON.LookupErr("v")
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the resulting BSON as individual documents.
+	rewrappedDocsArray, ok := rewrappedDocumentBytes.ArrayOK()
+	if !ok {
+		return nil, fmt.Errorf("expected results from mongocrypt_ctx_rewrap_many_datakey_init to be an array")
+	}
+
+	rewrappedDocumentValues, err := rewrappedDocsArray.Values()
+	if err != nil {
+		return nil, err
+	}
+
+	rewrappedDocuments := []bsoncore.Document{}
+	for _, rewrappedDocumentValue := range rewrappedDocumentValues {
+		if rewrappedDocumentValue.Type != bsontype.EmbeddedDocument {
+			// If a value in the document's array returned by mongocrypt is anything other than an embedded document,
+			// then something is wrong and we should terminate the routine.
+			return nil, fmt.Errorf("expected value of type %q, got: %q",
+				bsontype.EmbeddedDocument.String(),
+				rewrappedDocumentValue.Type.String())
+		}
+		rewrappedDocuments = append(rewrappedDocuments, rewrappedDocumentValue.Document())
+	}
+	return rewrappedDocuments, nil
+}
+
+// EncryptExplicit encrypts the given value with the given options.
+func (c *crypt) EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *options.ExplicitEncryptionOptions) (byte, []byte, error) {
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendValueElement(doc, "v", val)
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+
+	cryptCtx, err := c.mongoCrypt.CreateExplicitEncryptionContext(doc, opts)
+	if err != nil {
+		return 0, nil, err
+	}
+	defer cryptCtx.Close()
+
+	res, err := c.executeStateMachine(ctx, cryptCtx, "")
+	if err != nil {
+		return 0, nil, err
+	}
+
+	sub, data := res.Lookup("v").Binary()
+	return sub, data, nil
+}
+
+// EncryptExplicitExpression encrypts the given expression with the given options.
+func (c *crypt) EncryptExplicitExpression(ctx context.Context, expr bsoncore.Document, opts *options.ExplicitEncryptionOptions) (bsoncore.Document, error) {
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendDocumentElement(doc, "v", expr)
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+
+	cryptCtx, err := c.mongoCrypt.CreateExplicitEncryptionExpressionContext(doc, opts)
+	if err != nil {
+		return nil, err
+	}
+	defer cryptCtx.Close()
+
+	res, err := c.executeStateMachine(ctx, cryptCtx, "")
+	if err != nil {
+		return nil, err
+	}
+
+	encryptedExpr := res.Lookup("v").Document()
+	return encryptedExpr, nil
+}
+
+// DecryptExplicit decrypts the given encrypted value.
+func (c *crypt) DecryptExplicit(ctx context.Context, subtype byte, data []byte) (bsoncore.Value, error) {
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendBinaryElement(doc, "v", subtype, data)
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+
+	cryptCtx, err := c.mongoCrypt.CreateExplicitDecryptionContext(doc)
+	if err != nil {
+		return bsoncore.Value{}, err
+	}
+	defer cryptCtx.Close()
+
+	res, err := c.executeStateMachine(ctx, cryptCtx, "")
+	if err != nil {
+		return bsoncore.Value{}, err
+	}
+
+	return res.Lookup("v"), nil
+}
+
+// Close cleans up any resources associated with the Crypt instance.
+func (c *crypt) Close() {
+	c.mongoCrypt.Close()
+}
+
+func (c *crypt) BypassAutoEncryption() bool {
+	return c.bypassAutoEncryption
+}
+
+func (c *crypt) executeStateMachine(ctx context.Context, cryptCtx *mongocrypt.Context, db string) (bsoncore.Document, error) {
+	var err error
+	for {
+		state := cryptCtx.State()
+		switch state {
+		case mongocrypt.NeedMongoCollInfo:
+			err = c.collectionInfo(ctx, cryptCtx, db)
+		case mongocrypt.NeedMongoMarkings:
+			err = c.markCommand(ctx, cryptCtx, db)
+		case mongocrypt.NeedMongoKeys:
+			err = c.retrieveKeys(ctx, cryptCtx)
+		case mongocrypt.NeedKms:
+			err = c.decryptKeys(cryptCtx)
+		case mongocrypt.Ready:
+			return cryptCtx.Finish()
+		case mongocrypt.Done:
+			return nil, nil
+		case mongocrypt.NeedKmsCredentials:
+			err = c.provideKmsProviders(ctx, cryptCtx)
+		default:
+			return nil, fmt.Errorf("invalid Crypt state: %v", state)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+}
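+
+// The loop above mirrors libmongocrypt's state machine: the context reports
+// what it needs next (collection info, a marked command, data keys, or KMS
+// responses) until it reaches Ready, where Finish produces the result
+// document, or Done, which yields nothing.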
+
+func (c *crypt) collectionInfo(ctx context.Context, cryptCtx *mongocrypt.Context, db string) error {
+	op, err := cryptCtx.NextOperation()
+	if err != nil {
+		return err
+	}
+
+	collInfo, err := c.collInfoFn(ctx, db, op)
+	if err != nil {
+		return err
+	}
+	if collInfo != nil {
+		if err = cryptCtx.AddOperationResult(collInfo); err != nil {
+			return err
+		}
+	}
+
+	return cryptCtx.CompleteOperation()
+}
+
+func (c *crypt) markCommand(ctx context.Context, cryptCtx *mongocrypt.Context, db string) error {
+	op, err := cryptCtx.NextOperation()
+	if err != nil {
+		return err
+	}
+
+	markedCmd, err := c.markFn(ctx, db, op)
+	if err != nil {
+		return err
+	}
+	if err = cryptCtx.AddOperationResult(markedCmd); err != nil {
+		return err
+	}
+
+	return cryptCtx.CompleteOperation()
+}
+
+func (c *crypt) retrieveKeys(ctx context.Context, cryptCtx *mongocrypt.Context) error {
+	op, err := cryptCtx.NextOperation()
+	if err != nil {
+		return err
+	}
+
+	keys, err := c.keyFn(ctx, op)
+	if err != nil {
+		return err
+	}
+
+	for _, key := range keys {
+		if err = cryptCtx.AddOperationResult(key); err != nil {
+			return err
+		}
+	}
+
+	return cryptCtx.CompleteOperation()
+}
+
+func (c *crypt) decryptKeys(cryptCtx *mongocrypt.Context) error {
+	for {
+		kmsCtx := cryptCtx.NextKmsContext()
+		if kmsCtx == nil {
+			break
+		}
+
+		if err := c.decryptKey(kmsCtx); err != nil {
+			return err
+		}
+	}
+
+	return cryptCtx.FinishKmsContexts()
+}
+
+func (c *crypt) decryptKey(kmsCtx *mongocrypt.KmsContext) error {
+	host, err := kmsCtx.HostName()
+	if err != nil {
+		return err
+	}
+	msg, err := kmsCtx.Message()
+	if err != nil {
+		return err
+	}
+
+	// add a port to the address if it's not already present
+	addr := host
+	if idx := strings.IndexByte(host, ':'); idx == -1 {
+		addr = fmt.Sprintf("%s:%d", host, defaultKmsPort)
+	}
+
+	kmsProvider := kmsCtx.KMSProvider()
+	tlsCfg := c.tlsConfig[kmsProvider]
+	if tlsCfg == nil {
+		tlsCfg = &tls.Config{MinVersion: tls.VersionTLS12}
+	}
+	conn, err := tls.Dial("tcp", addr, tlsCfg)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = conn.Close()
+	}()
+
+	if err = conn.SetWriteDeadline(time.Now().Add(defaultKmsTimeout)); err != nil {
+		return err
+	}
+	if _, err = conn.Write(msg); err != nil {
+		return err
+	}
+
+	for {
+		bytesNeeded := kmsCtx.BytesNeeded()
+		if bytesNeeded == 0 {
+			return nil
+		}
+
+		res := make([]byte, bytesNeeded)
+		bytesRead, err := conn.Read(res)
+		if err != nil && !errors.Is(err, io.EOF) {
+			return err
+		}
+
+		if err = kmsCtx.FeedResponse(res[:bytesRead]); err != nil {
+			return err
+		}
+	}
+}
+
+func (c *crypt) provideKmsProviders(ctx context.Context, cryptCtx *mongocrypt.Context) error {
+	kmsProviders, err := c.mongoCrypt.GetKmsProviders(ctx)
+	if err != nil {
+		return err
+	}
+	return cryptCtx.ProvideKmsProviders(kmsProviders)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go
new file mode 100644
index 0000000000000000000000000000000000000000..9334d493ed9c138835afb8d30bdb12db7b85ce4f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go
@@ -0,0 +1,153 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package dns is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package dns
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"runtime"
+	"strings"
+)
+
+// Resolver resolves DNS records.
+type Resolver struct {
+	// Holds the functions to use for DNS lookups
+	LookupSRV func(string, string, string) (string, []*net.SRV, error)
+	LookupTXT func(string) ([]string, error)
+}
+
+// DefaultResolver is a Resolver that uses the default Resolver from the net package.
+var DefaultResolver = &Resolver{net.LookupSRV, net.LookupTXT}
+
+// ParseHosts uses the srv string and service name to get the hosts.
+func (r *Resolver) ParseHosts(host string, srvName string, stopOnErr bool) ([]string, error) {
+	parsedHosts := strings.Split(host, ",")
+
+	if len(parsedHosts) != 1 {
+		return nil, fmt.Errorf("URI with SRV must include one and only one hostname")
+	}
+	return r.fetchSeedlistFromSRV(parsedHosts[0], srvName, stopOnErr)
+}
+
+// GetConnectionArgsFromTXT gets the TXT record associated with the host and returns the connection arguments.
+func (r *Resolver) GetConnectionArgsFromTXT(host string) ([]string, error) {
+	var connectionArgsFromTXT []string
+
+	// error ignored because not finding a TXT record should not be
+	// considered an error.
+	recordsFromTXT, _ := r.LookupTXT(host)
+
+	// This is a temporary fix to get around bug https://github.com/golang/go/issues/21472.
+	// It will currently incorrectly concatenate multiple TXT records to one
+	// on Windows.
+	if runtime.GOOS == "windows" {
+		recordsFromTXT = []string{strings.Join(recordsFromTXT, "")}
+	}
+
+	if len(recordsFromTXT) > 1 {
+		return nil, errors.New("multiple records from TXT not supported")
+	}
+	if len(recordsFromTXT) > 0 {
+		connectionArgsFromTXT = strings.FieldsFunc(recordsFromTXT[0], func(r rune) bool { return r == ';' || r == '&' })
+
+		err := validateTXTResult(connectionArgsFromTXT)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return connectionArgsFromTXT, nil
+}
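+
+// A sketch with a hypothetical TXT record: a record containing
+// "authSource=admin&replicaSet=rs0" is split into
+// []string{"authSource=admin", "replicaSet=rs0"} before being checked by
+// validateTXTResult below.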
+
+func (r *Resolver) fetchSeedlistFromSRV(host string, srvName string, stopOnErr bool) ([]string, error) {
+	var err error
+
+	_, _, err = net.SplitHostPort(host)
+
+	if err == nil {
+		// we were able to successfully extract a port from the host,
+		// but should not be able to when using SRV
+		return nil, fmt.Errorf("URI with srv must not include a port number")
+	}
+
+	// default to "mongodb" as service name if not supplied
+	if srvName == "" {
+		srvName = "mongodb"
+	}
+	_, addresses, err := r.LookupSRV(srvName, "tcp", host)
+	if err != nil && strings.Contains(err.Error(), "cannot unmarshal DNS message") {
+		return nil, fmt.Errorf("see https://pkg.go.dev/go.mongodb.org/mongo-driver/mongo#hdr-Potential_DNS_Issues: %w", err)
+	} else if err != nil {
+		return nil, err
+	}
+
+	trimmedHost := strings.TrimSuffix(host, ".")
+
+	parsedHosts := make([]string, 0, len(addresses))
+	for _, address := range addresses {
+		trimmedAddressTarget := strings.TrimSuffix(address.Target, ".")
+		err := validateSRVResult(trimmedAddressTarget, trimmedHost)
+		if err != nil {
+			if stopOnErr {
+				return nil, err
+			}
+			continue
+		}
+		parsedHosts = append(parsedHosts, fmt.Sprintf("%s:%d", trimmedAddressTarget, address.Port))
+	}
+	return parsedHosts, nil
+}
+
+func validateSRVResult(recordFromSRV, inputHostName string) error {
+	separatedInputDomain := strings.Split(strings.ToLower(inputHostName), ".")
+	separatedRecord := strings.Split(strings.ToLower(recordFromSRV), ".")
+	if len(separatedRecord) < 2 {
+		return errors.New("DNS name must contain at least 2 labels")
+	}
+	if len(separatedRecord) < len(separatedInputDomain) {
+		return errors.New("Domain suffix from SRV record not matched input domain")
+	}
+
+	inputDomainSuffix := separatedInputDomain[1:]
+	domainSuffixOffset := len(separatedRecord) - (len(separatedInputDomain) - 1)
+
+	recordDomainSuffix := separatedRecord[domainSuffixOffset:]
+	for ix, label := range inputDomainSuffix {
+		if label != recordDomainSuffix[ix] {
+			return errors.New("Domain suffix from SRV record not matched input domain")
+		}
+	}
+	return nil
+}
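+
+// For example (hypothetical names): with input host "cluster0.example.com",
+// an SRV target of "shard0.cluster0.example.com" is accepted (it shares the
+// "example.com" suffix), while "shard0.evil.net" is rejected, so an SRV
+// response cannot redirect the client to an unrelated domain.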
+
+var allowedTXTOptions = map[string]struct{}{
+	"authsource":   {},
+	"replicaset":   {},
+	"loadbalanced": {},
+}
+
+func validateTXTResult(paramsFromTXT []string) error {
+	for _, param := range paramsFromTXT {
+		kv := strings.SplitN(param, "=", 2)
+		if len(kv) != 2 {
+			return errors.New("Invalid TXT record")
+		}
+		key := strings.ToLower(kv[0])
+		if _, ok := allowedTXTOptions[key]; !ok {
+			return fmt.Errorf("Cannot specify option '%s' in TXT record", kv[0])
+		}
+	}
+	return nil
+}
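+
+// A minimal sketch (hypothetical inputs):
+//
+//	err := validateTXTResult([]string{"authSource=admin", "replicaSet=rs0"})
+//	// err == nil
+//	err = validateTXTResult([]string{"ssl=true"})
+//	// err != nil: only authSource, replicaSet, and loadBalanced are allowed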
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go
new file mode 100644
index 0000000000000000000000000000000000000000..363f4d6be3e867b386a83efa79b0ab852dca8d8f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go
@@ -0,0 +1,335 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package driver is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package driver // import "go.mongodb.org/mongo-driver/x/mongo/driver"
+
+import (
+	"context"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/csot"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// AuthConfig holds the information necessary to perform an authentication attempt.
+// It was moved from the auth package to avoid a circular dependency; the auth
+// package re-exports it under the old name to avoid breaking the public API.
+type AuthConfig struct {
+	Description   description.Server
+	Connection    Connection
+	ClusterClock  *session.ClusterClock
+	HandshakeInfo HandshakeInformation
+	ServerAPI     *ServerAPIOptions
+}
+
+// OIDCCallback is the type for both Human and Machine Callback flows. RefreshToken will always be
+// nil in the OIDCArgs for the Machine flow.
+type OIDCCallback func(context.Context, *OIDCArgs) (*OIDCCredential, error)
+
+// OIDCArgs contains the arguments for the OIDC callback.
+type OIDCArgs struct {
+	Version      int
+	IDPInfo      *IDPInfo
+	RefreshToken *string
+}
+
+// OIDCCredential contains the access token and refresh token.
+type OIDCCredential struct {
+	AccessToken  string
+	ExpiresAt    *time.Time
+	RefreshToken *string
+}
+
+// IDPInfo contains the information needed to perform OIDC authentication with an Identity Provider.
+type IDPInfo struct {
+	Issuer        string   `bson:"issuer"`
+	ClientID      string   `bson:"clientId"`
+	RequestScopes []string `bson:"requestScopes"`
+}
+
+// Authenticator handles authenticating a connection. The implementers of this interface
+// are all in the auth package. Most authentication mechanisms do not allow for Reauth,
+// but this is included in the interface so that whenever a new mechanism is added, it
+// must be explicitly considered.
+type Authenticator interface {
+	// Auth authenticates the connection.
+	Auth(context.Context, *AuthConfig) error
+	Reauth(context.Context, *AuthConfig) error
+}
+
+// Cred is a user's credential.
+type Cred struct {
+	Source              string
+	Username            string
+	Password            string
+	PasswordSet         bool
+	Props               map[string]string
+	OIDCMachineCallback OIDCCallback
+	OIDCHumanCallback   OIDCCallback
+}
+
+// Deployment is implemented by types that can select a server from a deployment.
+type Deployment interface {
+	SelectServer(context.Context, description.ServerSelector) (Server, error)
+	Kind() description.TopologyKind
+}
+
+// Connector represents a type that can connect to a server.
+type Connector interface {
+	Connect() error
+}
+
+// Disconnector represents a type that can disconnect from a server.
+type Disconnector interface {
+	Disconnect(context.Context) error
+}
+
+// Subscription represents a subscription to topology updates. A subscriber can receive updates through the
+// Updates field.
+type Subscription struct {
+	Updates <-chan description.Topology
+	ID      uint64
+}
+
+// Subscriber represents a type to which another type can subscribe. A subscription contains a channel that
+// is updated with topology descriptions.
+type Subscriber interface {
+	Subscribe() (*Subscription, error)
+	Unsubscribe(*Subscription) error
+}
+
+// Server represents a MongoDB server. Implementations should pool connections and handle the
+// retrieving and returning of connections.
+type Server interface {
+	Connection(context.Context) (Connection, error)
+
+	// RTTMonitor returns the round-trip time monitor associated with this server.
+	RTTMonitor() RTTMonitor
+}
+
+// Connection represents a connection to a MongoDB server.
+type Connection interface {
+	WriteWireMessage(context.Context, []byte) error
+	ReadWireMessage(ctx context.Context) ([]byte, error)
+	Description() description.Server
+
+	// Close closes any underlying connection and returns or frees any resources held by the
+	// connection. Close is idempotent and can be called multiple times, although subsequent calls
+	// to Close may return an error. A connection cannot be used after it is closed.
+	Close() error
+
+	ID() string
+	ServerConnectionID() *int64
+	DriverConnectionID() uint64 // TODO(GODRIVER-2824): change type to int64.
+	Address() address.Address
+	Stale() bool
+	OIDCTokenGenID() uint64
+	SetOIDCTokenGenID(uint64)
+}
+
+// RTTMonitor represents a round-trip-time monitor.
+type RTTMonitor interface {
+	// EWMA returns the exponentially weighted moving average observed round-trip time.
+	EWMA() time.Duration
+
+	// Min returns the minimum observed round-trip time over the window period.
+	Min() time.Duration
+
+	// P90 returns the 90th percentile observed round-trip time over the window period.
+	P90() time.Duration
+
+	// Stats returns stringified stats of the current state of the monitor.
+	Stats() string
+}
+
+var _ RTTMonitor = &csot.ZeroRTTMonitor{}
+
+// PinnedConnection represents a Connection that can be pinned by one or more cursors or transactions. Implementations
+// of this interface should maintain the following invariants:
+//
+// 1. Each Pin* call should increment the number of references for the connection.
+// 2. Each Unpin* call should decrement the number of references for the connection.
+// 3. Calls to Close() should be ignored until all resources have unpinned the connection.
+type PinnedConnection interface {
+	Connection
+	PinToCursor() error
+	PinToTransaction() error
+	UnpinFromCursor() error
+	UnpinFromTransaction() error
+}
+
+// The session.LoadBalancedTransactionConnection type is a copy of PinnedConnection that was introduced to avoid
+// import cycles. This compile-time assertion ensures that these types remain in sync if the PinnedConnection interface
+// is changed in the future.
+var _ PinnedConnection = (session.LoadBalancedTransactionConnection)(nil)
+
+// LocalAddresser is a type that is able to supply its local address
+type LocalAddresser interface {
+	LocalAddress() address.Address
+}
+
+// Expirable represents an expirable object.
+type Expirable interface {
+	Expire() error
+	Alive() bool
+}
+
+// StreamerConnection represents a Connection that supports streaming wire protocol messages using the moreToCome and
+// exhaustAllowed flags.
+//
+// The SetStreaming and CurrentlyStreaming functions correspond to the moreToCome flag on server responses. If a
+// response has moreToCome set, SetStreaming(true) will be called and CurrentlyStreaming() should return true.
+//
+// CanStream corresponds to the exhaustAllowed flag. The operations layer will set exhaustAllowed on outgoing wire
+// messages to inform the server that the driver supports streaming.
+type StreamerConnection interface {
+	Connection
+	SetStreaming(bool)
+	CurrentlyStreaming() bool
+	SupportsStreaming() bool
+}
+
+// Compressor is an interface used to compress wire messages. If a Connection supports compression
+// it should implement this interface as well. The CompressWireMessage method will be called during
+// the execution of an operation if the wire message is allowed to be compressed.
+type Compressor interface {
+	CompressWireMessage(src, dst []byte) ([]byte, error)
+}
+
+// ProcessErrorResult represents the result of a ErrorProcessor.ProcessError() call. Exact values for this type can be
+// checked directly (e.g. res == ServerMarkedUnknown), but it is recommended that applications use the ServerChanged()
+// function instead.
+type ProcessErrorResult int
+
+const (
+	// NoChange indicates that the error did not affect the state of the server.
+	NoChange ProcessErrorResult = iota
+	// ServerMarkedUnknown indicates that the error only resulted in the server being marked as Unknown.
+	ServerMarkedUnknown
+	// ConnectionPoolCleared indicates that the error resulted in the server being marked as Unknown and its connection
+	// pool being cleared.
+	ConnectionPoolCleared
+)
+
+// ErrorProcessor implementations can handle processing errors, which may modify their internal state.
+// If this type is implemented by a Server, then Operation.Execute will call its ProcessError
+// method after it decodes a wire message.
+type ErrorProcessor interface {
+	ProcessError(err error, conn Connection) ProcessErrorResult
+}
+
+// HandshakeInformation contains information extracted from a MongoDB connection handshake. This is a helper type that
+// augments description.Server by also tracking server connection ID and authentication-related fields. We use this type
+// rather than adding authentication-related fields to description.Server to avoid retaining sensitive information in a
+// user-facing type. The server connection ID is stored in this type because unlike description.Server, all handshakes are
+// correlated with a single network connection.
+type HandshakeInformation struct {
+	Description             description.Server
+	SpeculativeAuthenticate bsoncore.Document
+	ServerConnectionID      *int64
+	SaslSupportedMechs      []string
+}
+
+// Handshaker is the interface implemented by types that can perform a MongoDB
+// handshake over a provided driver.Connection. This is used during connection
+// initialization. Implementations must be goroutine safe.
+type Handshaker interface {
+	GetHandshakeInformation(context.Context, address.Address, Connection) (HandshakeInformation, error)
+	FinishHandshake(context.Context, Connection) error
+}
+
+// SingleServerDeployment is an implementation of Deployment that always returns a single server.
+type SingleServerDeployment struct{ Server }
+
+var _ Deployment = SingleServerDeployment{}
+
+// SelectServer implements the Deployment interface. This method does not use the
+// description.SelectedServer provided and instead returns the embedded Server.
+func (ssd SingleServerDeployment) SelectServer(context.Context, description.ServerSelector) (Server, error) {
+	return ssd.Server, nil
+}
+
+// Kind implements the Deployment interface. It always returns description.Single.
+func (SingleServerDeployment) Kind() description.TopologyKind { return description.Single }
+
+// SingleConnectionDeployment is an implementation of Deployment that always returns the same Connection. This
+// implementation should only be used for connection handshakes and server heartbeats as it does not implement
+// ErrorProcessor, which is necessary for application operations.
+type SingleConnectionDeployment struct{ C Connection }
+
+var _ Deployment = SingleConnectionDeployment{}
+var _ Server = SingleConnectionDeployment{}
+
+// SelectServer implements the Deployment interface. This method does not use the
+// description.SelectedServer provided and instead returns itself. The Connections returned from the
+// Connection method have a no-op Close method.
+func (scd SingleConnectionDeployment) SelectServer(context.Context, description.ServerSelector) (Server, error) {
+	return scd, nil
+}
+
+// Kind implements the Deployment interface. It always returns description.Single.
+func (SingleConnectionDeployment) Kind() description.TopologyKind { return description.Single }
+
+// Connection implements the Server interface. It always returns the embedded connection.
+func (scd SingleConnectionDeployment) Connection(context.Context) (Connection, error) {
+	return scd.C, nil
+}
+
+// RTTMonitor implements the driver.Server interface.
+func (scd SingleConnectionDeployment) RTTMonitor() RTTMonitor {
+	return &csot.ZeroRTTMonitor{}
+}
+
+// TODO(GODRIVER-617): We can likely use 1 type for both the Type and the RetryMode by using 2 bits for the mode and 1
+// TODO bit for the type. Although in the practical sense, we might not want to do that, since the type of retryability
+// TODO is tied to the operation itself and isn't going to change; e.g. an insert operation will always be a write.
+// TODO However, some operations are both reads and writes; for instance, aggregate is a read, but with a $out stage
+// TODO it's a write.
+
+// Type specifies whether an operation is a read, write, or unknown.
+type Type uint
+
+// These are the available values of Type.
+const (
+	_ Type = iota
+	Write
+	Read
+)
+
+// RetryMode specifies the way that retries are handled for retryable operations.
+type RetryMode uint
+
+// These are the modes available for retrying. Note that if Timeout is specified on the Client, the
+// operation will automatically retry as many times as possible within the context's deadline
+// unless RetryNone is used.
+const (
+	// RetryNone disables retrying.
+	RetryNone RetryMode = iota
+	// RetryOnce will enable retrying the entire operation once if Timeout is not specified.
+	RetryOnce
+	// RetryOncePerCommand will enable retrying each command associated with an operation if Timeout
+	// is not specified. For example, if an insert is batch split into 4 commands then each of
+	// those commands is eligible for one retry.
+	RetryOncePerCommand
+	// RetryContext will enable retrying until the context.Context's deadline is exceeded or it is
+	// cancelled.
+	RetryContext
+)
+
+// Enabled returns if this RetryMode enables retrying.
+func (rm RetryMode) Enabled() bool {
+	return rm == RetryOnce || rm == RetryOncePerCommand || rm == RetryContext
+}
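+
+// As an illustration of the modes above: with RetryOncePerCommand, an insert
+// that is batch-split into four commands may retry each of the four commands
+// once, whereas RetryOnce allows a single retry for the operation as a whole
+// and RetryNone disables retrying entirely.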
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..59c3992e9aa0f17566616992575ce35f0477d00a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go
@@ -0,0 +1,552 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/internal/csot"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// LegacyNotPrimaryErrMsg is the error message that older MongoDB servers (see
+// SERVER-50412 for versions) return when a write operation is erroneously sent
+// to a non-primary node.
+const LegacyNotPrimaryErrMsg = "not master"
+
+var (
+	retryableCodes          = []int32{11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 262}
+	nodeIsRecoveringCodes   = []int32{11600, 11602, 13436, 189, 91}
+	notPrimaryCodes         = []int32{10107, 13435, 10058}
+	nodeIsShuttingDownCodes = []int32{11600, 91}
+
+	unknownReplWriteConcernCode   = int32(79)
+	unsatisfiableWriteConcernCode = int32(100)
+)
+
+var (
+	// UnknownTransactionCommitResult is an error label for unknown transaction commit results.
+	UnknownTransactionCommitResult = "UnknownTransactionCommitResult"
+	// TransientTransactionError is an error label for transient errors with transactions.
+	TransientTransactionError = "TransientTransactionError"
+	// NetworkError is an error label for network errors.
+	NetworkError = "NetworkError"
+	// RetryableWriteError is an error label for retryable write errors.
+	RetryableWriteError = "RetryableWriteError"
+	// NoWritesPerformed is an error label indicating that no writes were performed for an operation.
+	NoWritesPerformed = "NoWritesPerformed"
+	// ErrCursorNotFound is the cursor not found error for legacy find operations.
+	ErrCursorNotFound = errors.New("cursor not found")
+	// ErrUnacknowledgedWrite is returned from functions that have an unacknowledged
+	// write concern.
+	ErrUnacknowledgedWrite = errors.New("unacknowledged write")
+	// ErrUnsupportedStorageEngine is returned when a retryable write is attempted against a server
+	// that uses a storage engine that does not support retryable writes
+	ErrUnsupportedStorageEngine = errors.New("this MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string")
+	// ErrDeadlineWouldBeExceeded is returned when a Timeout set on an operation
+	// would be exceeded if the operation were sent to the server. It wraps
+	// context.DeadlineExceeded.
+	ErrDeadlineWouldBeExceeded = fmt.Errorf(
+		"operation not sent to server, as Timeout would be exceeded: %w",
+		context.DeadlineExceeded)
+	// ErrNegativeMaxTime is returned when MaxTime on an operation is a negative value.
+	ErrNegativeMaxTime = errors.New("a negative value was provided for MaxTime on an operation")
+)
+
+// QueryFailureError is an error representing a command failure as a document.
+type QueryFailureError struct {
+	Message  string
+	Response bsoncore.Document
+	Wrapped  error
+}
+
+// Error implements the error interface.
+func (e QueryFailureError) Error() string {
+	return fmt.Sprintf("%s: %v", e.Message, e.Response)
+}
+
+// Unwrap returns the underlying error.
+func (e QueryFailureError) Unwrap() error {
+	return e.Wrapped
+}
+
+// ResponseError is an error parsing the response to a command.
+type ResponseError struct {
+	Message string
+	Wrapped error
+}
+
+// NewCommandResponseError creates a CommandResponseError.
+func NewCommandResponseError(msg string, err error) ResponseError {
+	return ResponseError{Message: msg, Wrapped: err}
+}
+
+// Error implements the error interface.
+func (e ResponseError) Error() string {
+	if e.Wrapped != nil {
+		return fmt.Sprintf("%s: %s", e.Message, e.Wrapped)
+	}
+	return e.Message
+}
+
+// WriteCommandError is an error for a write command.
+type WriteCommandError struct {
+	WriteConcernError *WriteConcernError
+	WriteErrors       WriteErrors
+	Labels            []string
+	Raw               bsoncore.Document
+}
+
+// UnsupportedStorageEngine returns whether or not the WriteCommandError comes from a retryable write being attempted
+// against a server whose storage engine does not support retryable writes.
+func (wce WriteCommandError) UnsupportedStorageEngine() bool {
+	for _, writeError := range wce.WriteErrors {
+		if writeError.Code == 20 && strings.HasPrefix(strings.ToLower(writeError.Message), "transaction numbers") {
+			return true
+		}
+	}
+	return false
+}
+
+func (wce WriteCommandError) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "write command error: [")
+	fmt.Fprintf(&buf, "{%s}, ", wce.WriteErrors)
+	fmt.Fprintf(&buf, "{%s}]", wce.WriteConcernError)
+	return buf.String()
+}
+
+// Retryable returns true if the error is retryable
+func (wce WriteCommandError) Retryable(wireVersion *description.VersionRange) bool {
+	for _, label := range wce.Labels {
+		if label == RetryableWriteError {
+			return true
+		}
+	}
+	if wireVersion != nil && wireVersion.Max >= 9 {
+		return false
+	}
+
+	if wce.WriteConcernError == nil {
+		return false
+	}
+	return wce.WriteConcernError.Retryable()
+}
+
+// HasErrorLabel returns true if the error contains the specified label.
+func (wce WriteCommandError) HasErrorLabel(label string) bool {
+	if wce.Labels != nil {
+		for _, l := range wce.Labels {
+			if l == label {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// WriteConcernError is a write concern failure that occurred as a result of a
+// write operation.
+type WriteConcernError struct {
+	Name            string
+	Code            int64
+	Message         string
+	Details         bsoncore.Document
+	Labels          []string
+	TopologyVersion *description.TopologyVersion
+	Raw             bsoncore.Document
+}
+
+func (wce WriteConcernError) Error() string {
+	if wce.Name != "" {
+		return fmt.Sprintf("(%v) %v", wce.Name, wce.Message)
+	}
+	return wce.Message
+}
+
+// Retryable returns true if the error is retryable
+func (wce WriteConcernError) Retryable() bool {
+	for _, code := range retryableCodes {
+		if wce.Code == int64(code) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// NodeIsRecovering returns true if this error is a node is recovering error.
+func (wce WriteConcernError) NodeIsRecovering() bool {
+	for _, code := range nodeIsRecoveringCodes {
+		if wce.Code == int64(code) {
+			return true
+		}
+	}
+	hasNoCode := wce.Code == 0
+	return hasNoCode && strings.Contains(wce.Message, "node is recovering")
+}
+
+// NodeIsShuttingDown returns true if this error is a node is shutting down error.
+func (wce WriteConcernError) NodeIsShuttingDown() bool {
+	for _, code := range nodeIsShuttingDownCodes {
+		if wce.Code == int64(code) {
+			return true
+		}
+	}
+	hasNoCode := wce.Code == 0
+	return hasNoCode && strings.Contains(wce.Message, "node is shutting down")
+}
+
+// NotPrimary returns true if this error is a not primary error.
+func (wce WriteConcernError) NotPrimary() bool {
+	for _, code := range notPrimaryCodes {
+		if wce.Code == int64(code) {
+			return true
+		}
+	}
+	hasNoCode := wce.Code == 0
+	return hasNoCode && strings.Contains(wce.Message, LegacyNotPrimaryErrMsg)
+}
+
+// WriteError is a non-write concern failure that occurred as a result of a write
+// operation.
+type WriteError struct {
+	Index   int64
+	Code    int64
+	Message string
+	Details bsoncore.Document
+	Raw     bsoncore.Document
+}
+
+func (we WriteError) Error() string { return we.Message }
+
+// WriteErrors is a group of non-write concern failures that occurred as a result
+// of a write operation.
+type WriteErrors []WriteError
+
+func (we WriteErrors) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "write errors: [")
+	for idx, err := range we {
+		if idx != 0 {
+			fmt.Fprintf(&buf, ", ")
+		}
+		fmt.Fprintf(&buf, "{%s}", err)
+	}
+	fmt.Fprint(&buf, "]")
+	return buf.String()
+}
+
+// Error is a command execution error from the database.
+type Error struct {
+	Code            int32
+	Message         string
+	Labels          []string
+	Name            string
+	Wrapped         error
+	TopologyVersion *description.TopologyVersion
+	Raw             bsoncore.Document
+}
+
+// UnsupportedStorageEngine returns whether e came as a result of an unsupported storage engine
+func (e Error) UnsupportedStorageEngine() bool {
+	return e.Code == 20 && strings.HasPrefix(strings.ToLower(e.Message), "transaction numbers")
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	var msg string
+	if e.Name != "" {
+		msg = fmt.Sprintf("(%v)", e.Name)
+	}
+	msg += " " + e.Message
+	if e.Wrapped != nil {
+		msg += ": " + e.Wrapped.Error()
+	}
+	return msg
+}
+
+// Unwrap returns the underlying error.
+func (e Error) Unwrap() error {
+	return e.Wrapped
+}
+
+// HasErrorLabel returns true if the error contains the specified label.
+func (e Error) HasErrorLabel(label string) bool {
+	if e.Labels != nil {
+		for _, l := range e.Labels {
+			if l == label {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// RetryableRead returns true if the error is retryable for a read operation
+func (e Error) RetryableRead() bool {
+	for _, label := range e.Labels {
+		if label == NetworkError {
+			return true
+		}
+	}
+	for _, code := range retryableCodes {
+		if e.Code == code {
+			return true
+		}
+	}
+
+	return false
+}
+
+// RetryableWrite returns true if the error is retryable for a write operation
+func (e Error) RetryableWrite(wireVersion *description.VersionRange) bool {
+	for _, label := range e.Labels {
+		if label == NetworkError || label == RetryableWriteError {
+			return true
+		}
+	}
+	if wireVersion != nil && wireVersion.Max >= 9 {
+		return false
+	}
+	for _, code := range retryableCodes {
+		if e.Code == code {
+			return true
+		}
+	}
+
+	return false
+}
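+
+// A minimal sketch (hypothetical error value): the RetryableWriteError label
+// alone is sufficient, regardless of the code or wire version.
+//
+//	e := Error{Labels: []string{RetryableWriteError}}
+//	e.HasErrorLabel(RetryableWriteError) // true
+//	e.RetryableWrite(nil)                // true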
+
+// NetworkError returns true if the error is a network error.
+func (e Error) NetworkError() bool {
+	for _, label := range e.Labels {
+		if label == NetworkError {
+			return true
+		}
+	}
+	return false
+}
+
+// NodeIsRecovering returns true if this error is a node is recovering error.
+func (e Error) NodeIsRecovering() bool {
+	for _, code := range nodeIsRecoveringCodes {
+		if e.Code == code {
+			return true
+		}
+	}
+	hasNoCode := e.Code == 0
+	return hasNoCode && strings.Contains(e.Message, "node is recovering")
+}
+
+// NodeIsShuttingDown returns true if this error is a node is shutting down error.
+func (e Error) NodeIsShuttingDown() bool {
+	for _, code := range nodeIsShuttingDownCodes {
+		if e.Code == code {
+			return true
+		}
+	}
+	hasNoCode := e.Code == 0
+	return hasNoCode && strings.Contains(e.Message, "node is shutting down")
+}
+
+// NotPrimary returns true if this error is a not primary error.
+func (e Error) NotPrimary() bool {
+	for _, code := range notPrimaryCodes {
+		if e.Code == code {
+			return true
+		}
+	}
+	hasNoCode := e.Code == 0
+	return hasNoCode && strings.Contains(e.Message, LegacyNotPrimaryErrMsg)
+}
+
+// NamespaceNotFound returns true if this error is a NamespaceNotFound error.
+func (e Error) NamespaceNotFound() bool {
+	return e.Code == 26 || e.Message == "ns not found"
+}
+
+// ExtractErrorFromServerResponse extracts an error from a server response bsoncore.Document
+// if there is one. Also used in testing for SDAM.
+func ExtractErrorFromServerResponse(ctx context.Context, doc bsoncore.Document) error {
+	var errmsg, codeName string
+	var code int32
+	var labels []string
+	var ok bool
+	var tv *description.TopologyVersion
+	var wcError WriteCommandError
+	elems, err := doc.Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "ok":
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				if elem.Value().Int32() == 1 {
+					ok = true
+				}
+			case bson.TypeInt64:
+				if elem.Value().Int64() == 1 {
+					ok = true
+				}
+			case bson.TypeDouble:
+				if elem.Value().Double() == 1 {
+					ok = true
+				}
+			case bson.TypeBoolean:
+				if elem.Value().Boolean() {
+					ok = true
+				}
+			}
+		case "errmsg":
+			if str, okay := elem.Value().StringValueOK(); okay {
+				errmsg = str
+			}
+		case "codeName":
+			if str, okay := elem.Value().StringValueOK(); okay {
+				codeName = str
+			}
+		case "code":
+			if c, okay := elem.Value().Int32OK(); okay {
+				code = c
+			}
+		case "errorLabels":
+			if arr, okay := elem.Value().ArrayOK(); okay {
+				vals, err := arr.Values()
+				if err != nil {
+					continue
+				}
+				for _, val := range vals {
+					if str, ok := val.StringValueOK(); ok {
+						labels = append(labels, str)
+					}
+				}
+
+			}
+		case "writeErrors":
+			arr, exists := elem.Value().ArrayOK()
+			if !exists {
+				break
+			}
+			vals, err := arr.Values()
+			if err != nil {
+				continue
+			}
+			for _, val := range vals {
+				var we WriteError
+				doc, exists := val.DocumentOK()
+				if !exists {
+					continue
+				}
+				if index, exists := doc.Lookup("index").AsInt64OK(); exists {
+					we.Index = index
+				}
+				if code, exists := doc.Lookup("code").AsInt64OK(); exists {
+					we.Code = code
+				}
+				if msg, exists := doc.Lookup("errmsg").StringValueOK(); exists {
+					we.Message = msg
+				}
+				if info, exists := doc.Lookup("errInfo").DocumentOK(); exists {
+					we.Details = make([]byte, len(info))
+					copy(we.Details, info)
+				}
+				we.Raw = doc
+				wcError.WriteErrors = append(wcError.WriteErrors, we)
+			}
+		case "writeConcernError":
+			doc, exists := elem.Value().DocumentOK()
+			if !exists {
+				break
+			}
+			wcError.WriteConcernError = new(WriteConcernError)
+			wcError.WriteConcernError.Raw = doc
+			if code, exists := doc.Lookup("code").AsInt64OK(); exists {
+				wcError.WriteConcernError.Code = code
+			}
+			if name, exists := doc.Lookup("codeName").StringValueOK(); exists {
+				wcError.WriteConcernError.Name = name
+			}
+			if msg, exists := doc.Lookup("errmsg").StringValueOK(); exists {
+				wcError.WriteConcernError.Message = msg
+			}
+			if info, exists := doc.Lookup("errInfo").DocumentOK(); exists {
+				wcError.WriteConcernError.Details = make([]byte, len(info))
+				copy(wcError.WriteConcernError.Details, info)
+			}
+			if errLabels, exists := doc.Lookup("errorLabels").ArrayOK(); exists {
+				vals, err := errLabels.Values()
+				if err != nil {
+					continue
+				}
+				for _, val := range vals {
+					if str, ok := val.StringValueOK(); ok {
+						labels = append(labels, str)
+					}
+				}
+			}
+		case "topologyVersion":
+			doc, ok := elem.Value().DocumentOK()
+			if !ok {
+				break
+			}
+			version, err := description.NewTopologyVersion(bson.Raw(doc))
+			if err == nil {
+				tv = version
+			}
+		}
+	}
+
+	if !ok {
+		if errmsg == "" {
+			errmsg = "command failed"
+		}
+
+		err := Error{
+			Code:            code,
+			Message:         errmsg,
+			Name:            codeName,
+			Labels:          labels,
+			TopologyVersion: tv,
+			Raw:             doc,
+		}
+
+		// If CSOT is enabled and we get a MaxTimeMSExpired error, assume that
+		// the error was caused by setting "maxTimeMS" on the command based on
+		// the context deadline or on "timeoutMS". In that case, make the error
+		// wrap context.DeadlineExceeded so that users can always check
+		//
+		//  errors.Is(err, context.DeadlineExceeded)
+		//
+		// for either client-side or server-side timeouts.
+		if csot.IsTimeoutContext(ctx) && err.Code == 50 {
+			err.Wrapped = context.DeadlineExceeded
+		}
+
+		return err
+	}
+
+	if len(wcError.WriteErrors) > 0 || wcError.WriteConcernError != nil {
+		wcError.Labels = labels
+		if wcError.WriteConcernError != nil {
+			wcError.WriteConcernError.TopologyVersion = tv
+		}
+		wcError.Raw = doc
+		return wcError
+	}
+
+	return nil
+}
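+
+// A hedged sketch of feeding a failed server response; the document below is
+// built purely for illustration:
+//
+//	doc := bsoncore.NewDocumentBuilder().
+//		AppendInt32("ok", 0).
+//		AppendString("errmsg", "example failure").
+//		AppendInt32("code", 11600).
+//		Build()
+//	err := ExtractErrorFromServerResponse(context.Background(), doc)
+//	// err is an Error with Code 11600 (InterruptedAtShutdown), which
+//	// RetryableRead reports as retryable.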
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go
new file mode 100644
index 0000000000000000000000000000000000000000..c40f1f80916a3014db44bdfaeff2511f4ec2815e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go
@@ -0,0 +1,23 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+// LegacyOperationKind indicates if an operation is a legacy find, getMore, or killCursors. This is used
+// in Operation.Execute, which will create legacy OP_QUERY, OP_GET_MORE, or OP_KILL_CURSORS instead
+// of sending them as a command.
+type LegacyOperationKind uint
+
+// These constants represent the different kinds of legacy operations.
+const (
+	LegacyNone LegacyOperationKind = iota
+	LegacyFind
+	LegacyGetMore
+	LegacyKillCursors
+	LegacyListCollections
+	LegacyListIndexes
+	LegacyHandshake
+)
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go
new file mode 100644
index 0000000000000000000000000000000000000000..4e4b51d74b692abb026a4274edd6e9ea13622a55
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build cse
+// +build cse
+
+package mongocrypt
+
+/*
+#include <stdlib.h>
+#include <mongocrypt.h>
+*/
+import "C"
+import (
+	"unsafe"
+)
+
+// binary is a wrapper type around a mongocrypt_binary_t*
+type binary struct {
+	p       *C.uint8_t
+	wrapped *C.mongocrypt_binary_t
+}
+
+// newBinary creates an empty binary instance.
+func newBinary() *binary {
+	return &binary{
+		wrapped: C.mongocrypt_binary_new(),
+	}
+}
+
+// newBinaryFromBytes creates a binary instance from a byte buffer.
+func newBinaryFromBytes(data []byte) *binary {
+	if len(data) == 0 {
+		return newBinary()
+	}
+
+	// TODO: Consider using runtime.Pinner to replace the C.CBytes after using go1.21.0.
+	addr := (*C.uint8_t)(C.CBytes(data)) // uint8_t*
+	dataLen := C.uint32_t(len(data))     // uint32_t
+	return &binary{
+		p:       addr,
+		wrapped: C.mongocrypt_binary_new_from_data(addr, dataLen),
+	}
+}
+
+// toBytes converts the given binary instance to []byte.
+func (b *binary) toBytes() []byte {
+	dataPtr := C.mongocrypt_binary_data(b.wrapped) // C.uint8_t*
+	dataLen := C.mongocrypt_binary_len(b.wrapped)  // C.uint32_t
+
+	return C.GoBytes(unsafe.Pointer(dataPtr), C.int(dataLen))
+}
+
+// close cleans up any resources associated with the given binary instance.
+func (b *binary) close() {
+	if b.p != nil {
+		C.free(unsafe.Pointer(b.p))
+	}
+	C.mongocrypt_binary_destroy(b.wrapped)
+}
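+
+// A minimal lifecycle sketch (illustrative only): a binary created from Go
+// bytes owns a C copy of the data, so close must always be called to free
+// both the C buffer and the wrapped mongocrypt_binary_t.
+//
+//	b := newBinaryFromBytes([]byte{0x05, 0x00, 0x00, 0x00, 0x00}) // empty BSON document
+//	defer b.close()
+//	raw := b.toBytes() // copies the C-owned data back into a fresh Go slice
+//	_ = raw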
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..3401e738493fa4fa999585aa4130d68d55f0fd68
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go
@@ -0,0 +1,44 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build cse
+// +build cse
+
+package mongocrypt
+
+// #include <mongocrypt.h>
+import "C"
+import (
+	"fmt"
+)
+
+// Error represents an error from an operation on a MongoCrypt instance.
+type Error struct {
+	Code    int32
+	Message string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	return fmt.Sprintf("mongocrypt error %d: %v", e.Code, e.Message)
+}
+
+// errorFromStatus builds an Error from a mongocrypt_status_t object.
+func errorFromStatus(status *C.mongocrypt_status_t) error {
+	cCode := C.mongocrypt_status_code(status) // uint32_t
+	// mongocrypt_status_message takes a uint32_t* as its second parameter to store the length of the
+	// returned string. Pass nil because C.GoString determines the length itself.
+	cMsg := C.mongocrypt_status_message(status, nil) // const char*
+	var msg string
+	if cMsg != nil {
+		msg = C.GoString(cMsg)
+	}
+
+	return Error{
+		Code:    int32(cCode),
+		Message: msg,
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go
new file mode 100644
index 0000000000000000000000000000000000000000..706a0f9e75e72428000725c90c937a6cdf86442e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go
@@ -0,0 +1,21 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build !cse
+// +build !cse
+
+package mongocrypt
+
+// Error represents an error from an operation on a MongoCrypt instance.
+type Error struct {
+	Code    int32
+	Message string
+}
+
+// Error implements the error interface.
+func (Error) Error() string {
+	panic(cseNotSupportedMsg)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5044f8f5fd5b2bd1b3afe4e00e0d2f45dd4ce7b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go
@@ -0,0 +1,524 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build cse
+// +build cse
+
+package mongocrypt
+
+// #cgo linux solaris darwin pkg-config: libmongocrypt
+// #cgo windows CFLAGS: -I"c:/libmongocrypt/include"
+// #cgo windows LDFLAGS: -lmongocrypt -Lc:/libmongocrypt/bin
+// #include <mongocrypt.h>
+// #include <stdlib.h>
+import "C"
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"unsafe"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/internal/httputil"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options"
+)
+
+type kmsProvider interface {
+	GetCredentialsDoc(context.Context) (bsoncore.Document, error)
+}
+
+type MongoCrypt struct {
+	wrapped      *C.mongocrypt_t
+	kmsProviders map[string]kmsProvider
+	httpClient   *http.Client
+}
+
+// Version returns the version string for the loaded libmongocrypt, or an empty string
+// if libmongocrypt was not loaded.
+func Version() string {
+	str := C.GoString(C.mongocrypt_version(nil))
+	return str
+}
+
+// NewMongoCrypt constructs a new MongoCrypt instance configured using the provided MongoCryptOptions.
+func NewMongoCrypt(opts *options.MongoCryptOptions) (*MongoCrypt, error) {
+	// create mongocrypt_t handle
+	wrapped := C.mongocrypt_new()
+	if wrapped == nil {
+		return nil, errors.New("could not create new mongocrypt object")
+	}
+	httpClient := opts.HTTPClient
+	if httpClient == nil {
+		httpClient = httputil.DefaultHTTPClient
+	}
+	kmsProviders := make(map[string]kmsProvider)
+	if needsKmsProvider(opts.KmsProviders, "gcp") {
+		kmsProviders["gcp"] = creds.NewGCPCredentialProvider(httpClient)
+	}
+	if needsKmsProvider(opts.KmsProviders, "aws") {
+		kmsProviders["aws"] = creds.NewAWSCredentialProvider(httpClient)
+	}
+	if needsKmsProvider(opts.KmsProviders, "azure") {
+		kmsProviders["azure"] = creds.NewAzureCredentialProvider(httpClient)
+	}
+	crypt := &MongoCrypt{
+		wrapped:      wrapped,
+		kmsProviders: kmsProviders,
+		httpClient:   httpClient,
+	}
+
+	// set options in mongocrypt
+	if err := crypt.setProviderOptions(opts.KmsProviders); err != nil {
+		return nil, err
+	}
+	if err := crypt.setLocalSchemaMap(opts.LocalSchemaMap); err != nil {
+		return nil, err
+	}
+	if err := crypt.setEncryptedFieldsMap(opts.EncryptedFieldsMap); err != nil {
+		return nil, err
+	}
+
+	if opts.BypassQueryAnalysis {
+		C.mongocrypt_setopt_bypass_query_analysis(wrapped)
+	}
+
+	// If loading the crypt_shared library isn't disabled, set the default library search path "$SYSTEM"
+	// and set a library override path if one was provided.
+	if !opts.CryptSharedLibDisabled {
+		systemStr := C.CString("$SYSTEM")
+		defer C.free(unsafe.Pointer(systemStr))
+		C.mongocrypt_setopt_append_crypt_shared_lib_search_path(crypt.wrapped, systemStr)
+
+		if opts.CryptSharedLibOverridePath != "" {
+			cryptSharedLibOverridePathStr := C.CString(opts.CryptSharedLibOverridePath)
+			defer C.free(unsafe.Pointer(cryptSharedLibOverridePathStr))
+			C.mongocrypt_setopt_set_crypt_shared_lib_path_override(crypt.wrapped, cryptSharedLibOverridePathStr)
+		}
+	}
+
+	C.mongocrypt_setopt_use_need_kms_credentials_state(crypt.wrapped)
+
+	// initialize handle
+	if !C.mongocrypt_init(crypt.wrapped) {
+		return nil, crypt.createErrorFromStatus()
+	}
+
+	return crypt, nil
+}
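+
+// A minimal construction sketch (illustrative only). An empty subdocument for
+// a provider such as "aws" signals on-demand credential fetching; see
+// needsKmsProvider below.
+//
+//	kmsDoc := bsoncore.NewDocumentBuilder().
+//		AppendDocument("aws", bsoncore.NewDocumentBuilder().Build()).
+//		Build()
+//	crypt, err := NewMongoCrypt(options.MongoCrypt().SetKmsProviders(kmsDoc))
+//	if err != nil {
+//		// handle initialization failure
+//	}
+//	defer crypt.Close()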
+
+// CreateEncryptionContext creates a Context to use for encryption.
+func (m *MongoCrypt) CreateEncryptionContext(db string, cmd bsoncore.Document) (*Context, error) {
+	ctx := newContext(C.mongocrypt_ctx_new(m.wrapped))
+	if ctx.wrapped == nil {
+		return nil, m.createErrorFromStatus()
+	}
+
+	cmdBinary := newBinaryFromBytes(cmd)
+	defer cmdBinary.close()
+	dbStr := C.CString(db)
+	defer C.free(unsafe.Pointer(dbStr))
+
+	if ok := C.mongocrypt_ctx_encrypt_init(ctx.wrapped, dbStr, C.int32_t(-1), cmdBinary.wrapped); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+	return ctx, nil
+}
+
+// CreateDecryptionContext creates a Context to use for decryption.
+func (m *MongoCrypt) CreateDecryptionContext(cmd bsoncore.Document) (*Context, error) {
+	ctx := newContext(C.mongocrypt_ctx_new(m.wrapped))
+	if ctx.wrapped == nil {
+		return nil, m.createErrorFromStatus()
+	}
+
+	cmdBinary := newBinaryFromBytes(cmd)
+	defer cmdBinary.close()
+
+	if ok := C.mongocrypt_ctx_decrypt_init(ctx.wrapped, cmdBinary.wrapped); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+	return ctx, nil
+}
+
+// lookupString returns the string value corresponding to the given key in the document.
+// If the key does not exist or the value is not a string, the empty string is returned.
+func lookupString(doc bsoncore.Document, key string) string {
+	strVal, _ := doc.Lookup(key).StringValueOK()
+	return strVal
+}
+
+func setAltName(ctx *Context, altName string) error {
+	// create document {"keyAltName": keyAltName}
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendStringElement(doc, "keyAltName", altName)
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+
+	keyAltBinary := newBinaryFromBytes(doc)
+	defer keyAltBinary.close()
+
+	if ok := C.mongocrypt_ctx_setopt_key_alt_name(ctx.wrapped, keyAltBinary.wrapped); !ok {
+		return ctx.createErrorFromStatus()
+	}
+	return nil
+}
+
+func setKeyMaterial(ctx *Context, keyMaterial []byte) error {
+	// Create document {"keyMaterial": keyMaterial} using the generic binary sybtype 0x00.
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendBinaryElement(doc, "keyMaterial", 0x00, keyMaterial)
+	doc, err := bsoncore.AppendDocumentEnd(doc, idx)
+	if err != nil {
+		return err
+	}
+
+	keyMaterialBinary := newBinaryFromBytes(doc)
+	defer keyMaterialBinary.close()
+
+	if ok := C.mongocrypt_ctx_setopt_key_material(ctx.wrapped, keyMaterialBinary.wrapped); !ok {
+		return ctx.createErrorFromStatus()
+	}
+	return nil
+}
+
+func rewrapDataKey(ctx *Context, filter []byte) error {
+	filterBinary := newBinaryFromBytes(filter)
+	defer filterBinary.close()
+
+	if ok := C.mongocrypt_ctx_rewrap_many_datakey_init(ctx.wrapped, filterBinary.wrapped); !ok {
+		return ctx.createErrorFromStatus()
+	}
+	return nil
+}
+
+// CreateDataKeyContext creates a Context to use for creating a data key.
+func (m *MongoCrypt) CreateDataKeyContext(kmsProvider string, opts *options.DataKeyOptions) (*Context, error) {
+	ctx := newContext(C.mongocrypt_ctx_new(m.wrapped))
+	if ctx.wrapped == nil {
+		return nil, m.createErrorFromStatus()
+	}
+
+	// Create a masterKey document of the form { "provider": <provider string>, other options... }.
+	var masterKey bsoncore.Document
+	switch {
+	case opts.MasterKey != nil:
+		// The original key passed into the top-level API was already transformed into a raw BSON document and passed
+		// down to here, so we can modify it without copying. Remove the terminating byte to add the "provider" field.
+		masterKey = opts.MasterKey[:len(opts.MasterKey)-1]
+		masterKey = bsoncore.AppendStringElement(masterKey, "provider", kmsProvider)
+		masterKey, _ = bsoncore.AppendDocumentEnd(masterKey, 0)
+	default:
+		masterKey = bsoncore.NewDocumentBuilder().AppendString("provider", kmsProvider).Build()
+	}
+
+	masterKeyBinary := newBinaryFromBytes(masterKey)
+	defer masterKeyBinary.close()
+
+	if ok := C.mongocrypt_ctx_setopt_key_encryption_key(ctx.wrapped, masterKeyBinary.wrapped); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+
+	for _, altName := range opts.KeyAltNames {
+		if err := setAltName(ctx, altName); err != nil {
+			return nil, err
+		}
+	}
+
+	if opts.KeyMaterial != nil {
+		if err := setKeyMaterial(ctx, opts.KeyMaterial); err != nil {
+			return nil, err
+		}
+	}
+
+	if ok := C.mongocrypt_ctx_datakey_init(ctx.wrapped); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+	return ctx, nil
+}
+
+const (
+	IndexTypeUnindexed = 1
+	IndexTypeIndexed   = 2
+)
+
+// createExplicitEncryptionContext creates an explicit encryption context.
+func (m *MongoCrypt) createExplicitEncryptionContext(opts *options.ExplicitEncryptionOptions) (*Context, error) {
+	ctx := newContext(C.mongocrypt_ctx_new(m.wrapped))
+	if ctx.wrapped == nil {
+		return nil, m.createErrorFromStatus()
+	}
+
+	if opts.KeyID != nil {
+		keyIDBinary := newBinaryFromBytes(opts.KeyID.Data)
+		defer keyIDBinary.close()
+
+		if ok := C.mongocrypt_ctx_setopt_key_id(ctx.wrapped, keyIDBinary.wrapped); !ok {
+			return nil, ctx.createErrorFromStatus()
+		}
+	}
+	if opts.KeyAltName != nil {
+		if err := setAltName(ctx, *opts.KeyAltName); err != nil {
+			return nil, err
+		}
+	}
+
+	if opts.RangeOptions != nil {
+		idx, mongocryptDoc := bsoncore.AppendDocumentStart(nil)
+		if opts.RangeOptions.Min != nil {
+			mongocryptDoc = bsoncore.AppendValueElement(mongocryptDoc, "min", *opts.RangeOptions.Min)
+		}
+		if opts.RangeOptions.Max != nil {
+			mongocryptDoc = bsoncore.AppendValueElement(mongocryptDoc, "max", *opts.RangeOptions.Max)
+		}
+		if opts.RangeOptions.Precision != nil {
+			mongocryptDoc = bsoncore.AppendInt32Element(mongocryptDoc, "precision", *opts.RangeOptions.Precision)
+		}
+		if opts.RangeOptions.Sparsity != nil {
+			mongocryptDoc = bsoncore.AppendInt64Element(mongocryptDoc, "sparsity", *opts.RangeOptions.Sparsity)
+		}
+		if opts.RangeOptions.TrimFactor != nil {
+			mongocryptDoc = bsoncore.AppendInt32Element(mongocryptDoc, "trimFactor", *opts.RangeOptions.TrimFactor)
+		}
+
+		mongocryptDoc, err := bsoncore.AppendDocumentEnd(mongocryptDoc, idx)
+		if err != nil {
+			return nil, err
+		}
+
+		mongocryptBinary := newBinaryFromBytes(mongocryptDoc)
+		defer mongocryptBinary.close()
+
+		if ok := C.mongocrypt_ctx_setopt_algorithm_range(ctx.wrapped, mongocryptBinary.wrapped); !ok {
+			return nil, ctx.createErrorFromStatus()
+		}
+	}
+
+	algoStr := C.CString(opts.Algorithm)
+	defer C.free(unsafe.Pointer(algoStr))
+
+	if ok := C.mongocrypt_ctx_setopt_algorithm(ctx.wrapped, algoStr, -1); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+
+	if opts.QueryType != "" {
+		queryStr := C.CString(opts.QueryType)
+		defer C.free(unsafe.Pointer(queryStr))
+		if ok := C.mongocrypt_ctx_setopt_query_type(ctx.wrapped, queryStr, -1); !ok {
+			return nil, ctx.createErrorFromStatus()
+		}
+	}
+
+	if opts.ContentionFactor != nil {
+		if ok := C.mongocrypt_ctx_setopt_contention_factor(ctx.wrapped, C.int64_t(*opts.ContentionFactor)); !ok {
+			return nil, ctx.createErrorFromStatus()
+		}
+	}
+	return ctx, nil
+}
+
+// CreateExplicitEncryptionContext creates a Context to use for explicit encryption.
+func (m *MongoCrypt) CreateExplicitEncryptionContext(doc bsoncore.Document, opts *options.ExplicitEncryptionOptions) (*Context, error) {
+	ctx, err := m.createExplicitEncryptionContext(opts)
+	if err != nil {
+		return ctx, err
+	}
+	docBinary := newBinaryFromBytes(doc)
+	defer docBinary.close()
+	if ok := C.mongocrypt_ctx_explicit_encrypt_init(ctx.wrapped, docBinary.wrapped); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+
+	return ctx, nil
+}
+
+// CreateExplicitEncryptionExpressionContext creates a Context to use for explicit encryption of an expression.
+func (m *MongoCrypt) CreateExplicitEncryptionExpressionContext(doc bsoncore.Document, opts *options.ExplicitEncryptionOptions) (*Context, error) {
+	ctx, err := m.createExplicitEncryptionContext(opts)
+	if err != nil {
+		return ctx, err
+	}
+	docBinary := newBinaryFromBytes(doc)
+	defer docBinary.close()
+	if ok := C.mongocrypt_ctx_explicit_encrypt_expression_init(ctx.wrapped, docBinary.wrapped); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+
+	return ctx, nil
+}
+
+// CreateExplicitDecryptionContext creates a Context to use for explicit decryption.
+func (m *MongoCrypt) CreateExplicitDecryptionContext(doc bsoncore.Document) (*Context, error) {
+	ctx := newContext(C.mongocrypt_ctx_new(m.wrapped))
+	if ctx.wrapped == nil {
+		return nil, m.createErrorFromStatus()
+	}
+
+	docBinary := newBinaryFromBytes(doc)
+	defer docBinary.close()
+
+	if ok := C.mongocrypt_ctx_explicit_decrypt_init(ctx.wrapped, docBinary.wrapped); !ok {
+		return nil, ctx.createErrorFromStatus()
+	}
+	return ctx, nil
+}
+
+// CryptSharedLibVersion returns the version number for the loaded crypt_shared library, or 0 if the
+// crypt_shared library was not loaded.
+func (m *MongoCrypt) CryptSharedLibVersion() uint64 {
+	return uint64(C.mongocrypt_crypt_shared_lib_version(m.wrapped))
+}
+
+// CryptSharedLibVersionString returns the version string for the loaded crypt_shared library, or an
+// empty string if the crypt_shared library was not loaded.
+func (m *MongoCrypt) CryptSharedLibVersionString() string {
+	// Pass in a pointer for the length out-parameter, but ignore the value because C.GoString can
+	// determine the string length without it.
+	versionLen := C.uint(0)
+	str := C.GoString(C.mongocrypt_crypt_shared_lib_version_string(m.wrapped, &versionLen))
+	return str
+}
+
+// Close cleans up any resources associated with the given MongoCrypt instance.
+func (m *MongoCrypt) Close() {
+	C.mongocrypt_destroy(m.wrapped)
+	if m.httpClient == httputil.DefaultHTTPClient {
+		httputil.CloseIdleHTTPConnections(m.httpClient)
+	}
+}
+
+// RewrapDataKeyContext creates a Context to use for rewrapping a data key.
+func (m *MongoCrypt) RewrapDataKeyContext(filter []byte, opts *options.RewrapManyDataKeyOptions) (*Context, error) {
+	const masterKey = "masterKey"
+	const providerKey = "provider"
+
+	ctx := newContext(C.mongocrypt_ctx_new(m.wrapped))
+	if ctx.wrapped == nil {
+		return nil, m.createErrorFromStatus()
+	}
+
+	if opts.MasterKey != nil && opts.Provider == nil {
+		// Provider is nil, but MasterKey is set. This is an error.
+		return nil, fmt.Errorf("expected 'Provider' to be set to identify type of 'MasterKey'")
+	}
+
+	if opts.Provider != nil {
+		// If a provider has been specified, create an encryption key document for creating a data key or for rewrapping
+		// datakeys. If a new provider is not specified, then the filter portion of this logic returns the data as it
+		// exists in the collection.
+		idx, mongocryptDoc := bsoncore.AppendDocumentStart(nil)
+		mongocryptDoc = bsoncore.AppendStringElement(mongocryptDoc, providerKey, *opts.Provider)
+
+		if opts.MasterKey != nil {
+			mongocryptDoc = opts.MasterKey[:len(opts.MasterKey)-1]
+			mongocryptDoc = bsoncore.AppendStringElement(mongocryptDoc, providerKey, *opts.Provider)
+		}
+
+		mongocryptDoc, err := bsoncore.AppendDocumentEnd(mongocryptDoc, idx)
+		if err != nil {
+			return nil, err
+		}
+
+		mongocryptBinary := newBinaryFromBytes(mongocryptDoc)
+		defer mongocryptBinary.close()
+
+		// Add new masterKey to the mongocrypt context.
+		if ok := C.mongocrypt_ctx_setopt_key_encryption_key(ctx.wrapped, mongocryptBinary.wrapped); !ok {
+			return nil, ctx.createErrorFromStatus()
+		}
+	}
+
+	return ctx, rewrapDataKey(ctx, filter)
+}
+
+func (m *MongoCrypt) setProviderOptions(kmsProviders bsoncore.Document) error {
+	providersBinary := newBinaryFromBytes(kmsProviders)
+	defer providersBinary.close()
+
+	if ok := C.mongocrypt_setopt_kms_providers(m.wrapped, providersBinary.wrapped); !ok {
+		return m.createErrorFromStatus()
+	}
+	return nil
+}
+
+// setLocalSchemaMap sets the local schema map in mongocrypt.
+func (m *MongoCrypt) setLocalSchemaMap(schemaMap map[string]bsoncore.Document) error {
+	if len(schemaMap) == 0 {
+		return nil
+	}
+
+	// convert schema map to BSON document
+	schemaMapBSON, err := bson.Marshal(schemaMap)
+	if err != nil {
+		return fmt.Errorf("error marshalling SchemaMap: %v", err)
+	}
+
+	schemaMapBinary := newBinaryFromBytes(schemaMapBSON)
+	defer schemaMapBinary.close()
+
+	if ok := C.mongocrypt_setopt_schema_map(m.wrapped, schemaMapBinary.wrapped); !ok {
+		return m.createErrorFromStatus()
+	}
+	return nil
+}
+
+// setEncryptedFieldsMap sets the encryptedfields map in mongocrypt.
+func (m *MongoCrypt) setEncryptedFieldsMap(encryptedfieldsMap map[string]bsoncore.Document) error {
+	if len(encryptedfieldsMap) == 0 {
+		return nil
+	}
+
+	// convert encryptedfields map to BSON document
+	encryptedfieldsMapBSON, err := bson.Marshal(encryptedfieldsMap)
+	if err != nil {
+		return fmt.Errorf("error marshalling EncryptedFieldsMap: %v", err)
+	}
+
+	encryptedfieldsMapBinary := newBinaryFromBytes(encryptedfieldsMapBSON)
+	defer encryptedfieldsMapBinary.close()
+
+	if ok := C.mongocrypt_setopt_encrypted_field_config_map(m.wrapped, encryptedfieldsMapBinary.wrapped); !ok {
+		return m.createErrorFromStatus()
+	}
+	return nil
+}
+
+// createErrorFromStatus creates a new Error based on the status of the MongoCrypt instance.
+func (m *MongoCrypt) createErrorFromStatus() error {
+	status := C.mongocrypt_status_new()
+	defer C.mongocrypt_status_destroy(status)
+	C.mongocrypt_status(m.wrapped, status)
+	return errorFromStatus(status)
+}
+
+// needsKmsProvider returns true if the given provider was initially set to an empty document.
+// An empty document signals the driver to fetch credentials.
+func needsKmsProvider(kmsProviders bsoncore.Document, provider string) bool {
+	val, err := kmsProviders.LookupErr(provider)
+	if err != nil {
+		// KMS provider is not configured.
+		return false
+	}
+	doc, ok := val.DocumentOK()
+	// The KMS provider is an empty document if its length is 5: a 4-byte little-endian
+	// length prefix (0x05 0x00 0x00 0x00) followed by a null terminator byte.
+	return ok && len(doc) == 5
+}
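+
+// For example (illustrative only), an empty subdocument built with bsoncore is
+// exactly 5 bytes, which is what signals on-demand credential fetching:
+//
+//	empty := bsoncore.NewDocumentBuilder().Build() // [0x05 0x00 0x00 0x00 0x00]
+//	kms := bsoncore.NewDocumentBuilder().AppendDocument("aws", empty).Build()
+//	needsKmsProvider(kms, "aws")   // true: fetch AWS credentials at runtime
+//	needsKmsProvider(kms, "azure") // false: provider not configured at all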
+
+// GetKmsProviders attempts to obtain credentials from the environment.
+// It is expected to be called when a libmongocrypt context is in the mongocrypt.NeedKmsCredentials state.
+func (m *MongoCrypt) GetKmsProviders(ctx context.Context) (bsoncore.Document, error) {
+	builder := bsoncore.NewDocumentBuilder()
+	for k, p := range m.kmsProviders {
+		doc, err := p.GetCredentialsDoc(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("unable to retrieve %s credentials: %w", k, err)
+		}
+		builder.AppendDocument(k, doc)
+	}
+	return builder.Build(), nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..04e98d01c17092b5c6b3d95cfa142fa6a0c74515
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go
@@ -0,0 +1,115 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build cse
+// +build cse
+
+package mongocrypt
+
+// #include <mongocrypt.h>
+import "C"
+import (
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Context represents a mongocrypt_ctx_t handle
+type Context struct {
+	wrapped *C.mongocrypt_ctx_t
+}
+
+// newContext creates a Context wrapper around the given C type.
+func newContext(wrapped *C.mongocrypt_ctx_t) *Context {
+	return &Context{
+		wrapped: wrapped,
+	}
+}
+
+// State returns the current State of the Context.
+func (c *Context) State() State {
+	return State(int(C.mongocrypt_ctx_state(c.wrapped)))
+}
+
+// NextOperation gets the document for the next database operation to run.
+func (c *Context) NextOperation() (bsoncore.Document, error) {
+	opDocBinary := newBinary() // out param for mongocrypt_ctx_mongo_op to fill in operation
+	defer opDocBinary.close()
+
+	if ok := C.mongocrypt_ctx_mongo_op(c.wrapped, opDocBinary.wrapped); !ok {
+		return nil, c.createErrorFromStatus()
+	}
+	return opDocBinary.toBytes(), nil
+}
+
+// AddOperationResult feeds the result of a database operation to mongocrypt.
+func (c *Context) AddOperationResult(result bsoncore.Document) error {
+	resultBinary := newBinaryFromBytes(result)
+	defer resultBinary.close()
+
+	if ok := C.mongocrypt_ctx_mongo_feed(c.wrapped, resultBinary.wrapped); !ok {
+		return c.createErrorFromStatus()
+	}
+	return nil
+}
+
+// CompleteOperation signals a database operation has been completed.
+func (c *Context) CompleteOperation() error {
+	if ok := C.mongocrypt_ctx_mongo_done(c.wrapped); !ok {
+		return c.createErrorFromStatus()
+	}
+	return nil
+}
+
+// NextKmsContext returns the next KmsContext, or nil if there are no more.
+func (c *Context) NextKmsContext() *KmsContext {
+	ctx := C.mongocrypt_ctx_next_kms_ctx(c.wrapped)
+	if ctx == nil {
+		return nil
+	}
+	return newKmsContext(ctx)
+}
+
+// FinishKmsContexts signals that all KMS contexts have been completed.
+func (c *Context) FinishKmsContexts() error {
+	if ok := C.mongocrypt_ctx_kms_done(c.wrapped); !ok {
+		return c.createErrorFromStatus()
+	}
+	return nil
+}
+
+// Finish performs the final operations for the context and returns the resulting document.
+func (c *Context) Finish() (bsoncore.Document, error) {
+	docBinary := newBinary() // out param for mongocrypt_ctx_finalize to fill in resulting document
+	defer docBinary.close()
+
+	if ok := C.mongocrypt_ctx_finalize(c.wrapped, docBinary.wrapped); !ok {
+		return nil, c.createErrorFromStatus()
+	}
+	return docBinary.toBytes(), nil
+}
+
+// Close cleans up any resources associated with the given Context instance.
+func (c *Context) Close() {
+	C.mongocrypt_ctx_destroy(c.wrapped)
+}
+
+// createErrorFromStatus creates a new Error based on the status of the MongoCrypt instance.
+func (c *Context) createErrorFromStatus() error {
+	status := C.mongocrypt_status_new()
+	defer C.mongocrypt_status_destroy(status)
+	C.mongocrypt_ctx_status(c.wrapped, status)
+	return errorFromStatus(status)
+}
+
+// ProvideKmsProviders provides the KMS providers when in the NeedKmsCredentials state.
+func (c *Context) ProvideKmsProviders(kmsProviders bsoncore.Document) error {
+	kmsProvidersBinary := newBinaryFromBytes(kmsProviders)
+	defer kmsProvidersBinary.close()
+
+	if ok := C.mongocrypt_ctx_provide_kms_providers(c.wrapped, kmsProvidersBinary.wrapped); !ok {
+		return c.createErrorFromStatus()
+	}
+	return nil
+}
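+
+// A condensed sketch of how callers are expected to drive a Context through
+// the libmongocrypt state machine (illustrative only; runCommand,
+// exchangeWithKms, and onDemandCreds are hypothetical stand-ins for real I/O):
+//
+//	for {
+//		switch cryptCtx.State() {
+//		case NeedMongoCollInfo, NeedMongoMarkings, NeedMongoKeys:
+//			op, _ := cryptCtx.NextOperation() // document to run against the server
+//			res := runCommand(op)             // hypothetical database round trip
+//			_ = cryptCtx.AddOperationResult(res)
+//			_ = cryptCtx.CompleteOperation()
+//		case NeedKms:
+//			for kmsCtx := cryptCtx.NextKmsContext(); kmsCtx != nil; kmsCtx = cryptCtx.NextKmsContext() {
+//				exchangeWithKms(kmsCtx) // see KmsContext for the byte-level loop
+//			}
+//			_ = cryptCtx.FinishKmsContexts()
+//		case NeedKmsCredentials:
+//			_ = cryptCtx.ProvideKmsProviders(onDemandCreds) // hypothetical credentials document
+//		case Ready:
+//			result, _ := cryptCtx.Finish()
+//			_ = result
+//			return
+//		}
+//	}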
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go
new file mode 100644
index 0000000000000000000000000000000000000000..734662e7150b66caae0ba9aa35fb83124912f4f6
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go
@@ -0,0 +1,62 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build !cse
+// +build !cse
+
+package mongocrypt
+
+import (
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Context represents a mongocrypt_ctx_t handle
+type Context struct{}
+
+// State returns the current State of the Context.
+func (c *Context) State() State {
+	panic(cseNotSupportedMsg)
+}
+
+// NextOperation gets the document for the next database operation to run.
+func (c *Context) NextOperation() (bsoncore.Document, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// AddOperationResult feeds the result of a database operation to mongocrypt.
+func (c *Context) AddOperationResult(bsoncore.Document) error {
+	panic(cseNotSupportedMsg)
+}
+
+// CompleteOperation signals a database operation has been completed.
+func (c *Context) CompleteOperation() error {
+	panic(cseNotSupportedMsg)
+}
+
+// NextKmsContext returns the next KmsContext, or nil if there are no more.
+func (c *Context) NextKmsContext() *KmsContext {
+	panic(cseNotSupportedMsg)
+}
+
+// FinishKmsContexts signals that all KMS contexts have been completed.
+func (c *Context) FinishKmsContexts() error {
+	panic(cseNotSupportedMsg)
+}
+
+// Finish performs the final operations for the context and returns the resulting document.
+func (c *Context) Finish() (bsoncore.Document, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// Close cleans up any resources associated with the given Context instance.
+func (c *Context) Close() {
+	panic(cseNotSupportedMsg)
+}
+
+// ProvideKmsProviders provides the KMS providers when in the NeedKmsCredentials state.
+func (c *Context) ProvideKmsProviders(bsoncore.Document) error {
+	panic(cseNotSupportedMsg)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..296a22315c4eb628ec717990a6196559a6f1f8a9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go
@@ -0,0 +1,76 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build cse
+// +build cse
+
+package mongocrypt
+
+// #include <mongocrypt.h>
+import "C"
+
+// KmsContext represents a mongocrypt_kms_ctx_t handle.
+type KmsContext struct {
+	wrapped *C.mongocrypt_kms_ctx_t
+}
+
+// newKmsContext creates a KmsContext wrapper around the given C type.
+func newKmsContext(wrapped *C.mongocrypt_kms_ctx_t) *KmsContext {
+	return &KmsContext{
+		wrapped: wrapped,
+	}
+}
+
+// HostName gets the host name of the KMS.
+func (kc *KmsContext) HostName() (string, error) {
+	var hostname *C.char // out param for mongocrypt function to fill in hostname
+	if ok := C.mongocrypt_kms_ctx_endpoint(kc.wrapped, &hostname); !ok {
+		return "", kc.createErrorFromStatus()
+	}
+	return C.GoString(hostname), nil
+}
+
+// KMSProvider gets the KMS provider of the KMS context.
+func (kc *KmsContext) KMSProvider() string {
+	kmsProvider := C.mongocrypt_kms_ctx_get_kms_provider(kc.wrapped, nil)
+	return C.GoString(kmsProvider)
+}
+
+// Message returns the message to send to the KMS.
+func (kc *KmsContext) Message() ([]byte, error) {
+	msgBinary := newBinary()
+	defer msgBinary.close()
+
+	if ok := C.mongocrypt_kms_ctx_message(kc.wrapped, msgBinary.wrapped); !ok {
+		return nil, kc.createErrorFromStatus()
+	}
+	return msgBinary.toBytes(), nil
+}
+
+// BytesNeeded returns the number of bytes that should be received from the KMS.
+// After sending the message to the KMS, this method should be called in a loop until the number returned is 0.
+func (kc *KmsContext) BytesNeeded() int32 {
+	return int32(C.mongocrypt_kms_ctx_bytes_needed(kc.wrapped))
+}
+
+// FeedResponse feeds the bytes received from the KMS to mongocrypt.
+func (kc *KmsContext) FeedResponse(response []byte) error {
+	responseBinary := newBinaryFromBytes(response)
+	defer responseBinary.close()
+
+	if ok := C.mongocrypt_kms_ctx_feed(kc.wrapped, responseBinary.wrapped); !ok {
+		return kc.createErrorFromStatus()
+	}
+	return nil
+}
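+
+// A sketch of the byte-level KMS exchange described above (illustrative only;
+// conn is a hypothetical TLS connection to the endpoint from HostName):
+//
+//	msg, _ := kc.Message()
+//	_, _ = conn.Write(msg)
+//	for kc.BytesNeeded() > 0 {
+//		buf := make([]byte, kc.BytesNeeded())
+//		n, _ := conn.Read(buf)
+//		_ = kc.FeedResponse(buf[:n])
+//	}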
+
+// createErrorFromStatus creates a new Error from the status of the KmsContext instance.
+func (kc *KmsContext) createErrorFromStatus() error {
+	status := C.mongocrypt_status_new()
+	defer C.mongocrypt_status_destroy(status)
+	C.mongocrypt_kms_ctx_status(kc.wrapped, status)
+	return errorFromStatus(status)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bce2f02999c4534163289562cf3e12b5b809473
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build !cse
+// +build !cse
+
+package mongocrypt
+
+// KmsContext represents a mongocrypt_kms_ctx_t handle.
+type KmsContext struct{}
+
+// HostName gets the host name of the KMS.
+func (kc *KmsContext) HostName() (string, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// Message returns the message to send to the KMS.
+func (kc *KmsContext) Message() ([]byte, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// KMSProvider gets the KMS provider of the KMS context.
+func (kc *KmsContext) KMSProvider() string {
+	panic(cseNotSupportedMsg)
+}
+
+// BytesNeeded returns the number of bytes that should be received from the KMS.
+// After sending the message to the KMS, this method should be called in a loop until the number returned is 0.
+func (kc *KmsContext) BytesNeeded() int32 {
+	panic(cseNotSupportedMsg)
+}
+
+// FeedResponse feeds the bytes received from the KMS to mongocrypt.
+func (kc *KmsContext) FeedResponse([]byte) error {
+	panic(cseNotSupportedMsg)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go
new file mode 100644
index 0000000000000000000000000000000000000000..80f500085cbd23b4a62026fdb0f5b746d2f571e9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go
@@ -0,0 +1,97 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build !cse
+// +build !cse
+
+// Package mongocrypt is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package mongocrypt
+
+import (
+	"context"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options"
+)
+
+const cseNotSupportedMsg = "client-side encryption not enabled. add the cse build tag to support"
+
+// MongoCrypt represents a mongocrypt_t handle.
+type MongoCrypt struct{}
+
+// Version returns the version string for the loaded libmongocrypt, or an empty string
+// if libmongocrypt was not loaded.
+func Version() string {
+	return ""
+}
+
+// NewMongoCrypt constructs a new MongoCrypt instance configured using the provided MongoCryptOptions.
+func NewMongoCrypt(*options.MongoCryptOptions) (*MongoCrypt, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// CreateEncryptionContext creates a Context to use for encryption.
+func (m *MongoCrypt) CreateEncryptionContext(string, bsoncore.Document) (*Context, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// CreateExplicitEncryptionExpressionContext creates a Context to use for explicit encryption of an expression.
+func (m *MongoCrypt) CreateExplicitEncryptionExpressionContext(bsoncore.Document, *options.ExplicitEncryptionOptions) (*Context, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// CreateDecryptionContext creates a Context to use for decryption.
+func (m *MongoCrypt) CreateDecryptionContext(bsoncore.Document) (*Context, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// CreateDataKeyContext creates a Context to use for creating a data key.
+func (m *MongoCrypt) CreateDataKeyContext(string, *options.DataKeyOptions) (*Context, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// CreateExplicitEncryptionContext creates a Context to use for explicit encryption.
+func (m *MongoCrypt) CreateExplicitEncryptionContext(bsoncore.Document, *options.ExplicitEncryptionOptions) (*Context, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// RewrapDataKeyContext creates a Context to use for rewrapping a data key.
+func (m *MongoCrypt) RewrapDataKeyContext([]byte, *options.RewrapManyDataKeyOptions) (*Context, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// CreateExplicitDecryptionContext creates a Context to use for explicit decryption.
+func (m *MongoCrypt) CreateExplicitDecryptionContext(bsoncore.Document) (*Context, error) {
+	panic(cseNotSupportedMsg)
+}
+
+// CryptSharedLibVersion returns the version number for the loaded crypt_shared library, or 0 if the
+// crypt_shared library was not loaded.
+func (m *MongoCrypt) CryptSharedLibVersion() uint64 {
+	panic(cseNotSupportedMsg)
+}
+
+// CryptSharedLibVersionString returns the version string for the loaded crypt_shared library, or an
+// empty string if the crypt_shared library was not loaded.
+func (m *MongoCrypt) CryptSharedLibVersionString() string {
+	panic(cseNotSupportedMsg)
+}
+
+// Close cleans up any resources associated with the given MongoCrypt instance.
+func (m *MongoCrypt) Close() {
+	panic(cseNotSupportedMsg)
+}
+
+// GetKmsProviders returns the originally configured KMS providers.
+func (m *MongoCrypt) GetKmsProviders(context.Context) (bsoncore.Document, error) {
+	panic(cseNotSupportedMsg)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..e0cc77052a8a2d693ea751ed65b5a39dc5c04380
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2024-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package options is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package options
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..81805e7147ba3a67776c28d4f3c222037be8587f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go
@@ -0,0 +1,158 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// DataKeyOptions specifies options for creating a new data key.
+type DataKeyOptions struct {
+	KeyAltNames []string
+	KeyMaterial []byte
+	MasterKey   bsoncore.Document
+}
+
+// DataKey creates a new DataKeyOptions instance.
+func DataKey() *DataKeyOptions {
+	return &DataKeyOptions{}
+}
+
+// SetKeyAltNames specifies alternate key names.
+func (dko *DataKeyOptions) SetKeyAltNames(names []string) *DataKeyOptions {
+	dko.KeyAltNames = names
+	return dko
+}
+
+// SetMasterKey specifies the master key.
+func (dko *DataKeyOptions) SetMasterKey(key bsoncore.Document) *DataKeyOptions {
+	dko.MasterKey = key
+	return dko
+}
+
+// SetKeyMaterial specifies the key material.
+func (dko *DataKeyOptions) SetKeyMaterial(keyMaterial []byte) *DataKeyOptions {
+	dko.KeyMaterial = keyMaterial
+	return dko
+}
+
+// QueryType describes the type of query the result of Encrypt is used for.
+type QueryType int
+
+// These constants specify valid values for QueryType
+const (
+	QueryTypeEquality QueryType = 1
+)
+
+// ExplicitEncryptionOptions specifies options for configuring an explicit encryption context.
+type ExplicitEncryptionOptions struct {
+	KeyID            *primitive.Binary
+	KeyAltName       *string
+	Algorithm        string
+	QueryType        string
+	ContentionFactor *int64
+	RangeOptions     *ExplicitRangeOptions
+}
+
+// ExplicitRangeOptions specifies options for the range index.
+type ExplicitRangeOptions struct {
+	Min        *bsoncore.Value
+	Max        *bsoncore.Value
+	Sparsity   *int64
+	TrimFactor *int32
+	Precision  *int32
+}
+
+// ExplicitEncryption creates a new ExplicitEncryptionOptions instance.
+func ExplicitEncryption() *ExplicitEncryptionOptions {
+	return &ExplicitEncryptionOptions{}
+}
+
+// SetKeyID sets the key identifier.
+func (eeo *ExplicitEncryptionOptions) SetKeyID(keyID primitive.Binary) *ExplicitEncryptionOptions {
+	eeo.KeyID = &keyID
+	return eeo
+}
+
+// SetKeyAltName sets the key alternative name.
+func (eeo *ExplicitEncryptionOptions) SetKeyAltName(keyAltName string) *ExplicitEncryptionOptions {
+	eeo.KeyAltName = &keyAltName
+	return eeo
+}
+
+// SetAlgorithm specifies an encryption algorithm.
+func (eeo *ExplicitEncryptionOptions) SetAlgorithm(algorithm string) *ExplicitEncryptionOptions {
+	eeo.Algorithm = algorithm
+	return eeo
+}
+
+// SetQueryType specifies the query type.
+func (eeo *ExplicitEncryptionOptions) SetQueryType(queryType string) *ExplicitEncryptionOptions {
+	eeo.QueryType = queryType
+	return eeo
+}
+
+// SetContentionFactor specifies the contention factor.
+func (eeo *ExplicitEncryptionOptions) SetContentionFactor(contentionFactor int64) *ExplicitEncryptionOptions {
+	eeo.ContentionFactor = &contentionFactor
+	return eeo
+}
+
+// SetRangeOptions specifies the range options.
+func (eeo *ExplicitEncryptionOptions) SetRangeOptions(ro ExplicitRangeOptions) *ExplicitEncryptionOptions {
+	eeo.RangeOptions = &ro
+	return eeo
+}
+
+// RewrapManyDataKeyOptions represents all possible options used to decrypt and encrypt all matching data keys with a
+// possibly new masterKey.
+type RewrapManyDataKeyOptions struct {
+	// Provider identifies the new KMS provider. If omitted, encrypting uses the current KMS provider.
+	Provider *string
+
+	// MasterKey identifies the new masterKey. If omitted, rewraps with the current masterKey.
+	MasterKey bsoncore.Document
+}
+
+// RewrapManyDataKey creates a new RewrapManyDataKeyOptions instance.
+func RewrapManyDataKey() *RewrapManyDataKeyOptions {
+	return new(RewrapManyDataKeyOptions)
+}
+
+// SetProvider sets the value for the Provider field.
+func (rmdko *RewrapManyDataKeyOptions) SetProvider(provider string) *RewrapManyDataKeyOptions {
+	rmdko.Provider = &provider
+	return rmdko
+}
+
+// SetMasterKey sets the value for the MasterKey field.
+func (rmdko *RewrapManyDataKeyOptions) SetMasterKey(masterKey bsoncore.Document) *RewrapManyDataKeyOptions {
+	rmdko.MasterKey = masterKey
+	return rmdko
+}
+
+// MergeRewrapManyDataKeyOptions combines the given RewrapManyDataKeyOptions instances into a single
+// RewrapManyDataKeyOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeRewrapManyDataKeyOptions(opts ...*RewrapManyDataKeyOptions) *RewrapManyDataKeyOptions {
+	rmdkOpts := RewrapManyDataKey()
+	for _, rmdko := range opts {
+		if rmdko == nil {
+			continue
+		}
+		if provider := rmdko.Provider; provider != nil {
+			rmdkOpts.Provider = provider
+		}
+		if masterKey := rmdko.MasterKey; masterKey != nil {
+			rmdkOpts.MasterKey = masterKey
+		}
+	}
+	return rmdkOpts
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..d800bc8db76c21648d83ff6f614b9a6bc7552179
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go
@@ -0,0 +1,72 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// MongoCryptOptions specifies options to configure a MongoCrypt instance.
+type MongoCryptOptions struct {
+	KmsProviders               bsoncore.Document
+	LocalSchemaMap             map[string]bsoncore.Document
+	BypassQueryAnalysis        bool
+	EncryptedFieldsMap         map[string]bsoncore.Document
+	CryptSharedLibDisabled     bool
+	CryptSharedLibOverridePath string
+	HTTPClient                 *http.Client
+}
+
+// MongoCrypt creates a new MongoCryptOptions instance.
+func MongoCrypt() *MongoCryptOptions {
+	return &MongoCryptOptions{}
+}
+
+// SetKmsProviders specifies the KMS providers map.
+func (mo *MongoCryptOptions) SetKmsProviders(kmsProviders bsoncore.Document) *MongoCryptOptions {
+	mo.KmsProviders = kmsProviders
+	return mo
+}
+
+// SetLocalSchemaMap specifies the local schema map.
+func (mo *MongoCryptOptions) SetLocalSchemaMap(localSchemaMap map[string]bsoncore.Document) *MongoCryptOptions {
+	mo.LocalSchemaMap = localSchemaMap
+	return mo
+}
+
+// SetBypassQueryAnalysis skips the NeedMongoMarkings state.
+func (mo *MongoCryptOptions) SetBypassQueryAnalysis(bypassQueryAnalysis bool) *MongoCryptOptions {
+	mo.BypassQueryAnalysis = bypassQueryAnalysis
+	return mo
+}
+
+// SetEncryptedFieldsMap specifies the encrypted fields map.
+func (mo *MongoCryptOptions) SetEncryptedFieldsMap(efcMap map[string]bsoncore.Document) *MongoCryptOptions {
+	mo.EncryptedFieldsMap = efcMap
+	return mo
+}
+
+// SetCryptSharedLibDisabled explicitly disables loading the crypt_shared library if set to true.
+func (mo *MongoCryptOptions) SetCryptSharedLibDisabled(disabled bool) *MongoCryptOptions {
+	mo.CryptSharedLibDisabled = disabled
+	return mo
+}
+
+// SetCryptSharedLibOverridePath sets the override path to the crypt_shared library file. Setting
+// an override path disables the default operating system dynamic library search path.
+func (mo *MongoCryptOptions) SetCryptSharedLibOverridePath(path string) *MongoCryptOptions {
+	mo.CryptSharedLibOverridePath = path
+	return mo
+}
+
+// SetHTTPClient sets the http client.
+func (mo *MongoCryptOptions) SetHTTPClient(httpClient *http.Client) *MongoCryptOptions {
+	mo.HTTPClient = httpClient
+	return mo
+}
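+
+// The setters above are chainable, so a typical configuration reads as a
+// single fluent expression (illustrative only; kmsDoc is a caller-supplied
+// bsoncore.Document):
+//
+//	opts := MongoCrypt().
+//		SetKmsProviders(kmsDoc).
+//		SetBypassQueryAnalysis(true).
+//		SetCryptSharedLibDisabled(false)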
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/state.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/state.go
new file mode 100644
index 0000000000000000000000000000000000000000..60546160ce1bbd7d522390d1a0161bd4cf8dd436
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/state.go
@@ -0,0 +1,47 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongocrypt
+
+// State represents a state that a MongocryptContext can be in.
+type State int
+
+// These constants are valid values for the State type.
+// The values must match the values defined in the mongocrypt_ctx_state_t enum in libmongocrypt.
+const (
+	StateError         State = 0
+	NeedMongoCollInfo  State = 1
+	NeedMongoMarkings  State = 2
+	NeedMongoKeys      State = 3
+	NeedKms            State = 4
+	Ready              State = 5
+	Done               State = 6
+	NeedKmsCredentials State = 7
+)
+
+// String implements the Stringer interface.
+func (s State) String() string {
+	switch s {
+	case StateError:
+		return "Error"
+	case NeedMongoCollInfo:
+		return "NeedMongoCollInfo"
+	case NeedMongoMarkings:
+		return "NeedMongoMarkings"
+	case NeedMongoKeys:
+		return "NeedMongoKeys"
+	case NeedKms:
+		return "NeedKms"
+	case Ready:
+		return "Ready"
+	case Done:
+		return "Done"
+	case NeedKmsCredentials:
+		return "NeedKmsCredentials"
+	default:
+		return "Unknown State"
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/cache.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..97c9b4ac05c78094c951b572494ceed9b44f2f34
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/cache.go
@@ -0,0 +1,121 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package ocsp
+
+import (
+	"crypto"
+	"sync"
+	"time"
+
+	"golang.org/x/crypto/ocsp"
+)
+
+type cacheKey struct {
+	HashAlgorithm  crypto.Hash
+	IssuerNameHash string
+	IssuerKeyHash  string
+	SerialNumber   string
+}
+
+// Cache represents an OCSP cache.
+type Cache interface {
+	Update(*ocsp.Request, *ResponseDetails) *ResponseDetails
+	Get(request *ocsp.Request) *ResponseDetails
+}
+
+// ConcurrentCache is an implementation of ocsp.Cache that's safe for concurrent use.
+type ConcurrentCache struct {
+	cache map[cacheKey]*ResponseDetails
+	sync.Mutex
+}
+
+var _ Cache = (*ConcurrentCache)(nil)
+
+// NewCache creates an empty OCSP cache.
+func NewCache() *ConcurrentCache {
+	return &ConcurrentCache{
+		cache: make(map[cacheKey]*ResponseDetails),
+	}
+}
+
+// Update updates the cache entry for the provided request. The provided response will only be cached if it has a
+// status that is not ocsp.Unknown and has a non-zero NextUpdate time. If there is an existing cache entry for request,
+// it will be overwritten by response if response.NextUpdate is further ahead in the future than the existing entry's
+// NextUpdate.
+//
+// This function returns the most up-to-date response corresponding to the request.
+func (c *ConcurrentCache) Update(request *ocsp.Request, response *ResponseDetails) *ResponseDetails {
+	unknown := response.Status == ocsp.Unknown
+	hasUpdateTime := !response.NextUpdate.IsZero()
+	canBeCached := !unknown && hasUpdateTime
+	key := createCacheKey(request)
+
+	c.Lock()
+	defer c.Unlock()
+
+	current, ok := c.cache[key]
+	if !ok {
+		if canBeCached {
+			c.cache[key] = response
+		}
+
+		// Return the provided response even though it might not have been cached because it's the most up-to-date
+		// response available.
+		return response
+	}
+
+	// If the new response is Unknown, we can't cache it. Return the existing cached response.
+	if unknown {
+		return current
+	}
+
+	// If a response has no nextUpdate set, the responder is telling us that newer information is always available.
+	// In this case, remove the existing cache entry because it is stale and return the new response because it is
+	// more up-to-date.
+	if !hasUpdateTime {
+		delete(c.cache, key)
+		return response
+	}
+
+	// If we get here, the new response is conclusive and has a non-empty nextUpdate so it can be cached. Overwrite
+	// the existing cache entry if the new one will be valid for longer.
+	newest := current
+	if response.NextUpdate.After(current.NextUpdate) {
+		c.cache[key] = response
+		newest = response
+	}
+	return newest
+}
+
+// Get returns the cached response for the request, or nil if there is no cached response. If the cached response has
+// expired, it will be removed from the cache and nil will be returned.
+func (c *ConcurrentCache) Get(request *ocsp.Request) *ResponseDetails {
+	key := createCacheKey(request)
+
+	c.Lock()
+	defer c.Unlock()
+
+	response, ok := c.cache[key]
+	if !ok {
+		return nil
+	}
+
+	if time.Now().UTC().Before(response.NextUpdate) {
+		return response
+	}
+	delete(c.cache, key)
+	return nil
+}
+
+func createCacheKey(request *ocsp.Request) cacheKey {
+	return cacheKey{
+		HashAlgorithm:  request.HashAlgorithm,
+		IssuerNameHash: string(request.IssuerNameHash),
+		IssuerKeyHash:  string(request.IssuerKeyHash),
+		SerialNumber:   request.SerialNumber.String(),
+	}
+}
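+
+// A minimal usage sketch (illustrative only; req and details stand in for a
+// parsed *ocsp.Request and an already-validated *ResponseDetails):
+//
+//	cache := NewCache()
+//	newest := cache.Update(req, details) // cached only if conclusive with a NextUpdate
+//	if cached := cache.Get(req); cached != nil {
+//		_ = cached.Status // e.g. ocsp.Good or ocsp.Revoked
+//	}
+//	_ = newest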
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b720cd59000c11173670bee0cc792d30c84414b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go
@@ -0,0 +1,68 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package ocsp
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"go.mongodb.org/mongo-driver/internal/httputil"
+	"golang.org/x/crypto/ocsp"
+)
+
+type config struct {
+	serverCert, issuer      *x509.Certificate
+	cache                   Cache
+	disableEndpointChecking bool
+	ocspRequest             *ocsp.Request
+	ocspRequestBytes        []byte
+	httpClient              *http.Client
+}
+
+func newConfig(certChain []*x509.Certificate, opts *VerifyOptions) (config, error) {
+	cfg := config{
+		cache:                   opts.Cache,
+		disableEndpointChecking: opts.DisableEndpointChecking,
+		httpClient:              opts.HTTPClient,
+	}
+
+	if cfg.httpClient == nil {
+		cfg.httpClient = httputil.DefaultHTTPClient
+	}
+
+	if len(certChain) == 0 {
+		return cfg, errors.New("verified certificate chain contained no certificates")
+	}
+
+	// In the case where the leaf certificate and CA are the same, the chain may only contain one certificate.
+	cfg.serverCert = certChain[0]
+	cfg.issuer = certChain[0]
+	if len(certChain) > 1 {
+		// If the chain has multiple certificates, the one directly after the leaf should be the issuer. Use
+		// CheckSignatureFrom to verify that it is the issuer.
+		cfg.issuer = certChain[1]
+
+		if err := cfg.serverCert.CheckSignatureFrom(cfg.issuer); err != nil {
+			errString := "error checking if server certificate is signed by the issuer in the verified chain: %v"
+			return cfg, fmt.Errorf(errString, err)
+		}
+	}
+
+	var err error
+	cfg.ocspRequestBytes, err = ocsp.CreateRequest(cfg.serverCert, cfg.issuer, nil)
+	if err != nil {
+		return cfg, fmt.Errorf("error creating OCSP request: %w", err)
+	}
+	cfg.ocspRequest, err = ocsp.ParseRequest(cfg.ocspRequestBytes)
+	if err != nil {
+		return cfg, fmt.Errorf("error parsing OCSP request bytes: %w", err)
+	}
+
+	return cfg, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go
new file mode 100644
index 0000000000000000000000000000000000000000..2bff94a659b6c80b184f1c14de9cdce6c4d2406d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go
@@ -0,0 +1,328 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package ocsp is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package ocsp
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net/http"
+	"time"
+
+	"golang.org/x/crypto/ocsp"
+	"golang.org/x/sync/errgroup"
+)
+
+var (
+	tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}
+	mustStapleFeatureValue = big.NewInt(5)
+)
+
+// Error represents an OCSP verification error
+type Error struct {
+	wrapped error
+}
+
+// Error implements the error interface
+func (e *Error) Error() string {
+	return fmt.Sprintf("OCSP verification failed: %v", e.wrapped)
+}
+
+// Unwrap returns the underlying error.
+func (e *Error) Unwrap() error {
+	return e.wrapped
+}
+
+func newOCSPError(wrapped error) error {
+	return &Error{wrapped: wrapped}
+}
+
+// ResponseDetails contains a subset of the details needed from an OCSP response after the original response has been
+// validated.
+type ResponseDetails struct {
+	Status     int
+	NextUpdate time.Time
+}
+
+func extractResponseDetails(res *ocsp.Response) *ResponseDetails {
+	return &ResponseDetails{
+		Status:     res.Status,
+		NextUpdate: res.NextUpdate,
+	}
+}
+
+// Verify performs OCSP verification for the provided ConnectionState instance.
+func Verify(ctx context.Context, connState tls.ConnectionState, opts *VerifyOptions) error {
+	if opts.Cache == nil {
+		// There should always be an OCSP cache. Even if the user has specified the URI option to disable communication
+		// with OCSP responders, the driver will cache any stapled responses. Requiring that the cache is non-nil
+		// allows us to confirm that the cache is correctly being passed down from a higher level.
+		return newOCSPError(errors.New("no OCSP cache provided"))
+	}
+	if len(connState.VerifiedChains) == 0 {
+		return newOCSPError(errors.New("no verified certificate chains reported after TLS handshake"))
+	}
+
+	certChain := connState.VerifiedChains[0]
+	if numCerts := len(certChain); numCerts == 0 {
+		return newOCSPError(errors.New("verified chain contained no certificates"))
+	}
+
+	ocspCfg, err := newConfig(certChain, opts)
+	if err != nil {
+		return newOCSPError(err)
+	}
+
+	res, err := getParsedResponse(ctx, ocspCfg, connState)
+	if err != nil {
+		return err
+	}
+	if res == nil {
+		// If no response was parsed from the staple and responders, the status of the certificate is unknown, so don't
+		// error.
+		return nil
+	}
+
+	if res.Status == ocsp.Revoked {
+		return newOCSPError(errors.New("certificate is revoked"))
+	}
+	return nil
+}
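+
+// As an illustrative sketch (not part of the upstream driver), a caller that
+// has completed a TLS handshake could invoke Verify roughly as follows, where
+// tlsConn and myCache are assumed to be an established *tls.Conn and a Cache
+// implementation, respectively:
+//
+//	connState := tlsConn.ConnectionState()
+//	opts := &VerifyOptions{Cache: myCache}
+//	if err := Verify(context.TODO(), connState, opts); err != nil {
+//		// Treat the connection as untrusted: the certificate is revoked
+//		// or OCSP verification failed.
+//	}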
+
+// getParsedResponse attempts to parse a response from the stapled OCSP data or by contacting OCSP responders if no
+// staple is present.
+func getParsedResponse(ctx context.Context, cfg config, connState tls.ConnectionState) (*ResponseDetails, error) {
+	stapledResponse, err := processStaple(cfg, connState.OCSPResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if stapledResponse != nil {
+		// If there is a staple, attempt to cache it. The cache.Update call will resolve conflicts with an existing
+		// cache entry if necessary.
+		return cfg.cache.Update(cfg.ocspRequest, stapledResponse), nil
+	}
+	if cachedResponse := cfg.cache.Get(cfg.ocspRequest); cachedResponse != nil {
+		return cachedResponse, nil
+	}
+
+	// If there is no stapled or cached response, fall back to querying the responders if that functionality has not
+	// been disabled.
+	if cfg.disableEndpointChecking {
+		return nil, nil
+	}
+	externalResponse := contactResponders(ctx, cfg)
+	if externalResponse == nil {
+		// None of the responders were available.
+		return nil, nil
+	}
+
+	// Similar to the stapled response case above, unconditionally call Update and it will either cache the response
+	// or resolve conflicts if a different connection has cached a response since the previous call to Get.
+	return cfg.cache.Update(cfg.ocspRequest, externalResponse), nil
+}
+
+// processStaple returns the OCSP response from the provided staple. An error will be returned if any of the following
+// are true:
+//
+// 1. cfg.serverCert has the Must-Staple extension but the staple is empty.
+// 2. The staple is malformed.
+// 3. The staple does not cover cfg.serverCert.
+// 4. The OCSP response has an error status.
+func processStaple(cfg config, staple []byte) (*ResponseDetails, error) {
+	mustStaple, err := isMustStapleCertificate(cfg.serverCert)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the server has a Must-Staple certificate and the server does not present a stapled OCSP response, error.
+	if mustStaple && len(staple) == 0 {
+		return nil, errors.New("server provided a certificate with the Must-Staple extension but did not " +
+			"provide a stapled OCSP response")
+	}
+
+	if len(staple) == 0 {
+		return nil, nil
+	}
+
+	parsedResponse, err := ocsp.ParseResponseForCert(staple, cfg.serverCert, cfg.issuer)
+	if err != nil {
+		// If the stapled response could not be parsed correctly, error. This can happen if the response is malformed,
+		// the response does not cover the certificate presented by the server, or if the response contains an error
+		// status.
+		return nil, fmt.Errorf("error parsing stapled response: %w", err)
+	}
+	if err = verifyResponse(cfg, parsedResponse); err != nil {
+		return nil, fmt.Errorf("error validating stapled response: %w", err)
+	}
+
+	return extractResponseDetails(parsedResponse), nil
+}
+
+// isMustStapleCertificate determines whether or not an X509 certificate is a must-staple certificate.
+func isMustStapleCertificate(cert *x509.Certificate) (bool, error) {
+	var featureExtension pkix.Extension
+	var foundExtension bool
+	for _, ext := range cert.Extensions {
+		if ext.Id.Equal(tlsFeatureExtensionOID) {
+			featureExtension = ext
+			foundExtension = true
+			break
+		}
+	}
+	if !foundExtension {
+		return false, nil
+	}
+
+	// The value for the TLS feature extension is a sequence of integers. Per the asn1.Unmarshal documentation, an
+	// integer can be unmarshalled into an int, int32, int64, or *big.Int and unmarshalling will error if the integer
+	// cannot be encoded into the target type.
+	//
+	// Use []*big.Int to ensure that all values in the sequence can be successfully unmarshalled.
+	var featureValues []*big.Int
+	if _, err := asn1.Unmarshal(featureExtension.Value, &featureValues); err != nil {
+		return false, fmt.Errorf("error unmarshalling TLS feature extension values: %w", err)
+	}
+
+	for _, value := range featureValues {
+		if value.Cmp(mustStapleFeatureValue) == 0 {
+			return true, nil
+		}
+	}
+	return false, nil
+}
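+
+// As an informal illustration of the parsing above (the byte literal is an
+// assumption chosen for the example): a Must-Staple certificate carries a
+// DER-encoded SEQUENCE containing the single INTEGER 5 (status_request) in
+// its TLS feature extension:
+//
+//	var vals []*big.Int
+//	_, err := asn1.Unmarshal([]byte{0x30, 0x03, 0x02, 0x01, 0x05}, &vals)
+//	// err == nil and vals holds the single value 5, so the certificate
+//	// would be treated as Must-Staple.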
+
+// contactResponders will send a request to all OCSP responders reported by cfg.serverCert. The
+// first response that conclusively identifies cfg.serverCert as good or revoked will be returned.
+// If all responders are unavailable or no responder returns a conclusive status, it returns nil.
+// contactResponders will wait for up to 5 seconds to get a certificate status response.
+func contactResponders(ctx context.Context, cfg config) *ResponseDetails {
+	if len(cfg.serverCert.OCSPServer) == 0 {
+		return nil
+	}
+
+	// Limit all OCSP responder calls to a maximum of 5 seconds or when the passed-in context expires,
+	// whichever happens first.
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	group, ctx := errgroup.WithContext(ctx)
+	ocspResponses := make(chan *ocsp.Response, len(cfg.serverCert.OCSPServer))
+	defer close(ocspResponses)
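+	// The channel is buffered to the number of endpoints so the sends in the
+	// goroutines below can never block, and the deferred close runs only after
+	// group.Wait returns, so no send can race with the close.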
+
+	for _, endpoint := range cfg.serverCert.OCSPServer {
+		// Re-assign endpoint so it gets re-scoped rather than using the iteration variable in the goroutine. See
+		// https://golang.org/doc/faq#closures_and_goroutines.
+		endpoint := endpoint
+
+		// Start a group of goroutines that each attempt to request the certificate status from one
+		// of the OCSP endpoints listed in the server certificate. We want to "soft fail" on all
+		// errors, so this function never returns actual errors. Only a "done" error is returned
+		// when a response is received so the errgroup cancels any other in-progress requests.
+		group.Go(func() error {
+			// Use bytes.NewReader instead of bytes.NewBuffer because a bytes.Buffer is an owning representation and the
+			// docs recommend not using the underlying []byte after creating the buffer, so a new copy of the request
+			// bytes would be needed for each request.
+			request, err := http.NewRequest("POST", endpoint, bytes.NewReader(cfg.ocspRequestBytes))
+			if err != nil {
+				return nil
+			}
+			request = request.WithContext(ctx)
+
+			httpResponse, err := cfg.httpClient.Do(request)
+			if err != nil {
+				return nil
+			}
+			defer func() {
+				_ = httpResponse.Body.Close()
+			}()
+
+			if httpResponse.StatusCode != 200 {
+				return nil
+			}
+
+			httpBytes, err := ioutil.ReadAll(httpResponse.Body)
+			if err != nil {
+				return nil
+			}
+
+			ocspResponse, err := ocsp.ParseResponseForCert(httpBytes, cfg.serverCert, cfg.issuer)
+			if err != nil || verifyResponse(cfg, ocspResponse) != nil || ocspResponse.Status == ocsp.Unknown {
+				// If there was an error parsing/validating the response or the response was
+				// inconclusive, suppress the error because we want to ignore this responder.
+				return nil
+			}
+
+			// Send the conclusive response on the response channel and return a "done" error that
+			// will cause the errgroup to cancel all other in-progress requests.
+			ocspResponses <- ocspResponse
+			return errors.New("done")
+		})
+	}
+
+	_ = group.Wait()
+	select {
+	case res := <-ocspResponses:
+		return extractResponseDetails(res)
+	default:
+		// If there is no OCSP response on the response channel, all OCSP calls either failed or
+		// were inconclusive. Return nil.
+		return nil
+	}
+}
+
+// verifyResponse checks that the provided OCSP response is valid.
+func verifyResponse(cfg config, res *ocsp.Response) error {
+	if err := verifyExtendedKeyUsage(cfg, res); err != nil {
+		return err
+	}
+
+	currTime := time.Now().UTC()
+	if res.ThisUpdate.After(currTime) {
+		return fmt.Errorf("reported thisUpdate time %s is after current time %s", res.ThisUpdate, currTime)
+	}
+	if !res.NextUpdate.IsZero() && res.NextUpdate.Before(currTime) {
+		return fmt.Errorf("reported nextUpdate time %s is before current time %s", res.NextUpdate, currTime)
+	}
+	return nil
+}
+
+func verifyExtendedKeyUsage(cfg config, res *ocsp.Response) error {
+	if res.Certificate == nil {
+		return nil
+	}
+
+	namesMatch := res.RawResponderName != nil && bytes.Equal(res.RawResponderName, cfg.issuer.RawSubject)
+	keyHashesMatch := res.ResponderKeyHash != nil && bytes.Equal(res.ResponderKeyHash, cfg.ocspRequest.IssuerKeyHash)
+	if namesMatch || keyHashesMatch {
+		// The responder certificate is the same as the issuer certificate.
+		return nil
+	}
+
+	// There is a delegate.
+	for _, extKeyUsage := range res.Certificate.ExtKeyUsage {
+		if extKeyUsage == x509.ExtKeyUsageOCSPSigning {
+			return nil
+		}
+	}
+
+	return errors.New("delegate responder certificate is missing the OCSP signing extended key usage")
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..281bf515b7eb4c93058ec17a082bc992357802ca
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/options.go
@@ -0,0 +1,16 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package ocsp
+
+import "net/http"
+
+// VerifyOptions specifies options to configure OCSP verification.
+type VerifyOptions struct {
+	Cache                   Cache
+	DisableEndpointChecking bool
+	HTTPClient              *http.Client
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec6f69eca01a120133ad08942c3440c43ed29a29
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go
@@ -0,0 +1,2119 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/csot"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/internal/handshake"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage"
+)
+
+const defaultLocalThreshold = 15 * time.Millisecond
+
+var (
+	// ErrNoDocCommandResponse occurs when the server indicated a response existed, but none was found.
+	ErrNoDocCommandResponse = errors.New("command returned no documents")
+	// ErrMultiDocCommandResponse occurs when the server sent multiple documents in response to a command.
+	ErrMultiDocCommandResponse = errors.New("command returned multiple documents")
+	// ErrReplyDocumentMismatch occurs when the number of documents returned in an OP_QUERY does not match the numberReturned field.
+	ErrReplyDocumentMismatch = errors.New("number of documents returned does not match numberReturned field")
+	// ErrNonPrimaryReadPref is returned when a read is attempted in a transaction with a non-primary read preference.
+	ErrNonPrimaryReadPref = errors.New("read preference in a transaction must be primary")
+	// errDatabaseNameEmpty occurs when a database name is not provided.
+	errDatabaseNameEmpty = errors.New("database name cannot be empty")
+)
+
+const (
+	// maximum BSON object size when client side encryption is enabled
+	cryptMaxBsonObjectSize uint32 = 2097152
+	// minimum wire version necessary to use automatic encryption
+	cryptMinWireVersion int32 = 8
+	// minimum wire version necessary to use read snapshots
+	readSnapshotMinWireVersion int32 = 13
+)
+
+// RetryablePoolError is a connection pool error that can be retried while executing an operation.
+type RetryablePoolError interface {
+	Retryable() bool
+}
+
+// labeledError is an error that can have error labels added to it.
+type labeledError interface {
+	error
+	HasErrorLabel(string) bool
+}
+
+// InvalidOperationError is returned from Validate and indicates that a required field is missing
+// from an instance of Operation.
+type InvalidOperationError struct{ MissingField string }
+
+func (err InvalidOperationError) Error() string {
+	return "the " + err.MissingField + " field must be set on Operation"
+}
+
+// opReply stores information returned in an OP_REPLY response from the server.
+// The err field stores any error that occurred when decoding or validating the OP_REPLY response.
+type opReply struct {
+	responseFlags wiremessage.ReplyFlag
+	cursorID      int64
+	startingFrom  int32
+	numReturned   int32
+	documents     []bsoncore.Document
+	err           error
+}
+
+// startedInformation keeps track of all of the information necessary for monitoring started events.
+type startedInformation struct {
+	cmd                      bsoncore.Document
+	requestID                int32
+	cmdName                  string
+	documentSequenceIncluded bool
+	connID                   string
+	driverConnectionID       uint64 // TODO(GODRIVER-2824): change type to int64.
+	serverConnID             *int64
+	redacted                 bool
+	serviceID                *primitive.ObjectID
+	serverAddress            address.Address
+}
+
+// finishedInformation keeps track of all of the information necessary for monitoring success and failure events.
+type finishedInformation struct {
+	cmdName            string
+	requestID          int32
+	response           bsoncore.Document
+	cmdErr             error
+	connID             string
+	driverConnectionID uint64 // TODO(GODRIVER-2824): change type to int64.
+	serverConnID       *int64
+	redacted           bool
+	serviceID          *primitive.ObjectID
+	serverAddress      address.Address
+	duration           time.Duration
+}
+
+// convertInt64PtrToInt32Ptr will convert an int64 pointer reference to an int32 pointer
+// reference. If the int64 value cannot be converted to int32 without causing
+// an overflow, then this function will return nil.
+func convertInt64PtrToInt32Ptr(i64 *int64) *int32 {
+	if i64 == nil {
+		return nil
+	}
+
+	if *i64 > math.MaxInt32 || *i64 < math.MinInt32 {
+		return nil
+	}
+
+	i32 := int32(*i64)
+	return &i32
+}
+
+// success returns true if there was no command error or the command error is a
+// "WriteCommandError". Commands that executed on the server and return a status
+// of { ok: 1.0 } are considered successful commands and MUST generate a
+// CommandSucceededEvent and "command succeeded" log message. Commands that have
+// write errors are included since the actual command did succeed, only writes
+// failed.
+func (info finishedInformation) success() bool {
+	if _, ok := info.cmdErr.(WriteCommandError); ok {
+		return true
+	}
+
+	return info.cmdErr == nil
+}
+
+// ResponseInfo contains the context required to parse a server response.
+type ResponseInfo struct {
+	ServerResponse        bsoncore.Document
+	Server                Server
+	Connection            Connection
+	ConnectionDescription description.Server
+	CurrentIndex          int
+}
+
+func redactStartedInformationCmd(op Operation, info startedInformation) bson.Raw {
+	var cmdCopy bson.Raw
+
+	// Make a copy of the command. Redact if the command is security
+	// sensitive and cannot be monitored. If there was a type 1 payload for
+	// the current batch, convert it to a BSON array.
+	if !info.redacted {
+		cmdCopy = make([]byte, len(info.cmd))
+		copy(cmdCopy, info.cmd)
+
+		if info.documentSequenceIncluded {
+			// remove 0 byte at end
+			cmdCopy = cmdCopy[:len(info.cmd)-1]
+			cmdCopy = op.addBatchArray(cmdCopy)
+
+			// add back 0 byte and update length
+			cmdCopy, _ = bsoncore.AppendDocumentEnd(cmdCopy, 0)
+		}
+	}
+
+	return cmdCopy
+}
+
+func redactFinishedInformationResponse(info finishedInformation) bson.Raw {
+	if !info.redacted {
+		return bson.Raw(info.response)
+	}
+
+	return bson.Raw{}
+}
+
+// Operation is used to execute an operation. It contains all of the common code required to
+// select a server, transform an operation into a command, write the command to a connection from
+// the selected server, read a response from that connection, process the response, and potentially
+// retry.
+//
+// The required fields are Database, CommandFn, and Deployment. All other fields are optional.
+//
+// While an Operation can be constructed manually, drivergen should be used to generate an
+// implementation of an operation instead. This will ensure that there are helpers for constructing
+// the operation and that this type isn't configured incorrectly.
+type Operation struct {
+	// CommandFn is used to create the command that will be wrapped in a wire message and sent to
+	// the server. This function should only add the elements of the command and not start or end
+	// the enclosing BSON document. Per the command API, the first element must be the name of the
+	// command to run. This field is required.
+	CommandFn func(dst []byte, desc description.SelectedServer) ([]byte, error)
+
+	// Database is the database that the command will be run against. This field is required.
+	Database string
+
+	// Deployment is the MongoDB Deployment to use. While most of the time this will be multiple
+	// servers, commands that need to run against a single, preselected server can use the
+	// SingleServerDeployment type. Commands that need to run on a preselected connection can use
+	// the SingleConnectionDeployment type.
+	Deployment Deployment
+
+	// ProcessResponseFn is called after a response to the command is returned. The server is
+	// provided for types like Cursor that are required to run subsequent commands using the same
+	// server.
+	ProcessResponseFn func(ResponseInfo) error
+
+	// Selector is the server selector that's used during both initial server selection and
+	// subsequent selection for retries. Depending on the Deployment implementation, the
+	// SelectServer method may not actually be called.
+	Selector description.ServerSelector
+
+	// ReadPreference is the read preference that will be attached to the command. If this field is
+	// not specified a default read preference of primary will be used.
+	ReadPreference *readpref.ReadPref
+
+	// ReadConcern is the read concern used when running read commands. This field should not be set
+	// for write operations. If this field is set, it will be encoded onto the commands sent to the
+	// server.
+	ReadConcern *readconcern.ReadConcern
+
+	// MinimumReadConcernWireVersion specifies the minimum wire version to add the read concern to
+	// the command being executed.
+	MinimumReadConcernWireVersion int32
+
+	// WriteConcern is the write concern used when running write commands. This field should not be
+	// set for read operations. If this field is set, it will be encoded onto the commands sent to
+	// the server.
+	WriteConcern *writeconcern.WriteConcern
+
+	// MinimumWriteConcernWireVersion specifies the minimum wire version to add the write concern to
+	// the command being executed.
+	MinimumWriteConcernWireVersion int32
+
+	// Client is the session used with this operation. This can be either an implicit or explicit
+	// session. If the server selected does not support sessions and Client is specified the
+	// behavior depends on the session type. If the session is implicit, the session fields will not
+	// be encoded onto the command. If the session is explicit, an error will be returned. The
+	// caller is responsible for ensuring that this field is nil if the Deployment does not support
+	// sessions.
+	Client *session.Client
+
+	// Clock is a cluster clock, different from the one contained within a session.Client. This
+	// allows updating cluster times for a global cluster clock while allowing individual session's
+	// cluster clocks to be only updated as far as the last command that's been run.
+	Clock *session.ClusterClock
+
+	// RetryMode specifies how to retry. There are three modes that enable retry: RetryOnce,
+	// RetryOncePerCommand, and RetryContext. For more information about what these modes do, please
+	// refer to their definitions. Both RetryMode and Type must be set for retryability to be enabled.
+	// If Timeout is set on the Client, the operation will automatically retry as many times as
+	// possible unless RetryNone is used.
+	RetryMode *RetryMode
+
+	// Type specifies the kind of operation this is. There is only one mode that enables retry: Write.
+	// For more information about what this mode does, please refer to its definition. Both Type and
+	// RetryMode must be set for retryability to be enabled.
+	Type Type
+
+	// Batches contains the documents that are split when executing a write command that potentially
+	// has more documents than can fit in a single command. This should only be specified for
+	// commands that are batch compatible. For more information, please refer to the definition of
+	// Batches.
+	Batches *Batches
+
+	// Legacy sets the legacy type for this operation. There are only 3 types that require legacy
+	// support: find, getMore, and killCursors. For more information about LegacyOperationKind,
+	// please refer to its definition.
+	Legacy LegacyOperationKind
+
+	// CommandMonitor specifies the monitor to use for APM events. If this field is not set,
+	// no events will be reported.
+	CommandMonitor *event.CommandMonitor
+
+	// Crypt specifies a Crypt object to use for automatic client side encryption and decryption.
+	Crypt Crypt
+
+	// ServerAPI specifies options used to configure the API version sent to the server.
+	ServerAPI *ServerAPIOptions
+
+	// IsOutputAggregate specifies whether this operation is an aggregate with an output stage. If true,
+	// read preference will not be added to the command on wire versions < 13.
+	IsOutputAggregate bool
+
+	// MaxTime specifies the maximum amount of time to allow the operation to run on the server.
+	MaxTime *time.Duration
+
+	// Timeout is the amount of time that this operation can execute before returning an error. The default
+	// value is nil, which means that the timeout of the operation's caller will be used.
+	Timeout *time.Duration
+
+	Logger *logger.Logger
+
+	// Name is the name of the operation. This is used when serializing
+	// OP_MSG as well as for logging server selection data.
+	Name string
+
+	// OmitCSOTMaxTimeMS omits the automatically-calculated "maxTimeMS" from the
+	// command when CSOT is enabled. It does not affect "maxTimeMS" set by
+	// [Operation.MaxTime].
+	OmitCSOTMaxTimeMS bool
+
+	// Authenticator is the authenticator to use for this operation when a reauthentication is
+	// required.
+	Authenticator Authenticator
+
+	// omitReadPreference is a boolean that indicates whether to omit the
+	// read preference from the command. This omission includes the case
+	// where a default read preference is used when the operation
+	// ReadPreference is not specified.
+	omitReadPreference bool
+}
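+
+// As a hedged sketch (not an officially documented usage pattern), a minimal
+// Operation that runs {ping: 1} against the admin database could look like
+// the following, where deployment stands in for an existing Deployment
+// implementation:
+//
+//	op := Operation{
+//		CommandFn: func(dst []byte, _ description.SelectedServer) ([]byte, error) {
+//			return bsoncore.AppendInt32Element(dst, "ping", 1), nil
+//		},
+//		Database:   "admin",
+//		Deployment: deployment,
+//	}
+//	err := op.Execute(context.TODO())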
+
+// shouldEncrypt returns true if this operation should automatically be encrypted.
+func (op Operation) shouldEncrypt() bool {
+	return op.Crypt != nil && !op.Crypt.BypassAutoEncryption()
+}
+
+// filterDeprioritizedServers will filter out the server candidates that have
+// been deprioritized by the operation due to failure.
+//
+// The server selector should try to select a server that is not in the
+// deprioritization list. However, if this is not possible (e.g. there are no
+// other healthy servers in the cluster), the selector may return a
+// deprioritized server.
+func filterDeprioritizedServers(candidates, deprioritized []description.Server) []description.Server {
+	if len(deprioritized) == 0 {
+		return candidates
+	}
+
+	dpaSet := make(map[address.Address]*description.Server)
+	for i, srv := range deprioritized {
+		dpaSet[srv.Addr] = &deprioritized[i]
+	}
+
+	allowed := []description.Server{}
+
+	// Iterate over the candidates and append them to the allowed slice if
+	// they are not in the deprioritized list.
+	for _, candidate := range candidates {
+		if srv, ok := dpaSet[candidate.Addr]; !ok || !srv.Equal(candidate) {
+			allowed = append(allowed, candidate)
+		}
+	}
+
+	// If nothing is allowed, then all available servers must have been
+	// deprioritized. In this case, return the candidates list as-is so that the
+	// selector can find a suitable server.
+	if len(allowed) == 0 {
+		return candidates
+	}
+
+	return allowed
+}
+
+// opServerSelector is a wrapper for the server selector that is assigned to the
+// operation. The purpose of this wrapper is to filter candidates with
+// operation-specific logic, such as deprioritizing failing servers.
+type opServerSelector struct {
+	selector             description.ServerSelector
+	deprioritizedServers []description.Server
+}
+
+// SelectServer will filter candidates with operation-specific logic before
+// passing them onto the user-defined or default selector.
+func (oss *opServerSelector) SelectServer(
+	topo description.Topology,
+	candidates []description.Server,
+) ([]description.Server, error) {
+	selectedServers, err := oss.selector.SelectServer(topo, candidates)
+	if err != nil {
+		return nil, err
+	}
+
+	filteredServers := filterDeprioritizedServers(selectedServers, oss.deprioritizedServers)
+
+	return filteredServers, nil
+}
+
+// selectServer handles performing server selection for an operation.
+func (op Operation) selectServer(
+	ctx context.Context,
+	requestID int32,
+	deprioritized []description.Server,
+) (Server, error) {
+	if err := op.Validate(); err != nil {
+		return nil, err
+	}
+
+	selector := op.Selector
+	if selector == nil {
+		rp := op.ReadPreference
+		if rp == nil {
+			rp = readpref.Primary()
+		}
+		selector = description.CompositeSelector([]description.ServerSelector{
+			description.ReadPrefSelector(rp),
+			description.LatencySelector(defaultLocalThreshold),
+		})
+	}
+
+	oss := &opServerSelector{
+		selector:             selector,
+		deprioritizedServers: deprioritized,
+	}
+
+	ctx = logger.WithOperationName(ctx, op.Name)
+	ctx = logger.WithOperationID(ctx, requestID)
+
+	return op.Deployment.SelectServer(ctx, oss)
+}
+
+// getServerAndConnection should be used to retrieve a Server and Connection to execute an operation.
+func (op Operation) getServerAndConnection(
+	ctx context.Context,
+	requestID int32,
+	deprioritized []description.Server,
+) (Server, Connection, error) {
+	server, err := op.selectServer(ctx, requestID, deprioritized)
+	if err != nil {
+		if op.Client != nil &&
+			!(op.Client.Committing || op.Client.Aborting) && op.Client.TransactionRunning() {
+			err = Error{
+				Message: err.Error(),
+				Labels:  []string{TransientTransactionError},
+				Wrapped: err,
+			}
+		}
+		return nil, nil, err
+	}
+
+	// If the provided client session has a pinned connection, it should be used for the operation because this
+	// indicates that we're in a transaction and the target server is behind a load balancer.
+	if op.Client != nil && op.Client.PinnedConnection != nil {
+		return server, op.Client.PinnedConnection, nil
+	}
+
+	// Otherwise, default to checking out a connection from the server's pool.
+	conn, err := server.Connection(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// If we're in load balanced mode and this is the first operation in a transaction, pin the session to a connection.
+	if conn.Description().LoadBalanced() && op.Client != nil && op.Client.TransactionStarting() {
+		pinnedConn, ok := conn.(PinnedConnection)
+		if !ok {
+			// Close the original connection to avoid a leak.
+			_ = conn.Close()
+			return nil, nil, fmt.Errorf("expected Connection used to start a transaction to be a PinnedConnection, but got %T", conn)
+		}
+		if err := pinnedConn.PinToTransaction(); err != nil {
+			// Close the original connection to avoid a leak.
+			_ = conn.Close()
+			return nil, nil, fmt.Errorf("error incrementing connection reference count when starting a transaction: %w", err)
+		}
+		op.Client.PinnedConnection = pinnedConn
+	}
+
+	return server, conn, nil
+}
+
+// Validate validates this operation, ensuring the fields are set properly.
+func (op Operation) Validate() error {
+	if op.CommandFn == nil {
+		return InvalidOperationError{MissingField: "CommandFn"}
+	}
+	if op.Deployment == nil {
+		return InvalidOperationError{MissingField: "Deployment"}
+	}
+	if op.Database == "" {
+		return errDatabaseNameEmpty
+	}
+	if op.Client != nil && !writeconcern.AckWrite(op.WriteConcern) {
+		return errors.New("session provided for an unacknowledged write")
+	}
+	return nil
+}
+
+var memoryPool = sync.Pool{
+	New: func() interface{} {
+		// Start with 1kb buffers.
+		b := make([]byte, 1024)
+		// Return a pointer so storing the buffer in the pool does not
+		// allocate (see staticcheck SA6002).
+		return &b
+	},
+}
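+
+// Usage sketch (illustrative only): callers take a *[]byte from the pool,
+// reslice it to zero length before writing into it, and return it when
+// finished, mirroring what Execute does below:
+//
+//	wm := memoryPool.Get().(*[]byte)
+//	*wm = (*wm)[:0]
+//	// ... build a wire message into *wm ...
+//	memoryPool.Put(wm)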
+
+// Execute runs this operation.
+func (op Operation) Execute(ctx context.Context) error {
+	err := op.Validate()
+	if err != nil {
+		return err
+	}
+
+	// If op.Timeout is set, and context is not already a Timeout context, honor
+	// op.Timeout in new Timeout context for operation execution.
+	if op.Timeout != nil && !csot.IsTimeoutContext(ctx) {
+		newCtx, cancelFunc := csot.MakeTimeoutContext(ctx, *op.Timeout)
+		// Redefine ctx to be the new timeout-derived context.
+		ctx = newCtx
+		// Cancel the timeout-derived context at the end of Execute to avoid a context leak.
+		defer cancelFunc()
+	}
+
+	if op.Client != nil {
+		if err := op.Client.StartCommand(); err != nil {
+			return err
+		}
+	}
+
+	var retries int
+	if op.RetryMode != nil {
+		switch op.Type {
+		case Write:
+			if op.Client == nil {
+				break
+			}
+			switch *op.RetryMode {
+			case RetryOnce, RetryOncePerCommand:
+				retries = 1
+			case RetryContext:
+				retries = -1
+			}
+		case Read:
+			switch *op.RetryMode {
+			case RetryOnce, RetryOncePerCommand:
+				retries = 1
+			case RetryContext:
+				retries = -1
+			}
+		}
+	}
+	// If context is a Timeout context, automatically set retries to -1 (infinite) if retrying is
+	// enabled.
+	retryEnabled := op.RetryMode != nil && op.RetryMode.Enabled()
+	if csot.IsTimeoutContext(ctx) && retryEnabled {
+		retries = -1
+	}
+
+	var srvr Server
+	var conn Connection
+	var res bsoncore.Document
+	var operationErr WriteCommandError
+	var prevErr error
+	var prevIndefiniteErr error
+	batching := op.Batches.Valid()
+	retrySupported := false
+	first := true
+	currIndex := 0
+
+	// deprioritizedServers is a running list of servers that should be
+	// deprioritized during server selection. Per the specifications, we should
+	// only ever deprioritize the "previous server".
+	var deprioritizedServers []description.Server
+
+	// resetForRetry records the error that caused the retry, decrements retries, and resets the
+	// retry loop variables to request a new server and a new connection for the next attempt.
+	resetForRetry := func(err error) {
+		retries--
+		prevErr = err
+
+		// Set the previous indefinite error to be returned in any case where a retryable write error does not have a
+		// NoWritesPerformed label (the definite case).
+		if err, ok := err.(labeledError); ok {
+			// If the "prevIndefiniteErr" is nil, then the current error is the first error encountered
+			// during the retry attempt cycle. We must persist the first error in the case where all
+			// following errors are labeled "NoWritesPerformed", which would otherwise raise nil as the
+			// error.
+			if prevIndefiniteErr == nil {
+				prevIndefiniteErr = err
+			}
+
+			// If the error is not labeled NoWritesPerformed and is retryable, then set the previous
+			// indefinite error to be the current error.
+			if !err.HasErrorLabel(NoWritesPerformed) && err.HasErrorLabel(RetryableWriteError) {
+				prevIndefiniteErr = err
+			}
+		}
+
+		// If we got a connection, close it immediately to release pool resources
+		// for subsequent retries.
+		if conn != nil {
+			// If we are dealing with a sharded cluster, then mark the failed server
+			// as "deprioritized".
+			if op.Deployment.Kind() == description.Sharded {
+				deprioritizedServers = []description.Server{conn.Description()}
+			}
+
+			conn.Close()
+		}
+
+		// Set the server and connection to nil to request a new server and connection.
+		srvr = nil
+		conn = nil
+	}
+
+	wm := memoryPool.Get().(*[]byte)
+	defer func() {
+		// Proper usage of a sync.Pool requires each entry to have approximately the same memory
+		// cost. To obtain this property when the stored type contains a variably-sized buffer,
+		// we add a hard limit on the maximum buffer to place back in the pool. We limit the
+		// size to 16MiB because that's the maximum wire message size supported by MongoDB.
+		//
+		// Comment copied from https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147
+		//
+		// Recycle byte slices that are smaller than 16MiB and at least half occupied.
+		if c := cap(*wm); c < 16*1024*1024 && c/2 < len(*wm) {
+			memoryPool.Put(wm)
+		}
+	}()
+	for {
+		// If we're starting a retry and the error from the previous try was
+		// a context canceled or deadline exceeded error, stop retrying and
+		// return that error.
+		if errors.Is(prevErr, context.Canceled) || errors.Is(prevErr, context.DeadlineExceeded) {
+			return prevErr
+		}
+
+		requestID := wiremessage.NextRequestID()
+
+		// If the server or connection are nil, try to select a new server and get a new connection.
+		if srvr == nil || conn == nil {
+			srvr, conn, err = op.getServerAndConnection(ctx, requestID, deprioritizedServers)
+			if err != nil {
+				// If the returned error is retryable and there are retries remaining (negative
+				// retries means retry indefinitely), then retry the operation. Set the server
+				// and connection to nil to request a new server and connection.
+				if rerr, ok := err.(RetryablePoolError); ok && rerr.Retryable() && retries != 0 {
+					resetForRetry(err)
+					continue
+				}
+
+				// If this is a retry and there's an error from a previous attempt, return the previous
+				// error instead of the current connection error.
+				if prevErr != nil {
+					return prevErr
+				}
+				return err
+			}
+			defer conn.Close()
+
+			// Set the server if it has not already been set and the session type is implicit. This will
+			// limit the number of implicit sessions to no greater than an application's maxPoolSize
+			// (ignoring operations that hold on to the session like cursors).
+			if op.Client != nil && op.Client.Server == nil && op.Client.IsImplicit {
+				if op.Client.Terminated {
+					return fmt.Errorf("unexpected nil session for a terminated implicit session")
+				}
+				if err := op.Client.SetServer(); err != nil {
+					return err
+				}
+			}
+		}
+
+		// Run steps that must only be run on the first attempt, but not again for retries.
+		if first {
+			// Determine if retries are supported for the current operation on the current server
+			// description. Per the retryable writes specification, only determine this for the
+			// first server selected:
+			//
+			//   If the server selected for the first attempt of a retryable write operation does
+			//   not support retryable writes, drivers MUST execute the write as if retryable writes
+			//   were not enabled.
+			retrySupported = op.retryable(conn.Description())
+
+			// If retries are supported for the current operation on the current server description,
+			// client retries are enabled, the operation type is write, and we haven't incremented
+			// the txn number yet, enable retry writes on the session and increment the txn number.
+			// Calling IncrementTxnNumber() for server descriptions or topologies that do not
+			// support retries (e.g. standalone topologies) will cause server errors. Only do this
+			// check for the first attempt to keep retried writes in the same transaction.
+			if retrySupported && op.RetryMode != nil && op.Type == Write && op.Client != nil {
+				op.Client.RetryWrite = false
+				if op.RetryMode.Enabled() {
+					op.Client.RetryWrite = true
+					if !op.Client.Committing && !op.Client.Aborting {
+						op.Client.IncrementTxnNumber()
+					}
+				}
+			}
+
+			first = false
+		}
+
+		maxTimeMS, err := op.calculateMaxTimeMS(ctx, srvr.RTTMonitor())
+		if err != nil {
+			return err
+		}
+
+		// Set maxTimeMS to 0 if connected to mongocryptd to avoid appending the field. The final
+		// encrypted command may contain multiple maxTimeMS fields otherwise.
+		if conn.Description().IsCryptd {
+			maxTimeMS = 0
+		}
+
+		desc := description.SelectedServer{Server: conn.Description(), Kind: op.Deployment.Kind()}
+
+		if batching {
+			targetBatchSize := desc.MaxDocumentSize
+			maxDocSize := desc.MaxDocumentSize
+			if op.shouldEncrypt() {
+				// For client-side encryption, we want the batch to be split at 2MiB instead of 16MiB.
+				// If there's only one document in the batch, it can be up to 16MiB, so we set target batch size to
+				// 2MiB but max document size to 16MiB. This will allow the AdvanceBatch call to create a batch
+				// with a single large document.
+				targetBatchSize = cryptMaxBsonObjectSize
+			}
+
+			err = op.Batches.AdvanceBatch(int(desc.MaxBatchCount), int(targetBatchSize), int(maxDocSize))
+			if err != nil {
+				// TODO(GODRIVER-982): Should we also be returning operationErr?
+				return err
+			}
+		}
+
+		var startedInfo startedInformation
+		*wm, startedInfo, err = op.createWireMessage(ctx, maxTimeMS, (*wm)[:0], desc, conn, requestID)
+
+		if err != nil {
+			return err
+		}
+
+		// set extra data and send event if possible
+		startedInfo.connID = conn.ID()
+		startedInfo.driverConnectionID = conn.DriverConnectionID()
+		startedInfo.cmdName = op.getCommandName(startedInfo.cmd)
+
+		// If the command name does not match the operation name, update
+		// the operation name as a sanity check. It's more correct to
+		// be aligned with the data passed to the server via the
+		// wire message.
+		if startedInfo.cmdName != op.Name {
+			op.Name = startedInfo.cmdName
+		}
+
+		startedInfo.redacted = op.redactCommand(startedInfo.cmdName, startedInfo.cmd)
+		startedInfo.serviceID = conn.Description().ServiceID
+		startedInfo.serverConnID = conn.ServerConnectionID()
+		startedInfo.serverAddress = conn.Description().Addr
+
+		op.publishStartedEvent(ctx, startedInfo)
+
+		// get the moreToCome flag information before we compress
+		moreToCome := wiremessage.IsMsgMoreToCome(*wm)
+
+		// compress wiremessage if allowed
+		if compressor, ok := conn.(Compressor); ok && op.canCompress(startedInfo.cmdName) {
+			b := memoryPool.Get().(*[]byte)
+			*b, err = compressor.CompressWireMessage(*wm, (*b)[:0])
+			memoryPool.Put(wm)
+			wm = b
+			if err != nil {
+				return err
+			}
+		}
+
+		finishedInfo := finishedInformation{
+			cmdName:            startedInfo.cmdName,
+			driverConnectionID: startedInfo.driverConnectionID,
+			requestID:          startedInfo.requestID,
+			connID:             startedInfo.connID,
+			serverConnID:       startedInfo.serverConnID,
+			redacted:           startedInfo.redacted,
+			serviceID:          startedInfo.serviceID,
+			serverAddress:      desc.Server.Addr,
+		}
+
+		startedTime := time.Now()
+
+		// Check for possible context error. If no context error, check if there's enough time to perform a
+		// round trip before the Context deadline. If ctx is a Timeout Context, use the 90th percentile RTT
+		// as a threshold. Otherwise, use the minimum observed RTT.
+		if ctx.Err() != nil {
+			err = ctx.Err()
+		} else if deadline, ok := ctx.Deadline(); ok {
+			if csot.IsTimeoutContext(ctx) && time.Now().Add(srvr.RTTMonitor().P90()).After(deadline) {
+				err = fmt.Errorf(
+					"remaining time %v until context deadline is less than 90th percentile network round-trip time: %w\n%v",
+					time.Until(deadline),
+					ErrDeadlineWouldBeExceeded,
+					srvr.RTTMonitor().Stats())
+			} else if time.Now().Add(srvr.RTTMonitor().Min()).After(deadline) {
+				err = context.DeadlineExceeded
+			}
+		}
+
+		if err == nil {
+			// roundtrip using either the full roundTripper or a special one for when the moreToCome
+			// flag is set
+			roundTrip := op.roundTrip
+			if moreToCome {
+				roundTrip = op.moreToComeRoundTrip
+			}
+			res, err = roundTrip(ctx, conn, *wm)
+
+			if ep, ok := srvr.(ErrorProcessor); ok {
+				_ = ep.ProcessError(err, conn)
+			}
+		}
+
+		finishedInfo.response = res
+		finishedInfo.cmdErr = err
+		finishedInfo.duration = time.Since(startedTime)
+
+		op.publishFinishedEvent(ctx, finishedInfo)
+
+		// prevIndefiniteErrIsSet is "true" if the "err" variable has been set to the "prevIndefiniteErr" in
+		// a case in the switch statement below.
+		var prevIndefiniteErrIsSet bool
+
+		// TODO(GODRIVER-2579): When refactoring the "Execute" method, consider creating a separate method for the
+		// error handling logic below. This will remove the necessity of the "checkError" goto label.
+	checkError:
+		var perr error
+		switch tt := err.(type) {
+		case WriteCommandError:
+			if e := err.(WriteCommandError); retrySupported && op.Type == Write && e.UnsupportedStorageEngine() {
+				return ErrUnsupportedStorageEngine
+			}
+
+			connDesc := conn.Description()
+			retryableErr := tt.Retryable(connDesc.WireVersion)
+			preRetryWriteLabelVersion := connDesc.WireVersion != nil && connDesc.WireVersion.Max < 9
+			inTransaction := op.Client != nil &&
+				!(op.Client.Committing || op.Client.Aborting) && op.Client.TransactionRunning()
+			// If retry is enabled and the operation isn't in a transaction, add a RetryableWriteError label for
+			// retryable errors from pre-4.4 servers
+			if retryableErr && preRetryWriteLabelVersion && retryEnabled && !inTransaction {
+				tt.Labels = append(tt.Labels, RetryableWriteError)
+			}
+
+			// If retries are supported for the current operation on the first server description,
+			// the error is considered retryable, and there are retries remaining (negative retries
+			// means retry indefinitely), then retry the operation.
+			if retrySupported && retryableErr && retries != 0 {
+				if op.Client != nil && op.Client.Committing {
+					// Apply majority write concern for retries
+					op.Client.UpdateCommitTransactionWriteConcern()
+					op.WriteConcern = op.Client.CurrentWc
+				}
+				resetForRetry(tt)
+				continue
+			}
+
+			// If the error is no longer retryable and has the NoWritesPerformed label, then we should
+			// set the error to the "previous indefinite error" unless the current error is already the
+			// "previous indefinite error". After resetting, repeat the error check.
+			if tt.HasErrorLabel(NoWritesPerformed) && !prevIndefiniteErrIsSet {
+				err = prevIndefiniteErr
+				prevIndefiniteErrIsSet = true
+
+				goto checkError
+			}
+
+			// If the operation isn't being retried, process the response
+			if op.ProcessResponseFn != nil {
+				info := ResponseInfo{
+					ServerResponse:        res,
+					Server:                srvr,
+					Connection:            conn,
+					ConnectionDescription: desc.Server,
+					CurrentIndex:          currIndex,
+				}
+				_ = op.ProcessResponseFn(info)
+			}
+
+			if batching && len(tt.WriteErrors) > 0 && currIndex > 0 {
+				for i := range tt.WriteErrors {
+					tt.WriteErrors[i].Index += int64(currIndex)
+				}
+			}
+
+			// If batching is enabled and either ordered is the default (which is true) or
+			// explicitly set to true and we have write errors, return the errors.
+			if batching && (op.Batches.Ordered == nil || *op.Batches.Ordered) && len(tt.WriteErrors) > 0 {
+				return tt
+			}
+			if op.Client != nil && op.Client.Committing && tt.WriteConcernError != nil {
+				// When running commitTransaction we return WriteConcernErrors as an Error.
+				err := Error{
+					Name:    tt.WriteConcernError.Name,
+					Code:    int32(tt.WriteConcernError.Code),
+					Message: tt.WriteConcernError.Message,
+					Labels:  tt.Labels,
+					Raw:     tt.Raw,
+				}
+				// The UnknownTransactionCommitResult label is added to all writeConcernErrors besides unknownReplWriteConcernCode
+				// and unsatisfiableWriteConcernCode
+				if err.Code != unknownReplWriteConcernCode && err.Code != unsatisfiableWriteConcernCode {
+					err.Labels = append(err.Labels, UnknownTransactionCommitResult)
+				}
+				if retryableErr && retryEnabled {
+					err.Labels = append(err.Labels, RetryableWriteError)
+				}
+				return err
+			}
+			operationErr.WriteConcernError = tt.WriteConcernError
+			operationErr.WriteErrors = append(operationErr.WriteErrors, tt.WriteErrors...)
+			operationErr.Labels = tt.Labels
+			operationErr.Raw = tt.Raw
+		case Error:
+			// 391 is the reauthentication required error code, so we will attempt a reauth and
+			// retry the operation, if it is successful.
+			if tt.Code == 391 {
+				if op.Authenticator != nil {
+					cfg := AuthConfig{
+						Description:  conn.Description(),
+						Connection:   conn,
+						ClusterClock: op.Clock,
+						ServerAPI:    op.ServerAPI,
+					}
+					if err := op.Authenticator.Reauth(ctx, &cfg); err != nil {
+						return fmt.Errorf("error reauthenticating: %w", err)
+					}
+					if op.Client != nil && op.Client.Committing {
+						// Apply majority write concern for retries
+						op.Client.UpdateCommitTransactionWriteConcern()
+						op.WriteConcern = op.Client.CurrentWc
+					}
+					resetForRetry(tt)
+					continue
+				}
+			}
+			if tt.HasErrorLabel(TransientTransactionError) || tt.HasErrorLabel(UnknownTransactionCommitResult) {
+				if err := op.Client.ClearPinnedResources(); err != nil {
+					return err
+				}
+			}
+
+			if e := err.(Error); retrySupported && op.Type == Write && e.UnsupportedStorageEngine() {
+				return ErrUnsupportedStorageEngine
+			}
+
+			connDesc := conn.Description()
+			var retryableErr bool
+			if op.Type == Write {
+				retryableErr = tt.RetryableWrite(connDesc.WireVersion)
+				preRetryWriteLabelVersion := connDesc.WireVersion != nil && connDesc.WireVersion.Max < 9
+				inTransaction := op.Client != nil &&
+					!(op.Client.Committing || op.Client.Aborting) && op.Client.TransactionRunning()
+				// If retryWrites is enabled and the operation isn't in a transaction, add a RetryableWriteError label
+				// for network errors and retryable errors from pre-4.4 servers
+				if retryEnabled && !inTransaction &&
+					(tt.HasErrorLabel(NetworkError) || (retryableErr && preRetryWriteLabelVersion)) {
+					tt.Labels = append(tt.Labels, RetryableWriteError)
+				}
+			} else {
+				retryableErr = tt.RetryableRead()
+			}
+
+			// If retries are supported for the current operation on the first server description,
+			// the error is considered retryable, and there are retries remaining (negative retries
+			// means retry indefinitely), then retry the operation.
+			if retrySupported && retryableErr && retries != 0 {
+				if op.Client != nil && op.Client.Committing {
+					// Apply majority write concern for retries
+					op.Client.UpdateCommitTransactionWriteConcern()
+					op.WriteConcern = op.Client.CurrentWc
+				}
+				resetForRetry(tt)
+				continue
+			}
+
+			// If the error is no longer retryable and has the NoWritesPerformed label, then we should
+			// set the error to the "previous indefinite error" unless the current error is already the
+			// "previous indefinite error". After resetting, repeat the error check.
+			if tt.HasErrorLabel(NoWritesPerformed) && !prevIndefiniteErrIsSet {
+				err = prevIndefiniteErr
+				prevIndefiniteErrIsSet = true
+
+				goto checkError
+			}
+
+			// If the operation isn't being retried, process the response
+			if op.ProcessResponseFn != nil {
+				info := ResponseInfo{
+					ServerResponse:        res,
+					Server:                srvr,
+					Connection:            conn,
+					ConnectionDescription: desc.Server,
+					CurrentIndex:          currIndex,
+				}
+				_ = op.ProcessResponseFn(info)
+			}
+
+			if op.Client != nil && op.Client.Committing && (retryableErr || tt.Code == 50) {
+				// If we got a retryable error or MaxTimeMSExpired error, we add UnknownTransactionCommitResult.
+				tt.Labels = append(tt.Labels, UnknownTransactionCommitResult)
+			}
+			return tt
+		case nil:
+			if moreToCome {
+				return ErrUnacknowledgedWrite
+			}
+			if op.ProcessResponseFn != nil {
+				info := ResponseInfo{
+					ServerResponse:        res,
+					Server:                srvr,
+					Connection:            conn,
+					ConnectionDescription: desc.Server,
+					CurrentIndex:          currIndex,
+				}
+				perr = op.ProcessResponseFn(info)
+			}
+			if perr != nil {
+				return perr
+			}
+		default:
+			if op.ProcessResponseFn != nil {
+				info := ResponseInfo{
+					ServerResponse:        res,
+					Server:                srvr,
+					Connection:            conn,
+					ConnectionDescription: desc.Server,
+					CurrentIndex:          currIndex,
+				}
+				_ = op.ProcessResponseFn(info)
+			}
+			return err
+		}
+
+		// If we're batching and there are batches remaining, advance to the next batch. This isn't
+		// a retry, so increment the transaction number, reset the retries number, and don't set
+		// server or connection to nil to continue using the same connection.
+		if batching && len(op.Batches.Documents) > 0 {
+			// If retries are supported for the current operation on the current server description,
+			// the session isn't nil, and client retries are enabled, increment the txn number.
+			// Calling IncrementTxnNumber() for server descriptions or topologies that do not
+			// support retries (e.g. standalone topologies) will cause server errors.
+			if retrySupported && op.Client != nil && op.RetryMode != nil {
+				if op.RetryMode.Enabled() {
+					op.Client.IncrementTxnNumber()
+				}
+				// Reset the retries number for RetryOncePerCommand unless context is a Timeout context, in
+				// which case retries should remain as -1 (as many times as possible).
+				if *op.RetryMode == RetryOncePerCommand && !csot.IsTimeoutContext(ctx) {
+					retries = 1
+				}
+			}
+			currIndex += len(op.Batches.Current)
+			op.Batches.ClearBatch()
+			continue
+		}
+		break
+	}
+	if len(operationErr.WriteErrors) > 0 || operationErr.WriteConcernError != nil {
+		return operationErr
+	}
+	return nil
+}
+
+// retryable reports whether the operation can be retried on the given server description.
+// Retryable writes are supported if the server supports sessions, the operation is not
+// within a transaction, and the write is acknowledged.
+func (op Operation) retryable(desc description.Server) bool {
+	switch op.Type {
+	case Write:
+		if op.Client != nil && (op.Client.Committing || op.Client.Aborting) {
+			return true
+		}
+		if retryWritesSupported(desc) &&
+			op.Client != nil && !(op.Client.TransactionInProgress() || op.Client.TransactionStarting()) &&
+			writeconcern.AckWrite(op.WriteConcern) {
+			return true
+		}
+	case Read:
+		if op.Client != nil && (op.Client.Committing || op.Client.Aborting) {
+			return true
+		}
+		if op.Client == nil || !(op.Client.TransactionInProgress() || op.Client.TransactionStarting()) {
+			return true
+		}
+	}
+	return false
+}
+
+// roundTrip writes a wiremessage to the connection and then reads the
+// response wiremessage from the same connection.
+func (op Operation) roundTrip(ctx context.Context, conn Connection, wm []byte) ([]byte, error) {
+	err := conn.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return nil, op.networkError(err)
+	}
+	return op.readWireMessage(ctx, conn)
+}
+
+func (op Operation) readWireMessage(ctx context.Context, conn Connection) (result []byte, err error) {
+	wm, err := conn.ReadWireMessage(ctx)
+	if err != nil {
+		return nil, op.networkError(err)
+	}
+
+	// If we're using a streamable connection, we set its streaming state based on the moreToCome flag in the server
+	// response.
+	if streamer, ok := conn.(StreamerConnection); ok {
+		streamer.SetStreaming(wiremessage.IsMsgMoreToCome(wm))
+	}
+
+	length, _, _, opcode, rem, ok := wiremessage.ReadHeader(wm)
+	if !ok || len(wm) < int(length) {
+		return nil, errors.New("malformed wire message: insufficient bytes")
+	}
+	if opcode == wiremessage.OpCompressed {
+		rawsize := length - 16 // remove header size
+		// decompress wiremessage
+		opcode, rem, err = op.decompressWireMessage(rem[:rawsize])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// decode
+	res, err := op.decodeResult(ctx, opcode, rem)
+	// Update cluster/operation time and recovery tokens before handling the error to ensure we're properly updating
+	// everything.
+	op.updateClusterTimes(res)
+	op.updateOperationTime(res)
+	op.Client.UpdateRecoveryToken(bson.Raw(res))
+
+	// Update snapshot time if operation was a "find", "aggregate" or "distinct".
+	if op.Name == driverutil.FindOp || op.Name == driverutil.AggregateOp || op.Name == driverutil.DistinctOp {
+		op.Client.UpdateSnapshotTime(res)
+	}
+
+	if err != nil {
+		return res, err
+	}
+
+	// If there is no error, automatically attempt to decrypt all results if client side encryption is enabled.
+	if op.Crypt != nil {
+		res, err = op.Crypt.Decrypt(ctx, res)
+	}
+	return res, err
+}
+
+// networkError wraps the provided error in an Error with label "NetworkError" and, if a transaction
+// is running or committing, the appropriate transaction state labels. The returned error indicates
+// the operation should be retried for reads and writes. If err is nil, networkError returns nil.
+func (op Operation) networkError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	labels := []string{NetworkError}
+	if op.Client != nil {
+		op.Client.MarkDirty()
+	}
+	if op.Client != nil && op.Client.TransactionRunning() && !op.Client.Committing {
+		labels = append(labels, TransientTransactionError)
+	}
+	if op.Client != nil && op.Client.Committing {
+		labels = append(labels, UnknownTransactionCommitResult)
+	}
+	return Error{Message: err.Error(), Labels: labels, Wrapped: err}
+}
+
+// moreToComeRoundTrip writes a wiremessage to the provided connection. This is used when an OP_MSG is
+// being sent with the moreToCome bit set.
+func (op *Operation) moreToComeRoundTrip(ctx context.Context, conn Connection, wm []byte) (result []byte, err error) {
+	err = conn.WriteWireMessage(ctx, wm)
+	if err != nil {
+		if op.Client != nil {
+			op.Client.MarkDirty()
+		}
+		err = Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}, Wrapped: err}
+	}
+	return bsoncore.BuildDocument(nil, bsoncore.AppendInt32Element(nil, "ok", 1)), err
+}
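+
+// Because the server sends no reply to a moreToCome request, the document
+// returned above is synthesized locally and is simply {"ok": 1}. A minimal
+// sketch of the construction (illustrative; these are the two bsoncore calls
+// used above):
+//
+//	elem := bsoncore.AppendInt32Element(nil, "ok", 1) // bytes of an {"ok": 1} element
+//	doc := bsoncore.BuildDocument(nil, elem)          // adds the length prefix and terminator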
+
+// decompressWireMessage handles decompressing a wiremessage without the header.
+func (Operation) decompressWireMessage(wm []byte) (wiremessage.OpCode, []byte, error) {
+	// get the original opcode and uncompressed size
+	opcode, rem, ok := wiremessage.ReadCompressedOriginalOpCode(wm)
+	if !ok {
+		return 0, nil, errors.New("malformed OP_COMPRESSED: missing original opcode")
+	}
+	uncompressedSize, rem, ok := wiremessage.ReadCompressedUncompressedSize(rem)
+	if !ok {
+		return 0, nil, errors.New("malformed OP_COMPRESSED: missing uncompressed size")
+	}
+	// get the compressor ID and decompress the message
+	compressorID, rem, ok := wiremessage.ReadCompressedCompressorID(rem)
+	if !ok {
+		return 0, nil, errors.New("malformed OP_COMPRESSED: missing compressor ID")
+	}
+
+	opts := CompressionOpts{
+		Compressor:       compressorID,
+		UncompressedSize: uncompressedSize,
+	}
+	uncompressed, err := DecompressPayload(rem, opts)
+	if err != nil {
+		return 0, nil, err
+	}
+
+	return opcode, uncompressed, nil
+}
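+
+// For reference, the body of an OP_COMPRESSED message (after the standard
+// 16-byte header) is consumed above in this order; the field sketch below is
+// illustrative, mirroring the reads performed by decompressWireMessage:
+//
+//	int32  originalOpcode    // opcode of the wrapped message, e.g. OP_MSG
+//	int32  uncompressedSize  // expected size of the decompressed payload
+//	uint8  compressorID      // identifies the compressor, e.g. snappy, zlib, or zstd
+//	[]byte compressedBytes   // remainder, passed to DecompressPayload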
+
+func (op Operation) addBatchArray(dst []byte) []byte {
+	aidx, dst := bsoncore.AppendArrayElementStart(dst, op.Batches.Identifier)
+	for i, doc := range op.Batches.Current {
+		dst = bsoncore.AppendDocumentElement(dst, strconv.Itoa(i), doc)
+	}
+	dst, _ = bsoncore.AppendArrayEnd(dst, aidx)
+	return dst
+}
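+
+// Note that BSON encodes arrays as documents whose keys are the decimal string
+// indexes "0", "1", "2", ..., which is why addBatchArray keys each batch
+// document with strconv.Itoa(i). Conceptually, on the wire:
+//
+//	{"documents": {"0": <doc0>, "1": <doc1>}}  // wire form of the array [<doc0>, <doc1>]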
+
+func (op Operation) createLegacyHandshakeWireMessage(
+	maxTimeMS uint64,
+	dst []byte,
+	desc description.SelectedServer,
+) ([]byte, startedInformation, error) {
+	var info startedInformation
+	flags := op.secondaryOK(desc)
+	var wmindex int32
+	info.requestID = wiremessage.NextRequestID()
+	wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpQuery)
+	dst = wiremessage.AppendQueryFlags(dst, flags)
+
+	dollarCmd := [...]byte{'.', '$', 'c', 'm', 'd'}
+
+	// FullCollectionName
+	dst = append(dst, op.Database...)
+	dst = append(dst, dollarCmd[:]...)
+	dst = append(dst, 0x00)
+	dst = wiremessage.AppendQueryNumberToSkip(dst, 0)
+	dst = wiremessage.AppendQueryNumberToReturn(dst, -1)
+
+	wrapper := int32(-1)
+	rp, err := op.createReadPref(desc, true)
+	if err != nil {
+		return dst, info, err
+	}
+	if len(rp) > 0 {
+		wrapper, dst = bsoncore.AppendDocumentStart(dst)
+		dst = bsoncore.AppendHeader(dst, bsontype.EmbeddedDocument, "$query")
+	}
+	idx, dst := bsoncore.AppendDocumentStart(dst)
+	dst, err = op.CommandFn(dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+
+	if op.Batches != nil && len(op.Batches.Current) > 0 {
+		dst = op.addBatchArray(dst)
+	}
+
+	dst, err = op.addReadConcern(dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+
+	dst, err = op.addWriteConcern(dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+
+	dst, err = op.addSession(dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+
+	dst = op.addClusterTime(dst, desc)
+	dst = op.addServerAPI(dst)
+	// If maxTimeMS is greater than 0, append it to the wire message. A maxTimeMS value of 0 would
+	// only restate the server-side default behavior of no timeout.
+	if maxTimeMS > 0 {
+		dst = bsoncore.AppendInt64Element(dst, "maxTimeMS", int64(maxTimeMS))
+	}
+
+	dst, _ = bsoncore.AppendDocumentEnd(dst, idx)
+	// Command monitoring only reports the document inside $query
+	info.cmd = dst[idx:]
+
+	if len(rp) > 0 {
+		var err error
+		dst = bsoncore.AppendDocumentElement(dst, "$readPreference", rp)
+		dst, err = bsoncore.AppendDocumentEnd(dst, wrapper)
+		if err != nil {
+			return dst, info, err
+		}
+	}
+
+	return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil
+}
+
+func (op Operation) createMsgWireMessage(
+	ctx context.Context,
+	maxTimeMS uint64,
+	dst []byte,
+	desc description.SelectedServer,
+	conn Connection,
+	requestID int32,
+) ([]byte, startedInformation, error) {
+	var info startedInformation
+	var flags wiremessage.MsgFlag
+	var wmindex int32
+	// We set the MoreToCome bit if we have a write concern, it's unacknowledged, and we either
+	// aren't batching or we are encoding the last batch.
+	if op.WriteConcern != nil && !writeconcern.AckWrite(op.WriteConcern) && (op.Batches == nil || len(op.Batches.Documents) == 0) {
+		flags = wiremessage.MoreToCome
+	}
+	// Set the ExhaustAllowed flag if the connection supports streaming. This will tell the server that it can
+	// respond with the MoreToCome flag and then stream responses over this connection.
+	if streamer, ok := conn.(StreamerConnection); ok && streamer.SupportsStreaming() {
+		flags |= wiremessage.ExhaustAllowed
+	}
+
+	info.requestID = requestID
+	wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpMsg)
+	dst = wiremessage.AppendMsgFlags(dst, flags)
+	// Body
+	dst = wiremessage.AppendMsgSectionType(dst, wiremessage.SingleDocument)
+
+	idx, dst := bsoncore.AppendDocumentStart(dst)
+
+	dst, err := op.addCommandFields(ctx, dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+	dst, err = op.addReadConcern(dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+	dst, err = op.addWriteConcern(dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+	dst, err = op.addSession(dst, desc)
+	if err != nil {
+		return dst, info, err
+	}
+
+	dst = op.addClusterTime(dst, desc)
+	dst = op.addServerAPI(dst)
+	// If maxTimeMS is greater than 0, append it to the wire message. A maxTimeMS value of 0 would
+	// only restate the server-side default behavior of no timeout.
+	if maxTimeMS > 0 {
+		dst = bsoncore.AppendInt64Element(dst, "maxTimeMS", int64(maxTimeMS))
+	}
+
+	dst = bsoncore.AppendStringElement(dst, "$db", op.Database)
+	rp, err := op.createReadPref(desc, false)
+	if err != nil {
+		return dst, info, err
+	}
+	if len(rp) > 0 {
+		dst = bsoncore.AppendDocumentElement(dst, "$readPreference", rp)
+	}
+
+	dst, _ = bsoncore.AppendDocumentEnd(dst, idx)
+	// The command document for monitoring shouldn't include the type 1 payload as a document sequence
+	info.cmd = dst[idx:]
+
+	// add batch as a document sequence if auto encryption is not enabled
+	// if auto encryption is enabled, the batch will already be an array in the command document
+	if !op.shouldEncrypt() && op.Batches != nil && len(op.Batches.Current) > 0 {
+		info.documentSequenceIncluded = true
+		dst = wiremessage.AppendMsgSectionType(dst, wiremessage.DocumentSequence)
+		idx, dst = bsoncore.ReserveLength(dst)
+
+		dst = append(dst, op.Batches.Identifier...)
+		dst = append(dst, 0x00)
+
+		for _, doc := range op.Batches.Current {
+			dst = append(dst, doc...)
+		}
+
+		dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
+	}
+
+	return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil
+}
+
+// isLegacyHandshake returns true if the operation is the first message of
+// the initial handshake and should use a legacy hello.
+func isLegacyHandshake(op Operation, desc description.SelectedServer) bool {
+	isInitialHandshake := desc.WireVersion == nil || desc.WireVersion.Max == 0
+
+	return op.Legacy == LegacyHandshake && isInitialHandshake
+}
+
+func (op Operation) createWireMessage(
+	ctx context.Context,
+	maxTimeMS uint64,
+	dst []byte,
+	desc description.SelectedServer,
+	conn Connection,
+	requestID int32,
+) ([]byte, startedInformation, error) {
+	if isLegacyHandshake(op, desc) {
+		return op.createLegacyHandshakeWireMessage(maxTimeMS, dst, desc)
+	}
+
+	return op.createMsgWireMessage(ctx, maxTimeMS, dst, desc, conn, requestID)
+}
+
+// addCommandFields adds the fields for a command to the wire message in dst. This assumes that the start of the document
+// has already been added and does not add the final 0 byte.
+func (op Operation) addCommandFields(ctx context.Context, dst []byte, desc description.SelectedServer) ([]byte, error) {
+	if !op.shouldEncrypt() {
+		return op.CommandFn(dst, desc)
+	}
+
+	if desc.WireVersion.Max < cryptMinWireVersion {
+		return dst, errors.New("auto-encryption requires a MongoDB version of 4.2")
+	}
+
+	// create temporary command document
+	cidx, cmdDst := bsoncore.AppendDocumentStart(nil)
+	var err error
+	cmdDst, err = op.CommandFn(cmdDst, desc)
+	if err != nil {
+		return dst, err
+	}
+	// use a BSON array instead of a type 1 payload because mongocryptd will convert to arrays regardless
+	if op.Batches != nil && len(op.Batches.Current) > 0 {
+		cmdDst = op.addBatchArray(cmdDst)
+	}
+	cmdDst, _ = bsoncore.AppendDocumentEnd(cmdDst, cidx)
+
+	// encrypt the command
+	encrypted, err := op.Crypt.Encrypt(ctx, op.Database, cmdDst)
+	if err != nil {
+		return dst, err
+	}
+	// append encrypted command to original destination, removing the first 4 bytes (length) and final byte (terminator)
+	dst = append(dst, encrypted[4:len(encrypted)-1]...)
+	return dst, nil
+}
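+
+// The slicing above relies on the standard BSON document framing, sketched
+// here for reference:
+//
+//	[int32 totalLength][element bytes...][0x00 terminator]
+//
+// so encrypted[4:len(encrypted)-1] yields just the element bytes, ready to be
+// spliced into the enclosing command document. (Command's CommandFn, later in
+// this diff, uses the same trick.)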
+
+// addServerAPI adds the relevant fields for server API specification to the wire message in dst.
+func (op Operation) addServerAPI(dst []byte) []byte {
+	sa := op.ServerAPI
+	if sa == nil {
+		return dst
+	}
+
+	dst = bsoncore.AppendStringElement(dst, "apiVersion", sa.ServerAPIVersion)
+	if sa.Strict != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "apiStrict", *sa.Strict)
+	}
+	if sa.DeprecationErrors != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "apiDeprecationErrors", *sa.DeprecationErrors)
+	}
+	return dst
+}
+
+func (op Operation) addReadConcern(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	if op.MinimumReadConcernWireVersion > 0 && (desc.WireVersion == nil || !desc.WireVersion.Includes(op.MinimumReadConcernWireVersion)) {
+		return dst, nil
+	}
+	rc := op.ReadConcern
+	client := op.Client
+	// Starting transaction's read concern overrides all others
+	if client != nil && client.TransactionStarting() && client.CurrentRc != nil {
+		rc = client.CurrentRc
+	}
+
+	// A starting transaction must append afterClusterTime if the session is causally consistent and an operation time exists
+	if rc == nil && client != nil && client.TransactionStarting() && client.Consistent && client.OperationTime != nil {
+		rc = readconcern.New()
+	}
+
+	if client != nil && client.Snapshot {
+		if desc.WireVersion.Max < readSnapshotMinWireVersion {
+			return dst, errors.New("snapshot reads require MongoDB 5.0 or later")
+		}
+		rc = readconcern.Snapshot()
+	}
+
+	if rc == nil {
+		return dst, nil
+	}
+
+	_, data, err := rc.MarshalBSONValue() // always returns a document
+	if err != nil {
+		return dst, err
+	}
+
+	if sessionsSupported(desc.WireVersion) && client != nil {
+		if client.Consistent && client.OperationTime != nil {
+			data = data[:len(data)-1] // remove the null byte
+			data = bsoncore.AppendTimestampElement(data, "afterClusterTime", client.OperationTime.T, client.OperationTime.I)
+			data, _ = bsoncore.AppendDocumentEnd(data, 0)
+		}
+		if client.Snapshot && client.SnapshotTime != nil {
+			data = data[:len(data)-1] // remove the null byte
+			data = bsoncore.AppendTimestampElement(data, "atClusterTime", client.SnapshotTime.T, client.SnapshotTime.I)
+			data, _ = bsoncore.AppendDocumentEnd(data, 0)
+		}
+	}
+
+	if len(data) == bsoncore.EmptyDocumentLength {
+		return dst, nil
+	}
+	return bsoncore.AppendDocumentElement(dst, "readConcern", data), nil
+}
+
+func (op Operation) addWriteConcern(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	if op.MinimumWriteConcernWireVersion > 0 && (desc.WireVersion == nil || !desc.WireVersion.Includes(op.MinimumWriteConcernWireVersion)) {
+		return dst, nil
+	}
+	wc := op.WriteConcern
+	if wc == nil {
+		return dst, nil
+	}
+
+	t, data, err := wc.MarshalBSONValue()
+	if errors.Is(err, writeconcern.ErrEmptyWriteConcern) {
+		return dst, nil
+	}
+	if err != nil {
+		return dst, err
+	}
+
+	return append(bsoncore.AppendHeader(dst, t, "writeConcern"), data...), nil
+}
+
+func (op Operation) addSession(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	client := op.Client
+
+	// If the operation is defined for an explicit session but the server
+	// does not support sessions, then throw an error.
+	if client != nil && !client.IsImplicit && desc.SessionTimeoutMinutesPtr == nil {
+		return nil, fmt.Errorf("current topology does not support sessions")
+	}
+
+	if client == nil || !sessionsSupported(desc.WireVersion) || desc.SessionTimeoutMinutesPtr == nil {
+		return dst, nil
+	}
+	if err := client.UpdateUseTime(); err != nil {
+		return dst, err
+	}
+	dst = bsoncore.AppendDocumentElement(dst, "lsid", client.SessionID)
+
+	var addedTxnNumber bool
+	if op.Type == Write && client.RetryWrite {
+		addedTxnNumber = true
+		dst = bsoncore.AppendInt64Element(dst, "txnNumber", op.Client.TxnNumber)
+	}
+	if client.TransactionRunning() || client.RetryingCommit {
+		if !addedTxnNumber {
+			dst = bsoncore.AppendInt64Element(dst, "txnNumber", op.Client.TxnNumber)
+		}
+		if client.TransactionStarting() {
+			dst = bsoncore.AppendBooleanElement(dst, "startTransaction", true)
+		}
+		dst = bsoncore.AppendBooleanElement(dst, "autocommit", false)
+	}
+
+	return dst, client.ApplyCommand(desc.Server)
+}
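+
+// Taken together, addSession can append up to four fields to the command. As a
+// hypothetical example, a command running as the first operation of a
+// transaction would carry:
+//
+//	{..., "lsid": <session ID document>, "txnNumber": 3, "startTransaction": true, "autocommit": false}
+//
+// where the txnNumber value 3 is illustrative only.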
+
+func (op Operation) addClusterTime(dst []byte, desc description.SelectedServer) []byte {
+	client, clock := op.Client, op.Clock
+	if (clock == nil && client == nil) || !sessionsSupported(desc.WireVersion) {
+		return dst
+	}
+	clusterTime := clock.GetClusterTime()
+	if client != nil {
+		clusterTime = session.MaxClusterTime(clusterTime, client.ClusterTime)
+	}
+	if clusterTime == nil {
+		return dst
+	}
+	val, err := clusterTime.LookupErr("$clusterTime")
+	if err != nil {
+		return dst
+	}
+	return append(bsoncore.AppendHeader(dst, val.Type, "$clusterTime"), val.Value...)
+	// return bsoncore.AppendDocumentElement(dst, "$clusterTime", clusterTime)
+}
+
+// calculateMaxTimeMS calculates the value of the 'maxTimeMS' field to potentially append
+// to the wire message. If ctx is a Timeout context, the value is derived from the context's
+// deadline and the 90th percentile RTT; otherwise, the operation's MaxTimeMS is used if it
+// is set. If neither applies, calculateMaxTimeMS returns 0.
+func (op Operation) calculateMaxTimeMS(ctx context.Context, mon RTTMonitor) (uint64, error) {
+	// If CSOT is enabled and we're not omitting the CSOT-calculated maxTimeMS
+	// value, then calculate maxTimeMS.
+	//
+	// This allows commands that do not currently send CSOT-calculated maxTimeMS
+	// (e.g. Find and Aggregate) to still use a manually-provided maxTimeMS
+	// value.
+	//
+	// TODO(GODRIVER-2944): Remove or refactor this logic when we add the
+	// "timeoutMode" option, which will allow users to opt-in to the
+	// CSOT-calculated maxTimeMS values if that's the behavior they want.
+	if csot.IsTimeoutContext(ctx) && !op.OmitCSOTMaxTimeMS {
+		if deadline, ok := ctx.Deadline(); ok {
+			remainingTimeout := time.Until(deadline)
+			rtt90 := mon.P90()
+			maxTime := remainingTimeout - rtt90
+
+			// Always round up to the next millisecond value so we never truncate the calculated
+			// maxTimeMS value (e.g. 400 microseconds evaluates to 1ms, not 0ms).
+			maxTimeMS := int64((maxTime + (time.Millisecond - 1)) / time.Millisecond)
+			if maxTimeMS <= 0 {
+				return 0, fmt.Errorf(
+					"negative maxTimeMS: remaining time %v until context deadline is less than 90th percentile network round-trip time (%v): %w",
+					remainingTimeout,
+					mon.Stats(),
+					ErrDeadlineWouldBeExceeded)
+			}
+
+			// The server will return a "BadValue" error if maxTimeMS is greater
+			// than the maximum positive int32 value (about 24.9 days). If the
+			// user specified a timeout value greater than that, omit maxTimeMS
+			// and let the client-side timeout handle cancelling the op if the
+			// timeout is ever reached.
+			if maxTimeMS > math.MaxInt32 {
+				return 0, nil
+			}
+
+			return uint64(maxTimeMS), nil
+		}
+	} else if op.MaxTime != nil {
+		// Users are not allowed to pass a negative value as MaxTime. A value of 0 would indicate
+		// no timeout and is allowed.
+		if *op.MaxTime < 0 {
+			return 0, ErrNegativeMaxTime
+		}
+		// Always round up to the next millisecond value so we never truncate the requested
+		// MaxTime value (e.g. 400 microseconds evaluates to 1ms, not 0ms).
+		return uint64((*op.MaxTime + (time.Millisecond - 1)) / time.Millisecond), nil
+	}
+	return 0, nil
+}
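+
+// The round-up arithmetic above is integer ceiling division; a minimal sketch
+// with an assumed 400µs remaining budget:
+//
+//	maxTime := 400 * time.Microsecond
+//	ms := int64((maxTime + (time.Millisecond - 1)) / time.Millisecond)
+//	// ms == 1, so a sub-millisecond budget is never truncated to 0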
+
+// updateClusterTimes updates the cluster times for the session and cluster clock attached to this
+// operation. While the session's AdvanceClusterTime may return an error, this method does not
+// because an error being returned from this method will not be returned further up.
+func (op Operation) updateClusterTimes(response bsoncore.Document) {
+	// Extract cluster time.
+	value, err := response.LookupErr("$clusterTime")
+	if err != nil {
+		// $clusterTime not included by the server
+		return
+	}
+	clusterTime := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendValueElement(nil, "$clusterTime", value))
+
+	sess, clock := op.Client, op.Clock
+
+	if sess != nil {
+		_ = sess.AdvanceClusterTime(bson.Raw(clusterTime))
+	}
+
+	if clock != nil {
+		clock.AdvanceClusterTime(bson.Raw(clusterTime))
+	}
+}
+
+// updateOperationTime updates the operation time on the session attached to this operation. While
+// the session's AdvanceOperationTime method may return an error, this method does not because an
+// error being returned from this method will not be returned further up.
+func (op Operation) updateOperationTime(response bsoncore.Document) {
+	sess := op.Client
+	if sess == nil {
+		return
+	}
+
+	opTimeElem, err := response.LookupErr("operationTime")
+	if err != nil {
+		// operationTime not included by the server
+		return
+	}
+
+	t, i := opTimeElem.Timestamp()
+	_ = sess.AdvanceOperationTime(&primitive.Timestamp{
+		T: t,
+		I: i,
+	})
+}
+
+func (op Operation) getReadPrefBasedOnTransaction() (*readpref.ReadPref, error) {
+	if op.Client != nil && op.Client.TransactionRunning() {
+		// Transaction's read preference always takes priority
+		rp := op.Client.CurrentRp
+		// Reads in a transaction must have read preference primary
+		// This must not be checked in startTransaction
+		if rp != nil && !op.Client.TransactionStarting() && rp.Mode() != readpref.PrimaryMode {
+			return nil, ErrNonPrimaryReadPref
+		}
+		return rp, nil
+	}
+	return op.ReadPreference, nil
+}
+
+// createReadPref will attempt to create a document with the "readPreference"
+// object and various related fields such as "mode", "tags", and
+// "maxStalenessSeconds".
+func (op Operation) createReadPref(desc description.SelectedServer, isOpQuery bool) (bsoncore.Document, error) {
+	if op.omitReadPreference {
+		return nil, nil
+	}
+
+	// TODO(GODRIVER-2231): Instead of checking if isOutputAggregate and desc.Server.WireVersion.Max < 13, somehow check
+	// TODO if supplied readPreference was "overwritten" with primary in description.selectForReplicaSet.
+	if desc.Server.Kind == description.Standalone || (isOpQuery && desc.Server.Kind != description.Mongos) ||
+		op.Type == Write || (op.IsOutputAggregate && desc.Server.WireVersion.Max < 13) {
+		// Don't send read preference for:
+		// 1. all standalones
+		// 2. non-mongos when using OP_QUERY
+		// 3. all writes
+		// 4. when operation is an aggregate with an output stage, and selected server's wire
+		//    version is < 13
+		return nil, nil
+	}
+
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	rp, err := op.getReadPrefBasedOnTransaction()
+	if err != nil {
+		return nil, err
+	}
+
+	if rp == nil {
+		if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+			doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred")
+			doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+			return doc, nil
+		}
+		return nil, nil
+	}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		if desc.Server.Kind == description.Mongos {
+			return nil, nil
+		}
+		if desc.Kind == description.Single {
+			doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred")
+			doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+			return doc, nil
+		}
+
+		// OP_MSG requires never sending read preference "primary"
+		// except for topology "single".
+		//
+		// It is important to note that although the Go Driver does not
+		// support legacy opcodes, OP_QUERY has different rules for
+		// adding read preference to commands.
+		return nil, nil
+	case readpref.PrimaryPreferredMode:
+		doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred")
+	case readpref.SecondaryPreferredMode:
+		_, ok := rp.MaxStaleness()
+		if desc.Server.Kind == description.Mongos && isOpQuery && !ok && len(rp.TagSets()) == 0 && rp.HedgeEnabled() == nil {
+			return nil, nil
+		}
+		doc = bsoncore.AppendStringElement(doc, "mode", "secondaryPreferred")
+	case readpref.SecondaryMode:
+		doc = bsoncore.AppendStringElement(doc, "mode", "secondary")
+	case readpref.NearestMode:
+		doc = bsoncore.AppendStringElement(doc, "mode", "nearest")
+	}
+
+	sets := make([]bsoncore.Document, 0, len(rp.TagSets()))
+	for _, ts := range rp.TagSets() {
+		i, set := bsoncore.AppendDocumentStart(nil)
+		for _, t := range ts {
+			set = bsoncore.AppendStringElement(set, t.Name, t.Value)
+		}
+		set, _ = bsoncore.AppendDocumentEnd(set, i)
+		sets = append(sets, set)
+	}
+	if len(sets) > 0 {
+		var aidx int32
+		aidx, doc = bsoncore.AppendArrayElementStart(doc, "tags")
+		for i, set := range sets {
+			doc = bsoncore.AppendDocumentElement(doc, strconv.Itoa(i), set)
+		}
+		doc, _ = bsoncore.AppendArrayEnd(doc, aidx)
+	}
+
+	if d, ok := rp.MaxStaleness(); ok {
+		doc = bsoncore.AppendInt32Element(doc, "maxStalenessSeconds", int32(d.Seconds()))
+	}
+
+	if hedgeEnabled := rp.HedgeEnabled(); hedgeEnabled != nil {
+		var hedgeIdx int32
+		hedgeIdx, doc = bsoncore.AppendDocumentElementStart(doc, "hedge")
+		doc = bsoncore.AppendBooleanElement(doc, "enabled", *hedgeEnabled)
+		doc, err = bsoncore.AppendDocumentEnd(doc, hedgeIdx)
+		if err != nil {
+			return nil, fmt.Errorf("error creating hedge document: %w", err)
+		}
+	}
+
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+	return doc, nil
+}
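+
+// Depending on the mode and options, the resulting read preference document has
+// this general shape (values here are illustrative, not defaults):
+//
+//	{
+//	    "mode": "secondaryPreferred",
+//	    "tags": [{"dc": "east"}],
+//	    "maxStalenessSeconds": 90,
+//	    "hedge": {"enabled": true}
+//	}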
+
+func (op Operation) secondaryOK(desc description.SelectedServer) wiremessage.QueryFlag {
+	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+		return wiremessage.SecondaryOK
+	}
+
+	if rp := op.ReadPreference; rp != nil && rp.Mode() != readpref.PrimaryMode {
+		return wiremessage.SecondaryOK
+	}
+
+	return 0
+}
+
+func (Operation) canCompress(cmd string) bool {
+	if cmd == handshake.LegacyHello || cmd == "hello" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "authenticate" ||
+		cmd == "createUser" || cmd == "updateUser" || cmd == "copydbSaslStart" || cmd == "copydbgetnonce" || cmd == "copydb" {
+		return false
+	}
+	return true
+}
+
+// decodeOpReply extracts the necessary information from an OP_REPLY wire message.
+// Returns the decoded OP_REPLY. If the err field of the returned opReply is non-nil, an error occurred while decoding
+// or validating the response and the other fields are undefined.
+func (Operation) decodeOpReply(wm []byte) opReply {
+	var reply opReply
+	var ok bool
+
+	reply.responseFlags, wm, ok = wiremessage.ReadReplyFlags(wm)
+	if !ok {
+		reply.err = errors.New("malformed OP_REPLY: missing flags")
+		return reply
+	}
+	reply.cursorID, wm, ok = wiremessage.ReadReplyCursorID(wm)
+	if !ok {
+		reply.err = errors.New("malformed OP_REPLY: missing cursorID")
+		return reply
+	}
+	reply.startingFrom, wm, ok = wiremessage.ReadReplyStartingFrom(wm)
+	if !ok {
+		reply.err = errors.New("malformed OP_REPLY: missing startingFrom")
+		return reply
+	}
+	reply.numReturned, wm, ok = wiremessage.ReadReplyNumberReturned(wm)
+	if !ok {
+		reply.err = errors.New("malformed OP_REPLY: missing numberReturned")
+		return reply
+	}
+	reply.documents, _, ok = wiremessage.ReadReplyDocuments(wm)
+	if !ok {
+		reply.err = errors.New("malformed OP_REPLY: could not read documents from reply")
+	}
+
+	if reply.responseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
+		reply.err = QueryFailureError{
+			Message:  "command failure",
+			Response: reply.documents[0],
+		}
+		return reply
+	}
+	if reply.responseFlags&wiremessage.CursorNotFound == wiremessage.CursorNotFound {
+		reply.err = ErrCursorNotFound
+		return reply
+	}
+	if reply.numReturned != int32(len(reply.documents)) {
+		reply.err = ErrReplyDocumentMismatch
+		return reply
+	}
+
+	return reply
+}
+
+func (op Operation) decodeResult(ctx context.Context, opcode wiremessage.OpCode, wm []byte) (bsoncore.Document, error) {
+	switch opcode {
+	case wiremessage.OpReply:
+		reply := op.decodeOpReply(wm)
+		if reply.err != nil {
+			return nil, reply.err
+		}
+		if reply.numReturned == 0 {
+			return nil, ErrNoDocCommandResponse
+		}
+		if reply.numReturned > 1 {
+			return nil, ErrMultiDocCommandResponse
+		}
+		rdr := reply.documents[0]
+		if err := rdr.Validate(); err != nil {
+			return nil, NewCommandResponseError("malformed OP_REPLY: invalid document", err)
+		}
+
+		return rdr, ExtractErrorFromServerResponse(ctx, rdr)
+	case wiremessage.OpMsg:
+		_, wm, ok := wiremessage.ReadMsgFlags(wm)
+		if !ok {
+			return nil, errors.New("malformed wire message: missing OP_MSG flags")
+		}
+
+		var res bsoncore.Document
+		for len(wm) > 0 {
+			var stype wiremessage.SectionType
+			stype, wm, ok = wiremessage.ReadMsgSectionType(wm)
+			if !ok {
+				return nil, errors.New("malformed wire message: insuffienct bytes to read section type")
+			}
+
+			switch stype {
+			case wiremessage.SingleDocument:
+				res, wm, ok = wiremessage.ReadMsgSectionSingleDocument(wm)
+				if !ok {
+					return nil, errors.New("malformed wire message: insufficient bytes to read single document")
+				}
+			case wiremessage.DocumentSequence:
+				_, _, wm, ok = wiremessage.ReadMsgSectionDocumentSequence(wm)
+				if !ok {
+					return nil, errors.New("malformed wire message: insufficient bytes to read document sequence")
+				}
+			default:
+				return nil, fmt.Errorf("malformed wire message: unknown section type %v", stype)
+			}
+		}
+
+		err := res.Validate()
+		if err != nil {
+			return nil, NewCommandResponseError("malformed OP_MSG: invalid document", err)
+		}
+
+		return res, ExtractErrorFromServerResponse(ctx, res)
+	default:
+		return nil, fmt.Errorf("cannot decode result from %s", opcode)
+	}
+}
+
+// getCommandName returns the name of the command from the given BSON document.
+func (op Operation) getCommandName(doc []byte) string {
+	// skip 4 bytes for document length and 1 byte for element type
+	idx := bytes.IndexByte(doc[5:], 0x00) // look for the 0 byte after the command name
+	return string(doc[5 : idx+5])
+}
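+
+// This works because a BSON document begins with a 4-byte length followed by
+// the first element's type byte and its null-terminated key, and the first key
+// of a MongoDB command document is always the command name. For example, given
+// a document beginning {"find": "coll", ...}, the bytes from offset 5 up to
+// the first 0x00 spell "find".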
+
+func (op *Operation) redactCommand(cmd string, doc bsoncore.Document) bool {
+	if cmd == "authenticate" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "createUser" ||
+		cmd == "updateUser" || cmd == "copydbgetnonce" || cmd == "copydbsaslstart" || cmd == "copydb" {
+
+		return true
+	}
+	if strings.ToLower(cmd) != handshake.LegacyHelloLowercase && cmd != "hello" {
+		return false
+	}
+
+	// A hello without speculative authentication can be monitored.
+	_, err := doc.LookupErr("speculativeAuthenticate")
+	return err == nil
+}
+
+// canLogCommandMessage returns true if the command can be logged.
+func (op Operation) canLogCommandMessage() bool {
+	return op.Logger != nil && op.Logger.LevelComponentEnabled(logger.LevelDebug, logger.ComponentCommand)
+}
+
+func (op Operation) canPublishStartedEvent() bool {
+	return op.CommandMonitor != nil && op.CommandMonitor.Started != nil
+}
+
+// publishStartedEvent publishes a CommandStartedEvent to the operation's command monitor if possible. If the command is
+// an unacknowledged write, a CommandSucceededEvent will be published as well. If started events are not being monitored,
+// no events are published.
+func (op Operation) publishStartedEvent(ctx context.Context, info startedInformation) {
+	// If logging is enabled for the command component at the debug level, log the command response.
+	if op.canLogCommandMessage() {
+		host, port, _ := net.SplitHostPort(info.serverAddress.String())
+
+		redactedCmd := redactStartedInformationCmd(op, info).String()
+		formattedCmd := logger.FormatMessage(redactedCmd, op.Logger.MaxDocumentLength)
+
+		op.Logger.Print(logger.LevelDebug,
+			logger.ComponentCommand,
+			logger.CommandStarted,
+			logger.SerializeCommand(logger.Command{
+				DriverConnectionID: info.driverConnectionID,
+				Message:            logger.CommandStarted,
+				Name:               info.cmdName,
+				DatabaseName:       op.Database,
+				RequestID:          int64(info.requestID),
+				ServerConnectionID: info.serverConnID,
+				ServerHost:         host,
+				ServerPort:         port,
+				ServiceID:          info.serviceID,
+			},
+				logger.KeyCommand, formattedCmd)...)
+
+	}
+
+	if op.canPublishStartedEvent() {
+		started := &event.CommandStartedEvent{
+			Command:              redactStartedInformationCmd(op, info),
+			DatabaseName:         op.Database,
+			CommandName:          info.cmdName,
+			RequestID:            int64(info.requestID),
+			ConnectionID:         info.connID,
+			ServerConnectionID:   convertInt64PtrToInt32Ptr(info.serverConnID),
+			ServerConnectionID64: info.serverConnID,
+			ServiceID:            info.serviceID,
+		}
+		op.CommandMonitor.Started(ctx, started)
+	}
+}
+
+// canPublishFinishedEvent returns true if a CommandSucceededEvent can be
+// published for the given command. This is true if the command is not an
+// unacknowledged write and the command monitor is monitoring succeeded events.
+func (op Operation) canPublishFinishedEvent(info finishedInformation) bool {
+	success := info.success()
+
+	return op.CommandMonitor != nil &&
+		(!success || op.CommandMonitor.Succeeded != nil) &&
+		(success || op.CommandMonitor.Failed != nil)
+}
+
+// publishFinishedEvent publishes either a CommandSucceededEvent or a CommandFailedEvent to the operation's command
+// monitor if possible. If success/failure events aren't being monitored, no events are published.
+func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInformation) {
+	if op.canLogCommandMessage() && info.success() {
+		host, port, _ := net.SplitHostPort(info.serverAddress.String())
+
+		redactedReply := redactFinishedInformationResponse(info).String()
+		formattedReply := logger.FormatMessage(redactedReply, op.Logger.MaxDocumentLength)
+
+		op.Logger.Print(logger.LevelDebug,
+			logger.ComponentCommand,
+			logger.CommandSucceeded,
+			logger.SerializeCommand(logger.Command{
+				DriverConnectionID: info.driverConnectionID,
+				Message:            logger.CommandSucceeded,
+				Name:               info.cmdName,
+				DatabaseName:       op.Database,
+				RequestID:          int64(info.requestID),
+				ServerConnectionID: info.serverConnID,
+				ServerHost:         host,
+				ServerPort:         port,
+				ServiceID:          info.serviceID,
+			},
+				logger.KeyDurationMS, info.duration.Milliseconds(),
+				logger.KeyReply, formattedReply)...)
+	}
+
+	if op.canLogCommandMessage() && !info.success() {
+		host, port, _ := net.SplitHostPort(info.serverAddress.String())
+
+		formattedReply := logger.FormatMessage(info.cmdErr.Error(), op.Logger.MaxDocumentLength)
+
+		op.Logger.Print(logger.LevelDebug,
+			logger.ComponentCommand,
+			logger.CommandFailed,
+			logger.SerializeCommand(logger.Command{
+				DriverConnectionID: info.driverConnectionID,
+				Message:            logger.CommandFailed,
+				Name:               info.cmdName,
+				DatabaseName:       op.Database,
+				RequestID:          int64(info.requestID),
+				ServerConnectionID: info.serverConnID,
+				ServerHost:         host,
+				ServerPort:         port,
+				ServiceID:          info.serviceID,
+			},
+				logger.KeyDurationMS, info.duration.Milliseconds(),
+				logger.KeyFailure, formattedReply)...)
+	}
+
+	// If the finished event cannot be published, return early.
+	if !op.canPublishFinishedEvent(info) {
+		return
+	}
+
+	finished := event.CommandFinishedEvent{
+		CommandName:          info.cmdName,
+		DatabaseName:         op.Database,
+		RequestID:            int64(info.requestID),
+		ConnectionID:         info.connID,
+		Duration:             info.duration,
+		DurationNanos:        info.duration.Nanoseconds(),
+		ServerConnectionID:   convertInt64PtrToInt32Ptr(info.serverConnID),
+		ServerConnectionID64: info.serverConnID,
+		ServiceID:            info.serviceID,
+	}
+
+	if info.success() {
+		successEvent := &event.CommandSucceededEvent{
+			Reply:                redactFinishedInformationResponse(info),
+			CommandFinishedEvent: finished,
+		}
+		op.CommandMonitor.Succeeded(ctx, successEvent)
+
+		return
+	}
+
+	failedEvent := &event.CommandFailedEvent{
+		Failure:              info.cmdErr.Error(),
+		CommandFinishedEvent: finished,
+	}
+	op.CommandMonitor.Failed(ctx, failedEvent)
+}
+
+// sessionsSupported returns true if the given server version indicates that it supports sessions.
+func sessionsSupported(wireVersion *description.VersionRange) bool {
+	return wireVersion != nil
+}
+
+// retryWritesSupported returns true if this description represents a server that supports retryable writes.
+func retryWritesSupported(s description.Server) bool {
+	return s.SessionTimeoutMinutesPtr != nil && s.Kind != description.Standalone
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go
new file mode 100644
index 0000000000000000000000000000000000000000..aeee533533b74778d3ee2d80b81f813aa7818397
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go
@@ -0,0 +1,213 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// AbortTransaction performs an abortTransaction operation.
+type AbortTransaction struct {
+	authenticator driver.Authenticator
+	recoveryToken bsoncore.Document
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	writeConcern  *writeconcern.WriteConcern
+	retry         *driver.RetryMode
+	serverAPI     *driver.ServerAPIOptions
+}
+
+// NewAbortTransaction constructs and returns a new AbortTransaction.
+func NewAbortTransaction() *AbortTransaction {
+	return &AbortTransaction{}
+}
+
+func (at *AbortTransaction) processResponse(driver.ResponseInfo) error {
+	var err error
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (at *AbortTransaction) Execute(ctx context.Context) error {
+	if at.deployment == nil {
+		return errors.New("the AbortTransaction operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         at.command,
+		ProcessResponseFn: at.processResponse,
+		RetryMode:         at.retry,
+		Type:              driver.Write,
+		Client:            at.session,
+		Clock:             at.clock,
+		CommandMonitor:    at.monitor,
+		Crypt:             at.crypt,
+		Database:          at.database,
+		Deployment:        at.deployment,
+		Selector:          at.selector,
+		WriteConcern:      at.writeConcern,
+		ServerAPI:         at.serverAPI,
+		Name:              driverutil.AbortTransactionOp,
+		Authenticator:     at.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (at *AbortTransaction) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+
+	dst = bsoncore.AppendInt32Element(dst, "abortTransaction", 1)
+	if at.recoveryToken != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "recoveryToken", at.recoveryToken)
+	}
+	return dst, nil
+}
+
+// RecoveryToken sets the recovery token to use when committing or aborting a sharded transaction.
+func (at *AbortTransaction) RecoveryToken(recoveryToken bsoncore.Document) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.recoveryToken = recoveryToken
+	return at
+}
+
+// Session sets the session for this operation.
+func (at *AbortTransaction) Session(session *session.Client) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.session = session
+	return at
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (at *AbortTransaction) ClusterClock(clock *session.ClusterClock) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.clock = clock
+	return at
+}
+
+// Collection sets the collection that this command will run against.
+func (at *AbortTransaction) Collection(collection string) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.collection = collection
+	return at
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (at *AbortTransaction) CommandMonitor(monitor *event.CommandMonitor) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.monitor = monitor
+	return at
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (at *AbortTransaction) Crypt(crypt driver.Crypt) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.crypt = crypt
+	return at
+}
+
+// Database sets the database to run this operation against.
+func (at *AbortTransaction) Database(database string) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.database = database
+	return at
+}
+
+// Deployment sets the deployment to use for this operation.
+func (at *AbortTransaction) Deployment(deployment driver.Deployment) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.deployment = deployment
+	return at
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (at *AbortTransaction) ServerSelector(selector description.ServerSelector) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.selector = selector
+	return at
+}
+
+// WriteConcern sets the write concern for this operation.
+func (at *AbortTransaction) WriteConcern(writeConcern *writeconcern.WriteConcern) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.writeConcern = writeConcern
+	return at
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (at *AbortTransaction) Retry(retry driver.RetryMode) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.retry = &retry
+	return at
+}
+
+// ServerAPI sets the server API version for this operation.
+func (at *AbortTransaction) ServerAPI(serverAPI *driver.ServerAPIOptions) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.serverAPI = serverAPI
+	return at
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (at *AbortTransaction) Authenticator(authenticator driver.Authenticator) *AbortTransaction {
+	if at == nil {
+		at = new(AbortTransaction)
+	}
+
+	at.authenticator = authenticator
+	return at
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go
new file mode 100644
index 0000000000000000000000000000000000000000..df6b8fa9dd7d1dacf34b368cad2f16cb93b944e2
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go
@@ -0,0 +1,447 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Aggregate represents an aggregate operation.
+type Aggregate struct {
+	authenticator            driver.Authenticator
+	allowDiskUse             *bool
+	batchSize                *int32
+	bypassDocumentValidation *bool
+	collation                bsoncore.Document
+	comment                  *string
+	hint                     bsoncore.Value
+	maxTime                  *time.Duration
+	pipeline                 bsoncore.Document
+	session                  *session.Client
+	clock                    *session.ClusterClock
+	collection               string
+	monitor                  *event.CommandMonitor
+	database                 string
+	deployment               driver.Deployment
+	readConcern              *readconcern.ReadConcern
+	readPreference           *readpref.ReadPref
+	retry                    *driver.RetryMode
+	selector                 description.ServerSelector
+	writeConcern             *writeconcern.WriteConcern
+	crypt                    driver.Crypt
+	serverAPI                *driver.ServerAPIOptions
+	let                      bsoncore.Document
+	hasOutputStage           bool
+	customOptions            map[string]bsoncore.Value
+	timeout                  *time.Duration
+	omitCSOTMaxTimeMS        bool
+
+	result driver.CursorResponse
+}
+
+// NewAggregate constructs and returns a new Aggregate.
+func NewAggregate(pipeline bsoncore.Document) *Aggregate {
+	return &Aggregate{
+		pipeline: pipeline,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (a *Aggregate) Result(opts driver.CursorOptions) (*driver.BatchCursor, error) {
+
+	clientSession := a.session
+
+	clock := a.clock
+	opts.ServerAPI = a.serverAPI
+	return driver.NewBatchCursor(a.result, clientSession, clock, opts)
+}
+
+// ResultCursorResponse returns the underlying CursorResponse result of executing this
+// operation.
+func (a *Aggregate) ResultCursorResponse() driver.CursorResponse {
+	return a.result
+}
+
+func (a *Aggregate) processResponse(info driver.ResponseInfo) error {
+	var err error
+
+	a.result, err = driver.NewCursorResponse(info)
+	return err
+
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (a *Aggregate) Execute(ctx context.Context) error {
+	if a.deployment == nil {
+		return errors.New("the Aggregate operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         a.command,
+		ProcessResponseFn: a.processResponse,
+
+		Client:                         a.session,
+		Clock:                          a.clock,
+		CommandMonitor:                 a.monitor,
+		Database:                       a.database,
+		Deployment:                     a.deployment,
+		ReadConcern:                    a.readConcern,
+		ReadPreference:                 a.readPreference,
+		Type:                           driver.Read,
+		RetryMode:                      a.retry,
+		Selector:                       a.selector,
+		WriteConcern:                   a.writeConcern,
+		Crypt:                          a.crypt,
+		MinimumWriteConcernWireVersion: 5,
+		ServerAPI:                      a.serverAPI,
+		IsOutputAggregate:              a.hasOutputStage,
+		MaxTime:                        a.maxTime,
+		Timeout:                        a.timeout,
+		Name:                           driverutil.AggregateOp,
+		OmitCSOTMaxTimeMS:              a.omitCSOTMaxTimeMS,
+		Authenticator:                  a.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (a *Aggregate) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	header := bsoncore.Value{Type: bsontype.String, Data: bsoncore.AppendString(nil, a.collection)}
+	if a.collection == "" {
+		header = bsoncore.Value{Type: bsontype.Int32, Data: []byte{0x01, 0x00, 0x00, 0x00}}
+	}
+	dst = bsoncore.AppendValueElement(dst, "aggregate", header)
+
+	cursorIdx, cursorDoc := bsoncore.AppendDocumentStart(nil)
+	if a.allowDiskUse != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "allowDiskUse", *a.allowDiskUse)
+	}
+	if a.batchSize != nil {
+		cursorDoc = bsoncore.AppendInt32Element(cursorDoc, "batchSize", *a.batchSize)
+	}
+	if a.bypassDocumentValidation != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "bypassDocumentValidation", *a.bypassDocumentValidation)
+	}
+	if a.collation != nil {
+
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(5) {
+			return nil, errors.New("the 'collation' command parameter requires a minimum server wire version of 5")
+		}
+		dst = bsoncore.AppendDocumentElement(dst, "collation", a.collation)
+	}
+	if a.comment != nil {
+
+		dst = bsoncore.AppendStringElement(dst, "comment", *a.comment)
+	}
+	if a.hint.Type != bsontype.Type(0) {
+
+		dst = bsoncore.AppendValueElement(dst, "hint", a.hint)
+	}
+	if a.pipeline != nil {
+
+		dst = bsoncore.AppendArrayElement(dst, "pipeline", a.pipeline)
+	}
+	if a.let != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "let", a.let)
+	}
+	for optionName, optionValue := range a.customOptions {
+		dst = bsoncore.AppendValueElement(dst, optionName, optionValue)
+	}
+	cursorDoc, _ = bsoncore.AppendDocumentEnd(cursorDoc, cursorIdx)
+	dst = bsoncore.AppendDocumentElement(dst, "cursor", cursorDoc)
+
+	return dst, nil
+}
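+
+// The assembled command therefore has roughly this shape (an illustrative
+// extended-JSON sketch; only fields whose setters were called are present, and
+// the "aggregate" value is the int32 1 instead of a collection name for
+// collection-less pipelines such as $currentOp):
+//
+//	{
+//	    "aggregate": "coll",
+//	    "allowDiskUse": true,
+//	    "pipeline": [...],
+//	    "cursor": {"batchSize": 100}
+//	}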
+
+// AllowDiskUse enables writing to temporary files. When true, aggregation stages can write to the dbPath/_tmp directory.
+func (a *Aggregate) AllowDiskUse(allowDiskUse bool) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.allowDiskUse = &allowDiskUse
+	return a
+}
+
+// BatchSize specifies the number of documents to return in every batch.
+func (a *Aggregate) BatchSize(batchSize int32) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.batchSize = &batchSize
+	return a
+}
+
+// BypassDocumentValidation allows the write to opt out of document-level validation. This only applies when the $out stage is specified.
+func (a *Aggregate) BypassDocumentValidation(bypassDocumentValidation bool) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.bypassDocumentValidation = &bypassDocumentValidation
+	return a
+}
+
+// Collation specifies a collation. This option is only valid for server versions 3.4 and above.
+func (a *Aggregate) Collation(collation bsoncore.Document) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.collation = collation
+	return a
+}
+
+// Comment specifies an arbitrary string to help trace the operation through the database profiler, currentOp, and logs.
+func (a *Aggregate) Comment(comment string) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.comment = &comment
+	return a
+}
+
+// Hint specifies the index to use.
+func (a *Aggregate) Hint(hint bsoncore.Value) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.hint = hint
+	return a
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (a *Aggregate) MaxTime(maxTime *time.Duration) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.maxTime = maxTime
+	return a
+}
+
+// Pipeline determines how data is transformed for an aggregation.
+func (a *Aggregate) Pipeline(pipeline bsoncore.Document) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.pipeline = pipeline
+	return a
+}
+
+// Session sets the session for this operation.
+func (a *Aggregate) Session(session *session.Client) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.session = session
+	return a
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (a *Aggregate) ClusterClock(clock *session.ClusterClock) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.clock = clock
+	return a
+}
+
+// Collection sets the collection that this command will run against.
+func (a *Aggregate) Collection(collection string) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.collection = collection
+	return a
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (a *Aggregate) CommandMonitor(monitor *event.CommandMonitor) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.monitor = monitor
+	return a
+}
+
+// Database sets the database to run this operation against.
+func (a *Aggregate) Database(database string) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.database = database
+	return a
+}
+
+// Deployment sets the deployment to use for this operation.
+func (a *Aggregate) Deployment(deployment driver.Deployment) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.deployment = deployment
+	return a
+}
+
+// ReadConcern specifies the read concern for this operation.
+func (a *Aggregate) ReadConcern(readConcern *readconcern.ReadConcern) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.readConcern = readConcern
+	return a
+}
+
+// ReadPreference set the read preference used with this operation.
+func (a *Aggregate) ReadPreference(readPreference *readpref.ReadPref) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.readPreference = readPreference
+	return a
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (a *Aggregate) ServerSelector(selector description.ServerSelector) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.selector = selector
+	return a
+}
+
+// WriteConcern sets the write concern for this operation.
+func (a *Aggregate) WriteConcern(writeConcern *writeconcern.WriteConcern) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.writeConcern = writeConcern
+	return a
+}
+
+// Retry enables retryable writes for this operation. Retries are not handled automatically;
+// instead, a boolean is returned from Execute and SelectAndExecute that indicates whether the
+// operation can be retried. Retrying is handled by calling RetryExecute.
+func (a *Aggregate) Retry(retry driver.RetryMode) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.retry = &retry
+	return a
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (a *Aggregate) Crypt(crypt driver.Crypt) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.crypt = crypt
+	return a
+}
+
+// ServerAPI sets the server API version for this operation.
+func (a *Aggregate) ServerAPI(serverAPI *driver.ServerAPIOptions) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.serverAPI = serverAPI
+	return a
+}
+
+// Let specifies the let document to use. This option is only valid for server versions 5.0 and above.
+func (a *Aggregate) Let(let bsoncore.Document) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.let = let
+	return a
+}
+
+// HasOutputStage specifies whether the aggregate contains an output stage. Used in determining when to
+// append read preference at the operation level.
+func (a *Aggregate) HasOutputStage(hos bool) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.hasOutputStage = hos
+	return a
+}
+
+// CustomOptions specifies extra options to use in the aggregate command.
+func (a *Aggregate) CustomOptions(co map[string]bsoncore.Value) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.customOptions = co
+	return a
+}
+
+// Timeout sets the timeout for this operation.
+func (a *Aggregate) Timeout(timeout *time.Duration) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.timeout = timeout
+	return a
+}
+
+// OmitCSOTMaxTimeMS omits the automatically-calculated "maxTimeMS" from the
+// command when CSOT is enabled. It does not affect "maxTimeMS" set by
+// [Aggregate.MaxTime].
+func (a *Aggregate) OmitCSOTMaxTimeMS(omit bool) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.omitCSOTMaxTimeMS = omit
+	return a
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (a *Aggregate) Authenticator(authenticator driver.Authenticator) *Aggregate {
+	if a == nil {
+		a = new(Aggregate)
+	}
+
+	a.authenticator = authenticator
+	return a
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go
new file mode 100644
index 0000000000000000000000000000000000000000..64c98ba19a79da20a2fc62a61c29e93c68f616a7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go
@@ -0,0 +1,233 @@
+// Copyright (C) MongoDB, Inc. 2021-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Command is used to run a generic operation.
+type Command struct {
+	authenticator  driver.Authenticator
+	command        bsoncore.Document
+	database       string
+	deployment     driver.Deployment
+	selector       description.ServerSelector
+	readPreference *readpref.ReadPref
+	clock          *session.ClusterClock
+	session        *session.Client
+	monitor        *event.CommandMonitor
+	resultResponse bsoncore.Document
+	resultCursor   *driver.BatchCursor
+	crypt          driver.Crypt
+	serverAPI      *driver.ServerAPIOptions
+	createCursor   bool
+	cursorOpts     driver.CursorOptions
+	timeout        *time.Duration
+	logger         *logger.Logger
+}
+
+// NewCommand constructs and returns a new Command. Once the operation is executed, the result may only be accessed via
+// the Result() function.
+func NewCommand(command bsoncore.Document) *Command {
+	return &Command{
+		command: command,
+	}
+}
+
+// NewCursorCommand constructs a new Command. Once the operation is executed, the server response will be used to
+// construct a cursor, which can be accessed via the ResultCursor() function.
+func NewCursorCommand(command bsoncore.Document, cursorOpts driver.CursorOptions) *Command {
+	return &Command{
+		command:      command,
+		cursorOpts:   cursorOpts,
+		createCursor: true,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (c *Command) Result() bsoncore.Document { return c.resultResponse }
+
+// ResultCursor returns the BatchCursor that was constructed using the command response. If the operation was not
+// configured to create a cursor (i.e. it was created using NewCommand rather than NewCursorCommand), this function
+// will return nil and an error.
+func (c *Command) ResultCursor() (*driver.BatchCursor, error) {
+	if !c.createCursor {
+		return nil, errors.New("command operation was not configured to create a cursor, but a result cursor was requested")
+	}
+	return c.resultCursor, nil
+}
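+
+// A minimal usage sketch (assuming a deployment and a command document are
+// already in hand; cmdDoc, deployment, and ctx are hypothetical names):
+//
+//	op := NewCursorCommand(cmdDoc, driver.CursorOptions{}).
+//		Database("admin").
+//		Deployment(deployment)
+//	if err := op.Execute(ctx); err != nil {
+//		// handle the error
+//	}
+//	cursor, err := op.ResultCursor()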
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (c *Command) Execute(ctx context.Context) error {
+	if c.deployment == nil {
+		return errors.New("the Command operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn: func(dst []byte, _ description.SelectedServer) ([]byte, error) {
+			return append(dst, c.command[4:len(c.command)-1]...), nil
+		},
+		ProcessResponseFn: func(info driver.ResponseInfo) error {
+			c.resultResponse = info.ServerResponse
+
+			if c.createCursor {
+				cursorRes, err := driver.NewCursorResponse(info)
+				if err != nil {
+					return err
+				}
+
+				c.resultCursor, err = driver.NewBatchCursor(cursorRes, c.session, c.clock, c.cursorOpts)
+				return err
+			}
+
+			return nil
+		},
+		Client:         c.session,
+		Clock:          c.clock,
+		CommandMonitor: c.monitor,
+		Database:       c.database,
+		Deployment:     c.deployment,
+		ReadPreference: c.readPreference,
+		Selector:       c.selector,
+		Crypt:          c.crypt,
+		ServerAPI:      c.serverAPI,
+		Timeout:        c.timeout,
+		Logger:         c.logger,
+		Authenticator:  c.authenticator,
+	}.Execute(ctx)
+}
+
+// Session sets the session for this operation.
+func (c *Command) Session(session *session.Client) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.session = session
+	return c
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (c *Command) ClusterClock(clock *session.ClusterClock) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.clock = clock
+	return c
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (c *Command) CommandMonitor(monitor *event.CommandMonitor) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.monitor = monitor
+	return c
+}
+
+// Database sets the database to run this operation against.
+func (c *Command) Database(database string) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.database = database
+	return c
+}
+
+// Deployment sets the deployment to use for this operation.
+func (c *Command) Deployment(deployment driver.Deployment) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.deployment = deployment
+	return c
+}
+
+// ReadPreference sets the read preference used with this operation.
+func (c *Command) ReadPreference(readPreference *readpref.ReadPref) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.readPreference = readPreference
+	return c
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (c *Command) ServerSelector(selector description.ServerSelector) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.selector = selector
+	return c
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (c *Command) Crypt(crypt driver.Crypt) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.crypt = crypt
+	return c
+}
+
+// ServerAPI sets the server API version for this operation.
+func (c *Command) ServerAPI(serverAPI *driver.ServerAPIOptions) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.serverAPI = serverAPI
+	return c
+}
+
+// Timeout sets the timeout for this operation.
+func (c *Command) Timeout(timeout *time.Duration) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.timeout = timeout
+	return c
+}
+
+// Logger sets the logger for this operation.
+func (c *Command) Logger(logger *logger.Logger) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.logger = logger
+	return c
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (c *Command) Authenticator(authenticator driver.Authenticator) *Command {
+	if c == nil {
+		c = new(Command)
+	}
+
+	c.authenticator = authenticator
+	return c
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b402bdf638e319dae46536bc8f7ef6226808ab2
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go
@@ -0,0 +1,215 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// CommitTransaction attempts to commit a transaction.
+type CommitTransaction struct {
+	authenticator driver.Authenticator
+	maxTime       *time.Duration
+	recoveryToken bsoncore.Document
+	session       *session.Client
+	clock         *session.ClusterClock
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	writeConcern  *writeconcern.WriteConcern
+	retry         *driver.RetryMode
+	serverAPI     *driver.ServerAPIOptions
+}
+
+// NewCommitTransaction constructs and returns a new CommitTransaction.
+func NewCommitTransaction() *CommitTransaction {
+	return &CommitTransaction{}
+}
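+
+// A minimal usage sketch (not part of the upstream file; deployment, sess, and
+// token stand in for an assumed driver.Deployment, the session.Client of an
+// active transaction, and its recovery token):
+//
+//	ct := NewCommitTransaction().
+//		Database("admin").
+//		Deployment(deployment).
+//		Session(sess).
+//		RecoveryToken(token)
+//	err := ct.Execute(context.TODO())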
+
+func (ct *CommitTransaction) processResponse(driver.ResponseInfo) error {
+	return nil
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (ct *CommitTransaction) Execute(ctx context.Context) error {
+	if ct.deployment == nil {
+		return errors.New("the CommitTransaction operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         ct.command,
+		ProcessResponseFn: ct.processResponse,
+		RetryMode:         ct.retry,
+		Type:              driver.Write,
+		Client:            ct.session,
+		Clock:             ct.clock,
+		CommandMonitor:    ct.monitor,
+		Crypt:             ct.crypt,
+		Database:          ct.database,
+		Deployment:        ct.deployment,
+		MaxTime:           ct.maxTime,
+		Selector:          ct.selector,
+		WriteConcern:      ct.writeConcern,
+		ServerAPI:         ct.serverAPI,
+		Name:              driverutil.CommitTransactionOp,
+		Authenticator:     ct.authenticator,
+	}.Execute(ctx)
+}
+
+func (ct *CommitTransaction) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendInt32Element(dst, "commitTransaction", 1)
+	if ct.recoveryToken != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "recoveryToken", ct.recoveryToken)
+	}
+	return dst, nil
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (ct *CommitTransaction) MaxTime(maxTime *time.Duration) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.maxTime = maxTime
+	return ct
+}
+
+// RecoveryToken sets the recovery token to use when committing or aborting a sharded transaction.
+func (ct *CommitTransaction) RecoveryToken(recoveryToken bsoncore.Document) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.recoveryToken = recoveryToken
+	return ct
+}
+
+// Session sets the session for this operation.
+func (ct *CommitTransaction) Session(session *session.Client) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.session = session
+	return ct
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (ct *CommitTransaction) ClusterClock(clock *session.ClusterClock) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.clock = clock
+	return ct
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (ct *CommitTransaction) CommandMonitor(monitor *event.CommandMonitor) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.monitor = monitor
+	return ct
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (ct *CommitTransaction) Crypt(crypt driver.Crypt) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.crypt = crypt
+	return ct
+}
+
+// Database sets the database to run this operation against.
+func (ct *CommitTransaction) Database(database string) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.database = database
+	return ct
+}
+
+// Deployment sets the deployment to use for this operation.
+func (ct *CommitTransaction) Deployment(deployment driver.Deployment) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.deployment = deployment
+	return ct
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (ct *CommitTransaction) ServerSelector(selector description.ServerSelector) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.selector = selector
+	return ct
+}
+
+// WriteConcern sets the write concern for this operation.
+func (ct *CommitTransaction) WriteConcern(writeConcern *writeconcern.WriteConcern) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.writeConcern = writeConcern
+	return ct
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (ct *CommitTransaction) Retry(retry driver.RetryMode) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.retry = &retry
+	return ct
+}
+
+// ServerAPI sets the server API version for this operation.
+func (ct *CommitTransaction) ServerAPI(serverAPI *driver.ServerAPIOptions) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.serverAPI = serverAPI
+	return ct
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (ct *CommitTransaction) Authenticator(authenticator driver.Authenticator) *CommitTransaction {
+	if ct == nil {
+		ct = new(CommitTransaction)
+	}
+
+	ct.authenticator = authenticator
+	return ct
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go
new file mode 100644
index 0000000000000000000000000000000000000000..eaafc9a24444c85becdcf83a1b30024081b0f24f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go
@@ -0,0 +1,325 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Count represents a count operation.
+type Count struct {
+	authenticator  driver.Authenticator
+	maxTime        *time.Duration
+	query          bsoncore.Document
+	session        *session.Client
+	clock          *session.ClusterClock
+	collection     string
+	comment        bsoncore.Value
+	monitor        *event.CommandMonitor
+	crypt          driver.Crypt
+	database       string
+	deployment     driver.Deployment
+	readConcern    *readconcern.ReadConcern
+	readPreference *readpref.ReadPref
+	selector       description.ServerSelector
+	retry          *driver.RetryMode
+	result         CountResult
+	serverAPI      *driver.ServerAPIOptions
+	timeout        *time.Duration
+}
+
+// CountResult represents a count result returned by the server.
+type CountResult struct {
+	// The number of documents found.
+	N int64
+}
+
+func buildCountResult(response bsoncore.Document) (CountResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return CountResult{}, err
+	}
+	cr := CountResult{}
+	for _, element := range elements {
+		switch element.Key() {
+		case "n": // for count using original command
+			var ok bool
+			cr.N, ok = element.Value().AsInt64OK()
+			if !ok {
+				return cr, fmt.Errorf("response field 'n' is type int64, but received BSON type %s",
+					element.Value().Type)
+			}
+		case "cursor": // for count using aggregate with $collStats
+			firstBatch, err := element.Value().Document().LookupErr("firstBatch")
+			if err != nil {
+				return cr, err
+			}
+
+			// get count value from first batch
+			val := firstBatch.Array().Index(0)
+			count, err := val.Document().LookupErr("n")
+			if err != nil {
+				return cr, err
+			}
+
+			// use count as Int64 for result
+			var ok bool
+			cr.N, ok = count.AsInt64OK()
+			if !ok {
+				return cr, fmt.Errorf("response field 'n' is type int64, but received BSON type %s",
+					count.Type)
+			}
+		}
+	}
+	return cr, nil
+}
+
+// NewCount constructs and returns a new Count.
+func NewCount() *Count {
+	return &Count{}
+}
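+
+// A minimal usage sketch (not part of the upstream file; deployment stands in
+// for an assumed driver.Deployment): count the documents in db.coll that match
+// a filter.
+//
+//	op := NewCount().
+//		Database("db").
+//		Collection("coll").
+//		Query(bsoncore.NewDocumentBuilder().AppendString("status", "active").Build()).
+//		Deployment(deployment)
+//	if err := op.Execute(context.TODO()); err == nil {
+//		_ = op.Result().N // number of matching documents
+//	}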
+
+// Result returns the result of executing this operation.
+func (c *Count) Result() CountResult { return c.result }
+
+func (c *Count) processResponse(info driver.ResponseInfo) error {
+	var err error
+	c.result, err = buildCountResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (c *Count) Execute(ctx context.Context) error {
+	if c.deployment == nil {
+		return errors.New("the Count operation must have a Deployment set before Execute can be called")
+	}
+
+	err := driver.Operation{
+		CommandFn:         c.command,
+		ProcessResponseFn: c.processResponse,
+		RetryMode:         c.retry,
+		Type:              driver.Read,
+		Client:            c.session,
+		Clock:             c.clock,
+		CommandMonitor:    c.monitor,
+		Crypt:             c.crypt,
+		Database:          c.database,
+		Deployment:        c.deployment,
+		MaxTime:           c.maxTime,
+		ReadConcern:       c.readConcern,
+		ReadPreference:    c.readPreference,
+		Selector:          c.selector,
+		ServerAPI:         c.serverAPI,
+		Timeout:           c.timeout,
+		Name:              driverutil.CountOp,
+		Authenticator:     c.authenticator,
+	}.Execute(ctx)
+
+	// Swallow error if NamespaceNotFound(26) is returned from aggregate on non-existent namespace
+	if err != nil {
+		dErr, ok := err.(driver.Error)
+		if ok && dErr.Code == 26 {
+			err = nil
+		}
+	}
+	return err
+}
+
+func (c *Count) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "count", c.collection)
+	if c.query != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "query", c.query)
+	}
+	if c.comment.Type != bsontype.Type(0) {
+		dst = bsoncore.AppendValueElement(dst, "comment", c.comment)
+	}
+	return dst, nil
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (c *Count) MaxTime(maxTime *time.Duration) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.maxTime = maxTime
+	return c
+}
+
+// Query specifies the selection filter that determines which documents are counted.
+func (c *Count) Query(query bsoncore.Document) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.query = query
+	return c
+}
+
+// Session sets the session for this operation.
+func (c *Count) Session(session *session.Client) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.session = session
+	return c
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (c *Count) ClusterClock(clock *session.ClusterClock) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.clock = clock
+	return c
+}
+
+// Collection sets the collection that this command will run against.
+func (c *Count) Collection(collection string) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.collection = collection
+	return c
+}
+
+// Comment sets a value to help trace an operation.
+func (c *Count) Comment(comment bsoncore.Value) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.comment = comment
+	return c
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (c *Count) CommandMonitor(monitor *event.CommandMonitor) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.monitor = monitor
+	return c
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (c *Count) Crypt(crypt driver.Crypt) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.crypt = crypt
+	return c
+}
+
+// Database sets the database to run this operation against.
+func (c *Count) Database(database string) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.database = database
+	return c
+}
+
+// Deployment sets the deployment to use for this operation.
+func (c *Count) Deployment(deployment driver.Deployment) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.deployment = deployment
+	return c
+}
+
+// ReadConcern specifies the read concern for this operation.
+func (c *Count) ReadConcern(readConcern *readconcern.ReadConcern) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.readConcern = readConcern
+	return c
+}
+
+// ReadPreference sets the read preference used with this operation.
+func (c *Count) ReadPreference(readPreference *readpref.ReadPref) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.readPreference = readPreference
+	return c
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (c *Count) ServerSelector(selector description.ServerSelector) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.selector = selector
+	return c
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (c *Count) Retry(retry driver.RetryMode) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.retry = &retry
+	return c
+}
+
+// ServerAPI sets the server API version for this operation.
+func (c *Count) ServerAPI(serverAPI *driver.ServerAPIOptions) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.serverAPI = serverAPI
+	return c
+}
+
+// Timeout sets the timeout for this operation.
+func (c *Count) Timeout(timeout *time.Duration) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.timeout = timeout
+	return c
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (c *Count) Authenticator(authenticator driver.Authenticator) *Count {
+	if c == nil {
+		c = new(Count)
+	}
+
+	c.authenticator = authenticator
+	return c
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go
new file mode 100644
index 0000000000000000000000000000000000000000..4878e2c7771defa30720d38eb48ecf71e3cdd59d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go
@@ -0,0 +1,413 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Create represents a create operation.
+type Create struct {
+	authenticator                driver.Authenticator
+	capped                       *bool
+	collation                    bsoncore.Document
+	changeStreamPreAndPostImages bsoncore.Document
+	collectionName               *string
+	indexOptionDefaults          bsoncore.Document
+	max                          *int64
+	pipeline                     bsoncore.Document
+	size                         *int64
+	storageEngine                bsoncore.Document
+	validationAction             *string
+	validationLevel              *string
+	validator                    bsoncore.Document
+	viewOn                       *string
+	session                      *session.Client
+	clock                        *session.ClusterClock
+	monitor                      *event.CommandMonitor
+	crypt                        driver.Crypt
+	database                     string
+	deployment                   driver.Deployment
+	selector                     description.ServerSelector
+	writeConcern                 *writeconcern.WriteConcern
+	serverAPI                    *driver.ServerAPIOptions
+	expireAfterSeconds           *int64
+	timeSeries                   bsoncore.Document
+	encryptedFields              bsoncore.Document
+	clusteredIndex               bsoncore.Document
+}
+
+// NewCreate constructs and returns a new Create.
+func NewCreate(collectionName string) *Create {
+	return &Create{
+		collectionName: &collectionName,
+	}
+}
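+
+// A minimal usage sketch (not part of the upstream file; deployment stands in
+// for an assumed driver.Deployment): create a capped collection limited to
+// 1 MiB.
+//
+//	op := NewCreate("events").
+//		Database("db").
+//		Capped(true).
+//		Size(1024 * 1024).
+//		Deployment(deployment)
+//	err := op.Execute(context.TODO())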
+
+func (c *Create) processResponse(driver.ResponseInfo) error {
+	return nil
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (c *Create) Execute(ctx context.Context) error {
+	if c.deployment == nil {
+		return errors.New("the Create operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         c.command,
+		ProcessResponseFn: c.processResponse,
+		Client:            c.session,
+		Clock:             c.clock,
+		CommandMonitor:    c.monitor,
+		Crypt:             c.crypt,
+		Database:          c.database,
+		Deployment:        c.deployment,
+		Selector:          c.selector,
+		WriteConcern:      c.writeConcern,
+		ServerAPI:         c.serverAPI,
+		Authenticator:     c.authenticator,
+	}.Execute(ctx)
+}
+
+func (c *Create) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	if c.collectionName != nil {
+		dst = bsoncore.AppendStringElement(dst, "create", *c.collectionName)
+	}
+	if c.capped != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "capped", *c.capped)
+	}
+	if c.changeStreamPreAndPostImages != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "changeStreamPreAndPostImages", c.changeStreamPreAndPostImages)
+	}
+	if c.collation != nil {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(5) {
+			return nil, errors.New("the 'collation' command parameter requires a minimum server wire version of 5")
+		}
+		dst = bsoncore.AppendDocumentElement(dst, "collation", c.collation)
+	}
+	if c.indexOptionDefaults != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "indexOptionDefaults", c.indexOptionDefaults)
+	}
+	if c.max != nil {
+		dst = bsoncore.AppendInt64Element(dst, "max", *c.max)
+	}
+	if c.pipeline != nil {
+		dst = bsoncore.AppendArrayElement(dst, "pipeline", c.pipeline)
+	}
+	if c.size != nil {
+		dst = bsoncore.AppendInt64Element(dst, "size", *c.size)
+	}
+	if c.storageEngine != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "storageEngine", c.storageEngine)
+	}
+	if c.validationAction != nil {
+		dst = bsoncore.AppendStringElement(dst, "validationAction", *c.validationAction)
+	}
+	if c.validationLevel != nil {
+		dst = bsoncore.AppendStringElement(dst, "validationLevel", *c.validationLevel)
+	}
+	if c.validator != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "validator", c.validator)
+	}
+	if c.viewOn != nil {
+		dst = bsoncore.AppendStringElement(dst, "viewOn", *c.viewOn)
+	}
+	if c.expireAfterSeconds != nil {
+		dst = bsoncore.AppendInt64Element(dst, "expireAfterSeconds", *c.expireAfterSeconds)
+	}
+	if c.timeSeries != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "timeseries", c.timeSeries)
+	}
+	if c.encryptedFields != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "encryptedFields", c.encryptedFields)
+	}
+	if c.clusteredIndex != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "clusteredIndex", c.clusteredIndex)
+	}
+	return dst, nil
+}
+
+// Capped specifies if the collection is capped.
+func (c *Create) Capped(capped bool) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.capped = &capped
+	return c
+}
+
+// Collation specifies a collation. This option is only valid for server versions 3.4 and above.
+func (c *Create) Collation(collation bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.collation = collation
+	return c
+}
+
+// ChangeStreamPreAndPostImages specifies how change streams opened against the collection can return pre-
+// and post-images of updated documents. This option is only valid for server versions 6.0 and above.
+func (c *Create) ChangeStreamPreAndPostImages(csppi bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.changeStreamPreAndPostImages = csppi
+	return c
+}
+
+// CollectionName specifies the name of the collection to create.
+func (c *Create) CollectionName(collectionName string) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.collectionName = &collectionName
+	return c
+}
+
+// IndexOptionDefaults specifies a default configuration for indexes on the collection.
+func (c *Create) IndexOptionDefaults(indexOptionDefaults bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.indexOptionDefaults = indexOptionDefaults
+	return c
+}
+
+// Max specifies the maximum number of documents allowed in a capped collection.
+func (c *Create) Max(max int64) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.max = &max
+	return c
+}
+
+// Pipeline specifies the aggregation pipeline to be run against the source to create the view.
+func (c *Create) Pipeline(pipeline bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.pipeline = pipeline
+	return c
+}
+
+// Size specifies the maximum size in bytes for a capped collection.
+func (c *Create) Size(size int64) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.size = &size
+	return c
+}
+
+// StorageEngine specifies the storage engine to use for the collection.
+func (c *Create) StorageEngine(storageEngine bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.storageEngine = storageEngine
+	return c
+}
+
+// ValidationAction specifies what should happen if a document being inserted does not pass validation.
+func (c *Create) ValidationAction(validationAction string) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.validationAction = &validationAction
+	return c
+}
+
+// ValidationLevel specifies how strictly the server applies validation rules to existing documents in the collection
+// during update operations.
+func (c *Create) ValidationLevel(validationLevel string) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.validationLevel = &validationLevel
+	return c
+}
+
+// Validator specifies validation rules for the collection.
+func (c *Create) Validator(validator bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.validator = validator
+	return c
+}
+
+// ViewOn specifies the name of the source collection or view on which the view will be created.
+func (c *Create) ViewOn(viewOn string) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.viewOn = &viewOn
+	return c
+}
+
+// Session sets the session for this operation.
+func (c *Create) Session(session *session.Client) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.session = session
+	return c
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (c *Create) ClusterClock(clock *session.ClusterClock) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.clock = clock
+	return c
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (c *Create) CommandMonitor(monitor *event.CommandMonitor) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.monitor = monitor
+	return c
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (c *Create) Crypt(crypt driver.Crypt) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.crypt = crypt
+	return c
+}
+
+// Database sets the database to run this operation against.
+func (c *Create) Database(database string) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.database = database
+	return c
+}
+
+// Deployment sets the deployment to use for this operation.
+func (c *Create) Deployment(deployment driver.Deployment) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.deployment = deployment
+	return c
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (c *Create) ServerSelector(selector description.ServerSelector) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.selector = selector
+	return c
+}
+
+// WriteConcern sets the write concern for this operation.
+func (c *Create) WriteConcern(writeConcern *writeconcern.WriteConcern) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.writeConcern = writeConcern
+	return c
+}
+
+// ServerAPI sets the server API version for this operation.
+func (c *Create) ServerAPI(serverAPI *driver.ServerAPIOptions) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.serverAPI = serverAPI
+	return c
+}
+
+// ExpireAfterSeconds sets the seconds to wait before deleting old time-series data.
+func (c *Create) ExpireAfterSeconds(eas int64) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.expireAfterSeconds = &eas
+	return c
+}
+
+// TimeSeries sets the time series options for this operation.
+func (c *Create) TimeSeries(timeSeries bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.timeSeries = timeSeries
+	return c
+}
+
+// EncryptedFields sets the EncryptedFields for this operation.
+func (c *Create) EncryptedFields(ef bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.encryptedFields = ef
+	return c
+}
+
+// ClusteredIndex sets the ClusteredIndex option for this operation.
+func (c *Create) ClusteredIndex(ci bsoncore.Document) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.clusteredIndex = ci
+	return c
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (c *Create) Authenticator(authenticator driver.Authenticator) *Create {
+	if c == nil {
+		c = new(Create)
+	}
+
+	c.authenticator = authenticator
+	return c
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_indexes.go
new file mode 100644
index 0000000000000000000000000000000000000000..464c1762de543c4bb60d5539ad77751864615363
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_indexes.go
@@ -0,0 +1,292 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// CreateIndexes performs a createIndexes operation.
+type CreateIndexes struct {
+	authenticator driver.Authenticator
+	commitQuorum  bsoncore.Value
+	indexes       bsoncore.Document
+	maxTime       *time.Duration
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	writeConcern  *writeconcern.WriteConcern
+	result        CreateIndexesResult
+	serverAPI     *driver.ServerAPIOptions
+	timeout       *time.Duration
+}
+
+// CreateIndexesResult represents a createIndexes result returned by the server.
+type CreateIndexesResult struct {
+	// Whether the collection was created automatically.
+	CreatedCollectionAutomatically bool
+	// The number of indexes existing after this command.
+	IndexesAfter int32
+	// The number of indexes existing before this command.
+	IndexesBefore int32
+}
+
+func buildCreateIndexesResult(response bsoncore.Document) (CreateIndexesResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return CreateIndexesResult{}, err
+	}
+	cir := CreateIndexesResult{}
+	for _, element := range elements {
+		switch element.Key() {
+		case "createdCollectionAutomatically":
+			var ok bool
+			cir.CreatedCollectionAutomatically, ok = element.Value().BooleanOK()
+			if !ok {
+				return cir, fmt.Errorf("response field 'createdCollectionAutomatically' is type bool, but received BSON type %s", element.Value().Type)
+			}
+		case "indexesAfter":
+			var ok bool
+			cir.IndexesAfter, ok = element.Value().AsInt32OK()
+			if !ok {
+				return cir, fmt.Errorf("response field 'indexesAfter' is type int32, but received BSON type %s", element.Value().Type)
+			}
+		case "indexesBefore":
+			var ok bool
+			cir.IndexesBefore, ok = element.Value().AsInt32OK()
+			if !ok {
+				return cir, fmt.Errorf("response field 'indexesBefore' is type int32, but received BSON type %s", element.Value().Type)
+			}
+		}
+	}
+	return cir, nil
+}
+
+// NewCreateIndexes constructs and returns a new CreateIndexes.
+func NewCreateIndexes(indexes bsoncore.Document) *CreateIndexes {
+	return &CreateIndexes{
+		indexes: indexes,
+	}
+}
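+
+// A minimal usage sketch (not part of the upstream file; deployment stands in
+// for an assumed driver.Deployment, and indexes is a BSON array of index
+// specification documents such as {key: {x: 1}, name: "x_1"}):
+//
+//	op := NewCreateIndexes(indexes).
+//		Database("db").
+//		Collection("coll").
+//		Deployment(deployment)
+//	if err := op.Execute(context.TODO()); err == nil {
+//		_ = op.Result().IndexesAfter // index count after the build
+//	}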
+
+// Result returns the result of executing this operation.
+func (ci *CreateIndexes) Result() CreateIndexesResult { return ci.result }
+
+func (ci *CreateIndexes) processResponse(info driver.ResponseInfo) error {
+	var err error
+	ci.result, err = buildCreateIndexesResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (ci *CreateIndexes) Execute(ctx context.Context) error {
+	if ci.deployment == nil {
+		return errors.New("the CreateIndexes operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         ci.command,
+		ProcessResponseFn: ci.processResponse,
+		Client:            ci.session,
+		Clock:             ci.clock,
+		CommandMonitor:    ci.monitor,
+		Crypt:             ci.crypt,
+		Database:          ci.database,
+		Deployment:        ci.deployment,
+		MaxTime:           ci.maxTime,
+		Selector:          ci.selector,
+		WriteConcern:      ci.writeConcern,
+		ServerAPI:         ci.serverAPI,
+		Timeout:           ci.timeout,
+		Name:              driverutil.CreateIndexesOp,
+		Authenticator:     ci.authenticator,
+	}.Execute(ctx)
+}
+
+func (ci *CreateIndexes) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "createIndexes", ci.collection)
+	if ci.commitQuorum.Type != bsontype.Type(0) {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(9) {
+			return nil, errors.New("the 'commitQuorum' command parameter requires a minimum server wire version of 9")
+		}
+		dst = bsoncore.AppendValueElement(dst, "commitQuorum", ci.commitQuorum)
+	}
+	if ci.indexes != nil {
+		dst = bsoncore.AppendArrayElement(dst, "indexes", ci.indexes)
+	}
+	return dst, nil
+}
+
+// CommitQuorum specifies the number of data-bearing members of a replica set, including the primary, that must
+// complete the index builds successfully before the primary marks the indexes as ready. This should be either a
+// string or an int32 value.
+func (ci *CreateIndexes) CommitQuorum(commitQuorum bsoncore.Value) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.commitQuorum = commitQuorum
+	return ci
+}
+
+// Indexes specifies an array containing index specification documents for the indexes being created.
+func (ci *CreateIndexes) Indexes(indexes bsoncore.Document) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.indexes = indexes
+	return ci
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (ci *CreateIndexes) MaxTime(maxTime *time.Duration) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.maxTime = maxTime
+	return ci
+}
+
+// Session sets the session for this operation.
+func (ci *CreateIndexes) Session(session *session.Client) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.session = session
+	return ci
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (ci *CreateIndexes) ClusterClock(clock *session.ClusterClock) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.clock = clock
+	return ci
+}
+
+// Collection sets the collection that this command will run against.
+func (ci *CreateIndexes) Collection(collection string) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.collection = collection
+	return ci
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (ci *CreateIndexes) CommandMonitor(monitor *event.CommandMonitor) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.monitor = monitor
+	return ci
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (ci *CreateIndexes) Crypt(crypt driver.Crypt) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.crypt = crypt
+	return ci
+}
+
+// Database sets the database to run this operation against.
+func (ci *CreateIndexes) Database(database string) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.database = database
+	return ci
+}
+
+// Deployment sets the deployment to use for this operation.
+func (ci *CreateIndexes) Deployment(deployment driver.Deployment) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.deployment = deployment
+	return ci
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (ci *CreateIndexes) ServerSelector(selector description.ServerSelector) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.selector = selector
+	return ci
+}
+
+// WriteConcern sets the write concern for this operation.
+func (ci *CreateIndexes) WriteConcern(writeConcern *writeconcern.WriteConcern) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.writeConcern = writeConcern
+	return ci
+}
+
+// ServerAPI sets the server API version for this operation.
+func (ci *CreateIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.serverAPI = serverAPI
+	return ci
+}
+
+// Timeout sets the timeout for this operation.
+func (ci *CreateIndexes) Timeout(timeout *time.Duration) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.timeout = timeout
+	return ci
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (ci *CreateIndexes) Authenticator(authenticator driver.Authenticator) *CreateIndexes {
+	if ci == nil {
+		ci = new(CreateIndexes)
+	}
+
+	ci.authenticator = authenticator
+	return ci
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go
new file mode 100644
index 0000000000000000000000000000000000000000..8185d27fe1df509ac734d8632d5ddfd2510ce0ae
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go
@@ -0,0 +1,251 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// CreateSearchIndexes performs a createSearchIndexes operation.
+type CreateSearchIndexes struct {
+	authenticator driver.Authenticator
+	indexes       bsoncore.Document
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	result        CreateSearchIndexesResult
+	serverAPI     *driver.ServerAPIOptions
+	timeout       *time.Duration
+}
+
+// CreateSearchIndexResult represents a single search index result in CreateSearchIndexesResult.
+type CreateSearchIndexResult struct {
+	Name string
+}
+
+// CreateSearchIndexesResult represents a createSearchIndexes result returned by the server.
+type CreateSearchIndexesResult struct {
+	IndexesCreated []CreateSearchIndexResult
+}
+
+func buildCreateSearchIndexesResult(response bsoncore.Document) (CreateSearchIndexesResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return CreateSearchIndexesResult{}, err
+	}
+	csir := CreateSearchIndexesResult{}
+	for _, element := range elements {
+		switch element.Key() {
+		case "indexesCreated":
+			arr, ok := element.Value().ArrayOK()
+			if !ok {
+				return csir, fmt.Errorf("response field 'indexesCreated' is type array, but received BSON type %s", element.Value().Type)
+			}
+
+			var values []bsoncore.Value
+			values, err = arr.Values()
+			if err != nil {
+				break
+			}
+
+			for _, val := range values {
+				valDoc, ok := val.DocumentOK()
+				if !ok {
+					return csir, fmt.Errorf("indexesCreated value is type document, but received BSON type %s", val.Type)
+				}
+				var indexesCreated CreateSearchIndexResult
+				if err = bson.Unmarshal(valDoc, &indexesCreated); err != nil {
+					return csir, err
+				}
+				csir.IndexesCreated = append(csir.IndexesCreated, indexesCreated)
+			}
+		}
+	}
+	return csir, nil
+}
+
+// NewCreateSearchIndexes constructs and returns a new CreateSearchIndexes.
+func NewCreateSearchIndexes(indexes bsoncore.Document) *CreateSearchIndexes {
+	return &CreateSearchIndexes{
+		indexes: indexes,
+	}
+}
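+
+// A minimal usage sketch (not part of the upstream file; deployment stands in
+// for an assumed driver.Deployment, and indexes is a BSON array of search
+// index definition documents): create the indexes and read back their names.
+//
+//	op := NewCreateSearchIndexes(indexes).
+//		Database("db").
+//		Collection("coll").
+//		Deployment(deployment)
+//	if err := op.Execute(context.TODO()); err == nil {
+//		for _, idx := range op.Result().IndexesCreated {
+//			_ = idx.Name
+//		}
+//	}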
+
+// Result returns the result of executing this operation.
+func (csi *CreateSearchIndexes) Result() CreateSearchIndexesResult { return csi.result }
+
+func (csi *CreateSearchIndexes) processResponse(info driver.ResponseInfo) error {
+	var err error
+	csi.result, err = buildCreateSearchIndexesResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (csi *CreateSearchIndexes) Execute(ctx context.Context) error {
+	if csi.deployment == nil {
+		return errors.New("the CreateSearchIndexes operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         csi.command,
+		ProcessResponseFn: csi.processResponse,
+		Client:            csi.session,
+		Clock:             csi.clock,
+		CommandMonitor:    csi.monitor,
+		Crypt:             csi.crypt,
+		Database:          csi.database,
+		Deployment:        csi.deployment,
+		Selector:          csi.selector,
+		ServerAPI:         csi.serverAPI,
+		Timeout:           csi.timeout,
+		Authenticator:     csi.authenticator,
+	}.Execute(ctx)
+}
+
+func (csi *CreateSearchIndexes) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "createSearchIndexes", csi.collection)
+	if csi.indexes != nil {
+		dst = bsoncore.AppendArrayElement(dst, "indexes", csi.indexes)
+	}
+	return dst, nil
+}
+
+// Indexes specifies an array containing index specification documents for the indexes being created.
+func (csi *CreateSearchIndexes) Indexes(indexes bsoncore.Document) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.indexes = indexes
+	return csi
+}
+
+// Session sets the session for this operation.
+func (csi *CreateSearchIndexes) Session(session *session.Client) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.session = session
+	return csi
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (csi *CreateSearchIndexes) ClusterClock(clock *session.ClusterClock) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.clock = clock
+	return csi
+}
+
+// Collection sets the collection that this command will run against.
+func (csi *CreateSearchIndexes) Collection(collection string) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.collection = collection
+	return csi
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (csi *CreateSearchIndexes) CommandMonitor(monitor *event.CommandMonitor) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.monitor = monitor
+	return csi
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (csi *CreateSearchIndexes) Crypt(crypt driver.Crypt) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.crypt = crypt
+	return csi
+}
+
+// Database sets the database to run this operation against.
+func (csi *CreateSearchIndexes) Database(database string) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.database = database
+	return csi
+}
+
+// Deployment sets the deployment to use for this operation.
+func (csi *CreateSearchIndexes) Deployment(deployment driver.Deployment) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.deployment = deployment
+	return csi
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (csi *CreateSearchIndexes) ServerSelector(selector description.ServerSelector) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.selector = selector
+	return csi
+}
+
+// ServerAPI sets the server API version for this operation.
+func (csi *CreateSearchIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.serverAPI = serverAPI
+	return csi
+}
+
+// Timeout sets the timeout for this operation.
+func (csi *CreateSearchIndexes) Timeout(timeout *time.Duration) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.timeout = timeout
+	return csi
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (csi *CreateSearchIndexes) Authenticator(authenticator driver.Authenticator) *CreateSearchIndexes {
+	if csi == nil {
+		csi = new(CreateSearchIndexes)
+	}
+
+	csi.authenticator = authenticator
+	return csi
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b520a54804d96a5bfd3d2f181e6fc48b89fe12b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go
@@ -0,0 +1,341 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Delete performs a delete operation.
+type Delete struct {
+	authenticator driver.Authenticator
+	comment       bsoncore.Value
+	deletes       []bsoncore.Document
+	ordered       *bool
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	writeConcern  *writeconcern.WriteConcern
+	retry         *driver.RetryMode
+	hint          *bool
+	result        DeleteResult
+	serverAPI     *driver.ServerAPIOptions
+	let           bsoncore.Document
+	timeout       *time.Duration
+	logger        *logger.Logger
+}
+
+// DeleteResult represents a delete result returned by the server.
+type DeleteResult struct {
+	// Number of documents successfully deleted.
+	N int64
+}
+
+func buildDeleteResult(response bsoncore.Document) (DeleteResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return DeleteResult{}, err
+	}
+	dr := DeleteResult{}
+	for _, element := range elements {
+		if element.Key() == "n" {
+			var ok bool
+			dr.N, ok = element.Value().AsInt64OK()
+			if !ok {
+				return dr, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type)
+			}
+		}
+	}
+	return dr, nil
+}
+
+// NewDelete constructs and returns a new Delete.
+func NewDelete(deletes ...bsoncore.Document) *Delete {
+	return &Delete{
+		deletes: deletes,
+	}
+}
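+
+// A minimal usage sketch (not part of the upstream file; deployment stands in
+// for an assumed driver.Deployment): delete all documents matching
+// {status: "stale"}. Each delete document has the form
+// {q: <filter>, limit: <n>}, where limit 0 means no limit.
+//
+//	del := bsoncore.NewDocumentBuilder().
+//		AppendDocument("q", bsoncore.NewDocumentBuilder().AppendString("status", "stale").Build()).
+//		AppendInt32("limit", 0).
+//		Build()
+//	op := NewDelete(del).Database("db").Collection("coll").Deployment(deployment)
+//	if err := op.Execute(context.TODO()); err == nil {
+//		_ = op.Result().N // number of documents deleted
+//	}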
+
+// Result returns the result of executing this operation.
+func (d *Delete) Result() DeleteResult { return d.result }
+
+func (d *Delete) processResponse(info driver.ResponseInfo) error {
+	dr, err := buildDeleteResult(info.ServerResponse)
+	d.result.N += dr.N
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (d *Delete) Execute(ctx context.Context) error {
+	if d.deployment == nil {
+		return errors.New("the Delete operation must have a Deployment set before Execute can be called")
+	}
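+	// Wrapping the delete documents in driver.Batches lets Operation.Execute
+	// split them across multiple delete commands when they exceed the server's
+	// size limits.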
+	batches := &driver.Batches{
+		Identifier: "deletes",
+		Documents:  d.deletes,
+		Ordered:    d.ordered,
+	}
+
+	return driver.Operation{
+		CommandFn:         d.command,
+		ProcessResponseFn: d.processResponse,
+		Batches:           batches,
+		RetryMode:         d.retry,
+		Type:              driver.Write,
+		Client:            d.session,
+		Clock:             d.clock,
+		CommandMonitor:    d.monitor,
+		Crypt:             d.crypt,
+		Database:          d.database,
+		Deployment:        d.deployment,
+		Selector:          d.selector,
+		WriteConcern:      d.writeConcern,
+		ServerAPI:         d.serverAPI,
+		Timeout:           d.timeout,
+		Logger:            d.logger,
+		Name:              driverutil.DeleteOp,
+		Authenticator:     d.authenticator,
+	}.Execute(ctx)
+}
+
+func (d *Delete) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "delete", d.collection)
+	if d.comment.Type != bsontype.Type(0) {
+		dst = bsoncore.AppendValueElement(dst, "comment", d.comment)
+	}
+	if d.ordered != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "ordered", *d.ordered)
+	}
+	if d.hint != nil && *d.hint {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(5) {
+			return nil, errors.New("the 'hint' command parameter requires a minimum server wire version of 5")
+		}
+		if !d.writeConcern.Acknowledged() {
+			return nil, errUnacknowledgedHint
+		}
+	}
+	if d.let != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "let", d.let)
+	}
+	return dst, nil
+}
+
+// Deletes adds documents to this operation that will be used to determine what documents to delete when this operation
+// is executed. These documents should have the form {q: <query>, limit: <integer limit>, collation: <document>}. The
+// collation field is optional. If limit is 0, there will be no limit on the number of documents deleted.
+func (d *Delete) Deletes(deletes ...bsoncore.Document) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.deletes = deletes
+	return d
+}
+
+// Ordered sets ordered. If true, the operation returns an error as soon as a write fails; if false, write
+// failures do not stop execution of the operation.
+func (d *Delete) Ordered(ordered bool) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.ordered = &ordered
+	return d
+}
+
+// Session sets the session for this operation.
+func (d *Delete) Session(session *session.Client) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.session = session
+	return d
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (d *Delete) ClusterClock(clock *session.ClusterClock) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.clock = clock
+	return d
+}
+
+// Collection sets the collection that this command will run against.
+func (d *Delete) Collection(collection string) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.collection = collection
+	return d
+}
+
+// Comment sets a value to help trace an operation.
+func (d *Delete) Comment(comment bsoncore.Value) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.comment = comment
+	return d
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (d *Delete) CommandMonitor(monitor *event.CommandMonitor) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.monitor = monitor
+	return d
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (d *Delete) Crypt(crypt driver.Crypt) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.crypt = crypt
+	return d
+}
+
+// Database sets the database to run this operation against.
+func (d *Delete) Database(database string) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.database = database
+	return d
+}
+
+// Deployment sets the deployment to use for this operation.
+func (d *Delete) Deployment(deployment driver.Deployment) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.deployment = deployment
+	return d
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (d *Delete) ServerSelector(selector description.ServerSelector) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.selector = selector
+	return d
+}
+
+// WriteConcern sets the write concern for this operation.
+func (d *Delete) WriteConcern(writeConcern *writeconcern.WriteConcern) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.writeConcern = writeConcern
+	return d
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (d *Delete) Retry(retry driver.RetryMode) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.retry = &retry
+	return d
+}
+
+// Hint is a flag to indicate that the delete documents contain a hint. Hint is only supported by
+// servers >= 4.4. Older servers >= 3.4 will report an error for using the hint option. For servers <
+// 3.4, the driver will return an error if the hint option is used.
+func (d *Delete) Hint(hint bool) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.hint = &hint
+	return d
+}
+
+// ServerAPI sets the server API version for this operation.
+func (d *Delete) ServerAPI(serverAPI *driver.ServerAPIOptions) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.serverAPI = serverAPI
+	return d
+}
+
+// Let specifies the let document to use. This option is only valid for server versions 5.0 and above.
+func (d *Delete) Let(let bsoncore.Document) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.let = let
+	return d
+}
+
+// Timeout sets the timeout for this operation.
+func (d *Delete) Timeout(timeout *time.Duration) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.timeout = timeout
+	return d
+}
+
+// Logger sets the logger for this operation.
+func (d *Delete) Logger(logger *logger.Logger) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.logger = logger
+	return d
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (d *Delete) Authenticator(authenticator driver.Authenticator) *Delete {
+	if d == nil {
+		d = new(Delete)
+	}
+
+	d.authenticator = authenticator
+	return d
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c39027e76956c6e918cf1d5773e37285d068496
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go
@@ -0,0 +1,324 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Distinct performs a distinct operation.
+type Distinct struct {
+	authenticator  driver.Authenticator
+	collation      bsoncore.Document
+	key            *string
+	maxTime        *time.Duration
+	query          bsoncore.Document
+	session        *session.Client
+	clock          *session.ClusterClock
+	collection     string
+	comment        bsoncore.Value
+	monitor        *event.CommandMonitor
+	crypt          driver.Crypt
+	database       string
+	deployment     driver.Deployment
+	readConcern    *readconcern.ReadConcern
+	readPreference *readpref.ReadPref
+	selector       description.ServerSelector
+	retry          *driver.RetryMode
+	result         DistinctResult
+	serverAPI      *driver.ServerAPIOptions
+	timeout        *time.Duration
+}
+
+// DistinctResult represents a distinct result returned by the server.
+type DistinctResult struct {
+	// The distinct values for the field.
+	Values bsoncore.Value
+}
+
+func buildDistinctResult(response bsoncore.Document) (DistinctResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return DistinctResult{}, err
+	}
+	dr := DistinctResult{}
+	for _, element := range elements {
+		if element.Key() == "values" {
+			dr.Values = element.Value()
+		}
+	}
+	return dr, nil
+}
+
+// NewDistinct constructs and returns a new Distinct.
+func NewDistinct(key string, query bsoncore.Document) *Distinct {
+	return &Distinct{
+		key:   &key,
+		query: query,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (d *Distinct) Result() DistinctResult { return d.result }
+
+func (d *Distinct) processResponse(info driver.ResponseInfo) error {
+	var err error
+	d.result, err = buildDistinctResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (d *Distinct) Execute(ctx context.Context) error {
+	if d.deployment == nil {
+		return errors.New("the Distinct operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         d.command,
+		ProcessResponseFn: d.processResponse,
+		RetryMode:         d.retry,
+		Type:              driver.Read,
+		Client:            d.session,
+		Clock:             d.clock,
+		CommandMonitor:    d.monitor,
+		Crypt:             d.crypt,
+		Database:          d.database,
+		Deployment:        d.deployment,
+		MaxTime:           d.maxTime,
+		ReadConcern:       d.readConcern,
+		ReadPreference:    d.readPreference,
+		Selector:          d.selector,
+		ServerAPI:         d.serverAPI,
+		Timeout:           d.timeout,
+		Name:              driverutil.DistinctOp,
+		Authenticator:     d.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (d *Distinct) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "distinct", d.collection)
+	if d.collation != nil {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(5) {
+			return nil, errors.New("the 'collation' command parameter requires a minimum server wire version of 5")
+		}
+		dst = bsoncore.AppendDocumentElement(dst, "collation", d.collation)
+	}
+	if d.comment.Type != bsontype.Type(0) {
+		dst = bsoncore.AppendValueElement(dst, "comment", d.comment)
+	}
+	if d.key != nil {
+		dst = bsoncore.AppendStringElement(dst, "key", *d.key)
+	}
+	if d.query != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "query", d.query)
+	}
+	return dst, nil
+}
+
+// Collation specifies a collation to be used.
+func (d *Distinct) Collation(collation bsoncore.Document) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.collation = collation
+	return d
+}
+
+// Key specifies which field to return distinct values for.
+func (d *Distinct) Key(key string) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.key = &key
+	return d
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (d *Distinct) MaxTime(maxTime *time.Duration) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.maxTime = maxTime
+	return d
+}
+
+// Query specifies which documents to return distinct values from.
+func (d *Distinct) Query(query bsoncore.Document) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.query = query
+	return d
+}
+
+// Session sets the session for this operation.
+func (d *Distinct) Session(session *session.Client) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.session = session
+	return d
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (d *Distinct) ClusterClock(clock *session.ClusterClock) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.clock = clock
+	return d
+}
+
+// Collection sets the collection that this command will run against.
+func (d *Distinct) Collection(collection string) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.collection = collection
+	return d
+}
+
+// Comment sets a value to help trace an operation.
+func (d *Distinct) Comment(comment bsoncore.Value) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.comment = comment
+	return d
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (d *Distinct) CommandMonitor(monitor *event.CommandMonitor) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.monitor = monitor
+	return d
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (d *Distinct) Crypt(crypt driver.Crypt) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.crypt = crypt
+	return d
+}
+
+// Database sets the database to run this operation against.
+func (d *Distinct) Database(database string) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.database = database
+	return d
+}
+
+// Deployment sets the deployment to use for this operation.
+func (d *Distinct) Deployment(deployment driver.Deployment) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.deployment = deployment
+	return d
+}
+
+// ReadConcern specifies the read concern for this operation.
+func (d *Distinct) ReadConcern(readConcern *readconcern.ReadConcern) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.readConcern = readConcern
+	return d
+}
+
+// ReadPreference sets the read preference used with this operation.
+func (d *Distinct) ReadPreference(readPreference *readpref.ReadPref) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.readPreference = readPreference
+	return d
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (d *Distinct) ServerSelector(selector description.ServerSelector) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.selector = selector
+	return d
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (d *Distinct) Retry(retry driver.RetryMode) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.retry = &retry
+	return d
+}
+
+// ServerAPI sets the server API version for this operation.
+func (d *Distinct) ServerAPI(serverAPI *driver.ServerAPIOptions) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.serverAPI = serverAPI
+	return d
+}
+
+// Timeout sets the timeout for this operation.
+func (d *Distinct) Timeout(timeout *time.Duration) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.timeout = timeout
+	return d
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (d *Distinct) Authenticator(authenticator driver.Authenticator) *Distinct {
+	if d == nil {
+		d = new(Distinct)
+	}
+
+	d.authenticator = authenticator
+	return d
+}
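+
+// Example usage (editor's sketch, not part of the upstream file): running a
+// distinct and reading the raw values array. "db", "coll", "category", the
+// query, and deployment are placeholders.
+//
+//	query := bsoncore.NewDocumentBuilder().AppendString("status", "active").Build()
+//	op := operation.NewDistinct("category", query).
+//		Database("db").Collection("coll").
+//		Deployment(deployment)
+//	if err := op.Execute(ctx); err != nil {
+//		return err
+//	}
+//	values := op.Result().Values // BSON array of the distinct values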
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..e55b12a74880be7ecda60814898a80b767dbed17
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2024-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package operation is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package operation
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a32c2f8d4799128ba5d2b7aa76cc164fcad7e82
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go
@@ -0,0 +1,236 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// DropCollection performs a drop operation.
+type DropCollection struct {
+	authenticator driver.Authenticator
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	writeConcern  *writeconcern.WriteConcern
+	result        DropCollectionResult
+	serverAPI     *driver.ServerAPIOptions
+	timeout       *time.Duration
+}
+
+// DropCollectionResult represents a dropCollection result returned by the server.
+type DropCollectionResult struct {
+	// The number of indexes in the dropped collection.
+	NIndexesWas int32
+	// The namespace of the dropped collection.
+	Ns string
+}
+
+func buildDropCollectionResult(response bsoncore.Document) (DropCollectionResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return DropCollectionResult{}, err
+	}
+	dcr := DropCollectionResult{}
+	for _, element := range elements {
+		switch element.Key() {
+		case "nIndexesWas":
+			var ok bool
+			dcr.NIndexesWas, ok = element.Value().AsInt32OK()
+			if !ok {
+				return dcr, fmt.Errorf("response field 'nIndexesWas' is type int32, but received BSON type %s", element.Value().Type)
+			}
+		case "ns":
+			var ok bool
+			dcr.Ns, ok = element.Value().StringValueOK()
+			if !ok {
+				return dcr, fmt.Errorf("response field 'ns' is type string, but received BSON type %s", element.Value().Type)
+			}
+		}
+	}
+	return dcr, nil
+}
+
+// NewDropCollection constructs and returns a new DropCollection.
+func NewDropCollection() *DropCollection {
+	return &DropCollection{}
+}
+
+// Result returns the result of executing this operation.
+func (dc *DropCollection) Result() DropCollectionResult { return dc.result }
+
+func (dc *DropCollection) processResponse(info driver.ResponseInfo) error {
+	var err error
+	dc.result, err = buildDropCollectionResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (dc *DropCollection) Execute(ctx context.Context) error {
+	if dc.deployment == nil {
+		return errors.New("the DropCollection operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         dc.command,
+		ProcessResponseFn: dc.processResponse,
+		Client:            dc.session,
+		Clock:             dc.clock,
+		CommandMonitor:    dc.monitor,
+		Crypt:             dc.crypt,
+		Database:          dc.database,
+		Deployment:        dc.deployment,
+		Selector:          dc.selector,
+		WriteConcern:      dc.writeConcern,
+		ServerAPI:         dc.serverAPI,
+		Timeout:           dc.timeout,
+		Name:              driverutil.DropOp,
+		Authenticator:     dc.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (dc *DropCollection) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "drop", dc.collection)
+	return dst, nil
+}
+
+// Session sets the session for this operation.
+func (dc *DropCollection) Session(session *session.Client) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.session = session
+	return dc
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (dc *DropCollection) ClusterClock(clock *session.ClusterClock) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.clock = clock
+	return dc
+}
+
+// Collection sets the collection that this command will run against.
+func (dc *DropCollection) Collection(collection string) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.collection = collection
+	return dc
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (dc *DropCollection) CommandMonitor(monitor *event.CommandMonitor) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.monitor = monitor
+	return dc
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (dc *DropCollection) Crypt(crypt driver.Crypt) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.crypt = crypt
+	return dc
+}
+
+// Database sets the database to run this operation against.
+func (dc *DropCollection) Database(database string) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.database = database
+	return dc
+}
+
+// Deployment sets the deployment to use for this operation.
+func (dc *DropCollection) Deployment(deployment driver.Deployment) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.deployment = deployment
+	return dc
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (dc *DropCollection) ServerSelector(selector description.ServerSelector) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.selector = selector
+	return dc
+}
+
+// WriteConcern sets the write concern for this operation.
+func (dc *DropCollection) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.writeConcern = writeConcern
+	return dc
+}
+
+// ServerAPI sets the server API version for this operation.
+func (dc *DropCollection) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.serverAPI = serverAPI
+	return dc
+}
+
+// Timeout sets the timeout for this operation.
+func (dc *DropCollection) Timeout(timeout *time.Duration) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.timeout = timeout
+	return dc
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (dc *DropCollection) Authenticator(authenticator driver.Authenticator) *DropCollection {
+	if dc == nil {
+		dc = new(DropCollection)
+	}
+
+	dc.authenticator = authenticator
+	return dc
+}
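+
+// Example usage (editor's sketch, not part of the upstream file): dropping a
+// collection and inspecting the result. Identifiers and deployment are
+// placeholders.
+//
+//	op := operation.NewDropCollection().
+//		Database("db").Collection("coll").
+//		Deployment(deployment)
+//	if err := op.Execute(ctx); err != nil {
+//		return err
+//	}
+//	res := op.Result() // res.Ns == "db.coll"; res.NIndexesWas is the index count before the drop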
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go
new file mode 100644
index 0000000000000000000000000000000000000000..19956210d1b1ebf2f8a37f1563a7cac55f31661f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go
@@ -0,0 +1,168 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// DropDatabase performs a dropDatabase operation.
+type DropDatabase struct {
+	authenticator driver.Authenticator
+	session       *session.Client
+	clock         *session.ClusterClock
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	writeConcern  *writeconcern.WriteConcern
+	serverAPI     *driver.ServerAPIOptions
+}
+
+// NewDropDatabase constructs and returns a new DropDatabase.
+func NewDropDatabase() *DropDatabase {
+	return &DropDatabase{}
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (dd *DropDatabase) Execute(ctx context.Context) error {
+	if dd.deployment == nil {
+		return errors.New("the DropDatabase operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:      dd.command,
+		Client:         dd.session,
+		Clock:          dd.clock,
+		CommandMonitor: dd.monitor,
+		Crypt:          dd.crypt,
+		Database:       dd.database,
+		Deployment:     dd.deployment,
+		Selector:       dd.selector,
+		WriteConcern:   dd.writeConcern,
+		ServerAPI:      dd.serverAPI,
+		Name:           driverutil.DropDatabaseOp,
+		Authenticator:  dd.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (dd *DropDatabase) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendInt32Element(dst, "dropDatabase", 1)
+	return dst, nil
+}
+
+// Session sets the session for this operation.
+func (dd *DropDatabase) Session(session *session.Client) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.session = session
+	return dd
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (dd *DropDatabase) ClusterClock(clock *session.ClusterClock) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.clock = clock
+	return dd
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (dd *DropDatabase) CommandMonitor(monitor *event.CommandMonitor) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.monitor = monitor
+	return dd
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (dd *DropDatabase) Crypt(crypt driver.Crypt) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.crypt = crypt
+	return dd
+}
+
+// Database sets the database to run this operation against.
+func (dd *DropDatabase) Database(database string) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.database = database
+	return dd
+}
+
+// Deployment sets the deployment to use for this operation.
+func (dd *DropDatabase) Deployment(deployment driver.Deployment) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.deployment = deployment
+	return dd
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (dd *DropDatabase) ServerSelector(selector description.ServerSelector) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.selector = selector
+	return dd
+}
+
+// WriteConcern sets the write concern for this operation.
+func (dd *DropDatabase) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.writeConcern = writeConcern
+	return dd
+}
+
+// ServerAPI sets the server API version for this operation.
+func (dd *DropDatabase) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.serverAPI = serverAPI
+	return dd
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (dd *DropDatabase) Authenticator(authenticator driver.Authenticator) *DropDatabase {
+	if dd == nil {
+		dd = new(DropDatabase)
+	}
+
+	dd.authenticator = authenticator
+	return dd
+}
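+
+// Example usage (editor's sketch, not part of the upstream file): dropDatabase
+// returns no result document beyond the ok status, so Execute alone suffices.
+// "db" and deployment are placeholders.
+//
+//	op := operation.NewDropDatabase().
+//		Database("db").
+//		Deployment(deployment)
+//	err := op.Execute(ctx)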
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go
new file mode 100644
index 0000000000000000000000000000000000000000..a22496b1e889a4717ad14937029b8735415f7e7d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go
@@ -0,0 +1,262 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// DropIndexes performs a dropIndexes operation.
+type DropIndexes struct {
+	authenticator driver.Authenticator
+	index         any
+	maxTime       *time.Duration
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	writeConcern  *writeconcern.WriteConcern
+	result        DropIndexesResult
+	serverAPI     *driver.ServerAPIOptions
+	timeout       *time.Duration
+}
+
+// DropIndexesResult represents a dropIndexes result returned by the server.
+type DropIndexesResult struct {
+	// Number of indexes that existed before the drop was executed.
+	NIndexesWas int32
+}
+
+func buildDropIndexesResult(response bsoncore.Document) (DropIndexesResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return DropIndexesResult{}, err
+	}
+	dir := DropIndexesResult{}
+	for _, element := range elements {
+		if element.Key() == "nIndexesWas" {
+			var ok bool
+			dir.NIndexesWas, ok = element.Value().AsInt32OK()
+			if !ok {
+				return dir, fmt.Errorf("response field 'nIndexesWas' is type int32, but received BSON type %s", element.Value().Type)
+			}
+		}
+	}
+	return dir, nil
+}
+
+// NewDropIndexes constructs and returns a new DropIndexes.
+func NewDropIndexes(index any) *DropIndexes {
+	return &DropIndexes{
+		index: index,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (di *DropIndexes) Result() DropIndexesResult { return di.result }
+
+func (di *DropIndexes) processResponse(info driver.ResponseInfo) error {
+	var err error
+	di.result, err = buildDropIndexesResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (di *DropIndexes) Execute(ctx context.Context) error {
+	if di.deployment == nil {
+		return errors.New("the DropIndexes operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         di.command,
+		ProcessResponseFn: di.processResponse,
+		Client:            di.session,
+		Clock:             di.clock,
+		CommandMonitor:    di.monitor,
+		Crypt:             di.crypt,
+		Database:          di.database,
+		Deployment:        di.deployment,
+		MaxTime:           di.maxTime,
+		Selector:          di.selector,
+		WriteConcern:      di.writeConcern,
+		ServerAPI:         di.serverAPI,
+		Timeout:           di.timeout,
+		Name:              driverutil.DropIndexesOp,
+		Authenticator:     di.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (di *DropIndexes) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "dropIndexes", di.collection)
+
+	switch t := di.index.(type) {
+	case string:
+		dst = bsoncore.AppendStringElement(dst, "index", t)
+	case bsoncore.Document:
+		if t != nil {
+			dst = bsoncore.AppendDocumentElement(dst, "index", t)
+		}
+	}
+
+	return dst, nil
+}
+
+// Index specifies the name of the index to drop. If '*' is specified, all indexes will be dropped.
+func (di *DropIndexes) Index(index any) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.index = index
+	return di
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (di *DropIndexes) MaxTime(maxTime *time.Duration) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.maxTime = maxTime
+	return di
+}
+
+// Session sets the session for this operation.
+func (di *DropIndexes) Session(session *session.Client) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.session = session
+	return di
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (di *DropIndexes) ClusterClock(clock *session.ClusterClock) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.clock = clock
+	return di
+}
+
+// Collection sets the collection that this command will run against.
+func (di *DropIndexes) Collection(collection string) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.collection = collection
+	return di
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (di *DropIndexes) CommandMonitor(monitor *event.CommandMonitor) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.monitor = monitor
+	return di
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (di *DropIndexes) Crypt(crypt driver.Crypt) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.crypt = crypt
+	return di
+}
+
+// Database sets the database to run this operation against.
+func (di *DropIndexes) Database(database string) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.database = database
+	return di
+}
+
+// Deployment sets the deployment to use for this operation.
+func (di *DropIndexes) Deployment(deployment driver.Deployment) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.deployment = deployment
+	return di
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (di *DropIndexes) ServerSelector(selector description.ServerSelector) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.selector = selector
+	return di
+}
+
+// WriteConcern sets the write concern for this operation.
+func (di *DropIndexes) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.writeConcern = writeConcern
+	return di
+}
+
+// ServerAPI sets the server API version for this operation.
+func (di *DropIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.serverAPI = serverAPI
+	return di
+}
+
+// Timeout sets the timeout for this operation.
+func (di *DropIndexes) Timeout(timeout *time.Duration) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.timeout = timeout
+	return di
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (di *DropIndexes) Authenticator(authenticator driver.Authenticator) *DropIndexes {
+	if di == nil {
+		di = new(DropIndexes)
+	}
+
+	di.authenticator = authenticator
+	return di
+}
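+
+// Example usage (editor's sketch, not part of the upstream file): per the
+// command builder above, the index argument may be an index name (string) or
+// a key-pattern document, and "*" drops all indexes on the collection.
+// Identifiers and deployment are placeholders.
+//
+//	op := operation.NewDropIndexes("*").
+//		Database("db").Collection("coll").
+//		Deployment(deployment)
+//	if err := op.Execute(ctx); err != nil {
+//		return err
+//	}
+//	nBefore := op.Result().NIndexesWas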
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go
new file mode 100644
index 0000000000000000000000000000000000000000..94e4ddfb0de5f889f9d3ba8f490777212b8d6a68
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go
@@ -0,0 +1,225 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// DropSearchIndex performs a dropSearchIndex operation.
+type DropSearchIndex struct {
+	authenticator driver.Authenticator
+	index         string
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	result        DropSearchIndexResult
+	serverAPI     *driver.ServerAPIOptions
+	timeout       *time.Duration
+}
+
+// DropSearchIndexResult represents a dropSearchIndex result returned by the server.
+type DropSearchIndexResult struct {
+	Ok int32
+}
+
+func buildDropSearchIndexResult(response bsoncore.Document) (DropSearchIndexResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return DropSearchIndexResult{}, err
+	}
+	dsir := DropSearchIndexResult{}
+	for _, element := range elements {
+		if element.Key() == "ok" {
+			var ok bool
+			dsir.Ok, ok = element.Value().AsInt32OK()
+			if !ok {
+				return dsir, fmt.Errorf("response field 'ok' is type int32, but received BSON type %s", element.Value().Type)
+			}
+		}
+	}
+	return dsir, nil
+}
+
+// NewDropSearchIndex constructs and returns a new DropSearchIndex.
+func NewDropSearchIndex(index string) *DropSearchIndex {
+	return &DropSearchIndex{
+		index: index,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (dsi *DropSearchIndex) Result() DropSearchIndexResult { return dsi.result }
+
+func (dsi *DropSearchIndex) processResponse(info driver.ResponseInfo) error {
+	var err error
+	dsi.result, err = buildDropSearchIndexResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (dsi *DropSearchIndex) Execute(ctx context.Context) error {
+	if dsi.deployment == nil {
+		return errors.New("the DropSearchIndex operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         dsi.command,
+		ProcessResponseFn: dsi.processResponse,
+		Client:            dsi.session,
+		Clock:             dsi.clock,
+		CommandMonitor:    dsi.monitor,
+		Crypt:             dsi.crypt,
+		Database:          dsi.database,
+		Deployment:        dsi.deployment,
+		Selector:          dsi.selector,
+		ServerAPI:         dsi.serverAPI,
+		Timeout:           dsi.timeout,
+		Authenticator:     dsi.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (dsi *DropSearchIndex) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "dropSearchIndex", dsi.collection)
+	dst = bsoncore.AppendStringElement(dst, "name", dsi.index)
+	return dst, nil
+}
+
+// Index specifies the name of the search index to drop.
+func (dsi *DropSearchIndex) Index(index string) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.index = index
+	return dsi
+}
+
+// Session sets the session for this operation.
+func (dsi *DropSearchIndex) Session(session *session.Client) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.session = session
+	return dsi
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (dsi *DropSearchIndex) ClusterClock(clock *session.ClusterClock) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.clock = clock
+	return dsi
+}
+
+// Collection sets the collection that this command will run against.
+func (dsi *DropSearchIndex) Collection(collection string) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.collection = collection
+	return dsi
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (dsi *DropSearchIndex) CommandMonitor(monitor *event.CommandMonitor) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.monitor = monitor
+	return dsi
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (dsi *DropSearchIndex) Crypt(crypt driver.Crypt) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.crypt = crypt
+	return dsi
+}
+
+// Database sets the database to run this operation against.
+func (dsi *DropSearchIndex) Database(database string) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.database = database
+	return dsi
+}
+
+// Deployment sets the deployment to use for this operation.
+func (dsi *DropSearchIndex) Deployment(deployment driver.Deployment) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.deployment = deployment
+	return dsi
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (dsi *DropSearchIndex) ServerSelector(selector description.ServerSelector) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.selector = selector
+	return dsi
+}
+
+// ServerAPI sets the server API version for this operation.
+func (dsi *DropSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.serverAPI = serverAPI
+	return dsi
+}
+
+// Timeout sets the timeout for this operation.
+func (dsi *DropSearchIndex) Timeout(timeout *time.Duration) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.timeout = timeout
+	return dsi
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (dsi *DropSearchIndex) Authenticator(authenticator driver.Authenticator) *DropSearchIndex {
+	if dsi == nil {
+		dsi = new(DropSearchIndex)
+	}
+
+	dsi.authenticator = authenticator
+	return dsi
+}
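+
+// Example usage (editor's sketch, not part of the upstream file): dropping an
+// Atlas search index by name. Identifiers and deployment are placeholders.
+//
+//	op := operation.NewDropSearchIndex("mySearchIndex").
+//		Database("db").Collection("coll").
+//		Deployment(deployment)
+//	err := op.Execute(ctx)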
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b24b3d8c25e354be16f1d5da1e0c9050b9354e3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go
@@ -0,0 +1,175 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// EndSessions performs an endSessions operation.
+type EndSessions struct {
+	authenticator driver.Authenticator
+	sessionIDs    bsoncore.Document
+	session       *session.Client
+	clock         *session.ClusterClock
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	serverAPI     *driver.ServerAPIOptions
+}
+
+// NewEndSessions constructs and returns a new EndSessions.
+func NewEndSessions(sessionIDs bsoncore.Document) *EndSessions {
+	return &EndSessions{
+		sessionIDs: sessionIDs,
+	}
+}
+
+func (es *EndSessions) processResponse(driver.ResponseInfo) error {
+	return nil
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (es *EndSessions) Execute(ctx context.Context) error {
+	if es.deployment == nil {
+		return errors.New("the EndSessions operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         es.command,
+		ProcessResponseFn: es.processResponse,
+		Client:            es.session,
+		Clock:             es.clock,
+		CommandMonitor:    es.monitor,
+		Crypt:             es.crypt,
+		Database:          es.database,
+		Deployment:        es.deployment,
+		Selector:          es.selector,
+		ServerAPI:         es.serverAPI,
+		Name:              driverutil.EndSessionsOp,
+		Authenticator:     es.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (es *EndSessions) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	if es.sessionIDs != nil {
+		dst = bsoncore.AppendArrayElement(dst, "endSessions", es.sessionIDs)
+	}
+	return dst, nil
+}
+
+// SessionIDs specifies the sessions to be expired.
+func (es *EndSessions) SessionIDs(sessionIDs bsoncore.Document) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.sessionIDs = sessionIDs
+	return es
+}
+
+// Session sets the session for this operation.
+func (es *EndSessions) Session(session *session.Client) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.session = session
+	return es
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (es *EndSessions) ClusterClock(clock *session.ClusterClock) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.clock = clock
+	return es
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (es *EndSessions) CommandMonitor(monitor *event.CommandMonitor) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.monitor = monitor
+	return es
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (es *EndSessions) Crypt(crypt driver.Crypt) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.crypt = crypt
+	return es
+}
+
+// Database sets the database to run this operation against.
+func (es *EndSessions) Database(database string) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.database = database
+	return es
+}
+
+// Deployment sets the deployment to use for this operation.
+func (es *EndSessions) Deployment(deployment driver.Deployment) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.deployment = deployment
+	return es
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (es *EndSessions) ServerSelector(selector description.ServerSelector) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.selector = selector
+	return es
+}
+
+// ServerAPI sets the server API version for this operation.
+func (es *EndSessions) ServerAPI(serverAPI *driver.ServerAPIOptions) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.serverAPI = serverAPI
+	return es
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (es *EndSessions) Authenticator(authenticator driver.Authenticator) *EndSessions {
+	if es == nil {
+		es = new(EndSessions)
+	}
+
+	es.authenticator = authenticator
+	return es
+}
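+
+// Example usage (editor's sketch, not part of the upstream file): the
+// sessionIDs value is appended as the "endSessions" array, so it must be a
+// BSON array of session ID documents (this package uses bsoncore.Document for
+// raw arrays as well). sessionIDDoc and deployment are placeholders.
+//
+//	idx, ids := bsoncore.AppendArrayStart(nil)
+//	ids = bsoncore.AppendDocumentElement(ids, "0", sessionIDDoc)
+//	ids, _ = bsoncore.AppendArrayEnd(ids, idx)
+//	err := operation.NewEndSessions(ids).
+//		Database("admin").
+//		Deployment(deployment).
+//		Execute(ctx)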
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..d13a135cef603759c5ae1ea303d2b4767e6e3f16
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/errors.go
@@ -0,0 +1,13 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import "errors"
+
+var (
+	errUnacknowledgedHint = errors.New("the 'hint' command parameter cannot be used with unacknowledged writes")
+)
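+
+// Editor's note: the write operations in this package (e.g. delete and
+// update) return errUnacknowledgedHint from their command builders when a
+// hint is combined with an unacknowledged write concern, since the server
+// cannot report hint validation failures for unacknowledged writes.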
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go
new file mode 100644
index 0000000000000000000000000000000000000000..c71b7d755e8302c161470b808aa019992f4d4976
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go
@@ -0,0 +1,589 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Find performs a find operation.
+type Find struct {
+	authenticator       driver.Authenticator
+	allowDiskUse        *bool
+	allowPartialResults *bool
+	awaitData           *bool
+	batchSize           *int32
+	collation           bsoncore.Document
+	comment             *string
+	filter              bsoncore.Document
+	hint                bsoncore.Value
+	let                 bsoncore.Document
+	limit               *int64
+	max                 bsoncore.Document
+	maxTime             *time.Duration
+	min                 bsoncore.Document
+	noCursorTimeout     *bool
+	oplogReplay         *bool
+	projection          bsoncore.Document
+	returnKey           *bool
+	showRecordID        *bool
+	singleBatch         *bool
+	skip                *int64
+	snapshot            *bool
+	sort                bsoncore.Document
+	tailable            *bool
+	session             *session.Client
+	clock               *session.ClusterClock
+	collection          string
+	monitor             *event.CommandMonitor
+	crypt               driver.Crypt
+	database            string
+	deployment          driver.Deployment
+	readConcern         *readconcern.ReadConcern
+	readPreference      *readpref.ReadPref
+	selector            description.ServerSelector
+	retry               *driver.RetryMode
+	result              driver.CursorResponse
+	serverAPI           *driver.ServerAPIOptions
+	timeout             *time.Duration
+	omitCSOTMaxTimeMS   bool
+	logger              *logger.Logger
+}
+
+// NewFind constructs and returns a new Find.
+func NewFind(filter bsoncore.Document) *Find {
+	return &Find{
+		filter: filter,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (f *Find) Result(opts driver.CursorOptions) (*driver.BatchCursor, error) {
+	opts.ServerAPI = f.serverAPI
+	return driver.NewBatchCursor(f.result, f.session, f.clock, opts)
+}
+
+func (f *Find) processResponse(info driver.ResponseInfo) error {
+	var err error
+	f.result, err = driver.NewCursorResponse(info)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (f *Find) Execute(ctx context.Context) error {
+	if f.deployment == nil {
+		return errors.New("the Find operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         f.command,
+		ProcessResponseFn: f.processResponse,
+		RetryMode:         f.retry,
+		Type:              driver.Read,
+		Client:            f.session,
+		Clock:             f.clock,
+		CommandMonitor:    f.monitor,
+		Crypt:             f.crypt,
+		Database:          f.database,
+		Deployment:        f.deployment,
+		MaxTime:           f.maxTime,
+		ReadConcern:       f.readConcern,
+		ReadPreference:    f.readPreference,
+		Selector:          f.selector,
+		Legacy:            driver.LegacyFind,
+		ServerAPI:         f.serverAPI,
+		Timeout:           f.timeout,
+		Logger:            f.logger,
+		Name:              driverutil.FindOp,
+		OmitCSOTMaxTimeMS: f.omitCSOTMaxTimeMS,
+		Authenticator:     f.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (f *Find) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "find", f.collection)
+	if f.allowDiskUse != nil {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(4) {
+			return nil, errors.New("the 'allowDiskUse' command parameter requires a minimum server wire version of 4")
+		}
+		dst = bsoncore.AppendBooleanElement(dst, "allowDiskUse", *f.allowDiskUse)
+	}
+	if f.allowPartialResults != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "allowPartialResults", *f.allowPartialResults)
+	}
+	if f.awaitData != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "awaitData", *f.awaitData)
+	}
+	if f.batchSize != nil {
+		dst = bsoncore.AppendInt32Element(dst, "batchSize", *f.batchSize)
+	}
+	if f.collation != nil {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(5) {
+			return nil, errors.New("the 'collation' command parameter requires a minimum server wire version of 5")
+		}
+		dst = bsoncore.AppendDocumentElement(dst, "collation", f.collation)
+	}
+	if f.comment != nil {
+		dst = bsoncore.AppendStringElement(dst, "comment", *f.comment)
+	}
+	if f.filter != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "filter", f.filter)
+	}
+	if f.hint.Type != bsontype.Type(0) {
+		dst = bsoncore.AppendValueElement(dst, "hint", f.hint)
+	}
+	if f.let != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "let", f.let)
+	}
+	if f.limit != nil {
+		dst = bsoncore.AppendInt64Element(dst, "limit", *f.limit)
+	}
+	if f.max != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "max", f.max)
+	}
+	if f.min != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "min", f.min)
+	}
+	if f.noCursorTimeout != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "noCursorTimeout", *f.noCursorTimeout)
+	}
+	if f.oplogReplay != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "oplogReplay", *f.oplogReplay)
+	}
+	if f.projection != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "projection", f.projection)
+	}
+	if f.returnKey != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "returnKey", *f.returnKey)
+	}
+	if f.showRecordID != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "showRecordId", *f.showRecordID)
+	}
+	if f.singleBatch != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "singleBatch", *f.singleBatch)
+	}
+	if f.skip != nil {
+		dst = bsoncore.AppendInt64Element(dst, "skip", *f.skip)
+	}
+	if f.snapshot != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "snapshot", *f.snapshot)
+	}
+	if f.sort != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "sort", f.sort)
+	}
+	if f.tailable != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "tailable", *f.tailable)
+	}
+	return dst, nil
+}
+
+// AllowDiskUse when true allows temporary data to be written to disk during the find command.
+func (f *Find) AllowDiskUse(allowDiskUse bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.allowDiskUse = &allowDiskUse
+	return f
+}
+
+// AllowPartialResults when true allows partial results to be returned if some shards are down.
+func (f *Find) AllowPartialResults(allowPartialResults bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.allowPartialResults = &allowPartialResults
+	return f
+}
+
+// AwaitData when true makes a cursor block before returning when no data is available.
+func (f *Find) AwaitData(awaitData bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.awaitData = &awaitData
+	return f
+}
+
+// BatchSize specifies the number of documents to return in every batch.
+func (f *Find) BatchSize(batchSize int32) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.batchSize = &batchSize
+	return f
+}
+
+// Collation specifies a collation to be used.
+func (f *Find) Collation(collation bsoncore.Document) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.collation = collation
+	return f
+}
+
+// Comment sets a string to help trace an operation.
+func (f *Find) Comment(comment string) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.comment = &comment
+	return f
+}
+
+// Filter determines what results are returned from find.
+func (f *Find) Filter(filter bsoncore.Document) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.filter = filter
+	return f
+}
+
+// Hint specifies the index to use.
+func (f *Find) Hint(hint bsoncore.Value) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.hint = hint
+	return f
+}
+
+// Let specifies the let document to use. This option is only valid for server versions 5.0 and above.
+func (f *Find) Let(let bsoncore.Document) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.let = let
+	return f
+}
+
+// Limit sets a limit on the number of documents to return.
+func (f *Find) Limit(limit int64) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.limit = &limit
+	return f
+}
+
+// Max sets an exclusive upper bound for a specific index.
+func (f *Find) Max(max bsoncore.Document) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.max = max
+	return f
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (f *Find) MaxTime(maxTime *time.Duration) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.maxTime = maxTime
+	return f
+}
+
+// Min sets an inclusive lower bound for a specific index.
+func (f *Find) Min(min bsoncore.Document) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.min = min
+	return f
+}
+
+// NoCursorTimeout when true prevents the cursor from timing out after an inactivity period.
+func (f *Find) NoCursorTimeout(noCursorTimeout bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.noCursorTimeout = &noCursorTimeout
+	return f
+}
+
+// OplogReplay when true replays a replica set's oplog.
+func (f *Find) OplogReplay(oplogReplay bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.oplogReplay = &oplogReplay
+	return f
+}
+
+// Projection limits the fields returned for all documents.
+func (f *Find) Projection(projection bsoncore.Document) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.projection = projection
+	return f
+}
+
+// ReturnKey when true returns index keys for all result documents.
+func (f *Find) ReturnKey(returnKey bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.returnKey = &returnKey
+	return f
+}
+
+// ShowRecordID when true adds a $recordId field with the record identifier to returned documents.
+func (f *Find) ShowRecordID(showRecordID bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.showRecordID = &showRecordID
+	return f
+}
+
+// SingleBatch specifies whether the results should be returned in a single batch.
+func (f *Find) SingleBatch(singleBatch bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.singleBatch = &singleBatch
+	return f
+}
+
+// Skip specifies the number of documents to skip before returning.
+func (f *Find) Skip(skip int64) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.skip = &skip
+	return f
+}
+
+// Snapshot prevents the cursor from returning a document more than once because of an intervening write operation.
+func (f *Find) Snapshot(snapshot bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.snapshot = &snapshot
+	return f
+}
+
+// Sort specifies the order in which to return results.
+func (f *Find) Sort(sort bsoncore.Document) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.sort = sort
+	return f
+}
+
+// Tailable keeps a cursor open and resumable after the last data has been retrieved.
+func (f *Find) Tailable(tailable bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.tailable = &tailable
+	return f
+}
+
+// Session sets the session for this operation.
+func (f *Find) Session(session *session.Client) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.session = session
+	return f
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (f *Find) ClusterClock(clock *session.ClusterClock) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.clock = clock
+	return f
+}
+
+// Collection sets the collection that this command will run against.
+func (f *Find) Collection(collection string) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.collection = collection
+	return f
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (f *Find) CommandMonitor(monitor *event.CommandMonitor) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.monitor = monitor
+	return f
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (f *Find) Crypt(crypt driver.Crypt) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.crypt = crypt
+	return f
+}
+
+// Database sets the database to run this operation against.
+func (f *Find) Database(database string) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.database = database
+	return f
+}
+
+// Deployment sets the deployment to use for this operation.
+func (f *Find) Deployment(deployment driver.Deployment) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.deployment = deployment
+	return f
+}
+
+// ReadConcern specifies the read concern for this operation.
+func (f *Find) ReadConcern(readConcern *readconcern.ReadConcern) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.readConcern = readConcern
+	return f
+}
+
+// ReadPreference sets the read preference used with this operation.
+func (f *Find) ReadPreference(readPreference *readpref.ReadPref) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.readPreference = readPreference
+	return f
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (f *Find) ServerSelector(selector description.ServerSelector) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.selector = selector
+	return f
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (f *Find) Retry(retry driver.RetryMode) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.retry = &retry
+	return f
+}
+
+// ServerAPI sets the server API version for this operation.
+func (f *Find) ServerAPI(serverAPI *driver.ServerAPIOptions) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.serverAPI = serverAPI
+	return f
+}
+
+// Timeout sets the timeout for this operation.
+func (f *Find) Timeout(timeout *time.Duration) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.timeout = timeout
+	return f
+}
+
+// OmitCSOTMaxTimeMS omits the automatically-calculated "maxTimeMS" from the
+// command when CSOT is enabled. It does not affect "maxTimeMS" set by
+// [Find.MaxTime].
+func (f *Find) OmitCSOTMaxTimeMS(omit bool) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.omitCSOTMaxTimeMS = omit
+	return f
+}
+
+// Logger sets the logger for this operation.
+func (f *Find) Logger(logger *logger.Logger) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.logger = logger
+	return f
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (f *Find) Authenticator(authenticator driver.Authenticator) *Find {
+	if f == nil {
+		f = new(Find)
+	}
+
+	f.authenticator = authenticator
+	return f
+}
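+
+// Example usage (editor's sketch, not part of the upstream file): a filtered,
+// sorted find that is then drained through a BatchCursor. Identifiers and
+// deployment are placeholders.
+//
+//	filter := bsoncore.NewDocumentBuilder().AppendString("status", "active").Build()
+//	sort := bsoncore.NewDocumentBuilder().AppendInt32("age", -1).Build()
+//	op := operation.NewFind(filter).
+//		Database("db").Collection("coll").
+//		Deployment(deployment).
+//		Sort(sort).
+//		BatchSize(100)
+//	if err := op.Execute(ctx); err != nil {
+//		return err
+//	}
+//	cursor, err := op.Result(driver.CursorOptions{BatchSize: 100})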
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea365ccb237d42faa078fe07af9f73ea14b8f941
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go
@@ -0,0 +1,491 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// FindAndModify performs a findAndModify operation.
+type FindAndModify struct {
+	authenticator            driver.Authenticator
+	arrayFilters             bsoncore.Array
+	bypassDocumentValidation *bool
+	collation                bsoncore.Document
+	comment                  bsoncore.Value
+	fields                   bsoncore.Document
+	maxTime                  *time.Duration
+	newDocument              *bool
+	query                    bsoncore.Document
+	remove                   *bool
+	sort                     bsoncore.Document
+	update                   bsoncore.Value
+	upsert                   *bool
+	session                  *session.Client
+	clock                    *session.ClusterClock
+	collection               string
+	monitor                  *event.CommandMonitor
+	database                 string
+	deployment               driver.Deployment
+	selector                 description.ServerSelector
+	writeConcern             *writeconcern.WriteConcern
+	retry                    *driver.RetryMode
+	crypt                    driver.Crypt
+	hint                     bsoncore.Value
+	serverAPI                *driver.ServerAPIOptions
+	let                      bsoncore.Document
+	timeout                  *time.Duration
+
+	result FindAndModifyResult
+}
+
+// LastErrorObject represents information about updates and upserts returned by the server.
+type LastErrorObject struct {
+	// True if an update modified an existing document.
+	UpdatedExisting bool
+	// Object ID of the upserted document.
+	Upserted interface{}
+}
+
+// FindAndModifyResult represents a findAndModify result returned by the server.
+type FindAndModifyResult struct {
+	// Either the old or modified document, depending on the value of the new parameter.
+	Value bsoncore.Document
+	// Contains information about updates and upserts.
+	LastErrorObject LastErrorObject
+}
+
+func buildFindAndModifyResult(response bsoncore.Document) (FindAndModifyResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return FindAndModifyResult{}, err
+	}
+	famr := FindAndModifyResult{}
+	for _, element := range elements {
+		switch element.Key() {
+		case "value":
+			var ok bool
+			famr.Value, ok = element.Value().DocumentOK()
+
+			// The 'value' field returned by a FindAndModify can be null in the case that no document was found.
+			if element.Value().Type != bsontype.Null && !ok {
+				return famr, fmt.Errorf("response field 'value' is type document or null, but received BSON type %s", element.Value().Type)
+			}
+		case "lastErrorObject":
+			valDoc, ok := element.Value().DocumentOK()
+			if !ok {
+				return famr, fmt.Errorf("response field 'lastErrorObject' is type document, but received BSON type %s", element.Value().Type)
+			}
+
+			var leo LastErrorObject
+			if err = bson.Unmarshal(valDoc, &leo); err != nil {
+				return famr, err
+			}
+			famr.LastErrorObject = leo
+		}
+	}
+	return famr, nil
+}
+
+// NewFindAndModify constructs and returns a new FindAndModify.
+func NewFindAndModify(query bsoncore.Document) *FindAndModify {
+	return &FindAndModify{
+		query: query,
+	}
+}
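+
+// A minimal usage sketch (illustrative only, not part of the upstream file;
+// "dep" is an assumed driver.Deployment):
+//
+//	query := bsoncore.NewDocumentBuilder().AppendString("name", "alice").Build()
+//	fam := NewFindAndModify(query).
+//		Database("app").
+//		Collection("users").
+//		Remove(true).
+//		Deployment(dep)
+//	if err := fam.Execute(context.TODO()); err != nil {
+//		// handle error
+//	}
+//	deleted := fam.Result().Value // nil if no document matched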
+
+// Result returns the result of executing this operation.
+func (fam *FindAndModify) Result() FindAndModifyResult { return fam.result }
+
+func (fam *FindAndModify) processResponse(info driver.ResponseInfo) error {
+	var err error
+
+	fam.result, err = buildFindAndModifyResult(info.ServerResponse)
+	return err
+
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (fam *FindAndModify) Execute(ctx context.Context) error {
+	if fam.deployment == nil {
+		return errors.New("the FindAndModify operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         fam.command,
+		ProcessResponseFn: fam.processResponse,
+
+		RetryMode:      fam.retry,
+		Type:           driver.Write,
+		Client:         fam.session,
+		Clock:          fam.clock,
+		CommandMonitor: fam.monitor,
+		Database:       fam.database,
+		Deployment:     fam.deployment,
+		MaxTime:        fam.maxTime,
+		Selector:       fam.selector,
+		WriteConcern:   fam.writeConcern,
+		Crypt:          fam.crypt,
+		ServerAPI:      fam.serverAPI,
+		Timeout:        fam.timeout,
+		Name:           driverutil.FindAndModifyOp,
+		Authenticator:  fam.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (fam *FindAndModify) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "findAndModify", fam.collection)
+	if fam.arrayFilters != nil {
+
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(6) {
+			return nil, errors.New("the 'arrayFilters' command parameter requires a minimum server wire version of 6")
+		}
+		dst = bsoncore.AppendArrayElement(dst, "arrayFilters", fam.arrayFilters)
+	}
+	if fam.bypassDocumentValidation != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "bypassDocumentValidation", *fam.bypassDocumentValidation)
+	}
+	if fam.collation != nil {
+
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(5) {
+			return nil, errors.New("the 'collation' command parameter requires a minimum server wire version of 5")
+		}
+		dst = bsoncore.AppendDocumentElement(dst, "collation", fam.collation)
+	}
+	if fam.comment.Type != bsontype.Type(0) {
+		dst = bsoncore.AppendValueElement(dst, "comment", fam.comment)
+	}
+	if fam.fields != nil {
+
+		dst = bsoncore.AppendDocumentElement(dst, "fields", fam.fields)
+	}
+	if fam.newDocument != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "new", *fam.newDocument)
+	}
+	if fam.query != nil {
+
+		dst = bsoncore.AppendDocumentElement(dst, "query", fam.query)
+	}
+	if fam.remove != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "remove", *fam.remove)
+	}
+	if fam.sort != nil {
+
+		dst = bsoncore.AppendDocumentElement(dst, "sort", fam.sort)
+	}
+	if fam.update.Data != nil {
+		dst = bsoncore.AppendValueElement(dst, "update", fam.update)
+	}
+	if fam.upsert != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "upsert", *fam.upsert)
+	}
+	if fam.hint.Type != bsontype.Type(0) {
+
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(8) {
+			return nil, errors.New("the 'hint' command parameter requires a minimum server wire version of 8")
+		}
+		if !fam.writeConcern.Acknowledged() {
+			return nil, errUnacknowledgedHint
+		}
+		dst = bsoncore.AppendValueElement(dst, "hint", fam.hint)
+	}
+	if fam.let != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "let", fam.let)
+	}
+
+	return dst, nil
+}
+
+// ArrayFilters specifies an array of filter documents that determines which array elements to modify for an update operation on an array field.
+func (fam *FindAndModify) ArrayFilters(arrayFilters bsoncore.Array) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.arrayFilters = arrayFilters
+	return fam
+}
+
+// BypassDocumentValidation specifies if document validation can be skipped when executing the operation.
+func (fam *FindAndModify) BypassDocumentValidation(bypassDocumentValidation bool) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.bypassDocumentValidation = &bypassDocumentValidation
+	return fam
+}
+
+// Collation specifies a collation to be used.
+func (fam *FindAndModify) Collation(collation bsoncore.Document) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.collation = collation
+	return fam
+}
+
+// Comment sets a value to help trace an operation.
+func (fam *FindAndModify) Comment(comment bsoncore.Value) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.comment = comment
+	return fam
+}
+
+// Fields specifies a subset of fields to return.
+func (fam *FindAndModify) Fields(fields bsoncore.Document) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.fields = fields
+	return fam
+}
+
+// MaxTime specifies the maximum amount of time to allow the operation to run on the server.
+func (fam *FindAndModify) MaxTime(maxTime *time.Duration) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.maxTime = maxTime
+	return fam
+}
+
+// NewDocument specifies whether to return the modified document or the original. Defaults to false (return original).
+func (fam *FindAndModify) NewDocument(newDocument bool) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.newDocument = &newDocument
+	return fam
+}
+
+// Query specifies the selection criteria for the modification.
+func (fam *FindAndModify) Query(query bsoncore.Document) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.query = query
+	return fam
+}
+
+// Remove specifies that the matched document should be removed. Defaults to false.
+func (fam *FindAndModify) Remove(remove bool) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.remove = &remove
+	return fam
+}
+
+// Sort determines which document the operation modifies if the query matches multiple documents. The first document matched by the sort order will be modified.
+func (fam *FindAndModify) Sort(sort bsoncore.Document) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.sort = sort
+	return fam
+}
+
+// Update specifies the update document to perform on the matched document.
+func (fam *FindAndModify) Update(update bsoncore.Value) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.update = update
+	return fam
+}
+
+// Upsert specifies whether or not to create a new document if no documents match the query when doing an update. Defaults to false.
+func (fam *FindAndModify) Upsert(upsert bool) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.upsert = &upsert
+	return fam
+}
+
+// Session sets the session for this operation.
+func (fam *FindAndModify) Session(session *session.Client) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.session = session
+	return fam
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (fam *FindAndModify) ClusterClock(clock *session.ClusterClock) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.clock = clock
+	return fam
+}
+
+// Collection sets the collection that this command will run against.
+func (fam *FindAndModify) Collection(collection string) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.collection = collection
+	return fam
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (fam *FindAndModify) CommandMonitor(monitor *event.CommandMonitor) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.monitor = monitor
+	return fam
+}
+
+// Database sets the database to run this operation against.
+func (fam *FindAndModify) Database(database string) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.database = database
+	return fam
+}
+
+// Deployment sets the deployment to use for this operation.
+func (fam *FindAndModify) Deployment(deployment driver.Deployment) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.deployment = deployment
+	return fam
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (fam *FindAndModify) ServerSelector(selector description.ServerSelector) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.selector = selector
+	return fam
+}
+
+// WriteConcern sets the write concern for this operation.
+func (fam *FindAndModify) WriteConcern(writeConcern *writeconcern.WriteConcern) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.writeConcern = writeConcern
+	return fam
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (fam *FindAndModify) Retry(retry driver.RetryMode) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.retry = &retry
+	return fam
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (fam *FindAndModify) Crypt(crypt driver.Crypt) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.crypt = crypt
+	return fam
+}
+
+// Hint specifies the index to use.
+func (fam *FindAndModify) Hint(hint bsoncore.Value) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.hint = hint
+	return fam
+}
+
+// ServerAPI sets the server API version for this operation.
+func (fam *FindAndModify) ServerAPI(serverAPI *driver.ServerAPIOptions) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.serverAPI = serverAPI
+	return fam
+}
+
+// Let specifies the let document to use. This option is only valid for server versions 5.0 and above.
+func (fam *FindAndModify) Let(let bsoncore.Document) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.let = let
+	return fam
+}
+
+// Timeout sets the timeout for this operation.
+func (fam *FindAndModify) Timeout(timeout *time.Duration) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.timeout = timeout
+	return fam
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (fam *FindAndModify) Authenticator(authenticator driver.Authenticator) *FindAndModify {
+	if fam == nil {
+		fam = new(FindAndModify)
+	}
+
+	fam.authenticator = authenticator
+	return fam
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go
new file mode 100644
index 0000000000000000000000000000000000000000..60c99f063d1491f370fdeb6f95e067cee47cb14f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go
@@ -0,0 +1,662 @@
+// Copyright (C) MongoDB, Inc. 2021-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/internal/bsonutil"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/internal/handshake"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/version"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// maxClientMetadataSize is the maximum size of the client metadata document
+// that can be sent to the server. Note that the maximum metadata document size
+// on standalone and replica servers is 1024, but the maximum metadata document
+// size on sharded clusters is 512.
+const maxClientMetadataSize = 512
+
+const driverName = "mongo-go-driver"
+
+// Hello is used to run the handshake operation.
+type Hello struct {
+	authenticator      driver.Authenticator
+	appname            string
+	compressors        []string
+	saslSupportedMechs string
+	d                  driver.Deployment
+	clock              *session.ClusterClock
+	speculativeAuth    bsoncore.Document
+	topologyVersion    *description.TopologyVersion
+	maxAwaitTimeMS     *int64
+	serverAPI          *driver.ServerAPIOptions
+	loadBalanced       bool
+
+	res bsoncore.Document
+}
+
+var _ driver.Handshaker = (*Hello)(nil)
+
+// NewHello constructs a Hello.
+func NewHello() *Hello { return &Hello{} }
+
+// AppName sets the application name in the client metadata sent in this operation.
+func (h *Hello) AppName(appname string) *Hello {
+	h.appname = appname
+	return h
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (h *Hello) ClusterClock(clock *session.ClusterClock) *Hello {
+	if h == nil {
+		h = new(Hello)
+	}
+
+	h.clock = clock
+	return h
+}
+
+// Compressors sets the compressors that can be used.
+func (h *Hello) Compressors(compressors []string) *Hello {
+	h.compressors = compressors
+	return h
+}
+
+// SASLSupportedMechs requests the supported SASL mechanisms for the given user when this operation
+// is run.
+func (h *Hello) SASLSupportedMechs(username string) *Hello {
+	h.saslSupportedMechs = username
+	return h
+}
+
+// Deployment sets the Deployment for this operation.
+func (h *Hello) Deployment(d driver.Deployment) *Hello {
+	h.d = d
+	return h
+}
+
+// SpeculativeAuthenticate sets the document to be used for speculative authentication.
+func (h *Hello) SpeculativeAuthenticate(doc bsoncore.Document) *Hello {
+	h.speculativeAuth = doc
+	return h
+}
+
+// TopologyVersion sets the TopologyVersion to be used for heartbeats.
+func (h *Hello) TopologyVersion(tv *description.TopologyVersion) *Hello {
+	h.topologyVersion = tv
+	return h
+}
+
+// MaxAwaitTimeMS sets the maximum time for the server to wait for topology changes during a heartbeat.
+func (h *Hello) MaxAwaitTimeMS(awaitTime int64) *Hello {
+	h.maxAwaitTimeMS = &awaitTime
+	return h
+}
+
+// ServerAPI sets the server API version for this operation.
+func (h *Hello) ServerAPI(serverAPI *driver.ServerAPIOptions) *Hello {
+	h.serverAPI = serverAPI
+	return h
+}
+
+// LoadBalanced specifies whether or not this operation is being sent over a connection to a load balanced cluster.
+func (h *Hello) LoadBalanced(lb bool) *Hello {
+	h.loadBalanced = lb
+	return h
+}
+
+// Result returns the result of executing this operation.
+func (h *Hello) Result(addr address.Address) description.Server {
+	return description.NewServer(addr, bson.Raw(h.res))
+}
+
+const dockerEnvPath = "/.dockerenv"
+
+const (
+	// Runtime names
+	runtimeNameDocker = "docker"
+
+	// Orchestrator names
+	orchestratorNameK8s = "kubernetes"
+)
+
+// getFaasEnvName parses the FaaS environment variable name and returns the
+// corresponding name used by the client. If none of the variables are
+// populated, or variables for multiple names are populated, the FaaS values
+// MUST be entirely omitted. When variables for multiple "client.env.name"
+// values are present, "vercel" takes precedence over "aws.lambda"; any other
+// combination MUST cause FaaS values to be entirely omitted.
+func getFaasEnvName() string {
+	envVars := []string{
+		driverutil.EnvVarAWSExecutionEnv,
+		driverutil.EnvVarAWSLambdaRuntimeAPI,
+		driverutil.EnvVarFunctionsWorkerRuntime,
+		driverutil.EnvVarKService,
+		driverutil.EnvVarFunctionName,
+		driverutil.EnvVarVercel,
+	}
+
+	// If none of the variables are populated the client.env value MUST be
+	// entirely omitted.
+	names := make(map[string]struct{})
+
+	for _, envVar := range envVars {
+		val := os.Getenv(envVar)
+		if val == "" {
+			continue
+		}
+
+		var name string
+
+		switch envVar {
+		case driverutil.EnvVarAWSExecutionEnv:
+			if !strings.HasPrefix(val, driverutil.AwsLambdaPrefix) {
+				continue
+			}
+
+			name = driverutil.EnvNameAWSLambda
+		case driverutil.EnvVarAWSLambdaRuntimeAPI:
+			name = driverutil.EnvNameAWSLambda
+		case driverutil.EnvVarFunctionsWorkerRuntime:
+			name = driverutil.EnvNameAzureFunc
+		case driverutil.EnvVarKService, driverutil.EnvVarFunctionName:
+			name = driverutil.EnvNameGCPFunc
+		case driverutil.EnvVarVercel:
+			// "vercel" takes precedence over "aws.lambda".
+			delete(names, driverutil.EnvNameAWSLambda)
+
+			name = driverutil.EnvNameVercel
+		}
+
+		names[name] = struct{}{}
+		if len(names) > 1 {
+			// If multiple names are populated the client.env value
+			// MUST be entirely omitted.
+			names = nil
+
+			break
+		}
+	}
+
+	for name := range names {
+		return name
+	}
+
+	return ""
+}
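+
+// For example (hypothetical environments; this assumes the driverutil
+// constants name the usual variables, e.g. "AWS_EXECUTION_ENV" and "VERCEL"):
+//
+//	// Only AWS Lambda variables set            -> "aws.lambda"
+//	// AWS Lambda and Vercel variables set      -> "vercel" (takes precedence)
+//	// AWS Lambda and Azure Functions variables -> ""       (multiple names, omitted)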
+
+type containerInfo struct {
+	runtime      string
+	orchestrator string
+}
+
+// getContainerEnvInfo returns the runtime and orchestrator of a container.
+// If neither field is populated, the client.env.container value MUST be
+// entirely omitted.
+func getContainerEnvInfo() *containerInfo {
+	var runtime, orchestrator string
+	if _, err := os.Stat(dockerEnvPath); !os.IsNotExist(err) {
+		runtime = runtimeNameDocker
+	}
+	if v := os.Getenv(driverutil.EnvVarK8s); v != "" {
+		orchestrator = orchestratorNameK8s
+	}
+	if runtime != "" || orchestrator != "" {
+		return &containerInfo{
+			runtime:      runtime,
+			orchestrator: orchestrator,
+		}
+	}
+	return nil
+}
+
+// appendClientAppName appends the application metadata to the dst. It is the
+// responsibility of the caller to check that this appending does not cause dst
+// to exceed any size limitations.
+func appendClientAppName(dst []byte, name string) ([]byte, error) {
+	if name == "" {
+		return dst, nil
+	}
+
+	var idx int32
+	idx, dst = bsoncore.AppendDocumentElementStart(dst, "application")
+
+	dst = bsoncore.AppendStringElement(dst, "name", name)
+
+	return bsoncore.AppendDocumentEnd(dst, idx)
+}
+
+// appendClientDriver appends the driver metadata to dst. It is the
+// responsibility of the caller to check that this appending does not cause dst
+// to exceed any size limitations.
+func appendClientDriver(dst []byte) ([]byte, error) {
+	var idx int32
+	idx, dst = bsoncore.AppendDocumentElementStart(dst, "driver")
+
+	dst = bsoncore.AppendStringElement(dst, "name", driverName)
+	dst = bsoncore.AppendStringElement(dst, "version", version.Driver)
+
+	return bsoncore.AppendDocumentEnd(dst, idx)
+}
+
+// appendClientEnv appends the environment metadata to dst. It is the
+// responsibility of the caller to check that this appending does not cause dst
+// to exceed any size limitations.
+func appendClientEnv(dst []byte, omitNonName, omitDoc bool) ([]byte, error) {
+	if omitDoc {
+		return dst, nil
+	}
+
+	name := getFaasEnvName()
+	container := getContainerEnvInfo()
+	// Omit the entire 'env' document if both name and container are empty,
+	// because every other field depends on one of them.
+	if name == "" && container == nil {
+		return dst, nil
+	}
+
+	var idx int32
+
+	idx, dst = bsoncore.AppendDocumentElementStart(dst, "env")
+
+	if name != "" {
+		dst = bsoncore.AppendStringElement(dst, "name", name)
+	}
+
+	addMem := func(envVar string) []byte {
+		mem := os.Getenv(envVar)
+		if mem == "" {
+			return dst
+		}
+
+		memInt64, err := strconv.ParseInt(mem, 10, 32)
+		if err != nil {
+			return dst
+		}
+
+		memInt32 := int32(memInt64)
+
+		return bsoncore.AppendInt32Element(dst, "memory_mb", memInt32)
+	}
+
+	addRegion := func(envVar string) []byte {
+		region := os.Getenv(envVar)
+		if region == "" {
+			return dst
+		}
+
+		return bsoncore.AppendStringElement(dst, "region", region)
+	}
+
+	addTimeout := func(envVar string) []byte {
+		timeout := os.Getenv(envVar)
+		if timeout == "" {
+			return dst
+		}
+
+		timeoutInt64, err := strconv.ParseInt(timeout, 10, 32)
+		if err != nil {
+			return dst
+		}
+
+		timeoutInt32 := int32(timeoutInt64)
+		return bsoncore.AppendInt32Element(dst, "timeout_sec", timeoutInt32)
+	}
+
+	if !omitNonName {
+		// No other FaaS fields will be populated if the name is empty.
+		switch name {
+		case driverutil.EnvNameAWSLambda:
+			dst = addMem(driverutil.EnvVarAWSLambdaFunctionMemorySize)
+			dst = addRegion(driverutil.EnvVarAWSRegion)
+		case driverutil.EnvNameGCPFunc:
+			dst = addMem(driverutil.EnvVarFunctionMemoryMB)
+			dst = addRegion(driverutil.EnvVarFunctionRegion)
+			dst = addTimeout(driverutil.EnvVarFunctionTimeoutSec)
+		case driverutil.EnvNameVercel:
+			dst = addRegion(driverutil.EnvVarVercelRegion)
+		}
+	}
+
+	if container != nil {
+		var idxCntnr int32
+		idxCntnr, dst = bsoncore.AppendDocumentElementStart(dst, "container")
+		if container.runtime != "" {
+			dst = bsoncore.AppendStringElement(dst, "runtime", container.runtime)
+		}
+		if container.orchestrator != "" {
+			dst = bsoncore.AppendStringElement(dst, "orchestrator", container.orchestrator)
+		}
+		var err error
+		dst, err = bsoncore.AppendDocumentEnd(dst, idxCntnr)
+		if err != nil {
+			return dst, err
+		}
+	}
+
+	return bsoncore.AppendDocumentEnd(dst, idx)
+}
+
+// appendClientOS appends the OS metadata to dst. It is the responsibility of the
+// caller to check that this appending does not cause dst to exceed any size
+// limitations.
+func appendClientOS(dst []byte, omitNonType bool) ([]byte, error) {
+	var idx int32
+
+	idx, dst = bsoncore.AppendDocumentElementStart(dst, "os")
+
+	dst = bsoncore.AppendStringElement(dst, "type", runtime.GOOS)
+	if !omitNonType {
+		dst = bsoncore.AppendStringElement(dst, "architecture", runtime.GOARCH)
+	}
+
+	return bsoncore.AppendDocumentEnd(dst, idx)
+}
+
+// appendClientPlatform appends the platform metadata to dst. It is the
+// responsibility of the caller to check that this appending does not cause dst
+// to exceed any size limitations.
+func appendClientPlatform(dst []byte) []byte {
+	return bsoncore.AppendStringElement(dst, "platform", runtime.Version())
+}
+
+// encodeClientMetadata encodes the client metadata into a BSON document. maxLen
+// is the maximum length the document can be. If the document exceeds maxLen,
+// fields are progressively omitted or truncated to make it fit; if it still
+// cannot fit, a nil byte slice is returned.
+//
+// This function attempts to build the following document. Fields are omitted
+// as needed to save space, per the MongoDB Handshake specification.
+//
+//	{
+//		application: {
+//			name: "<string>"
+//		},
+//		driver: {
+//			name: "<string>",
+//			version: "<string>"
+//		},
+//		platform: "<string>",
+//		os: {
+//			type: "<string>",
+//			name: "<string>",
+//			architecture: "<string>",
+//			version: "<string>"
+//		},
+//		env: {
+//			name: "<string>",
+//			timeout_sec: 42,
+//			memory_mb: 1024,
+//			region: "<string>",
+//			container: {
+//				runtime: "<string>",
+//				orchestrator: "<string>"
+//			}
+//		}
+//	}
+func encodeClientMetadata(appname string, maxLen int) ([]byte, error) {
+	dst := make([]byte, 0, maxLen)
+
+	omitEnvDoc := false
+	omitEnvNonName := false
+	omitOSNonType := false
+	omitEnvDocument := false
+	truncatePlatform := false
+
+retry:
+	var idx int32
+	idx, dst = bsoncore.AppendDocumentStart(dst)
+
+	var err error
+	dst, err = appendClientAppName(dst, appname)
+	if err != nil {
+		return nil, err
+	}
+
+	dst, err = appendClientDriver(dst)
+	if err != nil {
+		return nil, err
+	}
+
+	dst, err = appendClientOS(dst, omitOSNonType)
+	if err != nil {
+		return nil, err
+	}
+
+	if !truncatePlatform {
+		dst = appendClientPlatform(dst)
+	}
+
+	if !omitEnvDocument {
+		dst, err = appendClientEnv(dst, omitEnvNonName, omitEnvDoc)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	dst, err = bsoncore.AppendDocumentEnd(dst, idx)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(dst) > maxLen {
+		// Implementers SHOULD cumulatively update fields in the
+		// following order until the document is under the size limit
+		//
+		//    1. Omit fields from ``env`` except ``env.name``
+		//    2. Omit fields from ``os`` except ``os.type``
+		//    3. Omit the ``env`` document entirely
+		//    4. Truncate ``platform``
+		dst = dst[:0]
+
+		if !omitEnvNonName {
+			omitEnvNonName = true
+
+			goto retry
+		}
+
+		if !omitOSNonType {
+			omitOSNonType = true
+
+			goto retry
+		}
+
+		if !omitEnvDoc {
+			omitEnvDoc = true
+
+			goto retry
+		}
+
+		if !truncatePlatform {
+			truncatePlatform = true
+
+			goto retry
+		}
+
+		// There is nothing left to update. Return an empty slice to
+		// tell the caller not to append a `client` document.
+		return nil, nil
+	}
+
+	return dst, nil
+}
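+
+// A minimal usage sketch (illustrative only; "myApp" is a hypothetical
+// application name):
+//
+//	meta, _ := encodeClientMetadata("myApp", maxClientMetadataSize)
+//	if len(meta) > 0 {
+//		// append meta as the "client" document, as handshakeCommand does below
+//	}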
+
+// handshakeCommand appends all necessary command fields as well as client metadata, SASL supported mechs, and compression.
+func (h *Hello) handshakeCommand(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst, err := h.command(dst, desc)
+	if err != nil {
+		return dst, err
+	}
+
+	if h.saslSupportedMechs != "" {
+		dst = bsoncore.AppendStringElement(dst, "saslSupportedMechs", h.saslSupportedMechs)
+	}
+	if h.speculativeAuth != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "speculativeAuthenticate", h.speculativeAuth)
+	}
+	var idx int32
+	idx, dst = bsoncore.AppendArrayElementStart(dst, "compression")
+	for i, compressor := range h.compressors {
+		dst = bsoncore.AppendStringElement(dst, strconv.Itoa(i), compressor)
+	}
+	dst, _ = bsoncore.AppendArrayEnd(dst, idx)
+
+	clientMetadata, _ := encodeClientMetadata(h.appname, maxClientMetadataSize)
+
+	// If the client metadata is empty, do not append it to the command.
+	if len(clientMetadata) > 0 {
+		dst = bsoncore.AppendDocumentElement(dst, "client", clientMetadata)
+	}
+
+	return dst, nil
+}
+
+// command appends all necessary command fields.
+func (h *Hello) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	// Use "hello" if topology is LoadBalanced, API version is declared or server
+	// has responded with "helloOk". Otherwise, use legacy hello.
+	if h.loadBalanced || h.serverAPI != nil || desc.Server.HelloOK {
+		dst = bsoncore.AppendInt32Element(dst, "hello", 1)
+	} else {
+		dst = bsoncore.AppendInt32Element(dst, handshake.LegacyHello, 1)
+	}
+	dst = bsoncore.AppendBooleanElement(dst, "helloOk", true)
+
+	if tv := h.topologyVersion; tv != nil {
+		var tvIdx int32
+
+		tvIdx, dst = bsoncore.AppendDocumentElementStart(dst, "topologyVersion")
+		dst = bsoncore.AppendObjectIDElement(dst, "processId", tv.ProcessID)
+		dst = bsoncore.AppendInt64Element(dst, "counter", tv.Counter)
+		dst, _ = bsoncore.AppendDocumentEnd(dst, tvIdx)
+	}
+	if h.maxAwaitTimeMS != nil {
+		dst = bsoncore.AppendInt64Element(dst, "maxAwaitTimeMS", *h.maxAwaitTimeMS)
+	}
+	if h.loadBalanced {
+		// The loadBalanced parameter should only be added if it's true. We should never explicitly send
+		// loadBalanced=false per the load balancing spec.
+		dst = bsoncore.AppendBooleanElement(dst, "loadBalanced", true)
+	}
+
+	return dst, nil
+}
+
+// Execute runs this operation.
+func (h *Hello) Execute(ctx context.Context) error {
+	if h.d == nil {
+		return errors.New("a Hello must have a Deployment set before Execute can be called")
+	}
+
+	return h.createOperation().Execute(ctx)
+}
+
+// StreamResponse gets the next streaming Hello response from the server.
+func (h *Hello) StreamResponse(ctx context.Context, conn driver.StreamerConnection) error {
+	return h.createOperation().ExecuteExhaust(ctx, conn)
+}
+
+// isLegacyHandshake returns true if a server API version is not requested and
+// loadBalanced is false. In that case, drivers MUST use the legacy hello for
+// the first message of the initial handshake with the OP_QUERY protocol.
+func isLegacyHandshake(srvAPI *driver.ServerAPIOptions, loadbalanced bool) bool {
+	return srvAPI == nil && !loadbalanced
+}
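+
+// For instance, isLegacyHandshake(nil, false) is true and the first handshake
+// message is sent as a legacy hello, while declaring a server API version or
+// setting loadBalanced makes it false and "hello" is used instead.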
+
+func (h *Hello) createOperation() driver.Operation {
+	op := driver.Operation{
+		Clock:      h.clock,
+		CommandFn:  h.command,
+		Database:   "admin",
+		Deployment: h.d,
+		ProcessResponseFn: func(info driver.ResponseInfo) error {
+			h.res = info.ServerResponse
+			return nil
+		},
+		ServerAPI: h.serverAPI,
+	}
+
+	if isLegacyHandshake(h.serverAPI, h.loadBalanced) {
+		op.Legacy = driver.LegacyHandshake
+	}
+
+	return op
+}
+
+// GetHandshakeInformation performs the MongoDB handshake for the provided connection and returns the relevant
+// information about the server. This function implements the driver.Handshaker interface.
+func (h *Hello) GetHandshakeInformation(ctx context.Context, _ address.Address, c driver.Connection) (driver.HandshakeInformation, error) {
+	deployment := driver.SingleConnectionDeployment{C: c}
+
+	op := driver.Operation{
+		Clock:      h.clock,
+		CommandFn:  h.handshakeCommand,
+		Deployment: deployment,
+		Database:   "admin",
+		ProcessResponseFn: func(info driver.ResponseInfo) error {
+			h.res = info.ServerResponse
+			return nil
+		},
+		ServerAPI: h.serverAPI,
+	}
+
+	if isLegacyHandshake(h.serverAPI, h.loadBalanced) {
+		op.Legacy = driver.LegacyHandshake
+	}
+
+	if err := op.Execute(ctx); err != nil {
+		return driver.HandshakeInformation{}, err
+	}
+
+	info := driver.HandshakeInformation{
+		Description: h.Result(c.Address()),
+	}
+	if speculativeAuthenticate, ok := h.res.Lookup("speculativeAuthenticate").DocumentOK(); ok {
+		info.SpeculativeAuthenticate = speculativeAuthenticate
+	}
+	if serverConnectionID, ok := h.res.Lookup("connectionId").AsInt64OK(); ok {
+		info.ServerConnectionID = &serverConnectionID
+	}
+
+	var err error
+
+	// Cast to bson.Raw to lookup saslSupportedMechs to avoid converting from bsoncore.Value to bson.RawValue for the
+	// StringSliceFromRawValue call.
+	if saslSupportedMechs, lookupErr := bson.Raw(h.res).LookupErr("saslSupportedMechs"); lookupErr == nil {
+		info.SaslSupportedMechs, err = bsonutil.StringSliceFromRawValue("saslSupportedMechs", saslSupportedMechs)
+	}
+	return info, err
+}
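+
+// A minimal usage sketch (illustrative only; "conn" is an assumed
+// driver.Connection):
+//
+//	h := NewHello().AppName("myApp")
+//	info, err := h.GetHandshakeInformation(context.TODO(), conn.Address(), conn)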
+
+// FinishHandshake implements the Handshaker interface. This is a no-op function because a non-authenticated connection
+// does not do anything besides the initial Hello for a handshake.
+func (h *Hello) FinishHandshake(context.Context, driver.Connection) error {
+	return nil
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (h *Hello) Authenticator(authenticator driver.Authenticator) *Hello {
+	if h == nil {
+		h = new(Hello)
+	}
+
+	h.authenticator = authenticator
+	return h
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go
new file mode 100644
index 0000000000000000000000000000000000000000..a65a4895f0634a1450db4981ce7b38308dcc2199
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go
@@ -0,0 +1,319 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Insert performs an insert operation.
+type Insert struct {
+	authenticator            driver.Authenticator
+	bypassDocumentValidation *bool
+	comment                  bsoncore.Value
+	documents                []bsoncore.Document
+	ordered                  *bool
+	session                  *session.Client
+	clock                    *session.ClusterClock
+	collection               string
+	monitor                  *event.CommandMonitor
+	crypt                    driver.Crypt
+	database                 string
+	deployment               driver.Deployment
+	selector                 description.ServerSelector
+	writeConcern             *writeconcern.WriteConcern
+	retry                    *driver.RetryMode
+	result                   InsertResult
+	serverAPI                *driver.ServerAPIOptions
+	timeout                  *time.Duration
+	logger                   *logger.Logger
+}
+
+// InsertResult represents an insert result returned by the server.
+type InsertResult struct {
+	// Number of documents successfully inserted.
+	N int64
+}
+
+func buildInsertResult(response bsoncore.Document) (InsertResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return InsertResult{}, err
+	}
+	ir := InsertResult{}
+	for _, element := range elements {
+		if element.Key() == "n" {
+			var ok bool
+			ir.N, ok = element.Value().AsInt64OK()
+			if !ok {
+				return ir, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type)
+			}
+		}
+	}
+	return ir, nil
+}
+
+// NewInsert constructs and returns a new Insert.
+func NewInsert(documents ...bsoncore.Document) *Insert {
+	return &Insert{
+		documents: documents,
+	}
+}
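+
+// A minimal usage sketch (illustrative only; "dep" is an assumed
+// driver.Deployment):
+//
+//	doc := bsoncore.NewDocumentBuilder().AppendString("name", "alice").Build()
+//	ins := NewInsert(doc).
+//		Database("app").
+//		Collection("users").
+//		Deployment(dep)
+//	if err := ins.Execute(context.TODO()); err != nil {
+//		// handle error
+//	}
+//	n := ins.Result().N // number of documents inserted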
+
+// Result returns the result of executing this operation.
+func (i *Insert) Result() InsertResult { return i.result }
+
+func (i *Insert) processResponse(info driver.ResponseInfo) error {
+	ir, err := buildInsertResult(info.ServerResponse)
+	i.result.N += ir.N
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (i *Insert) Execute(ctx context.Context) error {
+	if i.deployment == nil {
+		return errors.New("the Insert operation must have a Deployment set before Execute can be called")
+	}
+	batches := &driver.Batches{
+		Identifier: "documents",
+		Documents:  i.documents,
+		Ordered:    i.ordered,
+	}
+
+	return driver.Operation{
+		CommandFn:         i.command,
+		ProcessResponseFn: i.processResponse,
+		Batches:           batches,
+		RetryMode:         i.retry,
+		Type:              driver.Write,
+		Client:            i.session,
+		Clock:             i.clock,
+		CommandMonitor:    i.monitor,
+		Crypt:             i.crypt,
+		Database:          i.database,
+		Deployment:        i.deployment,
+		Selector:          i.selector,
+		WriteConcern:      i.writeConcern,
+		ServerAPI:         i.serverAPI,
+		Timeout:           i.timeout,
+		Logger:            i.logger,
+		Name:              driverutil.InsertOp,
+		Authenticator:     i.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (i *Insert) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "insert", i.collection)
+	if i.bypassDocumentValidation != nil && (desc.WireVersion != nil && desc.WireVersion.Includes(4)) {
+		dst = bsoncore.AppendBooleanElement(dst, "bypassDocumentValidation", *i.bypassDocumentValidation)
+	}
+	if i.comment.Type != bsontype.Type(0) {
+		dst = bsoncore.AppendValueElement(dst, "comment", i.comment)
+	}
+	if i.ordered != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "ordered", *i.ordered)
+	}
+	return dst, nil
+}
+
+// BypassDocumentValidation allows the operation to opt out of document-level validation. Valid
+// for server versions >= 3.2. For servers < 3.2, this setting is ignored.
+func (i *Insert) BypassDocumentValidation(bypassDocumentValidation bool) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.bypassDocumentValidation = &bypassDocumentValidation
+	return i
+}
+
+// Comment sets a value to help trace an operation.
+func (i *Insert) Comment(comment bsoncore.Value) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.comment = comment
+	return i
+}
+
+// Documents adds documents to this operation that will be inserted when this operation is
+// executed.
+func (i *Insert) Documents(documents ...bsoncore.Document) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.documents = documents
+	return i
+}
+
+// Ordered sets ordered. If true, the operation returns an error as soon as a write fails;
+// if false, write failures do not stop execution of the operation.
+func (i *Insert) Ordered(ordered bool) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.ordered = &ordered
+	return i
+}
+
+// Session sets the session for this operation.
+func (i *Insert) Session(session *session.Client) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.session = session
+	return i
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (i *Insert) ClusterClock(clock *session.ClusterClock) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.clock = clock
+	return i
+}
+
+// Collection sets the collection that this command will run against.
+func (i *Insert) Collection(collection string) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.collection = collection
+	return i
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (i *Insert) CommandMonitor(monitor *event.CommandMonitor) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.monitor = monitor
+	return i
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (i *Insert) Crypt(crypt driver.Crypt) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.crypt = crypt
+	return i
+}
+
+// Database sets the database to run this operation against.
+func (i *Insert) Database(database string) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.database = database
+	return i
+}
+
+// Deployment sets the deployment to use for this operation.
+func (i *Insert) Deployment(deployment driver.Deployment) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.deployment = deployment
+	return i
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (i *Insert) ServerSelector(selector description.ServerSelector) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.selector = selector
+	return i
+}
+
+// WriteConcern sets the write concern for this operation.
+func (i *Insert) WriteConcern(writeConcern *writeconcern.WriteConcern) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.writeConcern = writeConcern
+	return i
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (i *Insert) Retry(retry driver.RetryMode) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.retry = &retry
+	return i
+}
+
+// ServerAPI sets the server API version for this operation.
+func (i *Insert) ServerAPI(serverAPI *driver.ServerAPIOptions) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.serverAPI = serverAPI
+	return i
+}
+
+// Timeout sets the timeout for this operation.
+func (i *Insert) Timeout(timeout *time.Duration) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.timeout = timeout
+	return i
+}
+
+// Logger sets the logger for this operation.
+func (i *Insert) Logger(logger *logger.Logger) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.logger = logger
+	return i
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (i *Insert) Authenticator(authenticator driver.Authenticator) *Insert {
+	if i == nil {
+		i = new(Insert)
+	}
+
+	i.authenticator = authenticator
+	return i
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go
new file mode 100644
index 0000000000000000000000000000000000000000..3df171e37a2da9be2a3aacac1e0811fcb06e0b9e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go
@@ -0,0 +1,341 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// ListDatabases performs a listDatabases operation.
+type ListDatabases struct {
+	authenticator       driver.Authenticator
+	filter              bsoncore.Document
+	authorizedDatabases *bool
+	nameOnly            *bool
+	session             *session.Client
+	clock               *session.ClusterClock
+	monitor             *event.CommandMonitor
+	database            string
+	deployment          driver.Deployment
+	readPreference      *readpref.ReadPref
+	retry               *driver.RetryMode
+	selector            description.ServerSelector
+	crypt               driver.Crypt
+	serverAPI           *driver.ServerAPIOptions
+	timeout             *time.Duration
+
+	result ListDatabasesResult
+}
+
+// ListDatabasesResult represents a listDatabases result returned by the server.
+type ListDatabasesResult struct {
+	// An array of documents, one document for each database.
+	Databases []databaseRecord
+	// The sum of the size of all the database files on disk in bytes.
+	TotalSize int64
+}
+
+type databaseRecord struct {
+	Name       string
+	SizeOnDisk int64 `bson:"sizeOnDisk"`
+	Empty      bool
+}
+
+func buildListDatabasesResult(response bsoncore.Document) (ListDatabasesResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+	ir := ListDatabasesResult{}
+	for _, element := range elements {
+		switch element.Key() {
+		case "totalSize":
+			var ok bool
+			ir.TotalSize, ok = element.Value().AsInt64OK()
+			if !ok {
+				return ir, fmt.Errorf("response field 'totalSize' is type int64, but received BSON type %s: %s", element.Value().Type, element.Value())
+			}
+		case "databases":
+			arr, ok := element.Value().ArrayOK()
+			if !ok {
+				return ir, fmt.Errorf("response field 'databases' is type array, but received BSON type %s", element.Value().Type)
+			}
+
+			var tmp bsoncore.Document
+			err := bson.Unmarshal(arr, &tmp)
+			if err != nil {
+				return ir, err
+			}
+
+			records, err := tmp.Elements()
+			if err != nil {
+				return ir, err
+			}
+
+			ir.Databases = make([]databaseRecord, len(records))
+			for i, val := range records {
+				valueDoc, ok := val.Value().DocumentOK()
+				if !ok {
+					return ir, fmt.Errorf("'databases' element is type document, but received BSON type %s", val.Value().Type)
+				}
+
+				elems, err := valueDoc.Elements()
+				if err != nil {
+					return ir, err
+				}
+
+				for _, elem := range elems {
+					switch elem.Key() {
+					case "name":
+						ir.Databases[i].Name, ok = elem.Value().StringValueOK()
+						if !ok {
+							return ir, fmt.Errorf("response field 'name' is type string, but received BSON type %s", elem.Value().Type)
+						}
+					case "sizeOnDisk":
+						ir.Databases[i].SizeOnDisk, ok = elem.Value().AsInt64OK()
+						if !ok {
+							return ir, fmt.Errorf("response field 'sizeOnDisk' is type int64, but received BSON type %s", elem.Value().Type)
+						}
+					case "empty":
+						ir.Databases[i].Empty, ok = elem.Value().BooleanOK()
+						if !ok {
+							return ir, fmt.Errorf("response field 'empty' is type bool, but received BSON type %s", elem.Value().Type)
+						}
+					}
+				}
+			}
+		}
+	}
+	return ir, nil
+}
+
+// NewListDatabases constructs and returns a new ListDatabases.
+func NewListDatabases(filter bsoncore.Document) *ListDatabases {
+	return &ListDatabases{
+		filter: filter,
+	}
+}
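+
+// A minimal usage sketch (illustrative only; "dep" is an assumed
+// driver.Deployment):
+//
+//	ld := NewListDatabases(nil).
+//		Database("admin").
+//		Deployment(dep)
+//	if err := ld.Execute(context.TODO()); err != nil {
+//		// handle error
+//	}
+//	total := ld.Result().TotalSize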
+
+// Result returns the result of executing this operation.
+func (ld *ListDatabases) Result() ListDatabasesResult { return ld.result }
+
+func (ld *ListDatabases) processResponse(info driver.ResponseInfo) error {
+	var err error
+
+	ld.result, err = buildListDatabasesResult(info.ServerResponse)
+	return err
+
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (ld *ListDatabases) Execute(ctx context.Context) error {
+	if ld.deployment == nil {
+		return errors.New("the ListDatabases operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         ld.command,
+		ProcessResponseFn: ld.processResponse,
+
+		Client:         ld.session,
+		Clock:          ld.clock,
+		CommandMonitor: ld.monitor,
+		Database:       ld.database,
+		Deployment:     ld.deployment,
+		ReadPreference: ld.readPreference,
+		RetryMode:      ld.retry,
+		Type:           driver.Read,
+		Selector:       ld.selector,
+		Crypt:          ld.crypt,
+		ServerAPI:      ld.serverAPI,
+		Timeout:        ld.timeout,
+		Name:           driverutil.ListDatabasesOp,
+		Authenticator:  ld.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (ld *ListDatabases) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendInt32Element(dst, "listDatabases", 1)
+	if ld.filter != nil {
+
+		dst = bsoncore.AppendDocumentElement(dst, "filter", ld.filter)
+	}
+	if ld.nameOnly != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "nameOnly", *ld.nameOnly)
+	}
+	if ld.authorizedDatabases != nil {
+
+		dst = bsoncore.AppendBooleanElement(dst, "authorizedDatabases", *ld.authorizedDatabases)
+	}
+
+	return dst, nil
+}
+
+// Filter determines what results are returned from listDatabases.
+func (ld *ListDatabases) Filter(filter bsoncore.Document) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.filter = filter
+	return ld
+}
+
+// NameOnly specifies whether to only return database names.
+func (ld *ListDatabases) NameOnly(nameOnly bool) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.nameOnly = &nameOnly
+	return ld
+}
+
+// AuthorizedDatabases specifies whether to only return databases which the user is authorized to use.
+func (ld *ListDatabases) AuthorizedDatabases(authorizedDatabases bool) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.authorizedDatabases = &authorizedDatabases
+	return ld
+}
+
+// Session sets the session for this operation.
+func (ld *ListDatabases) Session(session *session.Client) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.session = session
+	return ld
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (ld *ListDatabases) ClusterClock(clock *session.ClusterClock) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.clock = clock
+	return ld
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (ld *ListDatabases) CommandMonitor(monitor *event.CommandMonitor) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.monitor = monitor
+	return ld
+}
+
+// Database sets the database to run this operation against.
+func (ld *ListDatabases) Database(database string) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.database = database
+	return ld
+}
+
+// Deployment sets the deployment to use for this operation.
+func (ld *ListDatabases) Deployment(deployment driver.Deployment) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.deployment = deployment
+	return ld
+}
+
+// ReadPreference sets the read preference used with this operation.
+func (ld *ListDatabases) ReadPreference(readPreference *readpref.ReadPref) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.readPreference = readPreference
+	return ld
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (ld *ListDatabases) ServerSelector(selector description.ServerSelector) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.selector = selector
+	return ld
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (ld *ListDatabases) Retry(retry driver.RetryMode) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.retry = &retry
+	return ld
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (ld *ListDatabases) Crypt(crypt driver.Crypt) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.crypt = crypt
+	return ld
+}
+
+// ServerAPI sets the server API version for this operation.
+func (ld *ListDatabases) ServerAPI(serverAPI *driver.ServerAPIOptions) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.serverAPI = serverAPI
+	return ld
+}
+
+// Timeout sets the timeout for this operation.
+func (ld *ListDatabases) Timeout(timeout *time.Duration) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.timeout = timeout
+	return ld
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (ld *ListDatabases) Authenticator(authenticator driver.Authenticator) *ListDatabases {
+	if ld == nil {
+		ld = new(ListDatabases)
+	}
+
+	ld.authenticator = authenticator
+	return ld
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e39f5bfbe401b58cae7d20a4a3d60e71cf239f4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go
@@ -0,0 +1,273 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// ListCollections performs a listCollections operation.
+type ListCollections struct {
+	authenticator         driver.Authenticator
+	filter                bsoncore.Document
+	nameOnly              *bool
+	authorizedCollections *bool
+	session               *session.Client
+	clock                 *session.ClusterClock
+	monitor               *event.CommandMonitor
+	crypt                 driver.Crypt
+	database              string
+	deployment            driver.Deployment
+	readPreference        *readpref.ReadPref
+	selector              description.ServerSelector
+	retry                 *driver.RetryMode
+	result                driver.CursorResponse
+	batchSize             *int32
+	serverAPI             *driver.ServerAPIOptions
+	timeout               *time.Duration
+}
+
+// NewListCollections constructs and returns a new ListCollections.
+func NewListCollections(filter bsoncore.Document) *ListCollections {
+	return &ListCollections{
+		filter: filter,
+	}
+}
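+
+// A minimal usage sketch (illustrative only; "dep" is an assumed
+// driver.Deployment):
+//
+//	lc := NewListCollections(nil).
+//		Database("app").
+//		Deployment(dep)
+//	if err := lc.Execute(context.TODO()); err != nil {
+//		// handle error
+//	}
+//	cursor, err := lc.Result(driver.CursorOptions{})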
+
+// Result returns the result of executing this operation.
+func (lc *ListCollections) Result(opts driver.CursorOptions) (*driver.BatchCursor, error) {
+	opts.ServerAPI = lc.serverAPI
+
+	return driver.NewBatchCursor(lc.result, lc.session, lc.clock, opts)
+}
+
+func (lc *ListCollections) processResponse(info driver.ResponseInfo) error {
+	var err error
+	lc.result, err = driver.NewCursorResponse(info)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (lc *ListCollections) Execute(ctx context.Context) error {
+	if lc.deployment == nil {
+		return errors.New("the ListCollections operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         lc.command,
+		ProcessResponseFn: lc.processResponse,
+		RetryMode:         lc.retry,
+		Type:              driver.Read,
+		Client:            lc.session,
+		Clock:             lc.clock,
+		CommandMonitor:    lc.monitor,
+		Crypt:             lc.crypt,
+		Database:          lc.database,
+		Deployment:        lc.deployment,
+		ReadPreference:    lc.readPreference,
+		Selector:          lc.selector,
+		Legacy:            driver.LegacyListCollections,
+		ServerAPI:         lc.serverAPI,
+		Timeout:           lc.timeout,
+		Name:              driverutil.ListCollectionsOp,
+		Authenticator:     lc.authenticator,
+	}.Execute(ctx)
+
+}
+
+func (lc *ListCollections) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendInt32Element(dst, "listCollections", 1)
+	if lc.filter != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "filter", lc.filter)
+	}
+	if lc.nameOnly != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "nameOnly", *lc.nameOnly)
+	}
+	if lc.authorizedCollections != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "authorizedCollections", *lc.authorizedCollections)
+	}
+
+	cursorDoc := bsoncore.NewDocumentBuilder()
+	if lc.batchSize != nil {
+		cursorDoc.AppendInt32("batchSize", *lc.batchSize)
+	}
+	dst = bsoncore.AppendDocumentElement(dst, "cursor", cursorDoc.Build())
+
+	return dst, nil
+}
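+
+// For reference, the command document assembled above has the shape
+//
+//	{listCollections: 1, filter: <document>, nameOnly: <bool>,
+//	 authorizedCollections: <bool>, cursor: {batchSize: <int32>}}
+//
+// The optional elements appear only when the corresponding setter was used;
+// the cursor subdocument is always present.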
+
+// Filter determines what results are returned from listCollections.
+func (lc *ListCollections) Filter(filter bsoncore.Document) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.filter = filter
+	return lc
+}
+
+// NameOnly specifies whether to only return collection names.
+func (lc *ListCollections) NameOnly(nameOnly bool) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.nameOnly = &nameOnly
+	return lc
+}
+
+// AuthorizedCollections specifies whether to only return collections the user
+// is authorized to use.
+func (lc *ListCollections) AuthorizedCollections(authorizedCollections bool) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.authorizedCollections = &authorizedCollections
+	return lc
+}
+
+// Session sets the session for this operation.
+func (lc *ListCollections) Session(session *session.Client) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.session = session
+	return lc
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (lc *ListCollections) ClusterClock(clock *session.ClusterClock) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.clock = clock
+	return lc
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (lc *ListCollections) CommandMonitor(monitor *event.CommandMonitor) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.monitor = monitor
+	return lc
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (lc *ListCollections) Crypt(crypt driver.Crypt) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.crypt = crypt
+	return lc
+}
+
+// Database sets the database to run this operation against.
+func (lc *ListCollections) Database(database string) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.database = database
+	return lc
+}
+
+// Deployment sets the deployment to use for this operation.
+func (lc *ListCollections) Deployment(deployment driver.Deployment) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.deployment = deployment
+	return lc
+}
+
+// ReadPreference sets the read preference used with this operation.
+func (lc *ListCollections) ReadPreference(readPreference *readpref.ReadPref) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.readPreference = readPreference
+	return lc
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (lc *ListCollections) ServerSelector(selector description.ServerSelector) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.selector = selector
+	return lc
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (lc *ListCollections) Retry(retry driver.RetryMode) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.retry = &retry
+	return lc
+}
+
+// BatchSize specifies the number of documents to return in every batch.
+func (lc *ListCollections) BatchSize(batchSize int32) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.batchSize = &batchSize
+	return lc
+}
+
+// ServerAPI sets the server API version for this operation.
+func (lc *ListCollections) ServerAPI(serverAPI *driver.ServerAPIOptions) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.serverAPI = serverAPI
+	return lc
+}
+
+// Timeout sets the timeout for this operation.
+func (lc *ListCollections) Timeout(timeout *time.Duration) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.timeout = timeout
+	return lc
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (lc *ListCollections) Authenticator(authenticator driver.Authenticator) *ListCollections {
+	if lc == nil {
+		lc = new(ListCollections)
+	}
+
+	lc.authenticator = authenticator
+	return lc
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go
new file mode 100644
index 0000000000000000000000000000000000000000..433344f307c7a9ff8377ae300fd6f27966ea2dd0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go
@@ -0,0 +1,247 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// ListIndexes performs a listIndexes operation.
+type ListIndexes struct {
+	authenticator driver.Authenticator
+	batchSize     *int32
+	maxTime       *time.Duration
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	retry         *driver.RetryMode
+	crypt         driver.Crypt
+	serverAPI     *driver.ServerAPIOptions
+	timeout       *time.Duration
+
+	result driver.CursorResponse
+}
+
+// NewListIndexes constructs and returns a new ListIndexes.
+func NewListIndexes() *ListIndexes {
+	return &ListIndexes{}
+}
+
+// Result returns the result of executing this operation.
+func (li *ListIndexes) Result(opts driver.CursorOptions) (*driver.BatchCursor, error) {
+	clientSession := li.session
+	clock := li.clock
+	opts.ServerAPI = li.serverAPI
+	return driver.NewBatchCursor(li.result, clientSession, clock, opts)
+}
+
+func (li *ListIndexes) processResponse(info driver.ResponseInfo) error {
+	var err error
+	li.result, err = driver.NewCursorResponse(info)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (li *ListIndexes) Execute(ctx context.Context) error {
+	if li.deployment == nil {
+		return errors.New("the ListIndexes operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         li.command,
+		ProcessResponseFn: li.processResponse,
+
+		Client:         li.session,
+		Clock:          li.clock,
+		CommandMonitor: li.monitor,
+		Database:       li.database,
+		Deployment:     li.deployment,
+		MaxTime:        li.maxTime,
+		Selector:       li.selector,
+		Crypt:          li.crypt,
+		Legacy:         driver.LegacyListIndexes,
+		RetryMode:      li.retry,
+		Type:           driver.Read,
+		ServerAPI:      li.serverAPI,
+		Timeout:        li.timeout,
+		Name:           driverutil.ListIndexesOp,
+		Authenticator:  li.authenticator,
+	}.Execute(ctx)
+}
+
+func (li *ListIndexes) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "listIndexes", li.collection)
+	cursorIdx, cursorDoc := bsoncore.AppendDocumentStart(nil)
+
+	if li.batchSize != nil {
+		cursorDoc = bsoncore.AppendInt32Element(cursorDoc, "batchSize", *li.batchSize)
+	}
+	cursorDoc, _ = bsoncore.AppendDocumentEnd(cursorDoc, cursorIdx)
+	dst = bsoncore.AppendDocumentElement(dst, "cursor", cursorDoc)
+
+	return dst, nil
+}
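+
+// For reference, the command document assembled above has the shape
+//
+//	{listIndexes: "<collection>", cursor: {batchSize: <int32>}}
+//
+// where batchSize appears only when BatchSize was set on the operation.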
+
+// BatchSize specifies the number of documents to return in every batch.
+func (li *ListIndexes) BatchSize(batchSize int32) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.batchSize = &batchSize
+	return li
+}
+
+// MaxTime specifies the maximum amount of time to allow the query to run on the server.
+func (li *ListIndexes) MaxTime(maxTime *time.Duration) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.maxTime = maxTime
+	return li
+}
+
+// Session sets the session for this operation.
+func (li *ListIndexes) Session(session *session.Client) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.session = session
+	return li
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (li *ListIndexes) ClusterClock(clock *session.ClusterClock) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.clock = clock
+	return li
+}
+
+// Collection sets the collection that this command will run against.
+func (li *ListIndexes) Collection(collection string) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.collection = collection
+	return li
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (li *ListIndexes) CommandMonitor(monitor *event.CommandMonitor) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.monitor = monitor
+	return li
+}
+
+// Database sets the database to run this operation against.
+func (li *ListIndexes) Database(database string) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.database = database
+	return li
+}
+
+// Deployment sets the deployment to use for this operation.
+func (li *ListIndexes) Deployment(deployment driver.Deployment) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.deployment = deployment
+	return li
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (li *ListIndexes) ServerSelector(selector description.ServerSelector) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.selector = selector
+	return li
+}
+
+// Retry enables retryable mode for this operation. Retries are handled automatically in driver.Operation.Execute based
+// on how the operation is set.
+func (li *ListIndexes) Retry(retry driver.RetryMode) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.retry = &retry
+	return li
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (li *ListIndexes) Crypt(crypt driver.Crypt) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.crypt = crypt
+	return li
+}
+
+// ServerAPI sets the server API version for this operation.
+func (li *ListIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.serverAPI = serverAPI
+	return li
+}
+
+// Timeout sets the timeout for this operation.
+func (li *ListIndexes) Timeout(timeout *time.Duration) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.timeout = timeout
+	return li
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (li *ListIndexes) Authenticator(authenticator driver.Authenticator) *ListIndexes {
+	if li == nil {
+		li = new(ListIndexes)
+	}
+
+	li.authenticator = authenticator
+	return li
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go
new file mode 100644
index 0000000000000000000000000000000000000000..1070e7ca703e2ecc55f24586207f89668918a813
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go
@@ -0,0 +1,428 @@
+// Copyright (C) MongoDB, Inc. 2019-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// Update performs an update operation.
+type Update struct {
+	authenticator            driver.Authenticator
+	bypassDocumentValidation *bool
+	comment                  bsoncore.Value
+	ordered                  *bool
+	updates                  []bsoncore.Document
+	session                  *session.Client
+	clock                    *session.ClusterClock
+	collection               string
+	monitor                  *event.CommandMonitor
+	database                 string
+	deployment               driver.Deployment
+	hint                     *bool
+	arrayFilters             *bool
+	selector                 description.ServerSelector
+	writeConcern             *writeconcern.WriteConcern
+	retry                    *driver.RetryMode
+	result                   UpdateResult
+	crypt                    driver.Crypt
+	serverAPI                *driver.ServerAPIOptions
+	let                      bsoncore.Document
+	timeout                  *time.Duration
+	logger                   *logger.Logger
+}
+
+// Upsert contains the information for an upsert in an Update operation.
+type Upsert struct {
+	Index int64
+	ID    interface{} `bson:"_id"`
+}
+
+// UpdateResult contains information for the result of an Update operation.
+type UpdateResult struct {
+	// Number of documents matched.
+	N int64
+	// Number of documents modified.
+	NModified int64
+	// Information about upserted documents.
+	Upserted []Upsert
+}
+
+func buildUpdateResult(response bsoncore.Document) (UpdateResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return UpdateResult{}, err
+	}
+	ur := UpdateResult{}
+	for _, element := range elements {
+		switch element.Key() {
+		case "nModified":
+			var ok bool
+			ur.NModified, ok = element.Value().AsInt64OK()
+			if !ok {
+				return ur, fmt.Errorf("response field 'nModified' is type int32 or int64, but received BSON type %s", element.Value().Type)
+			}
+		case "n":
+			var ok bool
+			ur.N, ok = element.Value().AsInt64OK()
+			if !ok {
+				return ur, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type)
+			}
+		case "upserted":
+			arr, ok := element.Value().ArrayOK()
+			if !ok {
+				return ur, fmt.Errorf("response field 'upserted' is type array, but received BSON type %s", element.Value().Type)
+			}
+
+			var values []bsoncore.Value
+			values, err = arr.Values()
+			if err != nil {
+				break
+			}
+
+			for _, val := range values {
+				valDoc, ok := val.DocumentOK()
+				if !ok {
+					return ur, fmt.Errorf("upserted value is type document, but received BSON type %s", val.Type)
+				}
+				var upsert Upsert
+				if err = bson.Unmarshal(valDoc, &upsert); err != nil {
+					return ur, err
+				}
+				ur.Upserted = append(ur.Upserted, upsert)
+			}
+		}
+	}
+	return ur, nil
+}
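+
+// As a worked example, a server response of the form
+//
+//	{n: 2, nModified: 1, upserted: [{index: 0, _id: <id>}], ok: 1}
+//
+// decodes to UpdateResult{N: 2, NModified: 1, Upserted: []Upsert{{Index: 0, ID: <id>}}}.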
+
+// NewUpdate constructs and returns a new Update.
+func NewUpdate(updates ...bsoncore.Document) *Update {
+	return &Update{
+		updates: updates,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (u *Update) Result() UpdateResult { return u.result }
+
+func (u *Update) processResponse(info driver.ResponseInfo) error {
+	ur, err := buildUpdateResult(info.ServerResponse)
+
+	u.result.N += ur.N
+	u.result.NModified += ur.NModified
+	if info.CurrentIndex > 0 {
+		for ind := range ur.Upserted {
+			ur.Upserted[ind].Index += int64(info.CurrentIndex)
+		}
+	}
+	u.result.Upserted = append(u.result.Upserted, ur.Upserted...)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (u *Update) Execute(ctx context.Context) error {
+	if u.deployment == nil {
+		return errors.New("the Update operation must have a Deployment set before Execute can be called")
+	}
+	batches := &driver.Batches{
+		Identifier: "updates",
+		Documents:  u.updates,
+		Ordered:    u.ordered,
+	}
+
+	return driver.Operation{
+		CommandFn:         u.command,
+		ProcessResponseFn: u.processResponse,
+		Batches:           batches,
+		RetryMode:         u.retry,
+		Type:              driver.Write,
+		Client:            u.session,
+		Clock:             u.clock,
+		CommandMonitor:    u.monitor,
+		Database:          u.database,
+		Deployment:        u.deployment,
+		Selector:          u.selector,
+		WriteConcern:      u.writeConcern,
+		Crypt:             u.crypt,
+		ServerAPI:         u.serverAPI,
+		Timeout:           u.timeout,
+		Logger:            u.logger,
+		Name:              driverutil.UpdateOp,
+		Authenticator:     u.authenticator,
+	}.Execute(ctx)
+}
+
+func (u *Update) command(dst []byte, desc description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "update", u.collection)
+	if u.bypassDocumentValidation != nil &&
+		(desc.WireVersion != nil && desc.WireVersion.Includes(4)) {
+		dst = bsoncore.AppendBooleanElement(dst, "bypassDocumentValidation", *u.bypassDocumentValidation)
+	}
+	if u.comment.Type != bsontype.Type(0) {
+		dst = bsoncore.AppendValueElement(dst, "comment", u.comment)
+	}
+	if u.ordered != nil {
+		dst = bsoncore.AppendBooleanElement(dst, "ordered", *u.ordered)
+	}
+	if u.hint != nil && *u.hint {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(5) {
+			return nil, errors.New("the 'hint' command parameter requires a minimum server wire version of 5")
+		}
+		if !u.writeConcern.Acknowledged() {
+			return nil, errUnacknowledgedHint
+		}
+	}
+	if u.arrayFilters != nil && *u.arrayFilters {
+		if desc.WireVersion == nil || !desc.WireVersion.Includes(6) {
+			return nil, errors.New("the 'arrayFilters' command parameter requires a minimum server wire version of 6")
+		}
+	}
+	if u.let != nil {
+		dst = bsoncore.AppendDocumentElement(dst, "let", u.let)
+	}
+
+	return dst, nil
+}
+
+// BypassDocumentValidation allows the operation to opt-out of document level validation. Valid
+// for server versions >= 3.2. For servers < 3.2, this setting is ignored.
+func (u *Update) BypassDocumentValidation(bypassDocumentValidation bool) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.bypassDocumentValidation = &bypassDocumentValidation
+	return u
+}
+
+// Hint is a flag to indicate that the update document contains a hint. Hint is only supported by
+// servers >= 4.2. Older servers >= 3.4 will report an error for using the hint option. For servers <
+// 3.4, the driver will return an error if the hint option is used.
+func (u *Update) Hint(hint bool) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.hint = &hint
+	return u
+}
+
+// ArrayFilters is a flag to indicate that the update document contains an arrayFilters field. This option is only
+// supported on server versions 3.6 and higher. For servers < 3.6, the driver will return an error.
+func (u *Update) ArrayFilters(arrayFilters bool) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.arrayFilters = &arrayFilters
+	return u
+}
+
+// Ordered sets ordered. If true, the operation returns the error when a write fails;
+// if false, write failures do not stop execution of the operation.
+func (u *Update) Ordered(ordered bool) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.ordered = &ordered
+	return u
+}
+
+// Updates specifies an array of update statements to perform when this operation is executed.
+// Each update document must have the following structure:
+// {q: <query>, u: <update>, multi: <boolean>, collation: Optional<Document>, arrayFilters: Optional<Array>, hint: Optional<string/Document>}.
+func (u *Update) Updates(updates ...bsoncore.Document) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.updates = updates
+	return u
+}
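+
+// A minimal sketch of assembling one update statement with bsoncore, assuming
+// caller-supplied query and update documents:
+//
+//	stmt := bsoncore.NewDocumentBuilder().
+//		AppendDocument("q", query).    // filter selecting the documents
+//		AppendDocument("u", update).   // update document to apply
+//		AppendBoolean("multi", false). // update at most one document
+//		Build()
+//	op := operation.NewUpdate(stmt)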
+
+// Session sets the session for this operation.
+func (u *Update) Session(session *session.Client) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.session = session
+	return u
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (u *Update) ClusterClock(clock *session.ClusterClock) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.clock = clock
+	return u
+}
+
+// Collection sets the collection that this command will run against.
+func (u *Update) Collection(collection string) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.collection = collection
+	return u
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (u *Update) CommandMonitor(monitor *event.CommandMonitor) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.monitor = monitor
+	return u
+}
+
+// Comment sets a value to help trace an operation.
+func (u *Update) Comment(comment bsoncore.Value) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.comment = comment
+	return u
+}
+
+// Database sets the database to run this operation against.
+func (u *Update) Database(database string) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.database = database
+	return u
+}
+
+// Deployment sets the deployment to use for this operation.
+func (u *Update) Deployment(deployment driver.Deployment) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.deployment = deployment
+	return u
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (u *Update) ServerSelector(selector description.ServerSelector) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.selector = selector
+	return u
+}
+
+// WriteConcern sets the write concern for this operation.
+func (u *Update) WriteConcern(writeConcern *writeconcern.WriteConcern) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.writeConcern = writeConcern
+	return u
+}
+
+// Retry enables retryable writes for this operation. Retries are handled automatically in
+// driver.Operation.Execute based on how the operation is set.
+func (u *Update) Retry(retry driver.RetryMode) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.retry = &retry
+	return u
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (u *Update) Crypt(crypt driver.Crypt) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.crypt = crypt
+	return u
+}
+
+// ServerAPI sets the server API version for this operation.
+func (u *Update) ServerAPI(serverAPI *driver.ServerAPIOptions) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.serverAPI = serverAPI
+	return u
+}
+
+// Let specifies the let document to use. This option is only valid for server versions 5.0 and above.
+func (u *Update) Let(let bsoncore.Document) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.let = let
+	return u
+}
+
+// Timeout sets the timeout for this operation.
+func (u *Update) Timeout(timeout *time.Duration) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.timeout = timeout
+	return u
+}
+
+// Logger sets the logger for this operation.
+func (u *Update) Logger(logger *logger.Logger) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.logger = logger
+	return u
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (u *Update) Authenticator(authenticator driver.Authenticator) *Update {
+	if u == nil {
+		u = new(Update)
+	}
+
+	u.authenticator = authenticator
+	return u
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go
new file mode 100644
index 0000000000000000000000000000000000000000..c63e048f2103156022b43dd4d849d563f42f296d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go
@@ -0,0 +1,238 @@
+// Copyright (C) MongoDB, Inc. 2023-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package operation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+// UpdateSearchIndex performs an updateSearchIndex operation.
+type UpdateSearchIndex struct {
+	authenticator driver.Authenticator
+	index         string
+	definition    bsoncore.Document
+	session       *session.Client
+	clock         *session.ClusterClock
+	collection    string
+	monitor       *event.CommandMonitor
+	crypt         driver.Crypt
+	database      string
+	deployment    driver.Deployment
+	selector      description.ServerSelector
+	result        UpdateSearchIndexResult
+	serverAPI     *driver.ServerAPIOptions
+	timeout       *time.Duration
+}
+
+// UpdateSearchIndexResult represents the result of an updateSearchIndex operation.
+type UpdateSearchIndexResult struct {
+	Ok int32
+}
+
+func buildUpdateSearchIndexResult(response bsoncore.Document) (UpdateSearchIndexResult, error) {
+	elements, err := response.Elements()
+	if err != nil {
+		return UpdateSearchIndexResult{}, err
+	}
+	usir := UpdateSearchIndexResult{}
+	for _, element := range elements {
+		if element.Key() == "ok" {
+			var ok bool
+			usir.Ok, ok = element.Value().AsInt32OK()
+			if !ok {
+				return usir, fmt.Errorf("response field 'ok' is type int32, but received BSON type %s", element.Value().Type)
+			}
+		}
+	}
+	return usir, nil
+}
+
+// NewUpdateSearchIndex constructs and returns a new UpdateSearchIndex.
+func NewUpdateSearchIndex(index string, definition bsoncore.Document) *UpdateSearchIndex {
+	return &UpdateSearchIndex{
+		index:      index,
+		definition: definition,
+	}
+}
+
+// Result returns the result of executing this operation.
+func (usi *UpdateSearchIndex) Result() UpdateSearchIndexResult { return usi.result }
+
+func (usi *UpdateSearchIndex) processResponse(info driver.ResponseInfo) error {
+	var err error
+	usi.result, err = buildUpdateSearchIndexResult(info.ServerResponse)
+	return err
+}
+
+// Execute runs this operation and returns an error if the operation did not execute successfully.
+func (usi *UpdateSearchIndex) Execute(ctx context.Context) error {
+	if usi.deployment == nil {
+		return errors.New("the UpdateSearchIndex operation must have a Deployment set before Execute can be called")
+	}
+
+	return driver.Operation{
+		CommandFn:         usi.command,
+		ProcessResponseFn: usi.processResponse,
+		Client:            usi.session,
+		Clock:             usi.clock,
+		CommandMonitor:    usi.monitor,
+		Crypt:             usi.crypt,
+		Database:          usi.database,
+		Deployment:        usi.deployment,
+		Selector:          usi.selector,
+		ServerAPI:         usi.serverAPI,
+		Timeout:           usi.timeout,
+		Authenticator:     usi.authenticator,
+	}.Execute(ctx)
+}
+
+func (usi *UpdateSearchIndex) command(dst []byte, _ description.SelectedServer) ([]byte, error) {
+	dst = bsoncore.AppendStringElement(dst, "updateSearchIndex", usi.collection)
+	dst = bsoncore.AppendStringElement(dst, "name", usi.index)
+	dst = bsoncore.AppendDocumentElement(dst, "definition", usi.definition)
+	return dst, nil
+}
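+
+// For reference, the command document assembled above has the shape
+//
+//	{updateSearchIndex: "<collection>", name: "<index>", definition: <document>}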
+
+// Index specifies the name of the search index being updated.
+func (usi *UpdateSearchIndex) Index(name string) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.index = name
+	return usi
+}
+
+// Definition specifies the updated definition for the search index.
+func (usi *UpdateSearchIndex) Definition(definition bsoncore.Document) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.definition = definition
+	return usi
+}
+
+// Session sets the session for this operation.
+func (usi *UpdateSearchIndex) Session(session *session.Client) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.session = session
+	return usi
+}
+
+// ClusterClock sets the cluster clock for this operation.
+func (usi *UpdateSearchIndex) ClusterClock(clock *session.ClusterClock) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.clock = clock
+	return usi
+}
+
+// Collection sets the collection that this command will run against.
+func (usi *UpdateSearchIndex) Collection(collection string) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.collection = collection
+	return usi
+}
+
+// CommandMonitor sets the monitor to use for APM events.
+func (usi *UpdateSearchIndex) CommandMonitor(monitor *event.CommandMonitor) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.monitor = monitor
+	return usi
+}
+
+// Crypt sets the Crypt object to use for automatic encryption and decryption.
+func (usi *UpdateSearchIndex) Crypt(crypt driver.Crypt) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.crypt = crypt
+	return usi
+}
+
+// Database sets the database to run this operation against.
+func (usi *UpdateSearchIndex) Database(database string) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.database = database
+	return usi
+}
+
+// Deployment sets the deployment to use for this operation.
+func (usi *UpdateSearchIndex) Deployment(deployment driver.Deployment) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.deployment = deployment
+	return usi
+}
+
+// ServerSelector sets the selector used to retrieve a server.
+func (usi *UpdateSearchIndex) ServerSelector(selector description.ServerSelector) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.selector = selector
+	return usi
+}
+
+// ServerAPI sets the server API version for this operation.
+func (usi *UpdateSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.serverAPI = serverAPI
+	return usi
+}
+
+// Timeout sets the timeout for this operation.
+func (usi *UpdateSearchIndex) Timeout(timeout *time.Duration) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.timeout = timeout
+	return usi
+}
+
+// Authenticator sets the authenticator to use for this operation.
+func (usi *UpdateSearchIndex) Authenticator(authenticator driver.Authenticator) *UpdateSearchIndex {
+	if usi == nil {
+		usi = new(UpdateSearchIndex)
+	}
+
+	usi.authenticator = authenticator
+	return usi
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_exhaust.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_exhaust.go
new file mode 100644
index 0000000000000000000000000000000000000000..e0879de3162356490c3c28daf4ec0db71d81e6e0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_exhaust.go
@@ -0,0 +1,37 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"errors"
+)
+
+// ExecuteExhaust reads a response from the provided StreamerConnection. This will error if the connection's
+// CurrentlyStreaming function returns false.
+func (op Operation) ExecuteExhaust(ctx context.Context, conn StreamerConnection) error {
+	if !conn.CurrentlyStreaming() {
+		return errors.New("exhaust read must be done with a connection that is currently streaming")
+	}
+
+	res, err := op.readWireMessage(ctx, conn)
+	if err != nil {
+		return err
+	}
+	if op.ProcessResponseFn != nil {
+		// Server, ConnectionDescription, and CurrentIndex are unused in this mode.
+		info := ResponseInfo{
+			ServerResponse: res,
+			Connection:     conn,
+		}
+		if err = op.ProcessResponseFn(info); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/serverapioptions.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/serverapioptions.go
new file mode 100644
index 0000000000000000000000000000000000000000..a033cf12699290f9be197a5e4a5c2a6ed462294f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/serverapioptions.go
@@ -0,0 +1,36 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+// TestServerAPIVersion is the most recent, stable variant of options.ServerAPIVersion.
+// Only to be used in testing.
+const TestServerAPIVersion = "1"
+
+// ServerAPIOptions represents options used to configure the API version sent to the server
+// when running commands.
+type ServerAPIOptions struct {
+	ServerAPIVersion  string
+	Strict            *bool
+	DeprecationErrors *bool
+}
+
+// NewServerAPIOptions creates a new ServerAPIOptions configured with the provided serverAPIVersion.
+func NewServerAPIOptions(serverAPIVersion string) *ServerAPIOptions {
+	return &ServerAPIOptions{ServerAPIVersion: serverAPIVersion}
+}
+
+// SetStrict specifies whether the server should return errors for features that are not part of the API version.
+func (s *ServerAPIOptions) SetStrict(strict bool) *ServerAPIOptions {
+	s.Strict = &strict
+	return s
+}
+
+// SetDeprecationErrors specifies whether the server should return errors for deprecated features.
+func (s *ServerAPIOptions) SetDeprecationErrors(deprecationErrors bool) *ServerAPIOptions {
+	s.DeprecationErrors = &deprecationErrors
+	return s
+}
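+
+// A minimal usage sketch: declare stable API version "1" (the value of
+// TestServerAPIVersion) and opt in to strict mode.
+//
+//	opts := driver.NewServerAPIOptions("1").SetStrict(true)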
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go
new file mode 100644
index 0000000000000000000000000000000000000000..eff27bfe33d9e47ace33d7a7c3f7bc7762859dc5
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go
@@ -0,0 +1,550 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session // import "go.mongodb.org/mongo-driver/x/mongo/driver/session"
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/internal/uuid"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// ErrSessionEnded is returned when a client session is used after a call to endSession().
+var ErrSessionEnded = errors.New("ended session was used")
+
+// ErrNoTransactStarted is returned if a transaction operation is called when no transaction has started.
+var ErrNoTransactStarted = errors.New("no transaction started")
+
+// ErrTransactInProgress is returned if startTransaction() is called when a transaction is in progress.
+var ErrTransactInProgress = errors.New("transaction already in progress")
+
+// ErrAbortAfterCommit is returned when abort is called after a commit.
+var ErrAbortAfterCommit = errors.New("cannot call abortTransaction after calling commitTransaction")
+
+// ErrAbortTwice is returned if abort is called after transaction is already aborted.
+var ErrAbortTwice = errors.New("cannot call abortTransaction twice")
+
+// ErrCommitAfterAbort is returned if commit is called after an abort.
+var ErrCommitAfterAbort = errors.New("cannot call commitTransaction after calling abortTransaction")
+
+// ErrUnackWCUnsupported is returned if an unacknowledged write concern is used for a transaction.
+var ErrUnackWCUnsupported = errors.New("transactions do not support unacknowledged write concerns")
+
+// ErrSnapshotTransaction is returned if a transaction is started on a snapshot session.
+var ErrSnapshotTransaction = errors.New("transactions are not supported in snapshot sessions")
+
+// TransactionState indicates the state of the transactions FSM.
+type TransactionState uint8
+
+// Client Session states
+const (
+	None TransactionState = iota
+	Starting
+	InProgress
+	Committed
+	Aborted
+)
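+
+// A sketch of the transitions implemented by the methods below (StartTransaction
+// may also be called from Committed or Aborted):
+//
+//	None                -- StartTransaction  --> Starting
+//	Starting            -- ApplyCommand      --> InProgress
+//	Starting/InProgress -- CommitTransaction --> Committed
+//	Starting/InProgress -- AbortTransaction  --> Aborted
+//	Committed/Aborted   -- ApplyCommand      --> None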
+
+// String implements the fmt.Stringer interface.
+func (s TransactionState) String() string {
+	switch s {
+	case None:
+		return "none"
+	case Starting:
+		return "starting"
+	case InProgress:
+		return "in progress"
+	case Committed:
+		return "committed"
+	case Aborted:
+		return "aborted"
+	default:
+		return "unknown"
+	}
+}
+
+// LoadBalancedTransactionConnection represents a connection that's pinned by a ClientSession because it's being used
+// to execute a transaction when running against a load balancer. This interface is a copy of driver.PinnedConnection
+// and exists to be able to pin transactions to a connection without causing an import cycle.
+type LoadBalancedTransactionConnection interface {
+	// Functions copied over from driver.Connection.
+	WriteWireMessage(context.Context, []byte) error
+	ReadWireMessage(ctx context.Context) ([]byte, error)
+	Description() description.Server
+	Close() error
+	ID() string
+	ServerConnectionID() *int64
+	DriverConnectionID() uint64 // TODO(GODRIVER-2824): change type to int64.
+	Address() address.Address
+	Stale() bool
+	OIDCTokenGenID() uint64
+	SetOIDCTokenGenID(uint64)
+
+	// Functions copied over from driver.PinnedConnection that are not part of Connection or Expirable.
+	PinToCursor() error
+	PinToTransaction() error
+	UnpinFromCursor() error
+	UnpinFromTransaction() error
+}
+
+// Client is a session for clients to run commands.
+type Client struct {
+	*Server
+	ClientID       uuid.UUID
+	ClusterTime    bson.Raw
+	Consistent     bool // causal consistency
+	OperationTime  *primitive.Timestamp
+	IsImplicit     bool
+	Terminated     bool
+	RetryingCommit bool
+	Committing     bool
+	Aborting       bool
+	RetryWrite     bool
+	RetryRead      bool
+	Snapshot       bool
+
+	// options for the current transaction
+	// most recently set by transactionopt
+	CurrentRc  *readconcern.ReadConcern
+	CurrentRp  *readpref.ReadPref
+	CurrentWc  *writeconcern.WriteConcern
+	CurrentMct *time.Duration
+
+	// default transaction options
+	transactionRc            *readconcern.ReadConcern
+	transactionRp            *readpref.ReadPref
+	transactionWc            *writeconcern.WriteConcern
+	transactionMaxCommitTime *time.Duration
+
+	pool             *Pool
+	TransactionState TransactionState
+	PinnedServer     *description.Server
+	RecoveryToken    bson.Raw
+	PinnedConnection LoadBalancedTransactionConnection
+	SnapshotTime     *primitive.Timestamp
+}
+
+func getClusterTime(clusterTime bson.Raw) (uint32, uint32) {
+	if clusterTime == nil {
+		return 0, 0
+	}
+
+	clusterTimeVal, err := clusterTime.LookupErr("$clusterTime")
+	if err != nil {
+		return 0, 0
+	}
+
+	timestampVal, err := bson.Raw(clusterTimeVal.Value).LookupErr("clusterTime")
+	if err != nil {
+		return 0, 0
+	}
+
+	return timestampVal.Timestamp()
+}
+
+// MaxClusterTime compares 2 clusterTime documents and returns the document representing the highest cluster time.
+func MaxClusterTime(ct1, ct2 bson.Raw) bson.Raw {
+	epoch1, ord1 := getClusterTime(ct1)
+	epoch2, ord2 := getClusterTime(ct2)
+
+	switch {
+	case epoch1 > epoch2:
+		return ct1
+	case epoch1 < epoch2:
+		return ct2
+	case ord1 > ord2:
+		return ct1
+	case ord1 < ord2:
+		return ct2
+	}
+
+	return ct1
+}
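+
+// Cluster times are compared by the (epoch, ordinal) pair of the embedded
+// {$clusterTime: {clusterTime: <timestamp>}} value: the higher epoch wins,
+// the ordinal breaks ties, and on a full tie the first argument is returned.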
+
+// NewImplicitClientSession creates a new implicit client-side session.
+func NewImplicitClientSession(pool *Pool, clientID uuid.UUID) *Client {
+	// Server-side session checkout for implicit sessions is deferred until after checking out a
+	// connection, so don't check out a server-side session right now. This will limit the number of
+	// implicit sessions to no greater than an application's maxPoolSize.
+
+	return &Client{
+		pool:       pool,
+		ClientID:   clientID,
+		IsImplicit: true,
+	}
+}
+
+// NewClientSession creates a new explicit client-side session.
+func NewClientSession(pool *Pool, clientID uuid.UUID, opts ...*ClientOptions) (*Client, error) {
+	c := &Client{
+		pool:     pool,
+		ClientID: clientID,
+	}
+
+	mergedOpts := mergeClientOptions(opts...)
+	if mergedOpts.DefaultReadPreference != nil {
+		c.transactionRp = mergedOpts.DefaultReadPreference
+	}
+	if mergedOpts.DefaultReadConcern != nil {
+		c.transactionRc = mergedOpts.DefaultReadConcern
+	}
+	if mergedOpts.DefaultWriteConcern != nil {
+		c.transactionWc = mergedOpts.DefaultWriteConcern
+	}
+	if mergedOpts.DefaultMaxCommitTime != nil {
+		c.transactionMaxCommitTime = mergedOpts.DefaultMaxCommitTime
+	}
+	if mergedOpts.Snapshot != nil {
+		c.Snapshot = *mergedOpts.Snapshot
+	}
+
+	// For explicit sessions, the default for causalConsistency is true, unless Snapshot is
+	// enabled, then it's false. Set the default and then allow any explicit causalConsistency
+	// setting to override it.
+	c.Consistent = !c.Snapshot
+	if mergedOpts.CausalConsistency != nil {
+		c.Consistent = *mergedOpts.CausalConsistency
+	}
+
+	if c.Consistent && c.Snapshot {
+		return nil, errors.New("causal consistency and snapshot cannot both be set for a session")
+	}
+
+	if err := c.SetServer(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+// SetServer will check out a session from the client session pool.
+func (c *Client) SetServer() error {
+	var err error
+	c.Server, err = c.pool.GetSession()
+	return err
+}
+
+// AdvanceClusterTime updates the session's cluster time.
+func (c *Client) AdvanceClusterTime(clusterTime bson.Raw) error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+	c.ClusterTime = MaxClusterTime(c.ClusterTime, clusterTime)
+	return nil
+}
+
+// AdvanceOperationTime updates the session's operation time.
+func (c *Client) AdvanceOperationTime(opTime *primitive.Timestamp) error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+
+	if c.OperationTime == nil {
+		c.OperationTime = opTime
+		return nil
+	}
+
+	if opTime.T > c.OperationTime.T {
+		c.OperationTime = opTime
+	} else if (opTime.T == c.OperationTime.T) && (opTime.I > c.OperationTime.I) {
+		c.OperationTime = opTime
+	}
+
+	return nil
+}
+
+// UpdateUseTime sets the session's last used time to the current time. This must be called whenever the session is
+// used to send a command to the server to ensure that the session is not prematurely marked expired in the driver's
+// session pool. If the session has already been ended, this method will return ErrSessionEnded.
+func (c *Client) UpdateUseTime() error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+	c.updateUseTime()
+	return nil
+}
+
+// UpdateRecoveryToken updates the session's recovery token from the server response.
+func (c *Client) UpdateRecoveryToken(response bson.Raw) {
+	if c == nil {
+		return
+	}
+
+	token, err := response.LookupErr("recoveryToken")
+	if err != nil {
+		return
+	}
+
+	c.RecoveryToken = token.Document()
+}
+
+// UpdateSnapshotTime updates the session's value for the atClusterTime field of ReadConcern.
+func (c *Client) UpdateSnapshotTime(response bsoncore.Document) {
+	if c == nil {
+		return
+	}
+
+	subDoc := response
+	if cur, ok := response.Lookup("cursor").DocumentOK(); ok {
+		subDoc = cur
+	}
+
+	ssTimeElem, err := subDoc.LookupErr("atClusterTime")
+	if err != nil {
+		// atClusterTime not included by the server
+		return
+	}
+
+	t, i := ssTimeElem.Timestamp()
+	c.SnapshotTime = &primitive.Timestamp{
+		T: t,
+		I: i,
+	}
+}
+
+// ClearPinnedResources clears the pinned server and/or connection associated with the session.
+func (c *Client) ClearPinnedResources() error {
+	if c == nil {
+		return nil
+	}
+
+	c.PinnedServer = nil
+	if c.PinnedConnection != nil {
+		if err := c.PinnedConnection.UnpinFromTransaction(); err != nil {
+			return err
+		}
+		if err := c.PinnedConnection.Close(); err != nil {
+			return err
+		}
+	}
+	c.PinnedConnection = nil
+	return nil
+}
+
+// unpinConnection gracefully unpins the connection associated with the session
+// if there is one. This is done via the pinned connection's
+// UnpinFromTransaction function.
+func (c *Client) unpinConnection() error {
+	if c == nil || c.PinnedConnection == nil {
+		return nil
+	}
+
+	err := c.PinnedConnection.UnpinFromTransaction()
+	closeErr := c.PinnedConnection.Close()
+	if err == nil && closeErr != nil {
+		err = closeErr
+	}
+	c.PinnedConnection = nil
+	return err
+}
+
+// EndSession ends the session.
+func (c *Client) EndSession() {
+	if c.Terminated {
+		return
+	}
+	c.Terminated = true
+
+	// Ignore the error when unpinning the connection because we can't do
+	// anything about it if it doesn't work. Typically the only errors that can
+	// happen here indicate that something went wrong with the connection state,
+	// like it wasn't marked as pinned or attempted to return to the wrong pool.
+	_ = c.unpinConnection()
+	c.pool.ReturnSession(c.Server)
+}
+
+// TransactionInProgress returns true if the client session is in an active transaction.
+func (c *Client) TransactionInProgress() bool {
+	return c.TransactionState == InProgress
+}
+
+// TransactionStarting returns true if the client session is starting a transaction.
+func (c *Client) TransactionStarting() bool {
+	return c.TransactionState == Starting
+}
+
+// TransactionRunning returns true if the client session has started the transaction
+// and it hasn't been committed or aborted.
+func (c *Client) TransactionRunning() bool {
+	return c != nil && (c.TransactionState == Starting || c.TransactionState == InProgress)
+}
+
+// TransactionCommitted returns true if the client session just committed a transaction.
+func (c *Client) TransactionCommitted() bool {
+	return c.TransactionState == Committed
+}
+
+// CheckStartTransaction checks whether a transaction is allowed to start and
+// returns an error if not.
+func (c *Client) CheckStartTransaction() error {
+	if c.TransactionState == InProgress || c.TransactionState == Starting {
+		return ErrTransactInProgress
+	}
+	if c.Snapshot {
+		return ErrSnapshotTransaction
+	}
+	return nil
+}
+
+// StartTransaction initializes the transaction options and advances the state machine.
+// It does not contact the server to start the transaction.
+func (c *Client) StartTransaction(opts *TransactionOptions) error {
+	err := c.CheckStartTransaction()
+	if err != nil {
+		return err
+	}
+
+	c.IncrementTxnNumber()
+	c.RetryingCommit = false
+
+	if opts != nil {
+		c.CurrentRc = opts.ReadConcern
+		c.CurrentRp = opts.ReadPreference
+		c.CurrentWc = opts.WriteConcern
+		c.CurrentMct = opts.MaxCommitTime
+	}
+
+	if c.CurrentRc == nil {
+		c.CurrentRc = c.transactionRc
+	}
+
+	if c.CurrentRp == nil {
+		c.CurrentRp = c.transactionRp
+	}
+
+	if c.CurrentWc == nil {
+		c.CurrentWc = c.transactionWc
+	}
+
+	if c.CurrentMct == nil {
+		c.CurrentMct = c.transactionMaxCommitTime
+	}
+
+	if !writeconcern.AckWrite(c.CurrentWc) {
+		_ = c.clearTransactionOpts()
+		return ErrUnackWCUnsupported
+	}
+
+	c.TransactionState = Starting
+	return c.ClearPinnedResources()
+}
+
+// CheckCommitTransaction checks whether the transaction is allowed to be committed
+// and returns an error if not.
+func (c *Client) CheckCommitTransaction() error {
+	if c.TransactionState == None {
+		return ErrNoTransactStarted
+	} else if c.TransactionState == Aborted {
+		return ErrCommitAfterAbort
+	}
+	return nil
+}
+
+// CommitTransaction updates the state for a successfully committed transaction and returns
+// an error if not permissible.  It does not actually perform the commit.
+func (c *Client) CommitTransaction() error {
+	err := c.CheckCommitTransaction()
+	if err != nil {
+		return err
+	}
+	c.TransactionState = Committed
+	return nil
+}
+
+// UpdateCommitTransactionWriteConcern will set the write concern to majority and potentially set a
+// wtimeout of 10 seconds. This should be called after a commit transaction operation fails with a
+// retryable error or after a successful commit transaction operation.
+func (c *Client) UpdateCommitTransactionWriteConcern() {
+	wc := c.CurrentWc
+	timeout := 10 * time.Second
+	if wc != nil && wc.GetWTimeout() != 0 {
+		timeout = wc.GetWTimeout()
+	}
+	c.CurrentWc = wc.WithOptions(writeconcern.WMajority(), writeconcern.WTimeout(timeout))
+}
+
+// CheckAbortTransaction checks whether the transaction is allowed to be aborted
+// and returns an error if not.
+func (c *Client) CheckAbortTransaction() error {
+	switch {
+	case c.TransactionState == None:
+		return ErrNoTransactStarted
+	case c.TransactionState == Committed:
+		return ErrAbortAfterCommit
+	case c.TransactionState == Aborted:
+		return ErrAbortTwice
+	}
+	return nil
+}
+
+// AbortTransaction updates the state for a successfully aborted transaction and returns
+// an error if not permissible.  It does not actually perform the abort.
+func (c *Client) AbortTransaction() error {
+	err := c.CheckAbortTransaction()
+	if err != nil {
+		return err
+	}
+	c.TransactionState = Aborted
+	return c.clearTransactionOpts()
+}
+
+// StartCommand updates the session's internal state at the beginning of an operation. This must be called before
+// server selection is done for the operation as the session's state can impact the result of that process.
+func (c *Client) StartCommand() error {
+	if c == nil {
+		return nil
+	}
+
+	// If we're executing the first operation using this session after a transaction, we must ensure that the session
+	// is not pinned to any resources.
+	if !c.TransactionRunning() && !c.Committing && !c.Aborting {
+		return c.ClearPinnedResources()
+	}
+	return nil
+}
+
+// ApplyCommand advances the state machine upon command execution. This must be called after server selection is
+// complete.
+func (c *Client) ApplyCommand(desc description.Server) error {
+	if c.Committing {
+		// Do not change state if committing after already committed
+		return nil
+	}
+	if c.TransactionState == Starting {
+		c.TransactionState = InProgress
+		// If this is in a transaction and the server is a mongos, pin it
+		if desc.Kind == description.Mongos {
+			c.PinnedServer = &desc
+		}
+	} else if c.TransactionState == Committed || c.TransactionState == Aborted {
+		c.TransactionState = None
+		return c.clearTransactionOpts()
+	}
+
+	return nil
+}
+
+func (c *Client) clearTransactionOpts() error {
+	c.RetryingCommit = false
+	c.Aborting = false
+	c.Committing = false
+	c.CurrentWc = nil
+	c.CurrentRp = nil
+	c.CurrentRc = nil
+	c.RecoveryToken = nil
+
+	return c.ClearPinnedResources()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/cluster_clock.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/cluster_clock.go
new file mode 100644
index 0000000000000000000000000000000000000000..961f2274e2fa3799ad34a481ca3c343c7a16ca19
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/cluster_clock.go
@@ -0,0 +1,36 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+// ClusterClock represents a logical clock for keeping track of cluster time.
+type ClusterClock struct {
+	clusterTime bson.Raw
+	lock        sync.Mutex
+}
+
+// GetClusterTime returns the cluster's current time.
+func (cc *ClusterClock) GetClusterTime() bson.Raw {
+	var ct bson.Raw
+	cc.lock.Lock()
+	ct = cc.clusterTime
+	cc.lock.Unlock()
+
+	return ct
+}
+
+// AdvanceClusterTime updates the cluster's current time.
+func (cc *ClusterClock) AdvanceClusterTime(clusterTime bson.Raw) {
+	cc.lock.Lock()
+	cc.clusterTime = MaxClusterTime(cc.clusterTime, clusterTime)
+	cc.lock.Unlock()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..80b2ac2dd5e872a2cd0dbc2e87de9f906caef2c8
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2024-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package session is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package session
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee7c301d649a8612f518edba984b0e75aaca2418
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/options.go
@@ -0,0 +1,62 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo/readconcern"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+	"go.mongodb.org/mongo-driver/mongo/writeconcern"
+)
+
+// ClientOptions represents all possible options for creating a client session.
+type ClientOptions struct {
+	CausalConsistency     *bool
+	DefaultReadConcern    *readconcern.ReadConcern
+	DefaultWriteConcern   *writeconcern.WriteConcern
+	DefaultReadPreference *readpref.ReadPref
+	DefaultMaxCommitTime  *time.Duration
+	Snapshot              *bool
+}
+
+// TransactionOptions represents all possible options for starting a transaction in a session.
+type TransactionOptions struct {
+	ReadConcern    *readconcern.ReadConcern
+	WriteConcern   *writeconcern.WriteConcern
+	ReadPreference *readpref.ReadPref
+	MaxCommitTime  *time.Duration
+}
+
+func mergeClientOptions(opts ...*ClientOptions) *ClientOptions {
+	c := &ClientOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.CausalConsistency != nil {
+			c.CausalConsistency = opt.CausalConsistency
+		}
+		if opt.DefaultReadConcern != nil {
+			c.DefaultReadConcern = opt.DefaultReadConcern
+		}
+		if opt.DefaultReadPreference != nil {
+			c.DefaultReadPreference = opt.DefaultReadPreference
+		}
+		if opt.DefaultWriteConcern != nil {
+			c.DefaultWriteConcern = opt.DefaultWriteConcern
+		}
+		if opt.DefaultMaxCommitTime != nil {
+			c.DefaultMaxCommitTime = opt.DefaultMaxCommitTime
+		}
+		if opt.Snapshot != nil {
+			c.Snapshot = opt.Snapshot
+		}
+	}
+
+	return c
+}
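+
+// Merging is last-writer-wins per field: for each option, the last non-nil
+// value across the variadic arguments is kept. For example, merging
+// {Snapshot: &t} followed by {Snapshot: &f} yields Snapshot == &f.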
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go
new file mode 100644
index 0000000000000000000000000000000000000000..b1e45552a70db13b76a8665dbc607d2e8db8b2d2
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go
@@ -0,0 +1,74 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/uuid"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Server is an open session with the server.
+type Server struct {
+	SessionID bsoncore.Document
+	TxnNumber int64
+	LastUsed  time.Time
+	Dirty     bool
+}
+
+// expired returns whether the session has expired given a timeout in minutes.
+// A session is considered expired if it has less than 1 minute left before
+// becoming stale.
+func (ss *Server) expired(topoDesc topologyDescription) bool {
+	// There is no server monitoring in LB mode, so we do not track session timeout minutes from server hello responses
+	// and never consider sessions to be expired.
+	if topoDesc.kind == description.LoadBalanced {
+		return false
+	}
+
+	if topoDesc.timeoutMinutes == nil || *topoDesc.timeoutMinutes <= 0 {
+		return true
+	}
+	timeUnused := time.Since(ss.LastUsed).Minutes()
+	return timeUnused > float64(*topoDesc.timeoutMinutes-1)
+}
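+
+// For illustration: with *topoDesc.timeoutMinutes == 30, a session becomes
+// expired once time.Since(ss.LastUsed).Minutes() > 29, i.e. once less than
+// the 1-minute safety margin remains. A nil or non-positive timeout (outside
+// load-balanced mode) causes every session to be treated as expired.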
+
+// updateUseTime updates the last used time for this session. It must be
+// called whenever this server session is used to send a command to the server.
+func (ss *Server) updateUseTime() {
+	ss.LastUsed = time.Now()
+}
+
+func newServerSession() (*Server, error) {
+	id, err := uuid.New()
+	if err != nil {
+		return nil, err
+	}
+
+	idx, idDoc := bsoncore.AppendDocumentStart(nil)
+	idDoc = bsoncore.AppendBinaryElement(idDoc, "id", UUIDSubtype, id[:])
+	idDoc, _ = bsoncore.AppendDocumentEnd(idDoc, idx)
+
+	return &Server{
+		SessionID: idDoc,
+		LastUsed:  time.Now(),
+	}, nil
+}
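+
+// For illustration, the SessionID document built above has the shape
+//
+//	{"id": Binary(subtype 0x04, <16 random bytes>)}
+//
+// a single BSON binary element whose subtype is UUIDSubtype.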
+
+// IncrementTxnNumber increments the transaction number.
+func (ss *Server) IncrementTxnNumber() {
+	ss.TxnNumber++
+}
+
+// MarkDirty marks the session as dirty.
+func (ss *Server) MarkDirty() {
+	ss.Dirty = true
+}
+
+// UUIDSubtype is the BSON binary subtype that a UUID should be encoded as
+const UUIDSubtype byte = 4
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go
new file mode 100644
index 0000000000000000000000000000000000000000..7336f54513a6c81c44d5c9816de5a926348fe512
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go
@@ -0,0 +1,192 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Node represents a server session in a linked list
+type Node struct {
+	*Server
+	next *Node
+	prev *Node
+}
+
+// topologyDescription is used to track a subset of the fields present in a description.Topology instance that are
+// relevant for determining session expiration.
+type topologyDescription struct {
+	kind           description.TopologyKind
+	timeoutMinutes *int64
+}
+
+// Pool is a pool of server sessions that can be reused.
+type Pool struct {
+	// number of sessions checked out of pool (accessed atomically)
+	checkedOut int64
+
+	descChan       <-chan description.Topology
+	head           *Node
+	tail           *Node
+	latestTopology topologyDescription
+	mutex          sync.Mutex // mutex to protect list and sessionTimeout
+}
+
+func (p *Pool) createServerSession() (*Server, error) {
+	s, err := newServerSession()
+	if err != nil {
+		return nil, err
+	}
+
+	atomic.AddInt64(&p.checkedOut, 1)
+	return s, nil
+}
+
+// NewPool creates a new server session pool
+func NewPool(descChan <-chan description.Topology) *Pool {
+	p := &Pool{
+		descChan: descChan,
+	}
+
+	return p
+}
+
+// assumes caller has mutex to protect the pool
+func (p *Pool) updateTimeout() {
+	select {
+	case newDesc := <-p.descChan:
+		p.latestTopology = topologyDescription{
+			kind:           newDesc.Kind,
+			timeoutMinutes: newDesc.SessionTimeoutMinutesPtr,
+		}
+	default:
+		// no new description waiting
+	}
+}
+
+// GetSession retrieves an unexpired session from the pool.
+func (p *Pool) GetSession() (*Server, error) {
+	p.mutex.Lock() // prevent changing the linked list while seeing if sessions have expired
+	defer p.mutex.Unlock()
+
+	// empty pool
+	if p.head == nil && p.tail == nil {
+		return p.createServerSession()
+	}
+
+	p.updateTimeout()
+	for p.head != nil {
+		// pull session from head of queue and return if it is valid for at least 1 more minute
+		if p.head.expired(p.latestTopology) {
+			p.head = p.head.next
+			continue
+		}
+
+		// found unexpired session
+		session := p.head.Server
+		if p.head.next != nil {
+			p.head.next.prev = nil
+		}
+		if p.tail == p.head {
+			p.tail = nil
+			p.head = nil
+		} else {
+			p.head = p.head.next
+		}
+
+		atomic.AddInt64(&p.checkedOut, 1)
+		return session, nil
+	}
+
+	// no valid session found
+	p.tail = nil // empty list
+	return p.createServerSession()
+}
+
+// ReturnSession returns a session to the pool if it has not expired.
+func (p *Pool) ReturnSession(ss *Server) {
+	if ss == nil {
+		return
+	}
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	atomic.AddInt64(&p.checkedOut, -1)
+	p.updateTimeout()
+	// check sessions at end of queue for expired
+	// stop checking after hitting the first valid session
+	for p.tail != nil && p.tail.expired(p.latestTopology) {
+		if p.tail.prev != nil {
+			p.tail.prev.next = nil
+		}
+		p.tail = p.tail.prev
+	}
+
+	// session expired
+	if ss.expired(p.latestTopology) {
+		return
+	}
+
+	// session is dirty
+	if ss.Dirty {
+		return
+	}
+
+	newNode := &Node{
+		Server: ss,
+		next:   nil,
+		prev:   nil,
+	}
+
+	// empty list
+	if p.tail == nil {
+		p.head = newNode
+		p.tail = newNode
+		return
+	}
+
+	// at least 1 valid session in list
+	newNode.next = p.head
+	p.head.prev = newNode
+	p.head = newNode
+}
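+
+// For illustration: ReturnSession pushes onto the head of the list and
+// GetSession pops from the head, so the pool behaves as a LIFO stack. The
+// most recently returned session (the one least likely to have expired) is
+// reused first, while expired sessions are pruned from the tail.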
+
+// IDSlice returns a slice of session IDs for each session in the pool
+func (p *Pool) IDSlice() []bsoncore.Document {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	var ids []bsoncore.Document
+	for node := p.head; node != nil; node = node.next {
+		ids = append(ids, node.SessionID)
+	}
+
+	return ids
+}
+
+// String implements the Stringer interface
+func (p *Pool) String() string {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	s := ""
+	for head := p.head; head != nil; head = head.next {
+		s += head.SessionID.String() + "\n"
+	}
+
+	return s
+}
+
+// CheckedOut returns number of sessions checked out from pool.
+func (p *Pool) CheckedOut() int64 {
+	return atomic.LoadInt64(&p.checkedOut)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md
new file mode 100644
index 0000000000000000000000000000000000000000..8a67dd99351ad7ce038349f06841e5f1558e1263
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md
@@ -0,0 +1,45 @@
+# Topology Package Design
+
+This document outlines the design for this package.
+
+## Topology
+
+The `Topology` type handles monitoring the state of a MongoDB deployment and selecting servers.
+Updating the description is handled by a finite state machine which implements the server discovery
+and monitoring specification. A `Topology` can be connected and fully disconnected, which enables
+saving resources. The `Topology` type also handles server selection following the server selection
+specification.
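+
+As a minimal sketch of that lifecycle (assuming the driver's `description` and
+`readpref` packages are imported; error handling elided):
+
+```go
+func useTopology(ctx context.Context, topo *Topology) {
+	_ = topo.Connect()
+	// Select any server suitable for reads against the primary.
+	srv, _ := topo.SelectServer(ctx, description.ReadPrefSelector(readpref.Primary()))
+	_ = srv // a driver.Server from which a connection can be checked out
+	_ = topo.Disconnect(ctx)
+}
+```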
+
+## Server
+
+The `Server` type handles heartbeating a MongoDB server and holds a pool of connections.
+
+## Connection
+
+Connections are handled by two main types and an auxiliary type. The two main types are `connection`
+and `Connection`. The first holds most of the logic required to actually read and write wire
+messages. Instances can be created with the `newConnection` method. Inside the `newConnection`
+method the auxiliary type, `initConnection`, is used to perform the connection handshake. This is
+required because the `connection` type does not fully implement `driver.Connection` which is
+required during handshaking. The `Connection` type is what is actually returned to a consumer of the
+`topology` package. This type does implement the `driver.Connection` type, holds a reference to a
+`connection` instance, and exists mainly to prevent accidental continued usage of a connection after
+closing it.
+
+The connection implementations in this package are conduits for wire messages but they have no
+ability to encode, decode, or validate wire messages. That must be handled by consumers.
+
+## Pool
+
+The `pool` type implements a connection pool. It handles caching idle connections and dialing
+new ones, but it does not track a maximum number of connections. That is the responsibility of a
+wrapping type, like `Server`.
+
+The `pool` type has no concept of closing; instead it has concepts of connecting and disconnecting.
+This allows a `Topology` to be disconnected while keeping the memory around so it can be
+reconnected later. There is a `close` method, but it is used to close an individual connection.
+
+There are three methods related to getting and putting connections: `get`, `close`, and `put`. The
+`get` method will either retrieve a connection from the cache or it will dial a new `connection`.
+The `close` method will close the underlying socket of a `connection`. The `put` method will put a
+connection into the pool, placing it in the cache if there is space; otherwise it will close the connection.
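+
+As a rough sketch of that flow, using the method names described above (the
+concrete implementation may use different identifiers; error handling elided):
+
+```go
+func usePool(ctx context.Context, p *pool) {
+	conn, _ := p.get(ctx) // from the cache, or freshly dialed
+	// ... exchange wire messages over conn ...
+	p.put(conn) // cached if there is space, otherwise closed
+}
+```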
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/cancellation_listener.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/cancellation_listener.go
new file mode 100644
index 0000000000000000000000000000000000000000..caca988057a8756f37242f31e49d7b3293d4751d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/cancellation_listener.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import "context"
+
+type cancellationListener interface {
+	Listen(context.Context, func())
+	StopListening() bool
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go
new file mode 100644
index 0000000000000000000000000000000000000000..e00363a5485bf7baa66084f47453542672226621
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go
@@ -0,0 +1,989 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"crypto/tls"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.mongodb.org/mongo-driver/internal/csot"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/ocsp"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage"
+)
+
+// Connection state constants.
+const (
+	connDisconnected int64 = iota
+	connConnected
+	connInitialized
+)
+
+var globalConnectionID uint64 = 1
+
+var (
+	defaultMaxMessageSize        uint32 = 48000000
+	errResponseTooLarge                 = errors.New("length of read message too large")
+	errLoadBalancedStateMismatch        = errors.New("driver attempted to initialize in load balancing mode, but the server does not support this mode")
+)
+
+func nextConnectionID() uint64 { return atomic.AddUint64(&globalConnectionID, 1) }
+
+type connection struct {
+	// state must be accessed using the atomic package and should be at the beginning of the struct.
+	// - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG
+	// - suggested layout: https://go101.org/article/memory-layout.html
+	state int64
+
+	id                   string
+	nc                   net.Conn // When nil, the connection is closed.
+	addr                 address.Address
+	idleTimeout          time.Duration
+	idleStart            atomic.Value // Stores a time.Time
+	readTimeout          time.Duration
+	writeTimeout         time.Duration
+	desc                 description.Server
+	helloRTT             time.Duration
+	compressor           wiremessage.CompressorID
+	zliblevel            int
+	zstdLevel            int
+	connectDone          chan struct{}
+	config               *connectionConfig
+	cancelConnectContext context.CancelFunc
+	connectContextMade   chan struct{}
+	canStream            bool
+	currentlyStreaming   bool
+	connectContextMutex  sync.Mutex
+	cancellationListener cancellationListener
+	serverConnectionID   *int64 // the server's ID for this client's connection
+
+	// pool related fields
+	pool *pool
+
+	// TODO(GODRIVER-2824): change driverConnectionID type to int64.
+	driverConnectionID uint64
+	generation         uint64
+
+	// awaitRemainingBytes indicates the size of the server response that was not completely
+	// read before returning the connection to the pool.
+	awaitRemainingBytes *int32
+
+	// oidcTokenGenID is the monotonic generation ID for OIDC tokens, used to invalidate
+	// accessTokens in the OIDC authenticator cache.
+	oidcTokenGenID uint64
+}
+
+// newConnection handles the creation of a connection. It does not connect the connection.
+func newConnection(addr address.Address, opts ...ConnectionOption) *connection {
+	cfg := newConnectionConfig(opts...)
+
+	id := fmt.Sprintf("%s[-%d]", addr, nextConnectionID())
+
+	c := &connection{
+		id:                   id,
+		addr:                 addr,
+		idleTimeout:          cfg.idleTimeout,
+		readTimeout:          cfg.readTimeout,
+		writeTimeout:         cfg.writeTimeout,
+		connectDone:          make(chan struct{}),
+		config:               cfg,
+		connectContextMade:   make(chan struct{}),
+		cancellationListener: newCancellListener(),
+	}
+	// Connections to non-load balanced deployments should eagerly set the generation numbers so errors encountered
+	// at any point during connection establishment can be processed without the connection being considered stale.
+	if !c.config.loadBalanced {
+		c.setGenerationNumber()
+	}
+	atomic.StoreInt64(&c.state, connInitialized)
+
+	return c
+}
+
+// setGenerationNumber sets the connection's generation number if a callback has been provided to do so in connection
+// configuration.
+func (c *connection) setGenerationNumber() {
+	if c.config.getGenerationFn != nil {
+		c.generation = c.config.getGenerationFn(c.desc.ServiceID)
+	}
+}
+
+// hasGenerationNumber returns true if the connection has set its generation number. If so, this indicates that the
+// generationNumberFn provided via the connection options has been called exactly once.
+func (c *connection) hasGenerationNumber() bool {
+	if !c.config.loadBalanced {
+		// The generation is known for all non-LB clusters once the connection object has been created.
+		return true
+	}
+
+	// For LB clusters, we set the generation after the initial handshake, so we know it's set if the connection
+	// description has been updated to reflect that it's behind an LB.
+	return c.desc.LoadBalanced()
+}
+
+func configureTLS(ctx context.Context,
+	tlsConnSource tlsConnectionSource,
+	nc net.Conn,
+	addr address.Address,
+	config *tls.Config,
+	ocspOpts *ocsp.VerifyOptions,
+) (net.Conn, error) {
+	// Ensure config.ServerName is always set for SNI.
+	if config.ServerName == "" {
+		hostname := addr.String()
+		colonPos := strings.LastIndex(hostname, ":")
+		if colonPos == -1 {
+			colonPos = len(hostname)
+		}
+
+		hostname = hostname[:colonPos]
+		config.ServerName = hostname
+	}
+
+	client := tlsConnSource.Client(nc, config)
+	if err := clientHandshake(ctx, client); err != nil {
+		return nil, err
+	}
+
+	// Only do OCSP verification if TLS verification is requested.
+	if !config.InsecureSkipVerify {
+		if ocspErr := ocsp.Verify(ctx, client.ConnectionState(), ocspOpts); ocspErr != nil {
+			return nil, ocspErr
+		}
+	}
+	return client, nil
+}
+
+// connect handles the I/O for a connection. It will dial, configure TLS, and perform initialization
+// handshakes. All errors returned by connect are considered "before the handshake completes" and
+// must be handled by calling the appropriate SDAM handshake error handler.
+func (c *connection) connect(ctx context.Context) (err error) {
+	if !atomic.CompareAndSwapInt64(&c.state, connInitialized, connConnected) {
+		return nil
+	}
+
+	defer close(c.connectDone)
+
+	// If connect returns an error, set the connection status as disconnected and close the
+	// underlying net.Conn if it was created.
+	defer func() {
+		if err != nil {
+			atomic.StoreInt64(&c.state, connDisconnected)
+
+			if c.nc != nil {
+				_ = c.nc.Close()
+			}
+		}
+	}()
+
+	// Create separate contexts for dialing a connection and doing the MongoDB/auth handshakes.
+	//
+	// handshakeCtx is simply a cancellable version of ctx because there's no default timeout that needs to be applied
+	// to the full handshake. The cancellation allows consumers to bail out early when dialing a connection if it's no
+	// longer required. This is done while holding the lock because it accesses the shared cancelConnectContext field.
+	//
+	// dialCtx is equal to handshakeCtx if connectTimeoutMS=0. Otherwise, it is derived from handshakeCtx so the
+	// cancellation still applies but with an added timeout to ensure the connectTimeoutMS option is applied to socket
+	// establishment and the TLS handshake as a whole. This is created outside of the connectContextMutex lock to avoid
+	// holding the lock longer than necessary.
+	c.connectContextMutex.Lock()
+	var handshakeCtx context.Context
+	handshakeCtx, c.cancelConnectContext = context.WithCancel(ctx)
+	c.connectContextMutex.Unlock()
+
+	dialCtx := handshakeCtx
+	var dialCancel context.CancelFunc
+	if c.config.connectTimeout != 0 {
+		dialCtx, dialCancel = context.WithTimeout(handshakeCtx, c.config.connectTimeout)
+		defer dialCancel()
+	}
+
+	defer func() {
+		var cancelFn context.CancelFunc
+
+		c.connectContextMutex.Lock()
+		cancelFn = c.cancelConnectContext
+		c.cancelConnectContext = nil
+		c.connectContextMutex.Unlock()
+
+		if cancelFn != nil {
+			cancelFn()
+		}
+	}()
+
+	close(c.connectContextMade)
+
+	// Assign the result of DialContext to a temporary net.Conn to ensure that c.nc is not set in an error case.
+	tempNc, err := c.config.dialer.DialContext(dialCtx, c.addr.Network(), c.addr.String())
+	if err != nil {
+		return ConnectionError{Wrapped: err, init: true}
+	}
+	c.nc = tempNc
+
+	if c.config.tlsConfig != nil {
+		tlsConfig := c.config.tlsConfig.Clone()
+
+		// Store the result of configureTLS in a separate variable from c.nc to avoid overwriting c.nc with nil in
+		// error cases.
+		ocspOpts := &ocsp.VerifyOptions{
+			Cache:                   c.config.ocspCache,
+			DisableEndpointChecking: c.config.disableOCSPEndpointCheck,
+			HTTPClient:              c.config.httpClient,
+		}
+		tlsNc, err := configureTLS(dialCtx, c.config.tlsConnectionSource, c.nc, c.addr, tlsConfig, ocspOpts)
+		if err != nil {
+			return ConnectionError{Wrapped: err, init: true}
+		}
+		c.nc = tlsNc
+	}
+
+	// Running hello and authentication is handled by a handshaker on the configuration instance.
+	handshaker := c.config.handshaker
+	if handshaker == nil {
+		return nil
+	}
+
+	var handshakeInfo driver.HandshakeInformation
+	handshakeStartTime := time.Now()
+	handshakeConn := initConnection{c}
+	handshakeInfo, err = handshaker.GetHandshakeInformation(handshakeCtx, c.addr, handshakeConn)
+	if err == nil {
+		// We only need to retain the Description field as the connection's description. The authentication-related
+		// fields in handshakeInfo are tracked by the handshaker if necessary.
+		c.desc = handshakeInfo.Description
+		c.serverConnectionID = handshakeInfo.ServerConnectionID
+		c.helloRTT = time.Since(handshakeStartTime)
+
+		// If the application has indicated that the cluster is load balanced, ensure the server has included serviceId
+		// in its handshake response to signal that it knows it's behind an LB as well.
+		if c.config.loadBalanced && c.desc.ServiceID == nil {
+			err = errLoadBalancedStateMismatch
+		}
+	}
+	if err == nil {
+		// For load-balanced connections, the generation number depends on the service ID, which isn't known until the
+		// initial MongoDB handshake is done. To account for this, we don't attempt to set the connection's generation
+		// number unless GetHandshakeInformation succeeds.
+		if c.config.loadBalanced {
+			c.setGenerationNumber()
+		}
+
+		// If we successfully finished the first part of the handshake and verified LB state, continue with the rest of
+		// the handshake.
+		err = handshaker.FinishHandshake(handshakeCtx, handshakeConn)
+	}
+
+	// We have a failed handshake here
+	if err != nil {
+		return ConnectionError{Wrapped: err, init: true}
+	}
+
+	if len(c.desc.Compression) > 0 {
+	clientMethodLoop:
+		for _, method := range c.config.compressors {
+			for _, serverMethod := range c.desc.Compression {
+				if method != serverMethod {
+					continue
+				}
+
+				switch strings.ToLower(method) {
+				case "snappy":
+					c.compressor = wiremessage.CompressorSnappy
+				case "zlib":
+					c.compressor = wiremessage.CompressorZLib
+					c.zliblevel = wiremessage.DefaultZlibLevel
+					if c.config.zlibLevel != nil {
+						c.zliblevel = *c.config.zlibLevel
+					}
+				case "zstd":
+					c.compressor = wiremessage.CompressorZstd
+					c.zstdLevel = wiremessage.DefaultZstdLevel
+					if c.config.zstdLevel != nil {
+						c.zstdLevel = *c.config.zstdLevel
+					}
+				}
+				break clientMethodLoop
+			}
+		}
+	}
+	return nil
+}
+
+func (c *connection) wait() {
+	if c.connectDone != nil {
+		<-c.connectDone
+	}
+}
+
+func (c *connection) closeConnectContext() {
+	<-c.connectContextMade
+	var cancelFn context.CancelFunc
+
+	c.connectContextMutex.Lock()
+	cancelFn = c.cancelConnectContext
+	c.cancelConnectContext = nil
+	c.connectContextMutex.Unlock()
+
+	if cancelFn != nil {
+		cancelFn()
+	}
+}
+
+func (c *connection) cancellationListenerCallback() {
+	_ = c.close()
+}
+
+func transformNetworkError(ctx context.Context, originalError error, contextDeadlineUsed bool) error {
+	if originalError == nil {
+		return nil
+	}
+
+	// If there was an error and the context was cancelled, we assume it happened due to the cancellation.
+	if errors.Is(ctx.Err(), context.Canceled) {
+		return ctx.Err()
+	}
+
+	// If there was a timeout error and the context deadline was used, we convert the error into
+	// context.DeadlineExceeded.
+	if !contextDeadlineUsed {
+		return originalError
+	}
+	if netErr, ok := originalError.(net.Error); ok && netErr.Timeout() {
+		return fmt.Errorf("%w: %s", context.DeadlineExceeded, originalError.Error())
+	}
+
+	return originalError
+}
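+
+// For illustration: a net.Error timeout that occurred while a caller-supplied
+// context deadline was in effect comes back as an error wrapping
+// context.DeadlineExceeded, so errors.Is(err, context.DeadlineExceeded) holds.
+// A cancellation is reported as ctx.Err() directly, and any other error passes
+// through unchanged.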
+
+func (c *connection) writeWireMessage(ctx context.Context, wm []byte) error {
+	var err error
+	if atomic.LoadInt64(&c.state) != connConnected {
+		return ConnectionError{
+			ConnectionID: c.id,
+			message:      "connection is closed",
+		}
+	}
+
+	var deadline time.Time
+	if c.writeTimeout != 0 {
+		deadline = time.Now().Add(c.writeTimeout)
+	}
+
+	var contextDeadlineUsed bool
+	if dl, ok := ctx.Deadline(); ok && (deadline.IsZero() || dl.Before(deadline)) {
+		contextDeadlineUsed = true
+		deadline = dl
+	}
+
+	if err := c.nc.SetWriteDeadline(deadline); err != nil {
+		return ConnectionError{ConnectionID: c.id, Wrapped: err, message: "failed to set write deadline"}
+	}
+
+	err = c.write(ctx, wm)
+	if err != nil {
+		c.close()
+		return ConnectionError{
+			ConnectionID: c.id,
+			Wrapped:      transformNetworkError(ctx, err, contextDeadlineUsed),
+			message:      "unable to write wire message to network",
+		}
+	}
+
+	return nil
+}
+
+func (c *connection) write(ctx context.Context, wm []byte) (err error) {
+	go c.cancellationListener.Listen(ctx, c.cancellationListenerCallback)
+	defer func() {
+		// There is a race condition between Write and StopListening. If the context is cancelled after c.nc.Write
+		// succeeds, the cancellation listener could fire and close the connection. In this case, the connection has
+		// been invalidated but the error is nil. To account for this, overwrite the error with context.Canceled if
+		// the listener's aborted flag was set.
+
+		if aborted := c.cancellationListener.StopListening(); aborted && err == nil {
+			err = context.Canceled
+		}
+	}()
+
+	_, err = c.nc.Write(wm)
+	return err
+}
+
+// readWireMessage reads a wire message from the connection and returns it as a new byte slice.
+func (c *connection) readWireMessage(ctx context.Context) ([]byte, error) {
+	if atomic.LoadInt64(&c.state) != connConnected {
+		return nil, ConnectionError{
+			ConnectionID: c.id,
+			message:      "connection is closed",
+		}
+	}
+
+	var deadline time.Time
+	if c.readTimeout != 0 {
+		deadline = time.Now().Add(c.readTimeout)
+	}
+
+	var contextDeadlineUsed bool
+	if dl, ok := ctx.Deadline(); ok && (deadline.IsZero() || dl.Before(deadline)) {
+		contextDeadlineUsed = true
+		deadline = dl
+	}
+
+	if err := c.nc.SetReadDeadline(deadline); err != nil {
+		return nil, ConnectionError{ConnectionID: c.id, Wrapped: err, message: "failed to set read deadline"}
+	}
+
+	dst, errMsg, err := c.read(ctx)
+	if err != nil {
+		if c.awaitRemainingBytes == nil {
+			// If the connection was not marked as awaiting response, use the
+			// pre-CSOT behavior and close the connection because we don't know
+			// if there are other bytes left to read.
+			c.close()
+		}
+		message := errMsg
+		if errors.Is(err, io.EOF) {
+			message = "socket was unexpectedly closed"
+		}
+		return nil, ConnectionError{
+			ConnectionID: c.id,
+			Wrapped:      transformNetworkError(ctx, err, contextDeadlineUsed),
+			message:      message,
+		}
+	}
+
+	return dst, nil
+}
+
+func (c *connection) parseWmSizeBytes(wmSizeBytes [4]byte) (int32, error) {
+	// read the length as an int32
+	size := int32(binary.LittleEndian.Uint32(wmSizeBytes[:]))
+
+	if size < 4 {
+		return 0, fmt.Errorf("malformed message length: %d", size)
+	}
+	// In the case of a hello response where MaxMessageSize has not yet been set, use the hard-coded
+	// defaultMaxMessageSize instead.
+	maxMessageSize := c.desc.MaxMessageSize
+	if maxMessageSize == 0 {
+		maxMessageSize = defaultMaxMessageSize
+	}
+	if uint32(size) > maxMessageSize {
+		return 0, errResponseTooLarge
+	}
+
+	return size, nil
+}
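+
+// For illustration: the first four bytes of a wire message are its total
+// length as a little-endian int32, so a header beginning 0x10 0x00 0x00 0x00
+// parses to size 16. Sizes below 4 are malformed (the length field counts
+// itself), and sizes above the negotiated maximum are rejected with
+// errResponseTooLarge.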
+
+func (c *connection) read(ctx context.Context) (bytesRead []byte, errMsg string, err error) {
+	go c.cancellationListener.Listen(ctx, c.cancellationListenerCallback)
+	defer func() {
+		// If the context is cancelled after we finish reading the server response, the cancellation listener could fire
+		// even though the socket reads succeed. To account for this, we overwrite err to be context.Canceled if the
+		// listener's aborted flag is set.
+
+		if aborted := c.cancellationListener.StopListening(); aborted && err == nil {
+			errMsg = "unable to read server response"
+			err = context.Canceled
+		}
+	}()
+
+	isCSOTTimeout := func(err error) bool {
+		// If the error was a timeout error and CSOT is enabled, instead of
+		// closing the connection mark it as awaiting response so the pool
+		// can read the response before making it available to other
+		// operations.
+		nerr := net.Error(nil)
+		return errors.As(err, &nerr) && nerr.Timeout() && csot.IsTimeoutContext(ctx)
+	}
+
+	// We use an array here because it only costs 4 bytes on the stack and means we'll only need to
+	// reslice dst once instead of twice.
+	var sizeBuf [4]byte
+
+	// We do a ReadFull into an array here instead of doing an opportunistic ReadAtLeast into dst
+	// because there might be more than one wire message waiting to be read, for example when
+	// reading messages from an exhaust cursor.
+	n, err := io.ReadFull(c.nc, sizeBuf[:])
+	if err != nil {
+		if l := int32(n); l == 0 && isCSOTTimeout(err) {
+			c.awaitRemainingBytes = &l
+		}
+		return nil, "incomplete read of message header", err
+	}
+	size, err := c.parseWmSizeBytes(sizeBuf)
+	if err != nil {
+		return nil, err.Error(), err
+	}
+
+	dst := make([]byte, size)
+	copy(dst, sizeBuf[:])
+
+	n, err = io.ReadFull(c.nc, dst[4:])
+	if err != nil {
+		remainingBytes := size - 4 - int32(n)
+		if remainingBytes > 0 && isCSOTTimeout(err) {
+			c.awaitRemainingBytes = &remainingBytes
+		}
+		return dst, "incomplete read of full message", err
+	}
+
+	return dst, "", nil
+}
+
+func (c *connection) close() error {
+	// Overwrite the connection state as the first step so only the first close call will execute.
+	if !atomic.CompareAndSwapInt64(&c.state, connConnected, connDisconnected) {
+		return nil
+	}
+
+	var err error
+	if c.nc != nil {
+		err = c.nc.Close()
+	}
+
+	return err
+}
+
+// closed returns true if the connection has been closed by the driver.
+func (c *connection) closed() bool {
+	return atomic.LoadInt64(&c.state) == connDisconnected
+}
+
+// isAlive returns true if the connection is alive and ready to be used for an
+// operation.
+//
+// Note that the liveness check can be slow (at least 1ms), so isAlive only
+// checks the liveness of the connection if it's been idle for at least 10
+// seconds. For frequently in-use connections, a network error during an
+// operation will be the first indication of a dead connection.
+func (c *connection) isAlive() bool {
+	if c.nc == nil {
+		return false
+	}
+
+	// If the connection has been idle for less than 10 seconds, skip the
+	// liveness check.
+	//
+	// The 10-seconds idle bypass is based on the liveness check implementation
+	// in the Python Driver. That implementation uses 1 second as the idle
+	// threshold, but we chose to be more conservative in the Go Driver because
+	// this is new behavior with unknown side-effects. See
+	// https://github.com/mongodb/mongo-python-driver/blob/e6b95f65953e01e435004af069a6976473eaf841/pymongo/synchronous/pool.py#L983-L985
+	idleStart, ok := c.idleStart.Load().(time.Time)
+	if !ok || idleStart.Add(10*time.Second).After(time.Now()) {
+		return true
+	}
+
+	// Set a 1ms read deadline and attempt to read 1 byte from the connection.
+	// Expect it to block for 1ms then return a deadline exceeded error. If it
+	// returns any other error, the connection is not usable, so return false.
+	// If it doesn't return an error and actually reads data, the connection is
+	// also not usable, so return false.
+	//
+	// Note that we don't need to un-set the read deadline because the "read"
+	// and "write" methods always reset the deadlines.
+	err := c.nc.SetReadDeadline(time.Now().Add(1 * time.Millisecond))
+	if err != nil {
+		return false
+	}
+	var b [1]byte
+	_, err = c.nc.Read(b[:])
+	return errors.Is(err, os.ErrDeadlineExceeded)
+}
+
+func (c *connection) idleTimeoutExpired() bool {
+	if c.idleTimeout == 0 {
+		return false
+	}
+
+	idleStart, ok := c.idleStart.Load().(time.Time)
+	return ok && idleStart.Add(c.idleTimeout).Before(time.Now())
+}
+
+func (c *connection) bumpIdleStart() {
+	if c.idleTimeout > 0 {
+		c.idleStart.Store(time.Now())
+	}
+}
+
+func (c *connection) setCanStream(canStream bool) {
+	c.canStream = canStream
+}
+
+func (c *connection) setStreaming(streaming bool) {
+	c.currentlyStreaming = streaming
+}
+
+func (c *connection) getCurrentlyStreaming() bool {
+	return c.currentlyStreaming
+}
+
+func (c *connection) setSocketTimeout(timeout time.Duration) {
+	c.readTimeout = timeout
+	c.writeTimeout = timeout
+}
+
+// DriverConnectionID returns the driver connection ID.
+// TODO(GODRIVER-2824): change return type to int64.
+func (c *connection) DriverConnectionID() uint64 {
+	return c.driverConnectionID
+}
+
+func (c *connection) ID() string {
+	return c.id
+}
+
+func (c *connection) ServerConnectionID() *int64 {
+	return c.serverConnectionID
+}
+
+func (c *connection) OIDCTokenGenID() uint64 {
+	return c.oidcTokenGenID
+}
+
+func (c *connection) SetOIDCTokenGenID(genID uint64) {
+	c.oidcTokenGenID = genID
+}
+
+// initConnection is an adapter used during connection initialization. It has the minimum
+// functionality necessary to implement the driver.Connection interface, which is required to pass a
+// *connection to a Handshaker.
+type initConnection struct{ *connection }
+
+var _ driver.Connection = initConnection{}
+var _ driver.StreamerConnection = initConnection{}
+
+func (c initConnection) Description() description.Server {
+	if c.connection == nil {
+		return description.Server{}
+	}
+	return c.connection.desc
+}
+func (c initConnection) Close() error             { return nil }
+func (c initConnection) ID() string               { return c.id }
+func (c initConnection) Address() address.Address { return c.addr }
+func (c initConnection) Stale() bool              { return false }
+func (c initConnection) LocalAddress() address.Address {
+	if c.connection == nil || c.nc == nil {
+		return address.Address("0.0.0.0")
+	}
+	return address.Address(c.nc.LocalAddr().String())
+}
+func (c initConnection) WriteWireMessage(ctx context.Context, wm []byte) error {
+	return c.writeWireMessage(ctx, wm)
+}
+func (c initConnection) ReadWireMessage(ctx context.Context) ([]byte, error) {
+	return c.readWireMessage(ctx)
+}
+func (c initConnection) SetStreaming(streaming bool) {
+	c.setStreaming(streaming)
+}
+func (c initConnection) CurrentlyStreaming() bool {
+	return c.getCurrentlyStreaming()
+}
+func (c initConnection) SupportsStreaming() bool {
+	return c.canStream
+}
+
+// Connection implements the driver.Connection interface to allow reading and writing wire
+// messages and the driver.Expirable interface to allow expiring. It wraps an underlying
+// topology.connection to make it more goroutine-safe and nil-safe.
+type Connection struct {
+	connection    *connection
+	refCount      int
+	cleanupPoolFn func()
+
+	oidcTokenGenID uint64
+
+	// cleanupServerFn resets the server state when a connection is returned to the connection pool
+	// via Close() or expired via Expire().
+	cleanupServerFn func()
+
+	mu sync.RWMutex
+}
+
+var _ driver.Connection = (*Connection)(nil)
+var _ driver.Expirable = (*Connection)(nil)
+var _ driver.PinnedConnection = (*Connection)(nil)
+
+// WriteWireMessage handles writing a wire message to the underlying connection.
+func (c *Connection) WriteWireMessage(ctx context.Context, wm []byte) error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.connection == nil {
+		return ErrConnectionClosed
+	}
+	return c.connection.writeWireMessage(ctx, wm)
+}
+
+// ReadWireMessage handles reading a wire message from the underlying connection and returns it as
+// a new byte slice.
+func (c *Connection) ReadWireMessage(ctx context.Context) ([]byte, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.connection == nil {
+		return nil, ErrConnectionClosed
+	}
+	return c.connection.readWireMessage(ctx)
+}
+
+// CompressWireMessage handles compressing the provided wire message using the underlying
+// connection's compressor. The compressed message is appended to dst. If there is no compressor
+// set on the underlying connection, then no compression will be performed.
+func (c *Connection) CompressWireMessage(src, dst []byte) ([]byte, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.connection == nil {
+		return dst, ErrConnectionClosed
+	}
+	if c.connection.compressor == wiremessage.CompressorNoOp {
+		return append(dst, src...), nil
+	}
+	_, reqid, respto, origcode, rem, ok := wiremessage.ReadHeader(src)
+	if !ok {
+		return dst, errors.New("wiremessage is too short to compress, less than 16 bytes")
+	}
+	idx, dst := wiremessage.AppendHeaderStart(dst, reqid, respto, wiremessage.OpCompressed)
+	dst = wiremessage.AppendCompressedOriginalOpCode(dst, origcode)
+	dst = wiremessage.AppendCompressedUncompressedSize(dst, int32(len(rem)))
+	dst = wiremessage.AppendCompressedCompressorID(dst, c.connection.compressor)
+	opts := driver.CompressionOpts{
+		Compressor: c.connection.compressor,
+		ZlibLevel:  c.connection.zliblevel,
+		ZstdLevel:  c.connection.zstdLevel,
+	}
+	compressed, err := driver.CompressPayload(rem, opts)
+	if err != nil {
+		return nil, err
+	}
+	dst = wiremessage.AppendCompressedCompressedMessage(dst, compressed)
+	return bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))), nil
+}
+
+// Description returns the server description of the server this connection is connected to.
+func (c *Connection) Description() description.Server {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.connection == nil {
+		return description.Server{}
+	}
+	return c.connection.desc
+}
+
+// Close returns this connection to the connection pool. This method may not close the underlying
+// socket.
+func (c *Connection) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.connection == nil || c.refCount > 0 {
+		return nil
+	}
+
+	return c.cleanupReferences()
+}
+
+// Expire closes this connection and will close the underlying socket.
+func (c *Connection) Expire() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.connection == nil {
+		return nil
+	}
+
+	_ = c.connection.close()
+	return c.cleanupReferences()
+}
+
+func (c *Connection) cleanupReferences() error {
+	err := c.connection.pool.checkIn(c.connection)
+	if c.cleanupPoolFn != nil {
+		c.cleanupPoolFn()
+		c.cleanupPoolFn = nil
+	}
+	if c.cleanupServerFn != nil {
+		c.cleanupServerFn()
+		c.cleanupServerFn = nil
+	}
+	c.connection = nil
+	return err
+}
+
+// Alive returns whether the connection is still alive.
+func (c *Connection) Alive() bool {
+	return c.connection != nil
+}
+
+// ID returns the ID of this connection.
+func (c *Connection) ID() string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.connection == nil {
+		return "<closed>"
+	}
+	return c.connection.id
+}
+
+// ServerConnectionID returns the server connection ID of this connection.
+func (c *Connection) ServerConnectionID() *int64 {
+	if c.connection == nil {
+		return nil
+	}
+	return c.connection.serverConnectionID
+}
+
+// Stale returns whether the connection is stale.
+func (c *Connection) Stale() bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.connection.pool.stale(c.connection)
+}
+
+// Address returns the address of this connection.
+func (c *Connection) Address() address.Address {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.connection == nil {
+		return address.Address("0.0.0.0")
+	}
+	return c.connection.addr
+}
+
+// LocalAddress returns the local address of the connection.
+func (c *Connection) LocalAddress() address.Address {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.connection == nil || c.connection.nc == nil {
+		return address.Address("0.0.0.0")
+	}
+	return address.Address(c.connection.nc.LocalAddr().String())
+}
+
+// PinToCursor updates this connection to reflect that it is pinned to a cursor.
+func (c *Connection) PinToCursor() error {
+	return c.pin("cursor", c.connection.pool.pinConnectionToCursor, c.connection.pool.unpinConnectionFromCursor)
+}
+
+// PinToTransaction updates this connection to reflect that it is pinned to a transaction.
+func (c *Connection) PinToTransaction() error {
+	return c.pin("transaction", c.connection.pool.pinConnectionToTransaction, c.connection.pool.unpinConnectionFromTransaction)
+}
+
+func (c *Connection) pin(reason string, updatePoolFn, cleanupPoolFn func()) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.connection == nil {
+		return fmt.Errorf("attempted to pin a connection for a %s, but the connection has already been returned to the pool", reason)
+	}
+
+	// Only use the provided callbacks for the first reference to avoid double-counting pinned connection statistics
+	// in the pool.
+	if c.refCount == 0 {
+		updatePoolFn()
+		c.cleanupPoolFn = cleanupPoolFn
+	}
+	c.refCount++
+	return nil
+}
+
+// UnpinFromCursor updates this connection to reflect that it is no longer pinned to a cursor.
+func (c *Connection) UnpinFromCursor() error {
+	return c.unpin("cursor")
+}
+
+// UnpinFromTransaction updates this connection to reflect that it is no longer pinned to a transaction.
+func (c *Connection) UnpinFromTransaction() error {
+	return c.unpin("transaction")
+}
+
+func (c *Connection) unpin(reason string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.connection == nil {
+		// We don't error here because the resource could have been forcefully closed via Expire.
+		return nil
+	}
+	if c.refCount == 0 {
+		return fmt.Errorf("attempted to unpin a connection from a %s, but the connection is not pinned by any resources", reason)
+	}
+
+	c.refCount--
+	return nil
+}
+
+// DriverConnectionID returns the driver connection ID.
+// TODO(GODRIVER-2824): change return type to int64.
+func (c *Connection) DriverConnectionID() uint64 {
+	return c.connection.DriverConnectionID()
+}
+
+// OIDCTokenGenID returns the OIDC token generation ID.
+func (c *Connection) OIDCTokenGenID() uint64 {
+	return c.oidcTokenGenID
+}
+
+// SetOIDCTokenGenID sets the OIDC token generation ID.
+func (c *Connection) SetOIDCTokenGenID(genID uint64) {
+	c.oidcTokenGenID = genID
+}
+
+// TODO: Naming?
+
+// cancellListener listens for context cancellation and notifies listeners via a
+// callback function.
+type cancellListener struct {
+	aborted bool
+	done    chan struct{}
+}
+
+// newCancellListener constructs a cancellListener.
+func newCancellListener() *cancellListener {
+	return &cancellListener{
+		done: make(chan struct{}),
+	}
+}
+
+// Listen blocks until the provided context is cancelled or listening is aborted
+// via the StopListening function. If this detects that the context has been
+// cancelled (i.e. errors.Is(ctx.Err(), context.Canceled)), the provided callback is
+// called to abort in-progress work. Even if the context expires, this function
+// will block until StopListening is called.
+func (c *cancellListener) Listen(ctx context.Context, abortFn func()) {
+	c.aborted = false
+
+	select {
+	case <-ctx.Done():
+		if errors.Is(ctx.Err(), context.Canceled) {
+			c.aborted = true
+			abortFn()
+		}
+
+		<-c.done
+	case <-c.done:
+	}
+}
+
+// StopListening stops the in-progress Listen call. This blocks if there is no
+// in-progress Listen call. This function will return true if the provided abort
+// callback was called when listening for cancellation on the previous context.
+func (c *cancellListener) StopListening() bool {
+	c.done <- struct{}{}
+	return c.aborted
+}
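+
+// For illustration, the intended pairing (mirroring the connection read and
+// write methods above, where l is a *cancellListener and err is the
+// surrounding operation's error) is:
+//
+//	go l.Listen(ctx, abortFn)
+//	// ... perform the blocking network I/O ...
+//	if aborted := l.StopListening(); aborted && err == nil {
+//		err = context.Canceled
+//	}
+//
+// Every Listen call must be matched by exactly one StopListening call;
+// otherwise one side blocks forever on the unbuffered done channel.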
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy.go
new file mode 100644
index 0000000000000000000000000000000000000000..e602faf2078d85fbdcafe985bf52c71ac277750e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy.go
@@ -0,0 +1,7 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..43e6f3f50707e2dd15fc1634a6bf15251311d816
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go
@@ -0,0 +1,216 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/httputil"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/ocsp"
+)
+
+// Dialer is used to make network connections.
+type Dialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// DialerFunc is a type implemented by functions that can be used as a Dialer.
+type DialerFunc func(ctx context.Context, network, address string) (net.Conn, error)
+
+// DialContext implements the Dialer interface.
+func (df DialerFunc) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return df(ctx, network, address)
+}
+
+// DefaultDialer is the Dialer implementation that is used by this package. Changing this
+// will also change the Dialer used for this package. This should only be changed when all
+// of the connections being made need to use a different Dialer. Most of the time, using a
+// WithDialer option is more appropriate than changing this variable.
+var DefaultDialer Dialer = &net.Dialer{}
+
+// Handshaker is the interface implemented by types that can perform a MongoDB
+// handshake over a provided driver.Connection. This is used during connection
+// initialization. Implementations must be goroutine safe.
+type Handshaker = driver.Handshaker
+
+// generationNumberFn is a callback type used by a connection to fetch its generation number given its service ID.
+type generationNumberFn func(serviceID *primitive.ObjectID) uint64
+
+type connectionConfig struct {
+	connectTimeout           time.Duration
+	dialer                   Dialer
+	handshaker               Handshaker
+	idleTimeout              time.Duration
+	cmdMonitor               *event.CommandMonitor
+	readTimeout              time.Duration
+	writeTimeout             time.Duration
+	tlsConfig                *tls.Config
+	httpClient               *http.Client
+	compressors              []string
+	zlibLevel                *int
+	zstdLevel                *int
+	ocspCache                ocsp.Cache
+	disableOCSPEndpointCheck bool
+	tlsConnectionSource      tlsConnectionSource
+	loadBalanced             bool
+	getGenerationFn          generationNumberFn
+}
+
+func newConnectionConfig(opts ...ConnectionOption) *connectionConfig {
+	cfg := &connectionConfig{
+		connectTimeout:      30 * time.Second,
+		dialer:              nil,
+		tlsConnectionSource: defaultTLSConnectionSource,
+		httpClient:          httputil.DefaultHTTPClient,
+	}
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		opt(cfg)
+	}
+
+	if cfg.dialer == nil {
+		// Use a zero value of net.Dialer when nothing is specified, so the Go driver applies default behaviors
+		// such as Timeout, KeepAlive, DNS resolving, etc. See https://golang.org/pkg/net/#Dialer for more information.
+		cfg.dialer = &net.Dialer{}
+	}
+
+	return cfg
+}
+
+// ConnectionOption is used to configure a connection.
+type ConnectionOption func(*connectionConfig)
+
+func withTLSConnectionSource(fn func(tlsConnectionSource) tlsConnectionSource) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.tlsConnectionSource = fn(c.tlsConnectionSource)
+	}
+}
+
+// WithCompressors sets the compressors that can be used for communication.
+func WithCompressors(fn func([]string) []string) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.compressors = fn(c.compressors)
+	}
+}
+
+// WithConnectTimeout configures the maximum amount of time a dial will wait for a
+// Connect to complete. The default is 30 seconds.
+func WithConnectTimeout(fn func(time.Duration) time.Duration) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.connectTimeout = fn(c.connectTimeout)
+	}
+}
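+
+// For illustration: each option receives a transform of the current value, so
+// a fixed setting simply ignores its argument (the durations here are
+// arbitrary examples):
+//
+//	cfg := newConnectionConfig(
+//		WithConnectTimeout(func(time.Duration) time.Duration { return 10 * time.Second }),
+//		WithIdleTimeout(func(time.Duration) time.Duration { return 5 * time.Minute }),
+//	)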
+
+// WithDialer configures the Dialer to use when making a new connection to MongoDB.
+func WithDialer(fn func(Dialer) Dialer) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.dialer = fn(c.dialer)
+	}
+}
+
+// WithHandshaker configures the Handshaker that will be used to initialize newly
+// dialed connections.
+func WithHandshaker(fn func(Handshaker) Handshaker) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.handshaker = fn(c.handshaker)
+	}
+}
+
+// WithIdleTimeout configures the maximum idle time to allow for a connection.
+func WithIdleTimeout(fn func(time.Duration) time.Duration) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.idleTimeout = fn(c.idleTimeout)
+	}
+}
+
+// WithReadTimeout configures the maximum read time for a connection.
+func WithReadTimeout(fn func(time.Duration) time.Duration) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.readTimeout = fn(c.readTimeout)
+	}
+}
+
+// WithWriteTimeout configures the maximum write time for a connection.
+func WithWriteTimeout(fn func(time.Duration) time.Duration) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.writeTimeout = fn(c.writeTimeout)
+	}
+}
+
+// WithTLSConfig configures the TLS options for a connection.
+func WithTLSConfig(fn func(*tls.Config) *tls.Config) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.tlsConfig = fn(c.tlsConfig)
+	}
+}
+
+// WithHTTPClient configures the HTTP client for a connection.
+func WithHTTPClient(fn func(*http.Client) *http.Client) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.httpClient = fn(c.httpClient)
+	}
+}
+
+// WithMonitor configures a CommandMonitor for command monitoring.
+func WithMonitor(fn func(*event.CommandMonitor) *event.CommandMonitor) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.cmdMonitor = fn(c.cmdMonitor)
+	}
+}
+
+// WithZlibLevel sets the zLib compression level.
+func WithZlibLevel(fn func(*int) *int) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.zlibLevel = fn(c.zlibLevel)
+	}
+}
+
+// WithZstdLevel sets the zstd compression level.
+func WithZstdLevel(fn func(*int) *int) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.zstdLevel = fn(c.zstdLevel)
+	}
+}
+
+// WithOCSPCache specifies a cache to use for OCSP verification.
+func WithOCSPCache(fn func(ocsp.Cache) ocsp.Cache) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.ocspCache = fn(c.ocspCache)
+	}
+}
+
+// WithDisableOCSPEndpointCheck specifies whether or not the driver should perform non-stapled OCSP verification. If set
+// to true, the driver will only check stapled responses and will continue the connection without reaching out to
+// OCSP responders.
+func WithDisableOCSPEndpointCheck(fn func(bool) bool) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.disableOCSPEndpointCheck = fn(c.disableOCSPEndpointCheck)
+	}
+}
+
+// WithConnectionLoadBalanced specifies whether or not the connection is to a server behind a load balancer.
+func WithConnectionLoadBalanced(fn func(bool) bool) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.loadBalanced = fn(c.loadBalanced)
+	}
+}
+
+func withGenerationNumberFn(fn func(generationNumberFn) generationNumberFn) ConnectionOption {
+	return func(c *connectionConfig) {
+		c.getGenerationFn = fn(c.getGenerationFn)
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/diff.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/diff.go
new file mode 100644
index 0000000000000000000000000000000000000000..b9bf2c14c747c7ef6719901ac0013d37fab9e47e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/diff.go
@@ -0,0 +1,73 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import "go.mongodb.org/mongo-driver/mongo/description"
+
+// hostlistDiff is the difference between a topology and a host list.
+type hostlistDiff struct {
+	Added   []string
+	Removed []string
+}
+
+// diffHostList compares the topology description and host list and returns the difference.
+func diffHostList(t description.Topology, hostlist []string) hostlistDiff {
+	var diff hostlistDiff
+
+	oldServers := make(map[string]bool)
+	for _, s := range t.Servers {
+		oldServers[s.Addr.String()] = true
+	}
+
+	for _, addr := range hostlist {
+		if oldServers[addr] {
+			delete(oldServers, addr)
+		} else {
+			diff.Added = append(diff.Added, addr)
+		}
+	}
+
+	for addr := range oldServers {
+		diff.Removed = append(diff.Removed, addr)
+	}
+
+	return diff
+}
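+
+// For illustration: for a topology containing servers "a:27017" and "b:27017"
+// and a host list ["b:27017", "c:27017"], the result is Added=["c:27017"] and
+// Removed=["a:27017"]; addresses present on both sides drop out entirely.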
+
+// topologyDiff is the difference between two different topology descriptions.
+type topologyDiff struct {
+	Added   []description.Server
+	Removed []description.Server
+}
+
+// diffTopology compares the two topology descriptions and returns the difference.
+func diffTopology(old, new description.Topology) topologyDiff {
+	var diff topologyDiff
+
+	oldServers := make(map[string]bool)
+	for _, s := range old.Servers {
+		oldServers[s.Addr.String()] = true
+	}
+
+	for _, s := range new.Servers {
+		addr := s.Addr.String()
+		if oldServers[addr] {
+			delete(oldServers, addr)
+		} else {
+			diff.Added = append(diff.Added, s)
+		}
+	}
+
+	for _, s := range old.Servers {
+		addr := s.Addr.String()
+		if oldServers[addr] {
+			diff.Removed = append(diff.Removed, s)
+		}
+	}
+
+	return diff
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..a6630aae767359602df0732f2b0e2592b3a1aeeb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go
@@ -0,0 +1,124 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo/description"
+)
+
+// ConnectionError represents a connection error.
+type ConnectionError struct {
+	ConnectionID string
+	Wrapped      error
+
+	// init will be set to true if this error occurred during connection initialization or
+	// during a connection handshake.
+	init    bool
+	message string
+}
+
+// Error implements the error interface.
+func (e ConnectionError) Error() string {
+	message := e.message
+	if e.init {
+		fullMsg := "error occurred during connection handshake"
+		if message != "" {
+			fullMsg = fmt.Sprintf("%s: %s", fullMsg, message)
+		}
+		message = fullMsg
+	}
+	if e.Wrapped != nil && message != "" {
+		return fmt.Sprintf("connection(%s) %s: %s", e.ConnectionID, message, e.Wrapped.Error())
+	}
+	if e.Wrapped != nil {
+		return fmt.Sprintf("connection(%s) %s", e.ConnectionID, e.Wrapped.Error())
+	}
+	return fmt.Sprintf("connection(%s) %s", e.ConnectionID, message)
+}
+
+// Unwrap returns the underlying error.
+func (e ConnectionError) Unwrap() error {
+	return e.Wrapped
+}
+
+// ServerSelectionError represents a Server Selection error.
+type ServerSelectionError struct {
+	Desc    description.Topology
+	Wrapped error
+}
+
+// Error implements the error interface.
+func (e ServerSelectionError) Error() string {
+	if e.Wrapped != nil {
+		return fmt.Sprintf("server selection error: %s, current topology: { %s }", e.Wrapped.Error(), e.Desc.String())
+	}
+	return fmt.Sprintf("server selection error: current topology: { %s }", e.Desc.String())
+}
+
+// Unwrap returns the underlying error.
+func (e ServerSelectionError) Unwrap() error {
+	return e.Wrapped
+}
+
+// WaitQueueTimeoutError represents a timeout when requesting a connection from the pool
+type WaitQueueTimeoutError struct {
+	Wrapped              error
+	pinnedConnections    *pinnedConnections
+	maxPoolSize          uint64
+	totalConnections     int
+	availableConnections int
+	waitDuration         time.Duration
+}
+
+type pinnedConnections struct {
+	cursorConnections      uint64
+	transactionConnections uint64
+}
+
+// Error implements the error interface.
+func (w WaitQueueTimeoutError) Error() string {
+	errorMsg := "timed out while checking out a connection from connection pool"
+	switch {
+	case w.Wrapped == nil:
+	case errors.Is(w.Wrapped, context.Canceled):
+		errorMsg = fmt.Sprintf(
+			"%s: %s",
+			"canceled while checking out a connection from connection pool",
+			w.Wrapped.Error(),
+		)
+	default:
+		errorMsg = fmt.Sprintf(
+			"%s: %s",
+			errorMsg,
+			w.Wrapped.Error(),
+		)
+	}
+
+	msg := fmt.Sprintf("%s; total connections: %d, maxPoolSize: %d, ", errorMsg, w.totalConnections, w.maxPoolSize)
+	if pinnedConnections := w.pinnedConnections; pinnedConnections != nil {
+		openConnectionCount := uint64(w.totalConnections) -
+			pinnedConnections.cursorConnections -
+			pinnedConnections.transactionConnections
+		msg += fmt.Sprintf("connections in use by cursors: %d, connections in use by transactions: %d, connections in use by other operations: %d, ",
+			pinnedConnections.cursorConnections,
+			pinnedConnections.transactionConnections,
+			openConnectionCount,
+		)
+	}
+	msg += fmt.Sprintf("idle connections: %d, wait duration: %s", w.availableConnections, w.waitDuration.String())
+	return msg
+}
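+
+// With every field populated and no pinned connections, the rendered message
+// has the following shape (counts and duration illustrative):
+//
+//	timed out while checking out a connection from connection pool; total
+//	connections: 5, maxPoolSize: 5, idle connections: 0, wait duration: 10s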
+
+// Unwrap returns the underlying error.
+func (w WaitQueueTimeoutError) Unwrap() error {
+	return w.Wrapped
+}
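+
+// Because Unwrap exposes the wrapped context error, callers can distinguish a
+// caller-side cancellation from an expired deadline. An illustrative check:
+//
+//	if errors.Is(err, context.DeadlineExceeded) {
+//		// the wait queue deadline elapsed before a connection became available
+//	}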
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d097b65c772fd1e8883d257c819f699d6bb968c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go
@@ -0,0 +1,487 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"bytes"
+	"fmt"
+	"sync/atomic"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/internal/ptrutil"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+)
+
+var (
+	// MinSupportedMongoDBVersion is the version string for the lowest MongoDB version supported by the driver.
+	MinSupportedMongoDBVersion = "3.6"
+
+	// SupportedWireVersions is the range of wire versions supported by the driver.
+	SupportedWireVersions = description.NewVersionRange(6, 25)
+)
+
+type fsm struct {
+	description.Topology
+	maxElectionID    primitive.ObjectID
+	maxSetVersion    uint32
+	compatible       atomic.Value
+	compatibilityErr error
+}
+
+func newFSM() *fsm {
+	f := fsm{}
+	f.compatible.Store(true)
+	return &f
+}
+
+// selectFSMSessionTimeout selects the timeout to return for the topology's
+// finite state machine. If the logicalSessionTimeoutMinutes on the FSM exists
+// and the server is data-bearing, then we determine this value by returning
+//
+//	min{server timeout, FSM timeout}
+//
+// where a "nil" value is considered less than 0.
+//
+// Otherwise, if the FSM's logicalSessionTimeoutMinutes exists, then this
+// function returns the FSM timeout.
+//
+// In the case where the FSM timeout does not exist, we check all servers to
+// see if any still do not have a timeout. This function chooses the lowest of
+// the existing timeouts.
+func selectFSMSessionTimeout(f *fsm, s description.Server) *int64 {
+	oldMinutes := f.SessionTimeoutMinutesPtr
+	comp := ptrutil.CompareInt64(oldMinutes, s.SessionTimeoutMinutesPtr)
+
+	// If the server is data-bearing and the current timeout exists and is
+	// either:
+	//
+	// 1. larger than the server timeout, or
+	// 2. non-nil while the server timeout is nil
+	//
+	// then return the server timeout.
+	if s.DataBearing() && (comp == 1 || comp == 2) {
+		return s.SessionTimeoutMinutesPtr
+	}
+
+	// If the current timeout exists and the server is not data-bearing OR
+	// min{server timeout, current timeout} = current timeout, then return
+	// the current timeout.
+	if oldMinutes != nil {
+		return oldMinutes
+	}
+
+	timeout := s.SessionTimeoutMinutesPtr
+	for _, server := range f.Servers {
+		// If the server is not data-bearing, then we do not consider
+		// its timeout, whether set or not.
+		if !server.DataBearing() {
+			continue
+		}
+
+		srvTimeout := server.SessionTimeoutMinutesPtr
+		comp := ptrutil.CompareInt64(timeout, srvTimeout)
+
+		if comp <= 0 { // timeout <= srvTimeout
+			continue
+		}
+
+		timeout = server.SessionTimeoutMinutesPtr
+	}
+
+	return timeout
+}
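+
+// For illustration (values hypothetical): with an FSM timeout of 30 and a
+// data-bearing server reporting 20, the selected timeout is 20; if that same
+// server reports no timeout at all, the selected timeout is nil, since nil is
+// treated as less than any set value.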
+
+// apply takes a new server description and modifies the FSM's topology description based on it. It returns the
+// updated topology description as well as a server description. The returned server description is either the same
+// one that was passed in, or a new one in the case that it had to be changed.
+//
+// apply should operate on immutable descriptions so we don't have to lock for the entire time we're applying the
+// server description.
+func (f *fsm) apply(s description.Server) (description.Topology, description.Server) {
+	newServers := make([]description.Server, len(f.Servers))
+	copy(newServers, f.Servers)
+
+	// Reset the logicalSessionTimeoutMinutes to the minimum of the FSM
+	// and the description.server/f.servers.
+	serverTimeoutMinutes := selectFSMSessionTimeout(f, s)
+
+	f.Topology = description.Topology{
+		Kind:    f.Kind,
+		Servers: newServers,
+		SetName: f.SetName,
+	}
+
+	f.Topology.SessionTimeoutMinutesPtr = serverTimeoutMinutes
+
+	if serverTimeoutMinutes != nil {
+		f.SessionTimeoutMinutes = uint32(*serverTimeoutMinutes)
+	}
+
+	if _, ok := f.findServer(s.Addr); !ok {
+		return f.Topology, s
+	}
+
+	updatedDesc := s
+	switch f.Kind {
+	case description.Unknown:
+		updatedDesc = f.applyToUnknown(s)
+	case description.Sharded:
+		updatedDesc = f.applyToSharded(s)
+	case description.ReplicaSetNoPrimary:
+		updatedDesc = f.applyToReplicaSetNoPrimary(s)
+	case description.ReplicaSetWithPrimary:
+		updatedDesc = f.applyToReplicaSetWithPrimary(s)
+	case description.Single:
+		updatedDesc = f.applyToSingle(s)
+	}
+
+	for _, server := range f.Servers {
+		if server.WireVersion != nil {
+			if server.WireVersion.Max < SupportedWireVersions.Min {
+				f.compatible.Store(false)
+				f.compatibilityErr = fmt.Errorf(
+					"server at %s reports wire version %d, but this version of the Go driver requires "+
+						"at least %d (MongoDB %s)",
+					server.Addr.String(),
+					server.WireVersion.Max,
+					SupportedWireVersions.Min,
+					MinSupportedMongoDBVersion,
+				)
+				f.Topology.CompatibilityErr = f.compatibilityErr
+				return f.Topology, s
+			}
+
+			if server.WireVersion.Min > SupportedWireVersions.Max {
+				f.compatible.Store(false)
+				f.compatibilityErr = fmt.Errorf(
+					"server at %s requires wire version %d, but this version of the Go driver only supports up to %d",
+					server.Addr.String(),
+					server.WireVersion.Min,
+					SupportedWireVersions.Max,
+				)
+				f.Topology.CompatibilityErr = f.compatibilityErr
+				return f.Topology, s
+			}
+		}
+	}
+
+	f.compatible.Store(true)
+	f.compatibilityErr = nil
+
+	return f.Topology, updatedDesc
+}
+
+func (f *fsm) applyToReplicaSetNoPrimary(s description.Server) description.Server {
+	switch s.Kind {
+	case description.Standalone, description.Mongos:
+		f.removeServerByAddr(s.Addr)
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.updateRSWithoutPrimary(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+	}
+
+	return s
+}
+
+func (f *fsm) applyToReplicaSetWithPrimary(s description.Server) description.Server {
+	switch s.Kind {
+	case description.Standalone, description.Mongos:
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.updateRSWithPrimaryFromMember(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+		f.checkIfHasPrimary()
+	}
+
+	return s
+}
+
+func (f *fsm) applyToSharded(s description.Server) description.Server {
+	switch s.Kind {
+	case description.Mongos, description.Unknown:
+		f.replaceServer(s)
+	case description.Standalone, description.RSPrimary, description.RSSecondary, description.RSArbiter, description.RSMember, description.RSGhost:
+		f.removeServerByAddr(s.Addr)
+	}
+
+	return s
+}
+
+func (f *fsm) applyToSingle(s description.Server) description.Server {
+	switch s.Kind {
+	case description.Unknown:
+		f.replaceServer(s)
+	case description.Standalone, description.Mongos:
+		if f.SetName != "" {
+			f.removeServerByAddr(s.Addr)
+			return s
+		}
+
+		f.replaceServer(s)
+	case description.RSPrimary, description.RSSecondary, description.RSArbiter, description.RSMember, description.RSGhost:
+		// A replica set name can be provided when creating a direct connection. In this case, if the set name returned
+		// by the hello response doesn't match up with the one provided during configuration, the server description
+		// is replaced with a default Unknown description.
+		//
+		// We create a new server description rather than doing s.Kind = description.Unknown because the other fields,
+		// such as RTT, need to be cleared for Unknown descriptions as well.
+		if f.SetName != "" && f.SetName != s.SetName {
+			s = description.Server{
+				Addr: s.Addr,
+				Kind: description.Unknown,
+			}
+		}
+
+		f.replaceServer(s)
+	}
+
+	return s
+}
+
+func (f *fsm) applyToUnknown(s description.Server) description.Server {
+	switch s.Kind {
+	case description.Mongos:
+		f.setKind(description.Sharded)
+		f.replaceServer(s)
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.setKind(description.ReplicaSetNoPrimary)
+		f.updateRSWithoutPrimary(s)
+	case description.Standalone:
+		f.updateUnknownWithStandalone(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+	}
+
+	return s
+}
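+
+// For example, a hello response of kind Mongos moves an Unknown topology to
+// Sharded, while an RSSecondary response moves it to ReplicaSetNoPrimary and
+// folds the member's host list into the topology.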
+
+func (f *fsm) checkIfHasPrimary() {
+	if _, ok := f.findPrimary(); ok {
+		f.setKind(description.ReplicaSetWithPrimary)
+	} else {
+		f.setKind(description.ReplicaSetNoPrimary)
+	}
+}
+
+// hasStalePrimary returns true if the topology has a primary that is "stale".
+func hasStalePrimary(fsm fsm, srv description.Server) bool {
+	// Compare the election ID values of the server and the topology lexicographically.
+	compRes := bytes.Compare(srv.ElectionID[:], fsm.maxElectionID[:])
+
+	if wireVersion := srv.WireVersion; wireVersion != nil && wireVersion.Max >= 17 {
+		// In the Post-6.0 case, a primary is considered "stale" if the server's election ID is greater than the
+		// topology's max election ID. In these versions, the primary is also considered "stale" if the server's
+		// election ID is less than or equal to the topology's election ID and the server's "setVersion" is less than the topology's
+		// max "setVersion".
+		return compRes == -1 || (compRes != 1 && srv.SetVersion < fsm.maxSetVersion)
+	}
+
+	// If the server's election ID is less than the topology's max election ID, the primary is considered
+	// "stale". Similarly, if the server's "setVersion" is less than the topology's max "setVersion", the
+	// primary is considered stale.
+	return compRes == -1 || fsm.maxSetVersion > srv.SetVersion
+}
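+
+// For example, against servers reporting maxWireVersion >= 17 (MongoDB 6.0+),
+// a primary whose election ID compares below the topology's maxElectionID is
+// stale regardless of its setVersion; on older servers the election ID and
+// setVersion comparisons apply independently.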
+
+// transferEVTuple will transfer the ("ElectionID", "SetVersion") tuple from the server description to the topology.
+// If the primary is stale, the tuple will not be transferred, the topology will update its "Kind" value, and this
+// routine will return "false".
+func transferEVTuple(srv description.Server, fsm *fsm) bool {
+	stalePrimary := hasStalePrimary(*fsm, srv)
+
+	if wireVersion := srv.WireVersion; wireVersion != nil && wireVersion.Max >= 17 {
+		if stalePrimary {
+			fsm.checkIfHasPrimary()
+			return false
+		}
+
+		fsm.maxElectionID = srv.ElectionID
+		fsm.maxSetVersion = srv.SetVersion
+
+		return true
+	}
+
+	if srv.SetVersion != 0 && !srv.ElectionID.IsZero() {
+		if stalePrimary {
+			fsm.replaceServer(description.Server{
+				Addr: srv.Addr,
+				LastError: fmt.Errorf(
+					"was a primary, but its set version or election id is stale"),
+			})
+
+			fsm.checkIfHasPrimary()
+
+			return false
+		}
+
+		fsm.maxElectionID = srv.ElectionID
+	}
+
+	if srv.SetVersion > fsm.maxSetVersion {
+		fsm.maxSetVersion = srv.SetVersion
+	}
+
+	return true
+}
+
+func (f *fsm) updateRSFromPrimary(srv description.Server) {
+	if f.SetName == "" {
+		f.SetName = srv.SetName
+	} else if f.SetName != srv.SetName {
+		f.removeServerByAddr(srv.Addr)
+		f.checkIfHasPrimary()
+
+		return
+	}
+
+	if ok := transferEVTuple(srv, f); !ok {
+		return
+	}
+
+	if j, ok := f.findPrimary(); ok {
+		f.setServer(j, description.Server{
+			Addr:      f.Servers[j].Addr,
+			LastError: fmt.Errorf("was a primary, but a new primary was discovered"),
+		})
+	}
+
+	f.replaceServer(srv)
+
+	for j := len(f.Servers) - 1; j >= 0; j-- {
+		found := false
+		for _, member := range srv.Members {
+			if member == f.Servers[j].Addr {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			f.removeServer(j)
+		}
+	}
+
+	for _, member := range srv.Members {
+		if _, ok := f.findServer(member); !ok {
+			f.addServer(member)
+		}
+	}
+
+	f.checkIfHasPrimary()
+}
+
+func (f *fsm) updateRSWithPrimaryFromMember(s description.Server) {
+	if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	if s.Addr != s.CanonicalAddr {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	f.replaceServer(s)
+
+	if _, ok := f.findPrimary(); !ok {
+		f.setKind(description.ReplicaSetNoPrimary)
+	}
+}
+
+func (f *fsm) updateRSWithoutPrimary(s description.Server) {
+	if f.SetName == "" {
+		f.SetName = s.SetName
+	} else if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	for _, member := range s.Members {
+		if _, ok := f.findServer(member); !ok {
+			f.addServer(member)
+		}
+	}
+
+	if s.Addr != s.CanonicalAddr {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	f.replaceServer(s)
+}
+
+func (f *fsm) updateUnknownWithStandalone(s description.Server) {
+	if len(f.Servers) > 1 {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	f.setKind(description.Single)
+	f.replaceServer(s)
+}
+
+func (f *fsm) addServer(addr address.Address) {
+	f.Servers = append(f.Servers, description.Server{
+		Addr: addr.Canonicalize(),
+	})
+}
+
+func (f *fsm) findPrimary() (int, bool) {
+	for i, s := range f.Servers {
+		if s.Kind == description.RSPrimary {
+			return i, true
+		}
+	}
+
+	return 0, false
+}
+
+func (f *fsm) findServer(addr address.Address) (int, bool) {
+	canon := addr.Canonicalize()
+	for i, s := range f.Servers {
+		if canon == s.Addr {
+			return i, true
+		}
+	}
+
+	return 0, false
+}
+
+func (f *fsm) removeServer(i int) {
+	f.Servers = append(f.Servers[:i], f.Servers[i+1:]...)
+}
+
+func (f *fsm) removeServerByAddr(addr address.Address) {
+	if i, ok := f.findServer(addr); ok {
+		f.removeServer(i)
+	}
+}
+
+func (f *fsm) replaceServer(s description.Server) {
+	if i, ok := f.findServer(s.Addr); ok {
+		f.setServer(i, s)
+	}
+}
+
+func (f *fsm) setServer(i int, s description.Server) {
+	f.Servers[i] = s
+}
+
+func (f *fsm) setKind(k description.TopologyKind) {
+	f.Kind = k
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go
new file mode 100644
index 0000000000000000000000000000000000000000..e9565425d99a7b62616dcc7e8fba901957b8f116
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go
@@ -0,0 +1,1524 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+)
+
+// Connection pool state constants.
+const (
+	poolPaused int = iota
+	poolReady
+	poolClosed
+)
+
+// ErrPoolNotPaused is returned when attempting to mark a connection pool "ready" that is not
+// currently "paused".
+var ErrPoolNotPaused = PoolError("only a paused pool can be marked ready")
+
+// ErrPoolClosed is returned when attempting to check out a connection from a closed pool.
+var ErrPoolClosed = PoolError("attempted to check out a connection from closed connection pool")
+
+// ErrConnectionClosed is returned from an attempt to use an already closed connection.
+var ErrConnectionClosed = ConnectionError{ConnectionID: "<closed>", message: "connection is closed"}
+
+// ErrWrongPool is returned when a connection is returned to a pool it doesn't belong to.
+var ErrWrongPool = PoolError("connection does not belong to this pool")
+
+// PoolError is an error returned from a Pool method.
+type PoolError string
+
+func (pe PoolError) Error() string { return string(pe) }
+
+// poolClearedError is an error returned when the connection pool is cleared or currently paused. It
+// is a retryable error.
+type poolClearedError struct {
+	err     error
+	address address.Address
+}
+
+func (pce poolClearedError) Error() string {
+	return fmt.Sprintf(
+		"connection pool for %v was cleared because another operation failed with: %v",
+		pce.address,
+		pce.err)
+}
+
+// Retryable returns true. All poolClearedErrors are retryable.
+func (poolClearedError) Retryable() bool { return true }
+
+// Assert that poolClearedError is a driver.RetryablePoolError.
+var _ driver.RetryablePoolError = poolClearedError{}
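+
+// Callers can therefore detect the retryable condition generically; an
+// illustrative check, where err is an assumed checkout error:
+//
+//	if rerr, ok := err.(driver.RetryablePoolError); ok && rerr.Retryable() {
+//		// safe to retry
+//	}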
+
+// poolConfig contains all aspects of the pool that can be configured
+type poolConfig struct {
+	Address          address.Address
+	MinPoolSize      uint64
+	MaxPoolSize      uint64
+	MaxConnecting    uint64
+	MaxIdleTime      time.Duration
+	MaintainInterval time.Duration
+	LoadBalanced     bool
+	PoolMonitor      *event.PoolMonitor
+	Logger           *logger.Logger
+	handshakeErrFn   func(error, uint64, *primitive.ObjectID)
+}
+
+type pool struct {
+	// The following integer fields must be accessed using the atomic package
+	// and should be at the beginning of the struct.
+	// - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG
+	// - suggested layout: https://go101.org/article/memory-layout.html
+
+	nextID                       uint64 // nextID is the next pool ID for a new connection.
+	pinnedCursorConnections      uint64
+	pinnedTransactionConnections uint64
+
+	address       address.Address
+	minSize       uint64
+	maxSize       uint64
+	maxConnecting uint64
+	loadBalanced  bool
+	monitor       *event.PoolMonitor
+	logger        *logger.Logger
+
+	// handshakeErrFn is used to handle any errors that happen during connection establishment and
+	// handshaking.
+	handshakeErrFn func(error, uint64, *primitive.ObjectID)
+
+	connOpts   []ConnectionOption
+	generation *poolGenerationMap
+
+	maintainInterval time.Duration   // maintainInterval is the maintain() loop interval.
+	maintainReady    chan struct{}   // maintainReady is a signal channel that starts the maintain() loop when ready() is called.
+	backgroundDone   *sync.WaitGroup // backgroundDone waits for all background goroutines to return.
+
+	stateMu      sync.RWMutex // stateMu guards state, lastClearErr
+	state        int          // state is the current state of the connection pool.
+	lastClearErr error        // lastClearErr is the last error that caused the pool to be cleared.
+
+	// createConnectionsCond is the condition variable that controls when the createConnections()
+	// loop runs or waits. Its lock guards cancelBackgroundCtx, conns, and newConnWait. Any changes
+	// to the state of the guarded values must be made while holding the lock to prevent undefined
+	// behavior in the createConnections() waiting logic.
+	createConnectionsCond *sync.Cond
+	cancelBackgroundCtx   context.CancelFunc     // cancelBackgroundCtx is called to signal background goroutines to stop.
+	conns                 map[uint64]*connection // conns holds all currently open connections.
+	newConnWait           wantConnQueue          // newConnWait holds all wantConn requests for new connections.
+
+	idleMu       sync.Mutex    // idleMu guards idleConns, idleConnWait
+	idleConns    []*connection // idleConns holds all idle connections.
+	idleConnWait wantConnQueue // idleConnWait holds all wantConn requests for idle connections.
+}
+
+// getState returns the current state of the pool. Callers must not hold the stateMu lock.
+func (p *pool) getState() int {
+	p.stateMu.RLock()
+	defer p.stateMu.RUnlock()
+
+	return p.state
+}
+
+func mustLogPoolMessage(pool *pool) bool {
+	return pool.logger != nil && pool.logger.LevelComponentEnabled(
+		logger.LevelDebug, logger.ComponentConnection)
+}
+
+func logPoolMessage(pool *pool, msg string, keysAndValues ...interface{}) {
+	host, port, err := net.SplitHostPort(pool.address.String())
+	if err != nil {
+		host = pool.address.String()
+		port = ""
+	}
+
+	pool.logger.Print(logger.LevelDebug,
+		logger.ComponentConnection,
+		msg,
+		logger.SerializeConnection(logger.Connection{
+			Message:    msg,
+			ServerHost: host,
+			ServerPort: port,
+		}, keysAndValues...)...)
+
+
+type reason struct {
+	loggerConn string
+	event      string
+}
+
+// connectionPerished checks if a given connection is perished and should be removed from the pool.
+func connectionPerished(conn *connection) (reason, bool) {
+	switch {
+	case conn.closed() || !conn.isAlive():
+		// A connection would only be closed if it encountered a network error
+		// during an operation and closed itself. If a connection is not alive
+		// (e.g. the connection was closed by the server-side), it's also
+		// considered a network error.
+		return reason{
+			loggerConn: logger.ReasonConnClosedError,
+			event:      event.ReasonError,
+		}, true
+	case conn.idleTimeoutExpired():
+		return reason{
+			loggerConn: logger.ReasonConnClosedIdle,
+			event:      event.ReasonIdle,
+		}, true
+	case conn.pool.stale(conn):
+		return reason{
+			loggerConn: logger.ReasonConnClosedStale,
+			event:      event.ReasonStale,
+		}, true
+	}
+
+	return reason{}, false
+}
+
+// newPool creates a new pool. It will use the provided options when creating connections.
+func newPool(config poolConfig, connOpts ...ConnectionOption) *pool {
+	if config.MaxIdleTime != time.Duration(0) {
+		connOpts = append(connOpts, WithIdleTimeout(func(_ time.Duration) time.Duration { return config.MaxIdleTime }))
+	}
+
+	var maxConnecting uint64 = 2
+	if config.MaxConnecting > 0 {
+		maxConnecting = config.MaxConnecting
+	}
+
+	maintainInterval := 10 * time.Second
+	if config.MaintainInterval != 0 {
+		maintainInterval = config.MaintainInterval
+	}
+
+	pool := &pool{
+		address:               config.Address,
+		minSize:               config.MinPoolSize,
+		maxSize:               config.MaxPoolSize,
+		maxConnecting:         maxConnecting,
+		loadBalanced:          config.LoadBalanced,
+		monitor:               config.PoolMonitor,
+		logger:                config.Logger,
+		handshakeErrFn:        config.handshakeErrFn,
+		connOpts:              connOpts,
+		generation:            newPoolGenerationMap(),
+		state:                 poolPaused,
+		maintainInterval:      maintainInterval,
+		maintainReady:         make(chan struct{}, 1),
+		backgroundDone:        &sync.WaitGroup{},
+		createConnectionsCond: sync.NewCond(&sync.Mutex{}),
+		conns:                 make(map[uint64]*connection, config.MaxPoolSize),
+		idleConns:             make([]*connection, 0, config.MaxPoolSize),
+	}
+	// minSize must not exceed maxSize if maxSize is not 0
+	if pool.maxSize != 0 && pool.minSize > pool.maxSize {
+		pool.minSize = pool.maxSize
+	}
+	pool.connOpts = append(pool.connOpts, withGenerationNumberFn(func(_ generationNumberFn) generationNumberFn { return pool.getGenerationForNewConnection }))
+
+	pool.generation.connect()
+
+	// Create a Context with cancellation that's used to signal the createConnections() and
+	// maintain() background goroutines to stop. Also create a "backgroundDone" WaitGroup that is
+	// used to wait for the background goroutines to return.
+	var ctx context.Context
+	ctx, pool.cancelBackgroundCtx = context.WithCancel(context.Background())
+
+	for i := 0; i < int(pool.maxConnecting); i++ {
+		pool.backgroundDone.Add(1)
+		go pool.createConnections(ctx, pool.backgroundDone)
+	}
+
+	// If maintainInterval is not positive, don't start the maintain() goroutine. Expect that
+	// negative values are only used in testing; this config value is not user-configurable.
+	if maintainInterval > 0 {
+		pool.backgroundDone.Add(1)
+		go pool.maintain(ctx, pool.backgroundDone)
+	}
+
+	if mustLogPoolMessage(pool) {
+		keysAndValues := logger.KeyValues{
+			logger.KeyMaxIdleTimeMS, config.MaxIdleTime.Milliseconds(),
+			logger.KeyMinPoolSize, config.MinPoolSize,
+			logger.KeyMaxPoolSize, config.MaxPoolSize,
+			logger.KeyMaxConnecting, config.MaxConnecting,
+		}
+
+		logPoolMessage(pool, logger.ConnectionPoolCreated, keysAndValues...)
+	}
+
+	if pool.monitor != nil {
+		pool.monitor.Event(&event.PoolEvent{
+			Type: event.PoolCreated,
+			PoolOptions: &event.MonitorPoolOptions{
+				MaxPoolSize: config.MaxPoolSize,
+				MinPoolSize: config.MinPoolSize,
+			},
+			Address: pool.address.String(),
+		})
+	}
+
+	return pool
+}
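+
+// An illustrative lifecycle (address and size hypothetical): a pool starts
+// "paused", must be marked ready before connections can be checked out, and
+// must be closed so its background goroutines exit:
+//
+//	p := newPool(poolConfig{Address: addr, MaxPoolSize: 100})
+//	if err := p.ready(); err != nil {
+//		// handle err
+//	}
+//	defer p.close(context.Background())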
+
+// stale checks if a given connection's generation is below the generation of the pool
+func (p *pool) stale(conn *connection) bool {
+	return conn == nil || p.generation.stale(conn.desc.ServiceID, conn.generation)
+}
+
+// ready puts the pool into the "ready" state and starts the background connection creation and
+// monitoring goroutines. ready must be called before connections can be checked out. An unused,
+// connected pool must be closed or it will leak goroutines and will not be garbage collected.
+func (p *pool) ready() error {
+	// While holding the stateMu lock, set the pool to "ready" if it is currently "paused".
+	p.stateMu.Lock()
+	if p.state == poolReady {
+		p.stateMu.Unlock()
+		return nil
+	}
+	if p.state != poolPaused {
+		p.stateMu.Unlock()
+		return ErrPoolNotPaused
+	}
+	p.lastClearErr = nil
+	p.state = poolReady
+	p.stateMu.Unlock()
+
+	if mustLogPoolMessage(p) {
+		logPoolMessage(p, logger.ConnectionPoolReady)
+	}
+
+	// Send event.PoolReady before resuming the maintain() goroutine to guarantee that the
+	// "pool ready" event is always sent before maintain() starts creating connections.
+	if p.monitor != nil {
+		p.monitor.Event(&event.PoolEvent{
+			Type:    event.PoolReady,
+			Address: p.address.String(),
+		})
+	}
+
+	// Signal maintain() to wake up immediately when marking the pool "ready".
+	select {
+	case p.maintainReady <- struct{}{}:
+	default:
+	}
+
+	return nil
+}
+
+// close closes the pool, closes all connections associated with the pool, and stops all background
+// goroutines. All subsequent checkOut requests will return an error. An unused, ready pool must be
+// closed or it will leak goroutines and will not be garbage collected.
+func (p *pool) close(ctx context.Context) {
+	p.stateMu.Lock()
+	if p.state == poolClosed {
+		p.stateMu.Unlock()
+		return
+	}
+	p.state = poolClosed
+	p.stateMu.Unlock()
+
+	// Call cancelBackgroundCtx() to exit the maintain() and createConnections() background
+	// goroutines. Broadcast to the createConnectionsCond to wake up all createConnections()
+	// goroutines. We must hold the createConnectionsCond lock here because we're changing the
+	// condition by cancelling the "background goroutine" Context, even though cancelling the Context
+	// is also synchronized by a lock. Otherwise, we run into an intermittent bug that prevents the
+	// createConnections() goroutines from exiting.
+	p.createConnectionsCond.L.Lock()
+	p.cancelBackgroundCtx()
+	p.createConnectionsCond.Broadcast()
+	p.createConnectionsCond.L.Unlock()
+
+	// Wait for all background goroutines to exit.
+	p.backgroundDone.Wait()
+
+	p.generation.disconnect()
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// If we have a deadline, then we interpret it as a request to shut down gracefully. We wait until
+	// either all the connections have been checked back into the pool (i.e. total open connections
+	// equals idle connections) or until the Context deadline is reached.
+	if _, ok := ctx.Deadline(); ok {
+		ticker := time.NewTicker(100 * time.Millisecond)
+		defer ticker.Stop()
+
+	graceful:
+		for {
+			if p.totalConnectionCount() == p.availableConnectionCount() {
+				break graceful
+			}
+
+			select {
+			case <-ticker.C:
+			case <-ctx.Done():
+				break graceful
+			default:
+			}
+		}
+	}
+
+	// Empty the idle connections stack and try to deliver ErrPoolClosed to any waiting wantConns
+	// from idleConnWait while holding the idleMu lock.
+	p.idleMu.Lock()
+	for _, conn := range p.idleConns {
+		_ = p.removeConnection(conn, reason{
+			loggerConn: logger.ReasonConnClosedPoolClosed,
+			event:      event.ReasonPoolClosed,
+		}, nil)
+		_ = p.closeConnection(conn) // We don't care about errors while closing the connection.
+	}
+	p.idleConns = p.idleConns[:0]
+	for {
+		w := p.idleConnWait.popFront()
+		if w == nil {
+			break
+		}
+		w.tryDeliver(nil, ErrPoolClosed)
+	}
+	p.idleMu.Unlock()
+
+	// Collect all conns from the pool and try to deliver ErrPoolClosed to any waiting wantConns
+	// from newConnWait while holding the createConnectionsCond lock. We can't call removeConnection
+	// on the connections while holding any locks, so do that after we release the lock.
+	p.createConnectionsCond.L.Lock()
+	conns := make([]*connection, 0, len(p.conns))
+	for _, conn := range p.conns {
+		conns = append(conns, conn)
+	}
+	for {
+		w := p.newConnWait.popFront()
+		if w == nil {
+			break
+		}
+		w.tryDeliver(nil, ErrPoolClosed)
+	}
+	p.createConnectionsCond.L.Unlock()
+
+	if mustLogPoolMessage(p) {
+		logPoolMessage(p, logger.ConnectionPoolClosed)
+	}
+
+	if p.monitor != nil {
+		p.monitor.Event(&event.PoolEvent{
+			Type:    event.PoolClosedEvent,
+			Address: p.address.String(),
+		})
+	}
+
+	// Now that we're not holding any locks, remove all of the connections we collected from the
+	// pool.
+	for _, conn := range conns {
+		_ = p.removeConnection(conn, reason{
+			loggerConn: logger.ReasonConnClosedPoolClosed,
+			event:      event.ReasonPoolClosed,
+		}, nil)
+		_ = p.closeConnection(conn) // We don't care about errors while closing the connection.
+	}
+}
+
+func (p *pool) pinConnectionToCursor() {
+	atomic.AddUint64(&p.pinnedCursorConnections, 1)
+}
+
+func (p *pool) unpinConnectionFromCursor() {
+	// See https://golang.org/pkg/sync/atomic/#AddUint64 for an explanation of the ^uint64(0) syntax.
+	atomic.AddUint64(&p.pinnedCursorConnections, ^uint64(0))
+}
+
+func (p *pool) pinConnectionToTransaction() {
+	atomic.AddUint64(&p.pinnedTransactionConnections, 1)
+}
+
+func (p *pool) unpinConnectionFromTransaction() {
+	// See https://golang.org/pkg/sync/atomic/#AddUint64 for an explanation of the ^uint64(0) syntax.
+	atomic.AddUint64(&p.pinnedTransactionConnections, ^uint64(0))
+}
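+
+// In both unpin helpers above, adding ^uint64(0) decrements the counter:
+// ^uint64(0) is the two's-complement bit pattern of -1, so the unsigned
+// addition wraps around to a subtraction of one.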
+
+// checkOut checks out a connection from the pool. If an idle connection is not available, the
+// checkOut enters a queue waiting for either the next idle or new connection. If the pool is not
+// ready, checkOut returns an error.
+// Based partially on https://cs.opensource.google/go/go/+/refs/tags/go1.16.6:src/net/http/transport.go;l=1324
+func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) {
+	if mustLogPoolMessage(p) {
+		logPoolMessage(p, logger.ConnectionCheckoutStarted)
+	}
+
+	// TODO(CSOT): If a Timeout was specified at any level, respect the Timeout in
+	// server selection and connection checkout.
+	if p.monitor != nil {
+		p.monitor.Event(&event.PoolEvent{
+			Type:    event.GetStarted,
+			Address: p.address.String(),
+		})
+	}
+
+	start := time.Now()
+	// Check the pool state while holding a stateMu read lock. If the pool state is not "ready",
+	// return an error. Do all of this while holding the stateMu read lock to prevent a state change between
+	// checking the state and entering the wait queue. Not holding the stateMu read lock here may
+	// allow a checkOut() to enter the wait queue after clear() pauses the pool and clears the wait
+	// queue, resulting in createConnections() doing work while the pool is "paused".
+	p.stateMu.RLock()
+	switch p.state {
+	case poolClosed:
+		p.stateMu.RUnlock()
+
+		duration := time.Since(start)
+		if mustLogPoolMessage(p) {
+			keysAndValues := logger.KeyValues{
+				logger.KeyDurationMS, duration.Milliseconds(),
+				logger.KeyReason, logger.ReasonConnCheckoutFailedPoolClosed,
+			}
+
+			logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...)
+		}
+
+		if p.monitor != nil {
+			p.monitor.Event(&event.PoolEvent{
+				Type:     event.GetFailed,
+				Address:  p.address.String(),
+				Duration: duration,
+				Reason:   event.ReasonPoolClosed,
+			})
+		}
+		return nil, ErrPoolClosed
+	case poolPaused:
+		err := poolClearedError{err: p.lastClearErr, address: p.address}
+		p.stateMu.RUnlock()
+
+		duration := time.Since(start)
+		if mustLogPoolMessage(p) {
+			keysAndValues := logger.KeyValues{
+				logger.KeyDurationMS, duration.Milliseconds(),
+				logger.KeyReason, logger.ReasonConnCheckoutFailedError,
+			}
+
+			logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...)
+		}
+
+		if p.monitor != nil {
+			p.monitor.Event(&event.PoolEvent{
+				Type:     event.GetFailed,
+				Address:  p.address.String(),
+				Duration: duration,
+				Reason:   event.ReasonConnectionErrored,
+				Error:    err,
+			})
+		}
+		return nil, err
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// Create a wantConn, which we will use to request an existing idle or new connection. Always
+	// cancel the wantConn if checkOut() returned an error to make sure any delivered connections
+	// are returned to the pool (e.g. if a connection was delivered immediately after the Context
+	// timed out).
+	w := newWantConn()
+	defer func() {
+		if err != nil {
+			w.cancel(p, err)
+		}
+	}()
+
+	// Get in the queue for an idle connection. If getOrQueueForIdleConn returns true, it was able to
+	// immediately deliver an idle connection to the wantConn, so we can return the connection or
+	// error from the wantConn without waiting for "ready".
+	if delivered := p.getOrQueueForIdleConn(w); delivered {
+		// If delivered = true, we didn't enter the wait queue and will return either a connection
+		// or an error, so unlock the stateMu lock here.
+		p.stateMu.RUnlock()
+
+		duration := time.Since(start)
+		if w.err != nil {
+			if mustLogPoolMessage(p) {
+				keysAndValues := logger.KeyValues{
+					logger.KeyDurationMS, duration.Milliseconds(),
+					logger.KeyReason, logger.ReasonConnCheckoutFailedError,
+				}
+
+				logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...)
+			}
+
+			if p.monitor != nil {
+				p.monitor.Event(&event.PoolEvent{
+					Type:     event.GetFailed,
+					Address:  p.address.String(),
+					Duration: duration,
+					Reason:   event.ReasonConnectionErrored,
+					Error:    w.err,
+				})
+			}
+			return nil, w.err
+		}
+
+		duration = time.Since(start)
+		if mustLogPoolMessage(p) {
+			keysAndValues := logger.KeyValues{
+				logger.KeyDriverConnectionID, w.conn.driverConnectionID,
+				logger.KeyDurationMS, duration.Milliseconds(),
+			}
+
+			logPoolMessage(p, logger.ConnectionCheckedOut, keysAndValues...)
+		}
+
+		if p.monitor != nil {
+			p.monitor.Event(&event.PoolEvent{
+				Type:         event.GetSucceeded,
+				Address:      p.address.String(),
+				ConnectionID: w.conn.driverConnectionID,
+				Duration:     duration,
+			})
+		}
+
+		return w.conn, nil
+	}
+
+	// If we didn't get an immediately available idle connection, also get in the queue for a new
+	// connection while we're waiting for an idle connection.
+	p.queueForNewConn(w)
+	p.stateMu.RUnlock()
+
+	// Wait for either the wantConn to be ready or for the Context to time out.
+	waitQueueStart := time.Now()
+	select {
+	case <-w.ready:
+		if w.err != nil {
+			duration := time.Since(start)
+			if mustLogPoolMessage(p) {
+				keysAndValues := logger.KeyValues{
+					logger.KeyDurationMS, duration.Milliseconds(),
+					logger.KeyReason, logger.ReasonConnCheckoutFailedError,
+					logger.KeyError, w.err.Error(),
+				}
+
+				logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...)
+			}
+
+			if p.monitor != nil {
+				p.monitor.Event(&event.PoolEvent{
+					Type:     event.GetFailed,
+					Address:  p.address.String(),
+					Duration: duration,
+					Reason:   event.ReasonConnectionErrored,
+					Error:    w.err,
+				})
+			}
+
+			return nil, w.err
+		}
+
+		duration := time.Since(start)
+		if mustLogPoolMessage(p) {
+			keysAndValues := logger.KeyValues{
+				logger.KeyDriverConnectionID, w.conn.driverConnectionID,
+				logger.KeyDurationMS, duration.Milliseconds(),
+			}
+
+			logPoolMessage(p, logger.ConnectionCheckedOut, keysAndValues...)
+		}
+
+		if p.monitor != nil {
+			p.monitor.Event(&event.PoolEvent{
+				Type:         event.GetSucceeded,
+				Address:      p.address.String(),
+				ConnectionID: w.conn.driverConnectionID,
+				Duration:     duration,
+			})
+		}
+		return w.conn, nil
+	case <-ctx.Done():
+		waitQueueDuration := time.Since(waitQueueStart)
+
+		duration := time.Since(start)
+		if mustLogPoolMessage(p) {
+			keysAndValues := logger.KeyValues{
+				logger.KeyDurationMS, duration.Milliseconds(),
+				logger.KeyReason, logger.ReasonConnCheckoutFailedTimout,
+			}
+
+			logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...)
+		}
+
+		if p.monitor != nil {
+			p.monitor.Event(&event.PoolEvent{
+				Type:     event.GetFailed,
+				Address:  p.address.String(),
+				Duration: duration,
+				Reason:   event.ReasonTimedOut,
+				Error:    ctx.Err(),
+			})
+		}
+
+		err := WaitQueueTimeoutError{
+			Wrapped:              ctx.Err(),
+			maxPoolSize:          p.maxSize,
+			totalConnections:     p.totalConnectionCount(),
+			availableConnections: p.availableConnectionCount(),
+			waitDuration:         waitQueueDuration,
+		}
+		if p.loadBalanced {
+			err.pinnedConnections = &pinnedConnections{
+				cursorConnections:      atomic.LoadUint64(&p.pinnedCursorConnections),
+				transactionConnections: atomic.LoadUint64(&p.pinnedTransactionConnections),
+			}
+		}
+		return nil, err
+	}
+}
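+
+// An illustrative checkout with a bounded wait (timeout value hypothetical):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	conn, err := p.checkOut(ctx)
+//	if err != nil {
+//		return err // ErrPoolClosed, poolClearedError, or WaitQueueTimeoutError
+//	}
+//	defer func() { _ = p.checkIn(conn) }()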
+
+// closeConnection closes a connection.
+func (p *pool) closeConnection(conn *connection) error {
+	if conn.pool != p {
+		return ErrWrongPool
+	}
+
+	if atomic.LoadInt64(&conn.state) == connConnected {
+		conn.closeConnectContext()
+		conn.wait() // Make sure that the connection has finished connecting.
+	}
+
+	err := conn.close()
+	if err != nil {
+		return ConnectionError{ConnectionID: conn.id, Wrapped: err, message: "failed to close net.Conn"}
+	}
+
+	return nil
+}
+
+func (p *pool) getGenerationForNewConnection(serviceID *primitive.ObjectID) uint64 {
+	return p.generation.addConnection(serviceID)
+}
+
+// removeConnection removes a connection from the pool and emits a "ConnectionClosed" event.
+func (p *pool) removeConnection(conn *connection, reason reason, err error) error {
+	if conn == nil {
+		return nil
+	}
+
+	if conn.pool != p {
+		return ErrWrongPool
+	}
+
+	p.createConnectionsCond.L.Lock()
+	_, ok := p.conns[conn.driverConnectionID]
+	if !ok {
+		// If the connection has been removed from the pool already, exit without doing any
+		// additional state changes.
+		p.createConnectionsCond.L.Unlock()
+		return nil
+	}
+	delete(p.conns, conn.driverConnectionID)
+	// Signal the createConnectionsCond so any goroutines waiting for a new connection slot in the
+	// pool will proceed.
+	p.createConnectionsCond.Signal()
+	p.createConnectionsCond.L.Unlock()
+
+	// Only update the generation numbers map if the connection has retrieved its generation number.
+	// Otherwise, we'd decrement the count for the generation even though it had never been
+	// incremented.
+	if conn.hasGenerationNumber() {
+		p.generation.removeConnection(conn.desc.ServiceID)
+	}
+
+	if mustLogPoolMessage(p) {
+		keysAndValues := logger.KeyValues{
+			logger.KeyDriverConnectionID, conn.driverConnectionID,
+			logger.KeyReason, reason.loggerConn,
+		}
+
+		if err != nil {
+			keysAndValues.Add(logger.KeyError, err.Error())
+		}
+
+		logPoolMessage(p, logger.ConnectionClosed, keysAndValues...)
+	}
+
+	if p.monitor != nil {
+		p.monitor.Event(&event.PoolEvent{
+			Type:         event.ConnectionClosed,
+			Address:      p.address.String(),
+			ConnectionID: conn.driverConnectionID,
+			Reason:       reason.event,
+			Error:        err,
+		})
+	}
+
+	return nil
+}
+
+var (
+	// BGReadTimeout is the maximum amount of time to wait when trying to read
+	// the server reply on a connection after an operation timed out. The
+	// default is 1 second.
+	//
+	// Deprecated: BGReadTimeout is intended for internal use only and may be
+	// removed or modified at any time.
+	BGReadTimeout = 1 * time.Second
+
+	// BGReadCallback is a callback for monitoring the behavior of the
+	// background-read-on-timeout connection preserving mechanism.
+	//
+	// Deprecated: BGReadCallback is intended for internal use only and may be
+	// removed or modified at any time.
+	BGReadCallback func(addr string, start, read time.Time, errs []error, connClosed bool)
+)
+
+// bgRead sets a new read deadline on the provided connection (BGReadTimeout in
+// the future, one second by default) and tries to read any bytes returned by
+// the server. If successful, it
+// checks the connection into the provided pool. If there are any errors, it
+// closes the connection.
+//
+// It calls the package-global BGReadCallback function, if set, with the
+// address, timings, and any errors that occurred.
+func bgRead(pool *pool, conn *connection, size int32) {
+	var err error
+	start := time.Now()
+
+	defer func() {
+		read := time.Now()
+		errs := make([]error, 0)
+		connClosed := false
+		if err != nil {
+			errs = append(errs, err)
+			connClosed = true
+			err = conn.close()
+			if err != nil {
+				errs = append(errs, fmt.Errorf("error closing conn after reading: %w", err))
+			}
+		}
+
+		// No matter what happens, always check the connection back into the
+		// pool, which will either make it available for other operations or
+		// remove it from the pool if it was closed.
+		err = pool.checkInNoEvent(conn)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("error checking in: %w", err))
+		}
+
+		if BGReadCallback != nil {
+			BGReadCallback(conn.addr.String(), start, read, errs, connClosed)
+		}
+	}()
+
+	err = conn.nc.SetReadDeadline(time.Now().Add(BGReadTimeout))
+	if err != nil {
+		err = fmt.Errorf("error setting a read deadline: %w", err)
+		return
+	}
+
+	if size == 0 {
+		var sizeBuf [4]byte
+		_, err = io.ReadFull(conn.nc, sizeBuf[:])
+		if err != nil {
+			err = fmt.Errorf("error reading the message size: %w", err)
+			return
+		}
+		size, err = conn.parseWmSizeBytes(sizeBuf)
+		if err != nil {
+			return
+		}
+		size -= 4
+	}
+	_, err = io.CopyN(io.Discard, conn.nc, int64(size))
+	if err != nil {
+		err = fmt.Errorf("error discarding %d byte message: %w", size, err)
+	}
+}
+
+// checkIn returns an idle connection to the pool. If the connection is perished or the pool is
+// closed, it is removed from the connection pool and closed.
+func (p *pool) checkIn(conn *connection) error {
+	if conn == nil {
+		return nil
+	}
+	if conn.pool != p {
+		return ErrWrongPool
+	}
+
+	if mustLogPoolMessage(p) {
+		keysAndValues := logger.KeyValues{
+			logger.KeyDriverConnectionID, conn.driverConnectionID,
+		}
+
+		logPoolMessage(p, logger.ConnectionCheckedIn, keysAndValues...)
+	}
+
+	if p.monitor != nil {
+		p.monitor.Event(&event.PoolEvent{
+			Type:         event.ConnectionReturned,
+			ConnectionID: conn.driverConnectionID,
+			Address:      conn.addr.String(),
+		})
+	}
+
+	return p.checkInNoEvent(conn)
+}
+
+// checkInNoEvent returns a connection to the pool. It behaves identically to checkIn except it does
+// not publish events. It is only intended for use by pool-internal functions.
+func (p *pool) checkInNoEvent(conn *connection) error {
+	if conn == nil {
+		return nil
+	}
+	if conn.pool != p {
+		return ErrWrongPool
+	}
+
+	// If the connection has an awaiting server response, try to read the
+	// response in another goroutine before checking it back into the pool.
+	//
+	// Do this here because we want to publish checkIn events when the operation
+	// is done with the connection, not when it's ready to be used again. That
+	// means that connections in "awaiting response" state are checked in but
+	// not usable, which is not covered by the current pool events. We may need
+	// to add pool event information in the future to communicate that.
+	if conn.awaitRemainingBytes != nil {
+		size := *conn.awaitRemainingBytes
+		conn.awaitRemainingBytes = nil
+		go bgRead(p, conn, size)
+		return nil
+	}
+
+	// Bump the connection idle start time here because we're about to make the
+	// connection "available". The idle start time is used to determine how long
+	// a connection has been idle and when it has reached its max idle time and
+	// should be closed. A connection reaches its max idle time when it has been
+	// "available" in the idle connections stack for more than the configured
+	// duration (maxIdleTimeMS). Set it before we call connectionPerished(),
+	// which checks the idle deadline, because a newly "available" connection
+	// should never be perished due to max idle time.
+	conn.bumpIdleStart()
+
+	r, perished := connectionPerished(conn)
+	if !perished && conn.pool.getState() == poolClosed {
+		perished = true
+		r = reason{
+			loggerConn: logger.ReasonConnClosedPoolClosed,
+			event:      event.ReasonPoolClosed,
+		}
+	}
+	if perished {
+		_ = p.removeConnection(conn, r, nil)
+		go func() {
+			_ = p.closeConnection(conn)
+		}()
+		return nil
+	}
+
+	p.idleMu.Lock()
+	defer p.idleMu.Unlock()
+
+	for {
+		w := p.idleConnWait.popFront()
+		if w == nil {
+			break
+		}
+		if w.tryDeliver(conn, nil) {
+			return nil
+		}
+	}
+
+	for _, idle := range p.idleConns {
+		if idle == conn {
+			return fmt.Errorf("duplicate idle conn %p in idle connections stack", conn)
+		}
+	}
+
+	p.idleConns = append(p.idleConns, conn)
+	return nil
+}
+
+// clear calls clearImpl internally with a false interruptAllConnections value.
+func (p *pool) clear(err error, serviceID *primitive.ObjectID) {
+	p.clearImpl(err, serviceID, false)
+}
+
+// clearAll does the same as the "clear" method but interrupts all connections.
+func (p *pool) clearAll(err error, serviceID *primitive.ObjectID) {
+	p.clearImpl(err, serviceID, true)
+}
+
+// interruptConnections interrupts the input connections.
+func (p *pool) interruptConnections(conns []*connection) {
+	for _, conn := range conns {
+		_ = p.removeConnection(conn, reason{
+			loggerConn: logger.ReasonConnClosedStale,
+			event:      event.ReasonStale,
+		}, nil)
+		go func(c *connection) {
+			_ = p.closeConnection(c)
+		}(conn)
+	}
+}
+
+// clearImpl marks all connections as stale by incrementing the generation number, stops all background
+// goroutines, removes all requests from idleConnWait and newConnWait, and sets the pool state to
+// "paused". If serviceID is nil, clear marks all connections as stale. If serviceID is not nil,
+// clear marks only connections associated with the given serviceID stale (for use in load balancer
+// mode).
+// If interruptAllConnections is true, this function calls interruptConnections to interrupt all
+// non-idle connections.
+func (p *pool) clearImpl(err error, serviceID *primitive.ObjectID, interruptAllConnections bool) {
+	if p.getState() == poolClosed {
+		return
+	}
+
+	p.generation.clear(serviceID)
+
+	// If serviceID is nil (i.e. not in load balancer mode), transition the pool to a paused state
+	// by stopping all background goroutines, clearing the wait queues, and setting the pool state
+	// to "paused".
+	sendEvent := true
+	if serviceID == nil {
+		// While holding the stateMu lock, set the pool state to "paused" if it's currently "ready",
+		// and set lastClearErr to the error that caused the pool to be cleared. If the pool is
+		// already paused, don't send another "ConnectionPoolCleared" event.
+		p.stateMu.Lock()
+		if p.state == poolPaused {
+			sendEvent = false
+		}
+		if p.state == poolReady {
+			p.state = poolPaused
+		}
+		p.lastClearErr = err
+		p.stateMu.Unlock()
+	}
+
+	if mustLogPoolMessage(p) {
+		keysAndValues := logger.KeyValues{
+			logger.KeyServiceID, serviceID,
+		}
+
+		logPoolMessage(p, logger.ConnectionPoolCleared, keysAndValues...)
+	}
+
+	if sendEvent && p.monitor != nil {
+		event := &event.PoolEvent{
+			Type:         event.PoolCleared,
+			Address:      p.address.String(),
+			ServiceID:    serviceID,
+			Interruption: interruptAllConnections,
+			Error:        err,
+		}
+		p.monitor.Event(event)
+	}
+
+	p.removePerishedConns()
+	if interruptAllConnections {
+		p.createConnectionsCond.L.Lock()
+		p.idleMu.Lock()
+
+		idleConns := make(map[*connection]bool, len(p.idleConns))
+		for _, idle := range p.idleConns {
+			idleConns[idle] = true
+		}
+
+		conns := make([]*connection, 0, len(p.conns))
+		for _, conn := range p.conns {
+			if _, ok := idleConns[conn]; !ok && p.stale(conn) {
+				conns = append(conns, conn)
+			}
+		}
+
+		p.idleMu.Unlock()
+		p.createConnectionsCond.L.Unlock()
+
+		p.interruptConnections(conns)
+	}
+
+	if serviceID == nil {
+		pcErr := poolClearedError{err: err, address: p.address}
+
+		// Clear the idle connections wait queue.
+		p.idleMu.Lock()
+		for {
+			w := p.idleConnWait.popFront()
+			if w == nil {
+				break
+			}
+			w.tryDeliver(nil, pcErr)
+		}
+		p.idleMu.Unlock()
+
+		// Clear the new connections wait queue. This effectively pauses the createConnections()
+		// background goroutine because newConnWait is empty and checkOut() won't insert any more
+		// wantConns into newConnWait until the pool is marked "ready" again.
+		p.createConnectionsCond.L.Lock()
+		for {
+			w := p.newConnWait.popFront()
+			if w == nil {
+				break
+			}
+			w.tryDeliver(nil, pcErr)
+		}
+		p.createConnectionsCond.L.Unlock()
+	}
+}
+
+// getOrQueueForIdleConn attempts to deliver an idle connection to the given wantConn. If there is
+// an idle connection in the idle connections stack, it pops an idle connection, delivers it to the
+// wantConn, and returns true. If there are no idle connections in the idle connections stack, it
+// adds the wantConn to the idleConnWait queue and returns false.
+func (p *pool) getOrQueueForIdleConn(w *wantConn) bool {
+	p.idleMu.Lock()
+	defer p.idleMu.Unlock()
+
+	// Try to deliver an idle connection from the idleConns stack first.
+	for len(p.idleConns) > 0 {
+		conn := p.idleConns[len(p.idleConns)-1]
+		p.idleConns = p.idleConns[:len(p.idleConns)-1]
+
+		if conn == nil {
+			continue
+		}
+
+		if reason, perished := connectionPerished(conn); perished {
+			_ = conn.pool.removeConnection(conn, reason, nil)
+			go func() {
+				_ = conn.pool.closeConnection(conn)
+			}()
+			continue
+		}
+
+		if !w.tryDeliver(conn, nil) {
+			// If we couldn't deliver the conn to w, put it back in the idleConns stack.
+			p.idleConns = append(p.idleConns, conn)
+		}
+
+		// If we got here, we tried to deliver an idle conn to w. No matter if tryDeliver() returned
+		// true or false, w is no longer waiting and doesn't need to be added to any wait queues, so
+		// return delivered = true.
+		return true
+	}
+
+	p.idleConnWait.cleanFront()
+	p.idleConnWait.pushBack(w)
+	return false
+}
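+
+// Note that idleConns behaves as a stack: connections are popped from and
+// pushed onto the end, so the most recently returned connection is reused
+// first and long-idle connections age out via the idle timeout.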
+
+func (p *pool) queueForNewConn(w *wantConn) {
+	p.createConnectionsCond.L.Lock()
+	defer p.createConnectionsCond.L.Unlock()
+
+	p.newConnWait.cleanFront()
+	p.newConnWait.pushBack(w)
+	p.createConnectionsCond.Signal()
+}
+
+func (p *pool) totalConnectionCount() int {
+	p.createConnectionsCond.L.Lock()
+	defer p.createConnectionsCond.L.Unlock()
+
+	return len(p.conns)
+}
+
+func (p *pool) availableConnectionCount() int {
+	p.idleMu.Lock()
+	defer p.idleMu.Unlock()
+
+	return len(p.idleConns)
+}
+
+// createConnections creates connections for wantConn requests on the newConnWait queue.
+func (p *pool) createConnections(ctx context.Context, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	// condition returns true if the createConnections() loop should continue and false if it should
+	// wait. Note that the condition also listens for Context cancellation, which also causes the
+	// loop to continue, allowing for a subsequent check to return from createConnections().
+	condition := func() bool {
+		checkOutWaiting := p.newConnWait.len() > 0
+		poolHasSpace := p.maxSize == 0 || uint64(len(p.conns)) < p.maxSize
+		cancelled := ctx.Err() != nil
+		return (checkOutWaiting && poolHasSpace) || cancelled
+	}
+
+	// wait waits for there to be an available wantConn and for the pool to have space for a new
+	// connection. When the condition becomes true, it creates a new connection and returns the
+	// waiting wantConn and new connection. If the Context is cancelled or there are any
+	// errors, wait returns with "ok = false".
+	wait := func() (*wantConn, *connection, bool) {
+		p.createConnectionsCond.L.Lock()
+		defer p.createConnectionsCond.L.Unlock()
+
+		for !condition() {
+			p.createConnectionsCond.Wait()
+		}
+
+		if ctx.Err() != nil {
+			return nil, nil, false
+		}
+
+		p.newConnWait.cleanFront()
+		w := p.newConnWait.popFront()
+		if w == nil {
+			return nil, nil, false
+		}
+
+		conn := newConnection(p.address, p.connOpts...)
+		conn.pool = p
+		conn.driverConnectionID = atomic.AddUint64(&p.nextID, 1)
+		p.conns[conn.driverConnectionID] = conn
+
+		return w, conn, true
+	}
+
+	for ctx.Err() == nil {
+		w, conn, ok := wait()
+		if !ok {
+			continue
+		}
+
+		if mustLogPoolMessage(p) {
+			keysAndValues := logger.KeyValues{
+				logger.KeyDriverConnectionID, conn.driverConnectionID,
+			}
+
+			logPoolMessage(p, logger.ConnectionCreated, keysAndValues...)
+		}
+
+		if p.monitor != nil {
+			p.monitor.Event(&event.PoolEvent{
+				Type:         event.ConnectionCreated,
+				Address:      p.address.String(),
+				ConnectionID: conn.driverConnectionID,
+			})
+		}
+
+		start := time.Now()
+		// Pass the createConnections context to connect to allow pool close to cancel connection
+		// establishment so shutdown doesn't block indefinitely if connectTimeout=0.
+		err := conn.connect(ctx)
+		if err != nil {
+			w.tryDeliver(nil, err)
+
+			// If there's an error connecting the new connection, call the handshake error handler
+			// that implements the SDAM handshake error handling logic. This must be called after
+			// delivering the connection error to the waiting wantConn. If it's called before, the
+			// handshake error handler may clear the connection pool, leading to a different error
+			// message being delivered to the same waiting wantConn in idleConnWait when the wait
+			// queues are cleared.
+			if p.handshakeErrFn != nil {
+				p.handshakeErrFn(err, conn.generation, conn.desc.ServiceID)
+			}
+
+			_ = p.removeConnection(conn, reason{
+				loggerConn: logger.ReasonConnClosedError,
+				event:      event.ReasonError,
+			}, err)
+
+			_ = p.closeConnection(conn)
+
+			continue
+		}
+
+		duration := time.Since(start)
+		if mustLogPoolMessage(p) {
+			keysAndValues := logger.KeyValues{
+				logger.KeyDriverConnectionID, conn.driverConnectionID,
+				logger.KeyDurationMS, duration.Milliseconds(),
+			}
+
+			logPoolMessage(p, logger.ConnectionReady, keysAndValues...)
+		}
+
+		if p.monitor != nil {
+			p.monitor.Event(&event.PoolEvent{
+				Type:         event.ConnectionReady,
+				Address:      p.address.String(),
+				ConnectionID: conn.driverConnectionID,
+				Duration:     duration,
+			})
+		}
+
+		if w.tryDeliver(conn, nil) {
+			continue
+		}
+
+		_ = p.checkInNoEvent(conn)
+	}
+}
+
+func (p *pool) maintain(ctx context.Context, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	ticker := time.NewTicker(p.maintainInterval)
+	defer ticker.Stop()
+
+	// remove removes the *wantConn at index i from the slice and returns the new slice. The order
+	// of the slice is not maintained.
+	remove := func(arr []*wantConn, i int) []*wantConn {
+		end := len(arr) - 1
+		arr[i], arr[end] = arr[end], arr[i]
+		return arr[:end]
+	}
+
+	// removeNotWaiting removes any wantConns that are no longer waiting from the given slice of
+	// wantConns. That allows maintain() to use the size of its wantConns slice as an indication of
+	// how many new connection requests are outstanding and subtract that from the number of
+	// connections to ask for when maintaining minPoolSize.
+	removeNotWaiting := func(arr []*wantConn) []*wantConn {
+		for i := len(arr) - 1; i >= 0; i-- {
+			w := arr[i]
+			if !w.waiting() {
+				arr = remove(arr, i)
+			}
+		}
+
+		return arr
+	}
+
+	wantConns := make([]*wantConn, 0, p.minSize)
+	defer func() {
+		for _, w := range wantConns {
+			w.tryDeliver(nil, ErrPoolClosed)
+		}
+	}()
+
+	for {
+		select {
+		case <-ticker.C:
+		case <-p.maintainReady:
+		case <-ctx.Done():
+			return
+		}
+
+		// Only maintain the pool while it's in the "ready" state. If the pool state is not "ready",
+		// wait for the next tick or "ready" signal. Do all of this while holding the stateMu read
+		// lock to prevent a state change between checking the state and entering the wait queue.
+		// Not holding the stateMu read lock here may allow maintain() to request wantConns after
+		// clear() pauses the pool and clears the wait queue, resulting in createConnections()
+		// doing work while the pool is "paused".
+		p.stateMu.RLock()
+		if p.state != poolReady {
+			p.stateMu.RUnlock()
+			continue
+		}
+
+		p.removePerishedConns()
+
+		// Remove any wantConns that are no longer waiting.
+		wantConns = removeNotWaiting(wantConns)
+
+		// Figure out how many more wantConns we need to satisfy minPoolSize. Assume that the
+		// outstanding wantConns (i.e. the ones that weren't removed from the slice) will all return
+		// connections when they're ready, so only add wantConns to make up the difference. Limit
+		// the number of connections requested to at most 10 at a time to prevent overshooting
+		// minPoolSize in case other checkOut() calls are requesting new connections, too.
+		total := p.totalConnectionCount()
+		n := int(p.minSize) - total - len(wantConns)
+		if n > 10 {
+			n = 10
+		}
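+		// Worked example (illustrative numbers): with minPoolSize=50, 30 existing
+		// connections, and 5 outstanding wantConns, n = 50-30-5 = 15, which is
+		// capped to 10; the remaining demand is met on a later maintain() pass.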
+
+		for i := 0; i < n; i++ {
+			w := newWantConn()
+			p.queueForNewConn(w)
+			wantConns = append(wantConns, w)
+
+			// Start a goroutine for each new wantConn, waiting for it to be ready.
+			go func() {
+				<-w.ready
+				if w.conn != nil {
+					_ = p.checkInNoEvent(w.conn)
+				}
+			}()
+		}
+		p.stateMu.RUnlock()
+	}
+}
+
+func (p *pool) removePerishedConns() {
+	p.idleMu.Lock()
+	defer p.idleMu.Unlock()
+
+	for i := range p.idleConns {
+		conn := p.idleConns[i]
+		if conn == nil {
+			continue
+		}
+
+		if reason, perished := connectionPerished(conn); perished {
+			p.idleConns[i] = nil
+
+			_ = p.removeConnection(conn, reason, nil)
+			go func() {
+				_ = p.closeConnection(conn)
+			}()
+		}
+	}
+
+	p.idleConns = compact(p.idleConns)
+}
+
+// compact removes any nil pointers from the slice and keeps the non-nil pointers, retaining the
+// order of the non-nil pointers.
+func compact(arr []*connection) []*connection {
+	offset := 0
+	for i := range arr {
+		if arr[i] == nil {
+			continue
+		}
+		arr[offset] = arr[i]
+		offset++
+	}
+	return arr[:offset]
+}
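+
+// Illustrative use of compact (a sketch; c1..c3 are hypothetical connections):
+//
+//	conns := []*connection{c1, nil, c2, nil, c3}
+//	conns = compact(conns) // -> [c1 c2 c3], order preserved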
+
+// A wantConn records state about a wanted connection (that is, an active call to checkOut).
+// The conn may be gotten by creating a new connection or by finding an idle connection, or a
+// cancellation may make the conn no longer wanted. These three options are racing against each
+// other and use wantConn to coordinate and agree about the winning outcome.
+// Based on https://cs.opensource.google/go/go/+/refs/tags/go1.16.6:src/net/http/transport.go;l=1174-1240
+type wantConn struct {
+	ready chan struct{}
+
+	mu   sync.Mutex // Guards conn, err
+	conn *connection
+	err  error
+}
+
+func newWantConn() *wantConn {
+	return &wantConn{
+		ready: make(chan struct{}, 1),
+	}
+}
+
+// waiting reports whether w is still waiting for an answer (connection or error).
+func (w *wantConn) waiting() bool {
+	select {
+	case <-w.ready:
+		return false
+	default:
+		return true
+	}
+}
+
+// tryDeliver attempts to deliver conn, err to w and reports whether it succeeded.
+func (w *wantConn) tryDeliver(conn *connection, err error) bool {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.conn != nil || w.err != nil {
+		return false
+	}
+
+	w.conn = conn
+	w.err = err
+	if w.conn == nil && w.err == nil {
+		panic("x/mongo/driver/topology: internal error: misuse of tryDeliver")
+	}
+
+	close(w.ready)
+
+	return true
+}
+
+// cancel marks w as no longer wanting a result (for example, due to cancellation). If a connection
+// has been delivered already, cancel returns it with p.checkInNoEvent(). Note that the caller must
+// not hold any locks on the pool while calling cancel.
+func (w *wantConn) cancel(p *pool, err error) {
+	if err == nil {
+		panic("x/mongo/driver/topology: internal error: misuse of cancel")
+	}
+
+	w.mu.Lock()
+	if w.conn == nil && w.err == nil {
+		close(w.ready) // catch misbehavior in future delivery
+	}
+	conn := w.conn
+	w.conn = nil
+	w.err = err
+	w.mu.Unlock()
+
+	if conn != nil {
+		_ = p.checkInNoEvent(conn)
+	}
+}
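+
+// Race note (illustrative): if tryDeliver wins the race, cancel observes
+// w.conn != nil and returns the already-delivered connection to the pool via
+// p.checkInNoEvent. If cancel wins, it sets w.err, so a later tryDeliver
+// returns false and the caller checks its connection back in instead (see
+// createConnections).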
+
+// A wantConnQueue is a queue of wantConns.
+// Based on https://cs.opensource.google/go/go/+/refs/tags/go1.16.6:src/net/http/transport.go;l=1242-1306
+type wantConnQueue struct {
+	// This is a queue, not a deque.
+	// It is split into two stages - head[headPos:] and tail.
+	// popFront is trivial (headPos++) on the first stage, and
+	// pushBack is trivial (append) on the second stage.
+	// If the first stage is empty, popFront can swap the
+	// first and second stages to remedy the situation.
+	//
+	// This two-stage split is analogous to the use of two lists
+	// in Okasaki's purely functional queue but without the
+	// overhead of reversing the list when swapping stages.
+	head    []*wantConn
+	headPos int
+	tail    []*wantConn
+}
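+
+// Illustrative trace of the two stages (a sketch; w1, w2 are hypothetical):
+//
+//	var q wantConnQueue
+//	q.pushBack(w1)    // tail: [w1]
+//	q.pushBack(w2)    // tail: [w1 w2]
+//	w := q.popFront() // head was empty: stages swap, head=[w1 w2]; returns w1
+//	w = q.popFront()  // headPos advances; returns w2 without moving data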
+
+// len returns the number of items in the queue.
+func (q *wantConnQueue) len() int {
+	return len(q.head) - q.headPos + len(q.tail)
+}
+
+// pushBack adds w to the back of the queue.
+func (q *wantConnQueue) pushBack(w *wantConn) {
+	q.tail = append(q.tail, w)
+}
+
+// popFront removes and returns the wantConn at the front of the queue.
+func (q *wantConnQueue) popFront() *wantConn {
+	if q.headPos >= len(q.head) {
+		if len(q.tail) == 0 {
+			return nil
+		}
+		// Pick up tail as new head, clear tail.
+		q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
+	}
+	w := q.head[q.headPos]
+	q.head[q.headPos] = nil
+	q.headPos++
+	return w
+}
+
+// peekFront returns the wantConn at the front of the queue without removing it.
+func (q *wantConnQueue) peekFront() *wantConn {
+	if q.headPos < len(q.head) {
+		return q.head[q.headPos]
+	}
+	if len(q.tail) > 0 {
+		return q.tail[0]
+	}
+	return nil
+}
+
+// cleanFront pops any wantConns that are no longer waiting from the head of the queue.
+func (q *wantConnQueue) cleanFront() {
+	for {
+		w := q.peekFront()
+		if w == nil || w.waiting() {
+			return
+		}
+		q.popFront()
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd10c0ce7af0c00b162897a8fc3bbba319490d7c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// Pool generation state constants.
+const (
+	generationDisconnected int64 = iota
+	generationConnected
+)
+
+// generationStats represents the version of a pool. It tracks the generation number as well as the number of
+// connections that have been created in the generation.
+type generationStats struct {
+	generation uint64
+	numConns   uint64
+}
+
+// poolGenerationMap tracks the version for each service ID present in a pool. For deployments that are not behind a
+// load balancer, there is only one service ID: primitive.NilObjectID. For load-balanced deployments, each server behind
+// the load balancer will have a unique service ID.
+type poolGenerationMap struct {
+	// state must be accessed using the atomic package and should be at the beginning of the struct.
+	// - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG
+	// - suggested layout: https://go101.org/article/memory-layout.html
+	state         int64
+	generationMap map[primitive.ObjectID]*generationStats
+
+	sync.Mutex
+}
+
+func newPoolGenerationMap() *poolGenerationMap {
+	pgm := &poolGenerationMap{
+		generationMap: make(map[primitive.ObjectID]*generationStats),
+	}
+	pgm.generationMap[primitive.NilObjectID] = &generationStats{}
+	return pgm
+}
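+
+// Typical lifecycle (an illustrative sketch, not part of the public API):
+//
+//	pgm := newPoolGenerationMap()
+//	pgm.connect()
+//	gen := pgm.addConnection(nil) // nil maps to primitive.NilObjectID
+//	pgm.clear(nil)                // a pool clear bumps the generation
+//	stale := pgm.stale(nil, gen)  // true: the connection predates the clear
+//	pgm.removeConnection(nil)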
+
+func (p *poolGenerationMap) connect() {
+	atomic.StoreInt64(&p.state, generationConnected)
+}
+
+func (p *poolGenerationMap) disconnect() {
+	atomic.StoreInt64(&p.state, generationDisconnected)
+}
+
+// addConnection increments the connection count for the generation associated with the given service ID and returns the
+// generation number for the connection.
+func (p *poolGenerationMap) addConnection(serviceIDPtr *primitive.ObjectID) uint64 {
+	serviceID := getServiceID(serviceIDPtr)
+	p.Lock()
+	defer p.Unlock()
+
+	stats, ok := p.generationMap[serviceID]
+	if ok {
+		// If the serviceID is already being tracked, we only need to increment the connection count.
+		stats.numConns++
+		return stats.generation
+	}
+
+	// If the serviceID is untracked, create a new entry with a starting generation number of 0.
+	stats = &generationStats{
+		numConns: 1,
+	}
+	p.generationMap[serviceID] = stats
+	return 0
+}
+
+func (p *poolGenerationMap) removeConnection(serviceIDPtr *primitive.ObjectID) {
+	serviceID := getServiceID(serviceIDPtr)
+	p.Lock()
+	defer p.Unlock()
+
+	stats, ok := p.generationMap[serviceID]
+	if !ok {
+		return
+	}
+
+	// If the serviceID is being tracked, decrement the connection count and delete this serviceID to prevent the map
+	// from growing unboundedly. This case would happen if a server behind a load-balancer was permanently removed
+	// and its connections were pruned after a network error or idle timeout.
+	stats.numConns--
+	if stats.numConns == 0 {
+		delete(p.generationMap, serviceID)
+	}
+}
+
+func (p *poolGenerationMap) clear(serviceIDPtr *primitive.ObjectID) {
+	serviceID := getServiceID(serviceIDPtr)
+	p.Lock()
+	defer p.Unlock()
+
+	if stats, ok := p.generationMap[serviceID]; ok {
+		stats.generation++
+	}
+}
+
+func (p *poolGenerationMap) stale(serviceIDPtr *primitive.ObjectID, knownGeneration uint64) bool {
+	// If the map has been disconnected, all connections should be considered stale to ensure that they're closed.
+	if atomic.LoadInt64(&p.state) == generationDisconnected {
+		return true
+	}
+
+	if generation, ok := p.getGeneration(serviceIDPtr); ok {
+		return knownGeneration < generation
+	}
+	return false
+}
+
+func (p *poolGenerationMap) getGeneration(serviceIDPtr *primitive.ObjectID) (uint64, bool) {
+	serviceID := getServiceID(serviceIDPtr)
+	p.Lock()
+	defer p.Unlock()
+
+	if stats, ok := p.generationMap[serviceID]; ok {
+		return stats.generation, true
+	}
+	return 0, false
+}
+
+func (p *poolGenerationMap) getNumConns(serviceIDPtr *primitive.ObjectID) uint64 {
+	serviceID := getServiceID(serviceIDPtr)
+	p.Lock()
+	defer p.Unlock()
+
+	if stats, ok := p.generationMap[serviceID]; ok {
+		return stats.numConns
+	}
+	return 0
+}
+
+func getServiceID(oid *primitive.ObjectID) primitive.ObjectID {
+	if oid == nil {
+		return primitive.NilObjectID
+	}
+	return *oid
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go
new file mode 100644
index 0000000000000000000000000000000000000000..c7b168dc2c424d88db223135eb8534d2f0ee2426
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go
@@ -0,0 +1,325 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+
+	"github.com/montanaflynn/stats"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+)
+
+const (
+	rttAlphaValue = 0.2
+	minSamples    = 10
+	maxSamples    = 500
+)
+
+type rttConfig struct {
+	// The minimum interval between RTT measurements. The actual interval may be greater if running
+	// the operation takes longer than the interval.
+	interval time.Duration
+
+	// The timeout applied to running the "hello" operation. If the timeout is reached while running
+	// the operation, the RTT sample is discarded. The default is 1 minute.
+	timeout time.Duration
+
+	minRTTWindow       time.Duration
+	createConnectionFn func() *connection
+	createOperationFn  func(driver.Connection) *operation.Hello
+}
+
+type rttMonitor struct {
+	mu sync.RWMutex // mu guards samples, offset, minRTT, rtt90, averageRTT, and averageRTTSet
+
+	// connMu guards connecting and disconnecting. This is necessary since
+	// disconnecting will await the cancellation of a started connection. The
+	// use case for rttMonitor.connect needs to be goroutine safe.
+	connMu        sync.Mutex
+	samples       []time.Duration
+	offset        int
+	minRTT        time.Duration
+	rtt90         time.Duration
+	averageRTT    time.Duration
+	averageRTTSet bool
+
+	closeWg  sync.WaitGroup
+	cfg      *rttConfig
+	ctx      context.Context
+	cancelFn context.CancelFunc
+}
+
+var _ driver.RTTMonitor = &rttMonitor{}
+
+func newRTTMonitor(cfg *rttConfig) *rttMonitor {
+	if cfg.interval <= 0 {
+		panic("RTT monitor interval must be greater than 0")
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	// Determine the number of samples we need to keep to store the minWindow of RTT durations. The
+	// number of samples must be between [10, 500].
+	numSamples := int(math.Max(minSamples, math.Min(maxSamples, float64((cfg.minRTTWindow)/cfg.interval))))
+
+	return &rttMonitor{
+		samples:  make([]time.Duration, numSamples),
+		cfg:      cfg,
+		ctx:      ctx,
+		cancelFn: cancel,
+	}
+}
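+
+// Sizing example (illustrative): with minRTTWindow = 5m and interval = 10s,
+// numSamples = 5m/10s = 30; the result is always clamped to [10, 500].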
+
+func (r *rttMonitor) connect() {
+	r.connMu.Lock()
+	defer r.connMu.Unlock()
+
+	r.closeWg.Add(1)
+
+	go func() {
+		defer r.closeWg.Done()
+
+		r.start()
+	}()
+}
+
+func (r *rttMonitor) disconnect() {
+	r.connMu.Lock()
+	defer r.connMu.Unlock()
+
+	r.cancelFn()
+
+	// Wait for the existing connection to complete.
+	r.closeWg.Wait()
+}
+
+func (r *rttMonitor) start() {
+	var conn *connection
+	defer func() {
+		if conn != nil {
+			// If the connection exists, we need to wait for it to be connected because
+			// conn.connect() and conn.close() cannot be called concurrently. If the connection
+			// wasn't successfully opened, its state was set back to disconnected, so calling
+			// conn.close() will be a no-op.
+			conn.closeConnectContext()
+			conn.wait()
+			_ = conn.close()
+		}
+	}()
+
+	ticker := time.NewTicker(r.cfg.interval)
+	defer ticker.Stop()
+
+	for {
+		// Assign (not redeclare) conn so the deferred cleanup above sees the
+		// connection created in this iteration.
+		conn = r.cfg.createConnectionFn()
+		err := conn.connect(r.ctx)
+
+		// Add an RTT sample from the new connection handshake and start a runHellos() loop if we
+		// successfully established the new connection. Otherwise, close the connection and try to
+		// create another new connection.
+		if err == nil {
+			r.addSample(conn.helloRTT)
+			r.runHellos(conn)
+		}
+
+		// Close any connection here because we're either about to try to create another new
+		// connection or we're about to exit the loop.
+		_ = conn.close()
+
+		// If a connection error happens quickly, always wait for the monitoring interval before
+		// trying to create a new connection so that connections aren't created too rapidly.
+		select {
+		case <-ticker.C:
+		case <-r.ctx.Done():
+			return
+		}
+	}
+}
+
+// runHellos runs "hello" operations in a loop using the provided connection, measuring and
+// recording the operation durations as RTT samples. If it encounters any errors, it returns.
+func (r *rttMonitor) runHellos(conn *connection) {
+	ticker := time.NewTicker(r.cfg.interval)
+	defer ticker.Stop()
+
+	for {
+		// Assume that the connection establishment recorded the first RTT sample, so wait for the
+		// first tick before trying to record another RTT sample.
+		select {
+		case <-ticker.C:
+		case <-r.ctx.Done():
+			return
+		}
+
+		// Create a Context with the operation timeout specified in the RTT monitor config. If a
+		// timeout is not set in the RTT monitor config, default to the connection's
+		// "connectTimeoutMS". The purpose of the timeout is to allow the RTT monitor to continue
+		// monitoring server RTTs after an operation gets stuck. An operation can get stuck if the
+		// server or a proxy stops responding to requests on the RTT connection but does not close
+		// the TCP socket, effectively creating an operation that will never complete. We expect
+		// that "connectTimeoutMS" provides at least enough time for a single round trip.
+		timeout := r.cfg.timeout
+		if timeout <= 0 {
+			timeout = conn.config.connectTimeout
+		}
+		ctx, cancel := context.WithTimeout(r.ctx, timeout)
+
+		start := time.Now()
+		err := r.cfg.createOperationFn(initConnection{conn}).Execute(ctx)
+		cancel()
+		if err != nil {
+			return
+		}
+		// Only record a sample if the "hello" operation was successful. If it was not successful,
+		// the operation may not have actually performed a complete round trip, so the duration may
+		// be artificially short.
+		r.addSample(time.Since(start))
+	}
+}
+
+// reset sets the average and min RTT to 0. This should only be called from the server monitor when an error
+// occurs during a server check. Errors in the RTT monitor should not reset the RTTs.
+func (r *rttMonitor) reset() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	for i := range r.samples {
+		r.samples[i] = 0
+	}
+	r.offset = 0
+	r.minRTT = 0
+	r.rtt90 = 0
+	r.averageRTT = 0
+	r.averageRTTSet = false
+}
+
+func (r *rttMonitor) addSample(rtt time.Duration) {
+	// Lock for the duration of this method. We're doing computationally inexpensive work very infrequently, so lock
+	// contention isn't expected.
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	r.samples[r.offset] = rtt
+	r.offset = (r.offset + 1) % len(r.samples)
+	// Set the minRTT and 90th percentile RTT of all collected samples. Require at least 10 samples before
+	// setting these to prevent noisy samples on startup from artificially increasing RTT and to allow the
+	// calculation of a 90th percentile.
+	r.minRTT = min(r.samples, minSamples)
+	r.rtt90 = percentile(90.0, r.samples, minSamples)
+
+	if !r.averageRTTSet {
+		r.averageRTT = rtt
+		r.averageRTTSet = true
+		return
+	}
+
+	r.averageRTT = time.Duration(rttAlphaValue*float64(rtt) + (1-rttAlphaValue)*float64(r.averageRTT))
+}
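+
+// EWMA worked example (illustrative numbers): with rttAlphaValue = 0.2, a prior
+// averageRTT of 100ms and a new 200ms sample give 0.2*200ms + 0.8*100ms = 120ms.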
+
+// min returns the minimum value of the slice of duration samples. Zero values are not considered
+// samples and are ignored. If no samples or fewer than minSamples are found in the slice, min
+// returns 0.
+func min(samples []time.Duration, minSamples int) time.Duration {
+	count := 0
+	min := time.Duration(math.MaxInt64)
+	for _, d := range samples {
+		if d > 0 {
+			count++
+		}
+		if d > 0 && d < min {
+			min = d
+		}
+	}
+	if count == 0 || count < minSamples {
+		return 0
+	}
+	return min
+}
+
+// percentile returns the specified percentile value of the slice of duration samples. Zero values
+// are not considered samples and are ignored. If no samples or fewer than minSamples are found
+// in the slice, percentile returns 0.
+func percentile(perc float64, samples []time.Duration, minSamples int) time.Duration {
+	// Convert Durations to float64s.
+	floatSamples := make([]float64, 0, len(samples))
+	for _, sample := range samples {
+		if sample > 0 {
+			floatSamples = append(floatSamples, float64(sample))
+		}
+	}
+	if len(floatSamples) == 0 || len(floatSamples) < minSamples {
+		return 0
+	}
+
+	p, err := stats.Percentile(floatSamples, perc)
+	if err != nil {
+		panic(fmt.Errorf("x/mongo/driver/topology: error calculating %f percentile RTT: %w for samples:\n%v", perc, err, floatSamples))
+	}
+	return time.Duration(p)
+}
+
+// EWMA returns the exponentially weighted moving average observed round-trip time.
+func (r *rttMonitor) EWMA() time.Duration {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	return r.averageRTT
+}
+
+// Min returns the minimum observed round-trip time over the window period.
+func (r *rttMonitor) Min() time.Duration {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	return r.minRTT
+}
+
+// P90 returns the 90th percentile observed round-trip time over the window period.
+func (r *rttMonitor) P90() time.Duration {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	return r.rtt90
+}
+
+// Stats returns stringified stats of the current state of the monitor.
+func (r *rttMonitor) Stats() string {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	// Calculate standard deviation and average (non-EWMA) of samples.
+	var sum float64
+	floatSamples := make([]float64, 0, len(r.samples))
+	for _, sample := range r.samples {
+		if sample > 0 {
+			floatSamples = append(floatSamples, float64(sample))
+			sum += float64(sample)
+		}
+	}
+
+	var avg, stdDev float64
+	if len(floatSamples) > 0 {
+		avg = sum / float64(len(floatSamples))
+
+		var err error
+		stdDev, err = stats.StandardDeviation(floatSamples)
+		if err != nil {
+			panic(fmt.Errorf("x/mongo/driver/topology: error calculating standard deviation RTT: %w for samples:\n%v", err, floatSamples))
+		}
+	}
+
+	return fmt.Sprintf(
+		"network round-trip time stats: avg: %v, min: %v, 90th pct: %v, stddev: %v",
+		time.Duration(avg),
+		r.minRTT,
+		r.rtt90,
+		time.Duration(stdDev))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..0623632672a8bbe10f089569d20d7038b472a264
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go
@@ -0,0 +1,1132 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/driverutil"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+)
+
+const minHeartbeatInterval = 500 * time.Millisecond
+const wireVersion42 = 8 // Wire version for MongoDB 4.2
+
+// Server state constants.
+const (
+	serverDisconnected int64 = iota
+	serverDisconnecting
+	serverConnected
+)
+
+func serverStateString(state int64) string {
+	switch state {
+	case serverDisconnected:
+		return "Disconnected"
+	case serverDisconnecting:
+		return "Disconnecting"
+	case serverConnected:
+		return "Connected"
+	}
+
+	return ""
+}
+
+var (
+	// ErrServerClosed occurs when an attempt to Get a connection is made after
+	// the server has been closed.
+	ErrServerClosed = errors.New("server is closed")
+	// ErrServerConnected occurs when an attempt to Connect is made after a server
+	// has already been connected.
+	ErrServerConnected = errors.New("server is connected")
+
+	errCheckCancelled = errors.New("server check cancelled")
+	emptyDescription  = description.NewDefaultServer("")
+)
+
+// SelectedServer represents a specific server that was selected during server selection.
+// It contains the kind of the topology it was selected from.
+type SelectedServer struct {
+	*Server
+
+	Kind description.TopologyKind
+}
+
+// Description returns a description of the server as of the last heartbeat.
+func (ss *SelectedServer) Description() description.SelectedServer {
+	sdesc := ss.Server.Description()
+	return description.SelectedServer{
+		Server: sdesc,
+		Kind:   ss.Kind,
+	}
+}
+
+// Server is a single server within a topology.
+type Server struct {
+	// The following integer fields must be accessed using the atomic package and should be at the
+	// beginning of the struct.
+	// - atomic bug: https://pkg.go.dev/sync/atomic#pkg-note-BUG
+	// - suggested layout: https://go101.org/article/memory-layout.html
+
+	state          int64
+	operationCount int64
+
+	cfg     *serverConfig
+	address address.Address
+
+	// connection related fields
+	pool *pool
+
+	// goroutine management fields
+	done          chan struct{}
+	checkNow      chan struct{}
+	disconnecting chan struct{}
+	closewg       sync.WaitGroup
+
+	// description related fields
+	desc                   atomic.Value // holds a description.Server
+	updateTopologyCallback atomic.Value
+	topologyID             primitive.ObjectID
+
+	// subscriber related fields
+	subLock             sync.Mutex
+	subscribers         map[uint64]chan description.Server
+	currentSubscriberID uint64
+	subscriptionsClosed bool
+
+	// heartbeat and cancellation related fields
+	// globalCtx should be created in NewServer and cancelled in Disconnect to signal that the server is shutting down.
+	// heartbeatCtx should be used for individual heartbeats and should be a child of globalCtx so that it will be
+	// cancelled automatically during shutdown.
+	heartbeatLock      sync.Mutex
+	conn               *connection
+	globalCtx          context.Context
+	globalCtxCancel    context.CancelFunc
+	heartbeatCtx       context.Context
+	heartbeatCtxCancel context.CancelFunc
+
+	processErrorLock sync.Mutex
+	rttMonitor       *rttMonitor
+	monitorOnce      sync.Once
+}
+
+// updateTopologyCallback is a callback used to create a server that should be called when the parent Topology instance
+// should be updated based on a new server description. The callback must return the server description that should be
+// stored by the server.
+type updateTopologyCallback func(description.Server) description.Server
+
+// ConnectServer creates a new Server and then initializes it using the
+// Connect method.
+func ConnectServer(
+	addr address.Address,
+	updateCallback updateTopologyCallback,
+	topologyID primitive.ObjectID,
+	opts ...ServerOption,
+) (*Server, error) {
+	srvr := NewServer(addr, topologyID, opts...)
+	err := srvr.Connect(updateCallback)
+	if err != nil {
+		return nil, err
+	}
+	return srvr, nil
+}
+
+// NewServer creates a new server. The MongoDB server at the address will be monitored
+// on an internal monitoring goroutine.
+func NewServer(addr address.Address, topologyID primitive.ObjectID, opts ...ServerOption) *Server {
+	cfg := newServerConfig(opts...)
+	globalCtx, globalCtxCancel := context.WithCancel(context.Background())
+	s := &Server{
+		state: serverDisconnected,
+
+		cfg:     cfg,
+		address: addr,
+
+		done:          make(chan struct{}),
+		checkNow:      make(chan struct{}, 1),
+		disconnecting: make(chan struct{}),
+
+		topologyID: topologyID,
+
+		subscribers:     make(map[uint64]chan description.Server),
+		globalCtx:       globalCtx,
+		globalCtxCancel: globalCtxCancel,
+	}
+	s.desc.Store(description.NewDefaultServer(addr))
+	rttCfg := &rttConfig{
+		interval:           cfg.heartbeatInterval,
+		minRTTWindow:       5 * time.Minute,
+		createConnectionFn: s.createConnection,
+		createOperationFn:  s.createBaseOperation,
+	}
+	s.rttMonitor = newRTTMonitor(rttCfg)
+
+	pc := poolConfig{
+		Address:          addr,
+		MinPoolSize:      cfg.minConns,
+		MaxPoolSize:      cfg.maxConns,
+		MaxConnecting:    cfg.maxConnecting,
+		MaxIdleTime:      cfg.poolMaxIdleTime,
+		MaintainInterval: cfg.poolMaintainInterval,
+		LoadBalanced:     cfg.loadBalanced,
+		PoolMonitor:      cfg.poolMonitor,
+		Logger:           cfg.logger,
+		handshakeErrFn:   s.ProcessHandshakeError,
+	}
+
+	connectionOpts := copyConnectionOpts(cfg.connectionOpts)
+	s.pool = newPool(pc, connectionOpts...)
+	s.publishServerOpeningEvent(s.address)
+
+	return s
+}
+
+func mustLogServerMessage(srv *Server) bool {
+	return srv.cfg.logger != nil && srv.cfg.logger.LevelComponentEnabled(
+		logger.LevelDebug, logger.ComponentTopology)
+}
+
+func logServerMessage(srv *Server, msg string, keysAndValues ...interface{}) {
+	serverHost, serverPort, err := net.SplitHostPort(srv.address.String())
+	if err != nil {
+		serverHost = srv.address.String()
+		serverPort = ""
+	}
+
+	var driverConnectionID uint64
+	var serverConnectionID *int64
+
+	if srv.conn != nil {
+		driverConnectionID = srv.conn.driverConnectionID
+		serverConnectionID = srv.conn.serverConnectionID
+	}
+
+	srv.cfg.logger.Print(logger.LevelDebug,
+		logger.ComponentTopology,
+		msg,
+		logger.SerializeServer(logger.Server{
+			DriverConnectionID: driverConnectionID,
+			TopologyID:         srv.topologyID,
+			Message:            msg,
+			ServerConnectionID: serverConnectionID,
+			ServerHost:         serverHost,
+			ServerPort:         serverPort,
+		}, keysAndValues...)...)
+}
+
+// Connect initializes the Server by starting background monitoring goroutines.
+// This method must be called before a Server can be used.
+func (s *Server) Connect(updateCallback updateTopologyCallback) error {
+	if !atomic.CompareAndSwapInt64(&s.state, serverDisconnected, serverConnected) {
+		return ErrServerConnected
+	}
+
+	desc := description.NewDefaultServer(s.address)
+	if s.cfg.loadBalanced {
+		// LBs automatically start off with kind LoadBalancer because there is no monitoring routine for state changes.
+		desc.Kind = description.LoadBalancer
+	}
+	s.desc.Store(desc)
+	s.updateTopologyCallback.Store(updateCallback)
+
+	if !s.cfg.monitoringDisabled && !s.cfg.loadBalanced {
+		s.closewg.Add(1)
+		go s.update()
+	}
+
+	// The CMAP spec describes that pools should only be marked "ready" when the server description
+	// is updated to something other than "Unknown". However, we maintain the previous Server
+	// behavior here and immediately mark the pool as ready during Connect() to simplify and speed
+	// up the Client startup behavior. The risk of marking a pool as ready proactively during
+	// Connect() is that we could attempt to create connections to a server that was configured
+	// erroneously until the first server check or checkOut() failure occurs, when the SDAM error
+	// handler would transition the Server back to "Unknown" and set the pool to "paused".
+	return s.pool.ready()
+}
+
+// Disconnect closes sockets to the server referenced by this Server.
+// Subscriptions to this Server will be closed. Disconnect will shut down
+// any monitoring goroutines, close the idle connection pool, and will
+// wait until all in-use connections have been returned to the connection
+// pool and are closed before returning. If the context expires via
+// cancellation, deadline, or timeout before the in-use connections have been
+// returned, the in-use connections will be closed, resulting in the failure of
+// any in-flight read or write operations. If this method returns with no
+// errors, all connections associated with this Server have been closed.
+func (s *Server) Disconnect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt64(&s.state, serverConnected, serverDisconnecting) {
+		return ErrServerClosed
+	}
+
+	s.updateTopologyCallback.Store((updateTopologyCallback)(nil))
+
+	// Cancel the global context so any new contexts created from it will be automatically cancelled. Close the done
+	// channel so the update() routine will know that it can stop. Cancel any in-progress monitoring checks at the end.
+	// The done channel is closed before cancelling the check so the update routine() will immediately detect that it
+	// can stop rather than trying to create new connections until the read from done succeeds.
+	s.globalCtxCancel()
+	close(s.done)
+	s.cancelCheck()
+
+	s.pool.close(ctx)
+
+	s.closewg.Wait()
+	s.rttMonitor.disconnect()
+	atomic.StoreInt64(&s.state, serverDisconnected)
+
+	return nil
+}
+
+// Connection gets a connection to the server.
+func (s *Server) Connection(ctx context.Context) (driver.Connection, error) {
+	if atomic.LoadInt64(&s.state) != serverConnected {
+		return nil, ErrServerClosed
+	}
+
+	// Increment the operation count before calling checkOut to make sure that all connection
+	// requests are included in the operation count, including those in the wait queue. If we got an
+	// error instead of a connection, immediately decrement the operation count.
+	atomic.AddInt64(&s.operationCount, 1)
+	conn, err := s.pool.checkOut(ctx)
+	if err != nil {
+		atomic.AddInt64(&s.operationCount, -1)
+		return nil, err
+	}
+
+	return &Connection{
+		connection: conn,
+		cleanupServerFn: func() {
+			// Decrement the operation count whenever the caller is done with the connection. Note
+			// that cleanupServerFn() is not called while the connection is pinned to a cursor or
+			// transaction, so the operation count is not decremented until the cursor is closed or
+			// the transaction is committed or aborted. Use an int64 instead of a uint64 to mitigate
+			// the impact of any possible bugs that could cause the uint64 to underflow, which would
+			// make the server much less selectable.
+			atomic.AddInt64(&s.operationCount, -1)
+		},
+	}, nil
+}
+
+// ProcessHandshakeError implements SDAM error handling for errors that occur before a connection
+// finishes handshaking.
+func (s *Server) ProcessHandshakeError(err error, startingGenerationNumber uint64, serviceID *primitive.ObjectID) {
+	// Ignore the error if the server is behind a load balancer but the service ID is unknown. This indicates that the
+	// error happened when dialing the connection or during the MongoDB handshake, so we don't know the service ID to
+	// use for clearing the pool.
+	if err == nil || s.cfg.loadBalanced && serviceID == nil {
+		return
+	}
+	// Ignore the error if the connection is stale.
+	if generation, _ := s.pool.generation.getGeneration(serviceID); startingGenerationNumber < generation {
+		return
+	}
+
+	// Unwrap any connection errors. If there is no wrapped connection error, then the error should
+	// not result in any Server state change (e.g. a command error from the database).
+	wrappedConnErr := unwrapConnectionError(err)
+	if wrappedConnErr == nil {
+		return
+	}
+
+	// Must hold the processErrorLock while updating the server description and clearing the pool.
+	// Not holding the lock leads to possible out-of-order processing of pool.clear() and
+	// pool.ready() calls from concurrent server description updates.
+	s.processErrorLock.Lock()
+	defer s.processErrorLock.Unlock()
+
+	// Since the only kind of ConnectionError we receive from pool.Get will be an initialization error, we should set
+	// the description.Server appropriately. The description should not have a TopologyVersion because the staleness
+	// checking logic above has already determined that this description is not stale.
+	s.updateDescription(description.NewServerFromError(s.address, wrappedConnErr, nil))
+	s.pool.clear(err, serviceID)
+	s.cancelCheck()
+}
+
+// Description returns a description of the server as of the last heartbeat.
+func (s *Server) Description() description.Server {
+	return s.desc.Load().(description.Server)
+}
+
+// SelectedDescription returns a description.SelectedServer with a Kind of
+// Single. This can be used when performing tasks like monitoring a batch
+// of servers and you want to run one-off commands against those servers.
+func (s *Server) SelectedDescription() description.SelectedServer {
+	sdesc := s.Description()
+	return description.SelectedServer{
+		Server: sdesc,
+		Kind:   description.Single,
+	}
+}
+
+// Subscribe returns a ServerSubscription which has a channel on which all
+// updated server descriptions will be sent. The channel will have a buffer
+// size of one, and will be pre-populated with the current description.
+func (s *Server) Subscribe() (*ServerSubscription, error) {
+	if atomic.LoadInt64(&s.state) != serverConnected {
+		return nil, ErrSubscribeAfterClosed
+	}
+	ch := make(chan description.Server, 1)
+	ch <- s.desc.Load().(description.Server)
+
+	s.subLock.Lock()
+	defer s.subLock.Unlock()
+	if s.subscriptionsClosed {
+		return nil, ErrSubscribeAfterClosed
+	}
+	id := s.currentSubscriberID
+	s.subscribers[id] = ch
+	s.currentSubscriberID++
+
+	ss := &ServerSubscription{
+		C:  ch,
+		s:  s,
+		id: id,
+	}
+
+	return ss, nil
+}
+
+// RequestImmediateCheck will cause the server to send a heartbeat immediately
+// instead of waiting for the heartbeat timeout.
+func (s *Server) RequestImmediateCheck() {
+	select {
+	case s.checkNow <- struct{}{}:
+	default:
+	}
+}
+
+// getWriteConcernErrorForProcessing extracts a driver.WriteConcernError from the provided error. This function returns
+// (error, true) if the error is a WriteConcernError and falls under the requirements for SDAM error
+// handling and (nil, false) otherwise.
+func getWriteConcernErrorForProcessing(err error) (*driver.WriteConcernError, bool) {
+	var writeCmdErr driver.WriteCommandError
+	if !errors.As(err, &writeCmdErr) {
+		return nil, false
+	}
+
+	wcerr := writeCmdErr.WriteConcernError
+	if wcerr != nil && (wcerr.NodeIsRecovering() || wcerr.NotPrimary()) {
+		return wcerr, true
+	}
+	return nil, false
+}
+
+// ProcessError handles SDAM error handling and implements driver.ErrorProcessor.
+func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessErrorResult {
+	// Ignore nil errors.
+	if err == nil {
+		return driver.NoChange
+	}
+
+	// Ignore errors from stale connections because the error came from a previous generation of the
+	// connection pool. The root cause of the error has already been handled, which is what caused
+	// the pool generation to increment. Processing errors for stale connections could result in
+	// handling the same error root cause multiple times (e.g. a temporary network interrupt causing
+	// all connections to the same server to return errors).
+	if conn.Stale() {
+		return driver.NoChange
+	}
+
+	// Must hold the processErrorLock while updating the server description and clearing the pool.
+	// Not holding the lock leads to possible out-of-order processing of pool.clear() and
+	// pool.ready() calls from concurrent server description updates.
+	s.processErrorLock.Lock()
+	defer s.processErrorLock.Unlock()
+
+	// Get the wire version and service ID from the connection description because they will never
+	// change for the lifetime of a connection and can possibly be different between connections to
+	// the same server.
+	connDesc := conn.Description()
+	wireVersion := connDesc.WireVersion
+	serviceID := connDesc.ServiceID
+
+	// Get the topology version from the Server description because the Server description is
+	// updated by heartbeats and errors, so typically has a more up-to-date topology version.
+	serverDesc := s.desc.Load().(description.Server)
+	topologyVersion := serverDesc.TopologyVersion
+
+	// We don't currently update the Server topology version when we create new application
+	// connections, so it's possible for a connection's topology version to be newer than the
+	// Server's topology version. Pick the "newest" of the two topology versions.
+	// Technically a nil topology version on a new database response should be considered a new
+	// topology version and replace the Server's topology version. However, we don't know if the
+	// connection's topology version is based on a new or old database response, so we ignore a nil
+	// topology version on the connection for now.
+	//
+	// TODO(GODRIVER-2841): Remove this logic once we set the Server description when we create
+	// TODO application connections because then the Server's topology version will always be the
+	// TODO latest known.
+	if tv := connDesc.TopologyVersion; tv != nil && topologyVersion.CompareToIncoming(tv) < 0 {
+		topologyVersion = tv
+	}
+
+	// Invalidate server description if not primary or node recovering error occurs.
+	// These errors can be reported as a command error or a write concern error.
+	if cerr, ok := err.(driver.Error); ok && (cerr.NodeIsRecovering() || cerr.NotPrimary()) {
+		// Ignore errors that came from when the database was on a previous topology version.
+		if topologyVersion.CompareToIncoming(cerr.TopologyVersion) >= 0 {
+			return driver.NoChange
+		}
+
+		// updates description to unknown
+		s.updateDescription(description.NewServerFromError(s.address, err, cerr.TopologyVersion))
+		s.RequestImmediateCheck()
+
+		res := driver.ServerMarkedUnknown
+		// If the node is shutting down or is older than 4.2, we synchronously clear the pool
+		if cerr.NodeIsShuttingDown() || wireVersion == nil || wireVersion.Max < wireVersion42 {
+			res = driver.ConnectionPoolCleared
+			s.pool.clear(err, serviceID)
+		}
+
+		return res
+	}
+	if wcerr, ok := getWriteConcernErrorForProcessing(err); ok {
+		// Ignore errors that came from when the database was on a previous topology version.
+		if topologyVersion.CompareToIncoming(wcerr.TopologyVersion) >= 0 {
+			return driver.NoChange
+		}
+
+		// updates description to unknown
+		s.updateDescription(description.NewServerFromError(s.address, err, wcerr.TopologyVersion))
+		s.RequestImmediateCheck()
+
+		res := driver.ServerMarkedUnknown
+		// If the node is shutting down or is older than 4.2, we synchronously clear the pool
+		if wcerr.NodeIsShuttingDown() || wireVersion == nil || wireVersion.Max < wireVersion42 {
+			res = driver.ConnectionPoolCleared
+			s.pool.clear(err, serviceID)
+		}
+		return res
+	}
+
+	wrappedConnErr := unwrapConnectionError(err)
+	if wrappedConnErr == nil {
+		return driver.NoChange
+	}
+
+	// Ignore transient timeout errors.
+	if netErr, ok := wrappedConnErr.(net.Error); ok && netErr.Timeout() {
+		return driver.NoChange
+	}
+	if errors.Is(wrappedConnErr, context.Canceled) || errors.Is(wrappedConnErr, context.DeadlineExceeded) {
+		return driver.NoChange
+	}
+
+	// For a non-timeout network error, we clear the pool, set the description to Unknown, and cancel the in-progress
+	// monitoring check. The check is cancelled last to avoid a post-cancellation reconnect racing with
+	// updateDescription.
+	s.updateDescription(description.NewServerFromError(s.address, err, nil))
+	s.pool.clear(err, serviceID)
+	s.cancelCheck()
+	return driver.ConnectionPoolCleared
+}
+
+// update handles performing heartbeats and notifying any subscribers of the
+// newest description.Server retrieved.
+func (s *Server) update() {
+	defer s.closewg.Done()
+	heartbeatTicker := time.NewTicker(s.cfg.heartbeatInterval)
+	rateLimiter := time.NewTicker(minHeartbeatInterval)
+	defer heartbeatTicker.Stop()
+	defer rateLimiter.Stop()
+	checkNow := s.checkNow
+	done := s.done
+
+	defer logUnexpectedFailure(s.cfg.logger, "Encountered unexpected failure updating server")
+
+	closeServer := func() {
+		s.subLock.Lock()
+		for id, c := range s.subscribers {
+			close(c)
+			delete(s.subscribers, id)
+		}
+		s.subscriptionsClosed = true
+		s.subLock.Unlock()
+
+		// We don't need to take s.heartbeatLock here because closeServer is called synchronously when the select checks
+		// below detect that the server is being closed, so we can be sure that the connection isn't being used.
+		if s.conn != nil {
+			_ = s.conn.close()
+		}
+	}
+
+	waitUntilNextCheck := func() {
+		// Wait until heartbeatFrequency elapses, an application operation requests an immediate check, or the server
+		// is disconnecting.
+		select {
+		case <-heartbeatTicker.C:
+		case <-checkNow:
+		case <-done:
+			// Return because the next update iteration will check the done channel again and clean up.
+			return
+		}
+
+		// Ensure we only return if minHeartbeatFrequency has elapsed or the server is disconnecting.
+		select {
+		case <-rateLimiter.C:
+		case <-done:
+			return
+		}
+	}
+
+	timeoutCnt := 0
+	for {
+		// Check if the server is disconnecting. Even if waitUntilNextCheck has already read from the done channel, we
+		// can safely read from it again because Disconnect closes the channel.
+		select {
+		case <-done:
+			closeServer()
+			return
+		default:
+		}
+
+		previousDescription := s.Description()
+
+		// Perform the next check.
+		desc, err := s.check()
+		if errors.Is(err, errCheckCancelled) {
+			if atomic.LoadInt64(&s.state) != serverConnected {
+				continue
+			}
+
+			// If the server is not disconnecting, the check was cancelled by an application operation after an error.
+			// Wait before running the next check.
+			waitUntilNextCheck()
+			continue
+		}
+
+		if isShortcut := func() bool {
+			// Must hold the processErrorLock while updating the server description and clearing the
+			// pool. Not holding the lock leads to possible out-of-order processing of pool.clear() and
+			// pool.ready() calls from concurrent server description updates.
+			s.processErrorLock.Lock()
+			defer s.processErrorLock.Unlock()
+
+			s.updateDescription(desc)
+			// Retry after the first timeout before clearing the pool in case of a FAAS pause as
+			// described in GODRIVER-2577.
+			if err := unwrapConnectionError(desc.LastError); err != nil && timeoutCnt < 1 {
+				if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+					timeoutCnt++
+					// We want to immediately retry on timeout error. Continue to next loop.
+					return true
+				}
+				if err, ok := err.(net.Error); ok && err.Timeout() {
+					timeoutCnt++
+					// We want to immediately retry on timeout error. Continue to next loop.
+					return true
+				}
+			}
+			if err := desc.LastError; err != nil {
+				// Clear the pool once the description has been updated to Unknown. Pass in a nil service ID to clear
+				// because the monitoring routine only runs for non-load balanced deployments in which servers don't return
+				// IDs.
+				if timeoutCnt > 0 {
+					s.pool.clearAll(err, nil)
+				} else {
+					s.pool.clear(err, nil)
+				}
+			}
+			// We're either not handling a timeout error, or we just handled the 2nd consecutive
+			// timeout error. In either case, reset the timeout count to 0 and return false to
+			// continue the normal check process.
+			timeoutCnt = 0
+			return false
+		}(); isShortcut {
+			continue
+		}
+
+		// If the server supports streaming or we're already streaming, we want to move to streaming the next response
+		// without waiting. If the server has transitioned to Unknown from a network error, we want to do another
+		// check without waiting in case it was a transient error and the server isn't actually down.
+		connectionIsStreaming := s.conn != nil && s.conn.getCurrentlyStreaming()
+		transitionedFromNetworkError := desc.LastError != nil && unwrapConnectionError(desc.LastError) != nil &&
+			previousDescription.Kind != description.Unknown
+
+		if isStreamingEnabled(s) && isStreamable(s) {
+			s.monitorOnce.Do(s.rttMonitor.connect)
+		}
+
+		if isStreamingEnabled(s) && (isStreamable(s) || connectionIsStreaming) || transitionedFromNetworkError {
+			continue
+		}
+
+		// The server either does not support the streamable protocol or is not in a healthy state, so we wait until
+		// the next check.
+		waitUntilNextCheck()
+	}
+}
+
+// updateDescription handles updating the description on the Server, notifying
+// subscribers, and potentially draining the connection pool.
+func (s *Server) updateDescription(desc description.Server) {
+	if s.cfg.loadBalanced {
+		// In load balanced mode, there are no updates from the monitoring routine. For errors encountered in pooled
+		// connections, the server should not be marked Unknown to ensure that the LB remains selectable.
+		return
+	}
+
+	defer logUnexpectedFailure(s.cfg.logger, "Encountered unexpected failure updating server description")
+
+	// Anytime we update the server description to something other than "unknown", set the pool to
+	// "ready". Do this before updating the description so that connections can be checked out as
+	// soon as the server is selectable. If the pool is already ready, this operation is a no-op.
+	// Note that this behavior is roughly consistent with the current Go driver behavior (connects
+	// to all servers, even non-data-bearing nodes) but deviates slightly from CMAP spec, which
+	// specifies a more restricted set of server descriptions and topologies that should mark the
+	// pool ready. We don't have access to the topology here, so prefer the current Go driver
+	// behavior for simplicity.
+	if desc.Kind != description.Unknown {
+		_ = s.pool.ready()
+	}
+
+	// Use the updateTopologyCallback to update the parent Topology and get the description that should be stored.
+	callback, ok := s.updateTopologyCallback.Load().(updateTopologyCallback)
+	if ok && callback != nil {
+		desc = callback(desc)
+	}
+	s.desc.Store(desc)
+
+	s.subLock.Lock()
+	for _, c := range s.subscribers {
+		select {
+		// drain the channel if it isn't empty
+		case <-c:
+		default:
+		}
+		c <- desc
+	}
+	s.subLock.Unlock()
+}
+
+// createConnection creates a new connection instance but does not call connect on it. The caller must call connect
+// before the connection can be used for network operations.
+func (s *Server) createConnection() *connection {
+	opts := copyConnectionOpts(s.cfg.connectionOpts)
+	opts = append(opts,
+		WithConnectTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+		WithReadTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+		WithWriteTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+		// We override whatever handshaker is currently attached to the options with a basic
+		// one because we need to make sure we don't do auth.
+		WithHandshaker(func(Handshaker) Handshaker {
+			return operation.NewHello().AppName(s.cfg.appname).Compressors(s.cfg.compressionOpts).
+				ServerAPI(s.cfg.serverAPI)
+		}),
+		// Override any monitors specified in options with nil to avoid monitoring heartbeats.
+		WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor { return nil }),
+	)
+
+	return newConnection(s.address, opts...)
+}
+
+func copyConnectionOpts(opts []ConnectionOption) []ConnectionOption {
+	optsCopy := make([]ConnectionOption, len(opts))
+	copy(optsCopy, opts)
+	return optsCopy
+}
+
+func (s *Server) setupHeartbeatConnection() error {
+	conn := s.createConnection()
+
+	// Take the lock when assigning the context and connection because they're accessed by cancelCheck.
+	s.heartbeatLock.Lock()
+	if s.heartbeatCtxCancel != nil {
+		// Ensure the previous context is cancelled to avoid a leak.
+		s.heartbeatCtxCancel()
+	}
+	s.heartbeatCtx, s.heartbeatCtxCancel = context.WithCancel(s.globalCtx)
+	s.conn = conn
+	s.heartbeatLock.Unlock()
+
+	return s.conn.connect(s.heartbeatCtx)
+}
+
+// cancelCheck cancels in-progress connection dials and reads. It does not set any fields on the server.
+func (s *Server) cancelCheck() {
+	var conn *connection
+
+	// Take heartbeatLock for mutual exclusion with the checks in the update function.
+	s.heartbeatLock.Lock()
+	if s.heartbeatCtx != nil {
+		s.heartbeatCtxCancel()
+	}
+	conn = s.conn
+	s.heartbeatLock.Unlock()
+
+	if conn == nil {
+		return
+	}
+
+	// If the connection exists, we need to wait for it to be connected because conn.connect() and
+	// conn.close() cannot be called concurrently. If the connection wasn't successfully opened, its
+	// state was set back to disconnected, so calling conn.close() will be a no-op.
+	conn.closeConnectContext()
+	conn.wait()
+	_ = conn.close()
+}
+
+func (s *Server) checkWasCancelled() bool {
+	return s.heartbeatCtx.Err() != nil
+}
+
+func (s *Server) createBaseOperation(conn driver.Connection) *operation.Hello {
+	return operation.
+		NewHello().
+		ClusterClock(s.cfg.clock).
+		Deployment(driver.SingleConnectionDeployment{C: conn}).
+		ServerAPI(s.cfg.serverAPI)
+}
+
+func isStreamingEnabled(srv *Server) bool {
+	switch srv.cfg.serverMonitoringMode {
+	case connstring.ServerMonitoringModeStream:
+		return true
+	case connstring.ServerMonitoringModePoll:
+		return false
+	default:
+		return driverutil.GetFaasEnvName() == ""
+	}
+}
+
+func isStreamable(srv *Server) bool {
+	return srv.Description().Kind != description.Unknown && srv.Description().TopologyVersion != nil
+}
+
+func (s *Server) check() (description.Server, error) {
+	var descPtr *description.Server
+	var err error
+	var duration time.Duration
+
+	start := time.Now()
+
+	// Create a new connection if this is the first check, the connection was closed after an error during the previous
+	// check, or the previous check was cancelled.
+	if s.conn == nil || s.conn.closed() || s.checkWasCancelled() {
+		connID := "0"
+		if s.conn != nil {
+			connID = s.conn.ID()
+		}
+		s.publishServerHeartbeatStartedEvent(connID, false)
+		// Create a new connection and add its handshake RTT as a sample.
+		err = s.setupHeartbeatConnection()
+		duration = time.Since(start)
+		connID = "0"
+		if s.conn != nil {
+			connID = s.conn.ID()
+		}
+		if err == nil {
+			// Use the description from the connection handshake as the value for this check.
+			s.rttMonitor.addSample(s.conn.helloRTT)
+			descPtr = &s.conn.desc
+			s.publishServerHeartbeatSucceededEvent(connID, duration, s.conn.desc, false)
+		} else {
+			err = unwrapConnectionError(err)
+			s.publishServerHeartbeatFailedEvent(connID, duration, err, false)
+		}
+	} else {
+		// An existing connection is being used. Use the server description properties to execute the right heartbeat.
+
+		// Wrap conn in a type that implements driver.StreamerConnection.
+		heartbeatConn := initConnection{s.conn}
+		baseOperation := s.createBaseOperation(heartbeatConn)
+		previousDescription := s.Description()
+		streamable := isStreamingEnabled(s) && isStreamable(s)
+
+		s.publishServerHeartbeatStartedEvent(s.conn.ID(), s.conn.getCurrentlyStreaming() || streamable)
+
+		switch {
+		case s.conn.getCurrentlyStreaming():
+			// The connection is already in a streaming state, so we stream the next response.
+			err = baseOperation.StreamResponse(s.heartbeatCtx, heartbeatConn)
+		case streamable:
+			// The server supports the streamable protocol. Set the socket timeout to
+			// connectTimeoutMS+heartbeatFrequencyMS and execute an awaitable hello request. Set conn.canStream so
+			// the wire message will advertise streaming support to the server.
+
+			// Calculation for maxAwaitTimeMS is taken from time.Duration.Milliseconds (added in Go 1.13).
+			maxAwaitTimeMS := int64(s.cfg.heartbeatInterval) / 1e6
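+			// For example (illustrative), heartbeatInterval = 10s is 1e10 ns, so
+			// maxAwaitTimeMS = 1e10/1e6 = 10000.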
+			// If connectTimeoutMS=0, the socket timeout should be infinite. Otherwise, it is connectTimeoutMS +
+			// heartbeatFrequencyMS to account for the fact that the query will block for heartbeatFrequencyMS
+			// server-side.
+			socketTimeout := s.cfg.heartbeatTimeout
+			if socketTimeout != 0 {
+				socketTimeout += s.cfg.heartbeatInterval
+			}
+			s.conn.setSocketTimeout(socketTimeout)
+			baseOperation = baseOperation.TopologyVersion(previousDescription.TopologyVersion).
+				MaxAwaitTimeMS(maxAwaitTimeMS)
+			s.conn.setCanStream(true)
+			err = baseOperation.Execute(s.heartbeatCtx)
+		default:
+			// The server doesn't support the awaitable protocol. Set the socket timeout to connectTimeoutMS and
+			// execute a regular heartbeat without any additional parameters.
+
+			s.conn.setSocketTimeout(s.cfg.heartbeatTimeout)
+			err = baseOperation.Execute(s.heartbeatCtx)
+		}
+
+		duration = time.Since(start)
+
+		// Record an RTT sample in the polling case so that the RTT-short-circuit
+		// feature of CSOT stays enabled when the server is < 4.4 or when the user
+		// has specified polling.
+		if !streamable {
+			s.rttMonitor.addSample(duration)
+		}
+
+		if err == nil {
+			tempDesc := baseOperation.Result(s.address)
+			descPtr = &tempDesc
+			s.publishServerHeartbeatSucceededEvent(s.conn.ID(), duration, tempDesc, s.conn.getCurrentlyStreaming() || streamable)
+		} else {
+			// Close the connection here rather than below so we ensure we're not closing a connection that wasn't
+			// successfully created.
+			if s.conn != nil {
+				_ = s.conn.close()
+			}
+			s.publishServerHeartbeatFailedEvent(s.conn.ID(), duration, err, s.conn.getCurrentlyStreaming() || streamable)
+		}
+	}
+
+	if descPtr != nil {
+		// The check was successful. Set the average RTT and the 90th percentile RTT and return.
+		desc := *descPtr
+		desc = desc.SetAverageRTT(s.rttMonitor.EWMA())
+		desc.HeartbeatInterval = s.cfg.heartbeatInterval
+		return desc, nil
+	}
+
+	if s.checkWasCancelled() {
+		// If the previous check was cancelled, we don't want to clear the pool. Return a sentinel error so the caller
+		// will know that an actual error didn't occur.
+		return emptyDescription, errCheckCancelled
+	}
+
+	// An error occurred. We reset the RTT monitor for all errors and return an Unknown description. The pool must also
+	// be cleared, but only after the description has already been updated, so that is handled by the caller.
+	topologyVersion := extractTopologyVersion(err)
+	s.rttMonitor.reset()
+	return description.NewServerFromError(s.address, err, topologyVersion), nil
+}
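+
+// To make the awaitable-hello timeout arithmetic above concrete, assuming the
+// package defaults of 10 seconds for both connectTimeoutMS and
+// heartbeatFrequencyMS (a sketch derived from the code above, not additional
+// driver logic):
+//
+//	socketTimeout := 10 * time.Second // s.cfg.heartbeatTimeout (connectTimeoutMS)
+//	if socketTimeout != 0 {
+//		socketTimeout += 10 * time.Second // + heartbeatFrequencyMS => 20s total
+//	}
+//	// A connectTimeoutMS of 0 instead leaves the socket timeout infinite.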
+
+func extractTopologyVersion(err error) *description.TopologyVersion {
+	if ce, ok := err.(ConnectionError); ok {
+		err = ce.Wrapped
+	}
+
+	switch converted := err.(type) {
+	case driver.Error:
+		return converted.TopologyVersion
+	case driver.WriteCommandError:
+		if converted.WriteConcernError != nil {
+			return converted.WriteConcernError.TopologyVersion
+		}
+	}
+
+	return nil
+}
+
+// RTTMonitor returns this server's round-trip-time monitor.
+func (s *Server) RTTMonitor() driver.RTTMonitor {
+	return s.rttMonitor
+}
+
+// OperationCount returns the current number of in-progress operations for this server.
+func (s *Server) OperationCount() int64 {
+	return atomic.LoadInt64(&s.operationCount)
+}
+
+// String implements the Stringer interface.
+func (s *Server) String() string {
+	desc := s.Description()
+	state := atomic.LoadInt64(&s.state)
+	str := fmt.Sprintf("Addr: %s, Type: %s, State: %s",
+		s.address, desc.Kind, serverStateString(state))
+	if len(desc.Tags) != 0 {
+		str += fmt.Sprintf(", Tag sets: %s", desc.Tags)
+	}
+	if state == serverConnected {
+		str += fmt.Sprintf(", Average RTT: %s, Min RTT: %s", desc.AverageRTT, s.RTTMonitor().Min())
+	}
+	if desc.LastError != nil {
+		str += fmt.Sprintf(", Last error: %s", desc.LastError)
+	}
+
+	return str
+}
+
+// ServerSubscription represents a subscription to the description.Server updates for
+// a specific server.
+type ServerSubscription struct {
+	C  <-chan description.Server
+	s  *Server
+	id uint64
+}
+
+// Unsubscribe unsubscribes this ServerSubscription from updates and closes the
+// subscription channel.
+func (ss *ServerSubscription) Unsubscribe() error {
+	ss.s.subLock.Lock()
+	defer ss.s.subLock.Unlock()
+	if ss.s.subscriptionsClosed {
+		return nil
+	}
+
+	ch, ok := ss.s.subscribers[ss.id]
+	if !ok {
+		return nil
+	}
+
+	close(ch)
+	delete(ss.s.subscribers, ss.id)
+
+	return nil
+}
+
+// publishes a ServerOpeningEvent to indicate the server is being initialized
+func (s *Server) publishServerOpeningEvent(addr address.Address) {
+	if s == nil {
+		return
+	}
+
+	serverOpening := &event.ServerOpeningEvent{
+		Address:    addr,
+		TopologyID: s.topologyID,
+	}
+
+	if s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerOpening != nil {
+		s.cfg.serverMonitor.ServerOpening(serverOpening)
+	}
+
+	if mustLogServerMessage(s) {
+		logServerMessage(s, logger.TopologyServerOpening)
+	}
+}
+
+// publishes a ServerHeartbeatStartedEvent to indicate a hello command has started
+func (s *Server) publishServerHeartbeatStartedEvent(connectionID string, await bool) {
+	serverHeartbeatStarted := &event.ServerHeartbeatStartedEvent{
+		ConnectionID: connectionID,
+		Awaited:      await,
+	}
+
+	if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatStarted != nil {
+		s.cfg.serverMonitor.ServerHeartbeatStarted(serverHeartbeatStarted)
+	}
+
+	if mustLogServerMessage(s) {
+		logServerMessage(s, logger.TopologyServerHeartbeatStarted,
+			logger.KeyAwaited, await)
+	}
+}
+
+// publishes a ServerHeartbeatSucceededEvent to indicate hello has succeeded
+func (s *Server) publishServerHeartbeatSucceededEvent(connectionID string,
+	duration time.Duration,
+	desc description.Server,
+	await bool,
+) {
+	serverHeartbeatSucceeded := &event.ServerHeartbeatSucceededEvent{
+		DurationNanos: duration.Nanoseconds(),
+		Duration:      duration,
+		Reply:         desc,
+		ConnectionID:  connectionID,
+		Awaited:       await,
+	}
+
+	if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatSucceeded != nil {
+		s.cfg.serverMonitor.ServerHeartbeatSucceeded(serverHeartbeatSucceeded)
+	}
+
+	if mustLogServerMessage(s) {
+		descRaw, _ := bson.Marshal(struct {
+			description.Server `bson:",inline"`
+			Ok                 int32
+		}{
+			Server: desc,
+			Ok: func() int32 {
+				if desc.LastError != nil {
+					return 0
+				}
+
+				return 1
+			}(),
+		})
+
+		logServerMessage(s, logger.TopologyServerHeartbeatSucceeded,
+			logger.KeyAwaited, await,
+			logger.KeyDurationMS, duration.Milliseconds(),
+			logger.KeyReply, bson.Raw(descRaw).String())
+	}
+}
+
+// publishes a ServerHeartbeatFailedEvent to indicate hello has failed
+func (s *Server) publishServerHeartbeatFailedEvent(connectionID string,
+	duration time.Duration,
+	err error,
+	await bool,
+) {
+	serverHeartbeatFailed := &event.ServerHeartbeatFailedEvent{
+		DurationNanos: duration.Nanoseconds(),
+		Duration:      duration,
+		Failure:       err,
+		ConnectionID:  connectionID,
+		Awaited:       await,
+	}
+
+	if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatFailed != nil {
+		s.cfg.serverMonitor.ServerHeartbeatFailed(serverHeartbeatFailed)
+	}
+
+	if mustLogServerMessage(s) {
+		logServerMessage(s, logger.TopologyServerHeartbeatFailed,
+			logger.KeyAwaited, await,
+			logger.KeyDurationMS, duration.Milliseconds(),
+			logger.KeyFailure, err.Error())
+	}
+}
+
+// unwrapConnectionError returns the connection error wrapped by err, or nil if err does not wrap a connection error.
+func unwrapConnectionError(err error) error {
+	// This is essentially an implementation of errors.As to unwrap this error until we get a ConnectionError and then
+	// return ConnectionError.Wrapped.
+
+	connErr, ok := err.(ConnectionError)
+	if ok {
+		return connErr.Wrapped
+	}
+
+	driverErr, ok := err.(driver.Error)
+	if !ok || !driverErr.NetworkError() {
+		return nil
+	}
+
+	connErr, ok = driverErr.Wrapped.(ConnectionError)
+	if ok {
+		return connErr.Wrapped
+	}
+
+	return nil
+}
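+
+// A rough modern equivalent of the unwrapping above, assuming the wrapped
+// errors participate in Go 1.13 error chains (a sketch only; the hand-rolled
+// version above deliberately avoids that assumption):
+//
+//	var connErr ConnectionError
+//	if errors.As(err, &connErr) {
+//		return connErr.Wrapped
+//	}
+//	return nil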
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..4504a253554c14c4e568f30d95dbba688ea7c6ea
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go
@@ -0,0 +1,220 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsoncodec"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+var defaultRegistry = bson.NewRegistryBuilder().Build()
+
+type serverConfig struct {
+	clock                *session.ClusterClock
+	compressionOpts      []string
+	connectionOpts       []ConnectionOption
+	appname              string
+	heartbeatInterval    time.Duration
+	heartbeatTimeout     time.Duration
+	serverMonitoringMode string
+	serverMonitor        *event.ServerMonitor
+	registry             *bsoncodec.Registry
+	monitoringDisabled   bool
+	serverAPI            *driver.ServerAPIOptions
+	loadBalanced         bool
+
+	// Connection pool options.
+	maxConns             uint64
+	minConns             uint64
+	maxConnecting        uint64
+	poolMonitor          *event.PoolMonitor
+	logger               *logger.Logger
+	poolMaxIdleTime      time.Duration
+	poolMaintainInterval time.Duration
+}
+
+func newServerConfig(opts ...ServerOption) *serverConfig {
+	cfg := &serverConfig{
+		heartbeatInterval: 10 * time.Second,
+		heartbeatTimeout:  10 * time.Second,
+		registry:          defaultRegistry,
+	}
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		opt(cfg)
+	}
+
+	return cfg
+}
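+
+// Illustrative usage of the functional options defined below (a sketch, not
+// additional driver code): each option receives the current value and returns
+// the new one, so an option can either override or derive from the existing
+// configuration.
+//
+//	cfg := newServerConfig(
+//		WithHeartbeatInterval(func(time.Duration) time.Duration {
+//			return 5 * time.Second
+//		}),
+//		WithMaxConnections(func(uint64) uint64 { return 100 }),
+//	)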
+
+// ServerOption configures a server.
+type ServerOption func(*serverConfig)
+
+// ServerAPIFromServerOptions will return the server API options if they have been functionally set on the ServerOption
+// slice.
+func ServerAPIFromServerOptions(opts []ServerOption) *driver.ServerAPIOptions {
+	return newServerConfig(opts...).serverAPI
+}
+
+func withMonitoringDisabled(fn func(bool) bool) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.monitoringDisabled = fn(cfg.monitoringDisabled)
+	}
+}
+
+// WithConnectionOptions configures the server's connections.
+func WithConnectionOptions(fn func(...ConnectionOption) []ConnectionOption) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.connectionOpts = fn(cfg.connectionOpts...)
+	}
+}
+
+// WithCompressionOptions configures the server's compressors.
+func WithCompressionOptions(fn func(...string) []string) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.compressionOpts = fn(cfg.compressionOpts...)
+	}
+}
+
+// WithServerAppName configures the server's application name.
+func WithServerAppName(fn func(string) string) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.appname = fn(cfg.appname)
+	}
+}
+
+// WithHeartbeatInterval configures a server's heartbeat interval.
+func WithHeartbeatInterval(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.heartbeatInterval = fn(cfg.heartbeatInterval)
+	}
+}
+
+// WithHeartbeatTimeout configures how long to wait for a heartbeat socket to
+// connect.
+func WithHeartbeatTimeout(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.heartbeatTimeout = fn(cfg.heartbeatTimeout)
+	}
+}
+
+// WithMaxConnections configures the maximum number of connections to allow for
+// a given server. If max is 0, then the maximum connection pool size is not limited.
+func WithMaxConnections(fn func(uint64) uint64) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.maxConns = fn(cfg.maxConns)
+	}
+}
+
+// WithMinConnections configures the minimum number of connections to allow for
+// a given server. If min is 0, then there is no lower limit to the number of
+// connections.
+func WithMinConnections(fn func(uint64) uint64) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.minConns = fn(cfg.minConns)
+	}
+}
+
+// WithMaxConnecting configures the maximum number of connections a connection
+// pool may establish simultaneously. If maxConnecting is 0, the default value
+// of 2 is used.
+func WithMaxConnecting(fn func(uint64) uint64) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.maxConnecting = fn(cfg.maxConnecting)
+	}
+}
+
+// WithConnectionPoolMaxIdleTime configures the maximum time that a connection can remain idle in the connection pool
+// before being removed. If connectionPoolMaxIdleTime is 0, then no idle time is set and connections will not be removed
+// because of their age.
+func WithConnectionPoolMaxIdleTime(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.poolMaxIdleTime = fn(cfg.poolMaxIdleTime)
+	}
+}
+
+// WithConnectionPoolMaintainInterval configures the interval that the background connection pool
+// maintenance goroutine runs.
+func WithConnectionPoolMaintainInterval(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.poolMaintainInterval = fn(cfg.poolMaintainInterval)
+	}
+}
+
+// WithConnectionPoolMonitor configures the monitor for all connection pool actions
+func WithConnectionPoolMonitor(fn func(*event.PoolMonitor) *event.PoolMonitor) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.poolMonitor = fn(cfg.poolMonitor)
+	}
+}
+
+// WithServerMonitor configures the monitor for all SDAM events for a server
+func WithServerMonitor(fn func(*event.ServerMonitor) *event.ServerMonitor) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.serverMonitor = fn(cfg.serverMonitor)
+	}
+}
+
+// WithClock configures the ClusterClock for the server to use.
+func WithClock(fn func(clock *session.ClusterClock) *session.ClusterClock) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.clock = fn(cfg.clock)
+	}
+}
+
+// WithRegistry configures the registry for the server to use when creating
+// cursors.
+func WithRegistry(fn func(*bsoncodec.Registry) *bsoncodec.Registry) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.registry = fn(cfg.registry)
+	}
+}
+
+// WithServerAPI configures the server API options for the server to use.
+func WithServerAPI(fn func(serverAPI *driver.ServerAPIOptions) *driver.ServerAPIOptions) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.serverAPI = fn(cfg.serverAPI)
+	}
+}
+
+// WithServerLoadBalanced specifies whether or not the server is behind a load balancer.
+func WithServerLoadBalanced(fn func(bool) bool) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.loadBalanced = fn(cfg.loadBalanced)
+	}
+}
+
+// withLogger configures the logger for the server to use.
+func withLogger(fn func() *logger.Logger) ServerOption {
+	return func(cfg *serverConfig) {
+		cfg.logger = fn()
+	}
+}
+
+// withServerMonitoringMode configures the mode (stream, poll, or auto) to use
+// for monitoring.
+func withServerMonitoringMode(mode *string) ServerOption {
+	return func(cfg *serverConfig) {
+		if mode != nil {
+			cfg.serverMonitoringMode = *mode
+
+			return
+		}
+
+		cfg.serverMonitoringMode = connstring.ServerMonitoringModeAuto
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go
new file mode 100644
index 0000000000000000000000000000000000000000..387f2ec04d62632bab544fb567e04a419e6ecbe5
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go
@@ -0,0 +1,58 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build !go1.17
+// +build !go1.17
+
+package topology
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+)
+
+type tlsConn interface {
+	net.Conn
+
+	// Only require Handshake on the interface for Go 1.16 and earlier.
+	Handshake() error
+	ConnectionState() tls.ConnectionState
+}
+
+var _ tlsConn = (*tls.Conn)(nil)
+
+type tlsConnectionSource interface {
+	Client(net.Conn, *tls.Config) tlsConn
+}
+
+type tlsConnectionSourceFn func(net.Conn, *tls.Config) tlsConn
+
+var _ tlsConnectionSource = (tlsConnectionSourceFn)(nil)
+
+func (t tlsConnectionSourceFn) Client(nc net.Conn, cfg *tls.Config) tlsConn {
+	return t(nc, cfg)
+}
+
+var defaultTLSConnectionSource tlsConnectionSourceFn = func(nc net.Conn, cfg *tls.Config) tlsConn {
+	return tls.Client(nc, cfg)
+}
+
+// clientHandshake performs the handshake in a goroutine and waits for its completion on Go 1.16 and earlier,
+// where HandshakeContext is not available.
+func clientHandshake(ctx context.Context, client tlsConn) error {
+	errChan := make(chan error, 1)
+	go func() {
+		errChan <- client.Handshake()
+	}()
+
+	select {
+	case err := <-errChan:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
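+
+// Note that with this pre-1.17 pattern the goroutine is not interrupted when
+// ctx is done; it keeps running until Handshake returns and then exits via the
+// buffered channel. The same idiom works for any blocking call (illustrative
+// sketch; blockingCall is a hypothetical stand-in):
+//
+//	errChan := make(chan error, 1) // buffered so the goroutine never blocks on send
+//	go func() { errChan <- blockingCall() }()
+//	select {
+//	case err := <-errChan:
+//		return err
+//	case <-ctx.Done():
+//		return ctx.Err()
+//	}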
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9822e06090b8d5918ced49876fdd33da6f7f371
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go
@@ -0,0 +1,47 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//go:build go1.17
+// +build go1.17
+
+package topology
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+)
+
+type tlsConn interface {
+	net.Conn
+
+	// Require HandshakeContext on the interface for Go 1.17 and higher.
+	HandshakeContext(ctx context.Context) error
+	ConnectionState() tls.ConnectionState
+}
+
+var _ tlsConn = (*tls.Conn)(nil)
+
+type tlsConnectionSource interface {
+	Client(net.Conn, *tls.Config) tlsConn
+}
+
+type tlsConnectionSourceFn func(net.Conn, *tls.Config) tlsConn
+
+var _ tlsConnectionSource = (tlsConnectionSourceFn)(nil)
+
+func (t tlsConnectionSourceFn) Client(nc net.Conn, cfg *tls.Config) tlsConn {
+	return t(nc, cfg)
+}
+
+var defaultTLSConnectionSource tlsConnectionSourceFn = func(nc net.Conn, cfg *tls.Config) tlsConn {
+	return tls.Client(nc, cfg)
+}
+
+// clientHandshake will perform a handshake on Go 1.17 and higher with HandshakeContext.
+func clientHandshake(ctx context.Context, client tlsConn) error {
+	return client.HandshakeContext(ctx)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go
new file mode 100644
index 0000000000000000000000000000000000000000..0fb913d21b25c9cd3acdc339c23a8f0ffc514776
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go
@@ -0,0 +1,1105 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package topology is intended for internal use only. It is made available to
+// facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+//
+// Package topology contains types that handle the discovery, monitoring, and
+// selection of servers. This package is designed to expose enough inner
+// workings of service discovery and monitoring to allow low level applications
+// to have fine grained control, while hiding most of the detailed
+// implementation of the algorithms.
+package topology // import "go.mongodb.org/mongo-driver/x/mongo/driver/topology"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/internal/randutil"
+	"go.mongodb.org/mongo-driver/mongo/address"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/dns"
+)
+
+// Topology state constants.
+const (
+	topologyDisconnected int64 = iota
+	topologyDisconnecting
+	topologyConnected
+	topologyConnecting
+)
+
+// ErrSubscribeAfterClosed is returned when a user attempts to subscribe to a
+// closed Server or Topology.
+var ErrSubscribeAfterClosed = errors.New("cannot subscribe after closeConnection")
+
+// ErrTopologyClosed is returned when a user attempts to call a method on a
+// closed Topology.
+var ErrTopologyClosed = errors.New("topology is closed")
+
+// ErrTopologyConnected is returned when a user attempts to Connect to an
+// already connected Topology.
+var ErrTopologyConnected = errors.New("topology is connected or connecting")
+
+// ErrServerSelectionTimeout is returned from server selection when the server
+// selection process took longer than allowed by the timeout.
+var ErrServerSelectionTimeout = errors.New("server selection timeout")
+
+// MonitorMode represents the way in which a server is monitored.
+type MonitorMode uint8
+
+// random is a package-global pseudo-random number generator.
+var random = randutil.NewLockedRand()
+
+// These constants are the available monitoring modes.
+const (
+	AutomaticMode MonitorMode = iota
+	SingleMode
+)
+
+// Topology represents a MongoDB deployment.
+type Topology struct {
+	state int64
+
+	cfg *Config
+
+	desc atomic.Value // holds a description.Topology
+
+	dnsResolver *dns.Resolver
+
+	done chan struct{}
+
+	pollingRequired   bool
+	pollingDone       chan struct{}
+	pollingwg         sync.WaitGroup
+	rescanSRVInterval time.Duration
+	pollHeartbeatTime atomic.Value // holds a bool
+
+	hosts []string
+
+	updateCallback updateTopologyCallback
+	fsm            *fsm
+
+	// This should really be encapsulated into its own type. This will likely
+	// require a redesign so we can share a minimum of data between the
+	// subscribers and the topology.
+	subscribers         map[uint64]chan description.Topology
+	currentSubscriberID uint64
+	subscriptionsClosed bool
+	subLock             sync.Mutex
+
+	// We should redesign how we Connect and handle individual servers. This is
+	// too difficult to maintain and it's rather easy to accidentally access
+	// the servers without acquiring the lock or checking if the servers are
+	// closed. This lock should also be an RWMutex.
+	serversLock   sync.Mutex
+	serversClosed bool
+	servers       map[address.Address]*Server
+
+	id primitive.ObjectID
+}
+
+var (
+	_ driver.Deployment = &Topology{}
+	_ driver.Subscriber = &Topology{}
+)
+
+type serverSelectionState struct {
+	selector    description.ServerSelector
+	timeoutChan <-chan time.Time
+}
+
+func newServerSelectionState(selector description.ServerSelector, timeoutChan <-chan time.Time) serverSelectionState {
+	return serverSelectionState{
+		selector:    selector,
+		timeoutChan: timeoutChan,
+	}
+}
+
+// New creates a new topology. A "nil" config is interpreted as the default configuration.
+func New(cfg *Config) (*Topology, error) {
+	if cfg == nil {
+		var err error
+		cfg, err = NewConfig(options.Client(), nil)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	t := &Topology{
+		cfg:               cfg,
+		done:              make(chan struct{}),
+		pollingDone:       make(chan struct{}),
+		rescanSRVInterval: 60 * time.Second,
+		fsm:               newFSM(),
+		subscribers:       make(map[uint64]chan description.Topology),
+		servers:           make(map[address.Address]*Server),
+		dnsResolver:       dns.DefaultResolver,
+		id:                primitive.NewObjectID(),
+	}
+	t.desc.Store(description.Topology{})
+	t.updateCallback = func(desc description.Server) description.Server {
+		return t.apply(context.TODO(), desc)
+	}
+
+	if t.cfg.URI != "" {
+		connStr, err := connstring.Parse(t.cfg.URI)
+		if err != nil {
+			return nil, err
+		}
+		t.pollingRequired = (connStr.Scheme == connstring.SchemeMongoDBSRV) && !t.cfg.LoadBalanced
+		t.hosts = connStr.RawHosts
+	}
+
+	t.publishTopologyOpeningEvent()
+
+	return t, nil
+}
+
+func mustLogTopologyMessage(topo *Topology, level logger.Level) bool {
+	return topo.cfg.logger != nil && topo.cfg.logger.LevelComponentEnabled(
+		level, logger.ComponentTopology)
+}
+
+func logTopologyMessage(topo *Topology, level logger.Level, msg string, keysAndValues ...interface{}) {
+	topo.cfg.logger.Print(level,
+		logger.ComponentTopology,
+		msg,
+		logger.SerializeTopology(logger.Topology{
+			ID:      topo.id,
+			Message: msg,
+		}, keysAndValues...)...)
+}
+
+func logTopologyThirdPartyUsage(topo *Topology, parsedHosts []string) {
+	thirdPartyMessages := [2]string{
+		`You appear to be connected to a CosmosDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb`,
+		`You appear to be connected to a DocumentDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/documentdb`,
+	}
+
+	thirdPartySuffixes := map[string]int{
+		".cosmos.azure.com":            0,
+		".docdb.amazonaws.com":         1,
+		".docdb-elastic.amazonaws.com": 1,
+	}
+
+	hostSet := make([]bool, len(thirdPartyMessages))
+	for _, host := range parsedHosts {
+		if h, _, err := net.SplitHostPort(host); err == nil {
+			host = h
+		}
+		for suffix, env := range thirdPartySuffixes {
+			if !strings.HasSuffix(host, suffix) {
+				continue
+			}
+			if hostSet[env] {
+				break
+			}
+			hostSet[env] = true
+			logTopologyMessage(topo, logger.LevelInfo, thirdPartyMessages[env])
+		}
+	}
+}
+
+func mustLogServerSelection(topo *Topology, level logger.Level) bool {
+	return topo.cfg.logger != nil && topo.cfg.logger.LevelComponentEnabled(
+		level, logger.ComponentServerSelection)
+}
+
+func logServerSelection(
+	ctx context.Context,
+	topo *Topology,
+	level logger.Level,
+	msg string,
+	srvSelector description.ServerSelector,
+	keysAndValues ...interface{},
+) {
+	var srvSelectorString string
+
+	selectorStringer, ok := srvSelector.(fmt.Stringer)
+	if ok {
+		srvSelectorString = selectorStringer.String()
+	}
+
+	operationName, _ := logger.OperationName(ctx)
+	operationID, _ := logger.OperationID(ctx)
+
+	topo.cfg.logger.Print(level,
+		logger.ComponentServerSelection,
+		msg,
+		logger.SerializeServerSelection(logger.ServerSelection{
+			Selector:            srvSelectorString,
+			Operation:           operationName,
+			OperationID:         &operationID,
+			TopologyDescription: topo.String(),
+		}, keysAndValues...)...)
+}
+
+func logServerSelectionSucceeded(
+	ctx context.Context,
+	topo *Topology,
+	srvSelector description.ServerSelector,
+	server *SelectedServer,
+) {
+	host, port, err := net.SplitHostPort(server.address.String())
+	if err != nil {
+		host = server.address.String()
+		port = ""
+	}
+
+	portInt64, _ := strconv.ParseInt(port, 10, 32)
+
+	logServerSelection(ctx, topo, logger.LevelDebug, logger.ServerSelectionSucceeded, srvSelector,
+		logger.KeyServerHost, host,
+		logger.KeyServerPort, portInt64)
+}
+
+func logServerSelectionFailed(
+	ctx context.Context,
+	topo *Topology,
+	srvSelector description.ServerSelector,
+	err error,
+) {
+	logServerSelection(ctx, topo, logger.LevelDebug, logger.ServerSelectionFailed, srvSelector,
+		logger.KeyFailure, err.Error())
+}
+
+// logUnexpectedFailure is a defer-recover function for logging unexpected
+// failures encountered while maintaining a topology.
+//
+// Most topology maintenance actions, such as updating a server, should not take
+// down a client's application. This function provides a best-effort attempt to
+// log unexpected failures. If the logger passed to this function is nil, then the
+// recovery will be silent.
+func logUnexpectedFailure(log *logger.Logger, msg string, callbacks ...func()) {
+	r := recover()
+	if r == nil {
+		return
+	}
+
+	defer func() {
+		for _, clbk := range callbacks {
+			clbk()
+		}
+	}()
+
+	if log == nil {
+		return
+	}
+
+	log.Print(logger.LevelInfo, logger.ComponentTopology, fmt.Sprintf("%s: %v", msg, r))
+}
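+
+// Because logUnexpectedFailure calls recover itself, it only has an effect
+// when deferred directly by the goroutine that may panic; the intended call
+// pattern (mirroring its use in pollSRVRecords below) is:
+//
+//	defer logUnexpectedFailure(t.cfg.logger, "Encountered unexpected failure", func() {
+//		// cleanup that should run only if a panic was recovered
+//	})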
+
+// Connect initializes a Topology and starts the monitoring process. This function
+// must be called to properly monitor the topology.
+func (t *Topology) Connect() error {
+	if !atomic.CompareAndSwapInt64(&t.state, topologyDisconnected, topologyConnecting) {
+		return ErrTopologyConnected
+	}
+
+	t.desc.Store(description.Topology{})
+	var err error
+	t.serversLock.Lock()
+
+	// A replica set name sets the initial topology type to ReplicaSetNoPrimary unless a direct connection is also
+	// specified, in which case the initial type is Single.
+	if t.cfg.ReplicaSetName != "" {
+		t.fsm.SetName = t.cfg.ReplicaSetName
+		t.fsm.Kind = description.ReplicaSetNoPrimary
+	}
+
+	// A direct connection unconditionally sets the topology type to Single.
+	if t.cfg.Mode == SingleMode {
+		t.fsm.Kind = description.Single
+	}
+
+	for _, a := range t.cfg.SeedList {
+		addr := address.Address(a).Canonicalize()
+		t.fsm.Servers = append(t.fsm.Servers, description.NewDefaultServer(addr))
+	}
+
+	switch {
+	case t.cfg.LoadBalanced:
+		// In LoadBalanced mode, we mock a series of events: TopologyDescriptionChanged from Unknown to LoadBalanced,
+		// ServerDescriptionChanged from Unknown to LoadBalancer, and then TopologyDescriptionChanged to reflect the
+		// previous ServerDescriptionChanged event. We publish all of these events here because we don't start server
+		// monitoring routines in this mode, so we have to mock state changes.
+
+		// Transition from Unknown with no servers to LoadBalanced with a single Unknown server.
+		t.fsm.Kind = description.LoadBalanced
+		t.publishTopologyDescriptionChangedEvent(description.Topology{}, t.fsm.Topology)
+
+		addr := address.Address(t.cfg.SeedList[0]).Canonicalize()
+		if err := t.addServer(addr); err != nil {
+			t.serversLock.Unlock()
+			return err
+		}
+
+		// Transition the server from Unknown to LoadBalancer.
+		newServerDesc := t.servers[addr].Description()
+		t.publishServerDescriptionChangedEvent(t.fsm.Servers[0], newServerDesc)
+
+		// Transition from LoadBalanced with an Unknown server to LoadBalanced with a LoadBalancer.
+		oldDesc := t.fsm.Topology
+		t.fsm.Servers = []description.Server{newServerDesc}
+		t.desc.Store(t.fsm.Topology)
+		t.publishTopologyDescriptionChangedEvent(oldDesc, t.fsm.Topology)
+	default:
+		// In non-LB mode, we only publish an initial TopologyDescriptionChanged event from Unknown with no servers to
+		// the current state (e.g. Unknown with one or more servers if we're discovering or Single with one server if
+		// we're connecting directly). Other events are published when state changes occur due to responses in the
+		// server monitoring goroutines.
+
+		newDesc := description.Topology{
+			Kind:                     t.fsm.Kind,
+			Servers:                  t.fsm.Servers,
+			SessionTimeoutMinutesPtr: t.fsm.SessionTimeoutMinutesPtr,
+
+			// TODO(GODRIVER-2885): This field can be removed once
+			// legacy SessionTimeoutMinutes is removed.
+			SessionTimeoutMinutes: t.fsm.SessionTimeoutMinutes,
+		}
+		t.desc.Store(newDesc)
+		t.publishTopologyDescriptionChangedEvent(description.Topology{}, t.fsm.Topology)
+		for _, a := range t.cfg.SeedList {
+			addr := address.Address(a).Canonicalize()
+			err = t.addServer(addr)
+			if err != nil {
+				t.serversLock.Unlock()
+				return err
+			}
+		}
+	}
+
+	t.serversLock.Unlock()
+	if mustLogTopologyMessage(t, logger.LevelInfo) {
+		logTopologyThirdPartyUsage(t, t.hosts)
+	}
+	if t.pollingRequired {
+		// sanity check before passing the hostname to resolver
+		if len(t.hosts) != 1 {
+			return fmt.Errorf("URI with SRV must include one and only one hostname")
+		}
+		_, _, err = net.SplitHostPort(t.hosts[0])
+		if err == nil {
+			// we were able to successfully extract a port from the host,
+			// but should not be able to when using SRV
+			return fmt.Errorf("URI with srv must not include a port number")
+		}
+		go t.pollSRVRecords(t.hosts[0])
+		t.pollingwg.Add(1)
+	}
+
+	t.subscriptionsClosed = false // explicitly set in case topology was disconnected and then reconnected
+
+	atomic.StoreInt64(&t.state, topologyConnected)
+	return nil
+}
+
+// Disconnect closes the topology. It stops the monitoring thread and
+// closes all open subscriptions.
+func (t *Topology) Disconnect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt64(&t.state, topologyConnected, topologyDisconnecting) {
+		return ErrTopologyClosed
+	}
+
+	servers := make(map[address.Address]*Server)
+	t.serversLock.Lock()
+	t.serversClosed = true
+	for addr, server := range t.servers {
+		servers[addr] = server
+	}
+	t.serversLock.Unlock()
+
+	for _, server := range servers {
+		_ = server.Disconnect(ctx)
+		t.publishServerClosedEvent(server.address)
+	}
+
+	t.subLock.Lock()
+	for id, ch := range t.subscribers {
+		close(ch)
+		delete(t.subscribers, id)
+	}
+	t.subscriptionsClosed = true
+	t.subLock.Unlock()
+
+	if t.pollingRequired {
+		t.pollingDone <- struct{}{}
+		t.pollingwg.Wait()
+	}
+
+	t.desc.Store(description.Topology{})
+
+	atomic.StoreInt64(&t.state, topologyDisconnected)
+	t.publishTopologyClosedEvent()
+	return nil
+}
+
+// Description returns a description of the topology.
+func (t *Topology) Description() description.Topology {
+	td, ok := t.desc.Load().(description.Topology)
+	if !ok {
+		td = description.Topology{}
+	}
+	return td
+}
+
+// Kind returns the topology kind of this Topology.
+func (t *Topology) Kind() description.TopologyKind { return t.Description().Kind }
+
+// Subscribe returns a Subscription on which all description.Topology updates
+// will be sent. The channel of the subscription will have a buffer size of one,
+// and will be pre-populated with the current description.Topology.
+// Subscribe implements the driver.Subscriber interface.
+func (t *Topology) Subscribe() (*driver.Subscription, error) {
+	if atomic.LoadInt64(&t.state) != topologyConnected {
+		return nil, errors.New("cannot subscribe to Topology that is not connected")
+	}
+	ch := make(chan description.Topology, 1)
+	td, ok := t.desc.Load().(description.Topology)
+	if !ok {
+		td = description.Topology{}
+	}
+	ch <- td
+
+	t.subLock.Lock()
+	defer t.subLock.Unlock()
+	if t.subscriptionsClosed {
+		return nil, ErrSubscribeAfterClosed
+	}
+	id := t.currentSubscriberID
+	t.subscribers[id] = ch
+	t.currentSubscriberID++
+
+	return &driver.Subscription{
+		Updates: ch,
+		ID:      id,
+	}, nil
+}
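+
+// A typical consumer loop over a subscription might look like the following
+// sketch (illustrative usage, not part of this file; topo is a hypothetical
+// *Topology):
+//
+//	sub, err := topo.Subscribe()
+//	if err != nil {
+//		return err
+//	}
+//	defer func() { _ = topo.Unsubscribe(sub) }()
+//	for desc := range sub.Updates {
+//		_ = desc // react to each description.Topology update
+//	}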
+
+// Unsubscribe unsubscribes the given subscription from the topology and closes the subscription channel.
+// Unsubscribe implements the driver.Subscriber interface.
+func (t *Topology) Unsubscribe(sub *driver.Subscription) error {
+	t.subLock.Lock()
+	defer t.subLock.Unlock()
+
+	if t.subscriptionsClosed {
+		return nil
+	}
+
+	ch, ok := t.subscribers[sub.ID]
+	if !ok {
+		return nil
+	}
+
+	close(ch)
+	delete(t.subscribers, sub.ID)
+	return nil
+}
+
+// RequestImmediateCheck will send heartbeats to all the servers in the
+// topology right away, instead of waiting for the heartbeat timeout.
+func (t *Topology) RequestImmediateCheck() {
+	if atomic.LoadInt64(&t.state) != topologyConnected {
+		return
+	}
+	t.serversLock.Lock()
+	for _, server := range t.servers {
+		server.RequestImmediateCheck()
+	}
+	t.serversLock.Unlock()
+}
+
+// SelectServer selects a server with given a selector. SelectServer complies with the
+// server selection spec, and will time out after serverSelectionTimeout or when the
+// parent context is done.
+func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelector) (driver.Server, error) {
+	if atomic.LoadInt64(&t.state) != topologyConnected {
+		if mustLogServerSelection(t, logger.LevelDebug) {
+			logServerSelectionFailed(ctx, t, ss, ErrTopologyClosed)
+		}
+
+		return nil, ErrTopologyClosed
+	}
+	var ssTimeoutCh <-chan time.Time
+
+	if t.cfg.ServerSelectionTimeout > 0 {
+		ssTimeout := time.NewTimer(t.cfg.ServerSelectionTimeout)
+		ssTimeoutCh = ssTimeout.C
+		defer ssTimeout.Stop()
+	}
+
+	var doneOnce bool
+	var sub *driver.Subscription
+	selectionState := newServerSelectionState(ss, ssTimeoutCh)
+
+	// Record the start time.
+	startTime := time.Now()
+	for {
+		var suitable []description.Server
+		var selectErr error
+
+		if !doneOnce {
+			if mustLogServerSelection(t, logger.LevelDebug) {
+				logServerSelection(ctx, t, logger.LevelDebug, logger.ServerSelectionStarted, ss)
+			}
+
+			// for the first pass, select a server from the current description.
+			// this improves selection speed for up-to-date topology descriptions.
+			suitable, selectErr = t.selectServerFromDescription(t.Description(), selectionState)
+			doneOnce = true
+		} else {
+			// if the first pass didn't select a server, the previous description did not contain a suitable server, so
+			// we subscribe to the topology and attempt to obtain a server from that subscription
+			if sub == nil {
+				var err error
+				sub, err = t.Subscribe()
+				if err != nil {
+					if mustLogServerSelection(t, logger.LevelDebug) {
+						logServerSelectionFailed(ctx, t, ss, err)
+					}
+
+					return nil, err
+				}
+				defer func() { _ = t.Unsubscribe(sub) }()
+			}
+
+			suitable, selectErr = t.selectServerFromSubscription(ctx, sub.Updates, selectionState)
+		}
+		if selectErr != nil {
+			if mustLogServerSelection(t, logger.LevelDebug) {
+				logServerSelectionFailed(ctx, t, ss, selectErr)
+			}
+
+			return nil, selectErr
+		}
+
+		if len(suitable) == 0 {
+			// try again if there are no servers available
+			if mustLogServerSelection(t, logger.LevelInfo) {
+				elapsed := time.Since(startTime)
+				remainingTimeMS := t.cfg.ServerSelectionTimeout - elapsed
+
+				logServerSelection(ctx, t, logger.LevelInfo, logger.ServerSelectionWaiting, ss,
+					logger.KeyRemainingTimeMS, remainingTimeMS.Milliseconds())
+			}
+
+			continue
+		}
+
+		// If there's only one suitable server description, try to find the associated server and
+		// return it. This is an optimization primarily for standalone and load-balanced deployments.
+		if len(suitable) == 1 {
+			server, err := t.FindServer(suitable[0])
+			if err != nil {
+				if mustLogServerSelection(t, logger.LevelDebug) {
+					logServerSelectionFailed(ctx, t, ss, err)
+				}
+
+				return nil, err
+			}
+			if server == nil {
+				continue
+			}
+
+			if mustLogServerSelection(t, logger.LevelDebug) {
+				logServerSelectionSucceeded(ctx, t, ss, server)
+			}
+
+			return server, nil
+		}
+
+		// Randomly select 2 suitable server descriptions and find servers for them. We select two
+		// so we can pick the one with fewer in-progress operations below.
+		desc1, desc2 := pick2(suitable)
+		server1, err := t.FindServer(desc1)
+		if err != nil {
+			if mustLogServerSelection(t, logger.LevelDebug) {
+				logServerSelectionFailed(ctx, t, ss, err)
+			}
+
+			return nil, err
+		}
+		server2, err := t.FindServer(desc2)
+		if err != nil {
+			if mustLogServerSelection(t, logger.LevelDebug) {
+				logServerSelectionFailed(ctx, t, ss, err)
+			}
+
+			return nil, err
+		}
+
+		// If we don't have an actual server for one or both of the provided descriptions, either
+		// return the one server we have, or try again if they're both nil. This could happen for a
+		// number of reasons, including that the server has since stopped being a part of this
+		// topology.
+		if server1 == nil || server2 == nil {
+			if server1 == nil && server2 == nil {
+				continue
+			}
+
+			if server1 != nil {
+				if mustLogServerSelection(t, logger.LevelDebug) {
+					logServerSelectionSucceeded(ctx, t, ss, server1)
+				}
+				return server1, nil
+			}
+
+			if mustLogServerSelection(t, logger.LevelDebug) {
+				logServerSelectionSucceeded(ctx, t, ss, server2)
+			}
+
+			return server2, nil
+		}
+
+		// Of the two randomly selected suitable servers, pick the one with fewer in-use connections.
+		// We use in-use connections as an analog for in-progress operations because they are almost
+		// always the same value for a given server.
+		if server1.OperationCount() < server2.OperationCount() {
+			if mustLogServerSelection(t, logger.LevelDebug) {
+				logServerSelectionSucceeded(ctx, t, ss, server1)
+			}
+
+			return server1, nil
+		}
+
+		if mustLogServerSelection(t, logger.LevelDebug) {
+			logServerSelectionSucceeded(ctx, t, ss, server2)
+		}
+		return server2, nil
+	}
+}
+
+// pick2 returns 2 random server descriptions from the input slice of server descriptions,
+// guaranteeing that the same element from the slice is not picked twice. The order of server
+// descriptions in the input slice may be modified. If fewer than 2 server descriptions are
+// provided, pick2 will panic.
+func pick2(ds []description.Server) (description.Server, description.Server) {
+	// Select a random index from the input slice and keep the server description from that index.
+	idx := random.Intn(len(ds))
+	s1 := ds[idx]
+
+	// Swap the selected index to the end and reslice to remove it so we don't pick the same server
+	// description twice.
+	ds[idx], ds[len(ds)-1] = ds[len(ds)-1], ds[idx]
+	ds = ds[:len(ds)-1]
+
+	// Select another random index from the input slice and return both selected server descriptions.
+	return s1, ds[random.Intn(len(ds))]
+}
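+
+// pick2 is the randomized half of a "power of two random choices" strategy:
+// SelectServer above resolves both picks to live servers and then keeps the
+// one with the lower OperationCount, which balances load without scanning
+// every suitable server. Paraphrasing the selection code above as a sketch:
+//
+//	desc1, desc2 := pick2(suitable)
+//	// ...resolve desc1 and desc2 to server1 and server2, then:
+//	if server1.OperationCount() < server2.OperationCount() {
+//		return server1, nil
+//	}
+//	return server2, nil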
+
+// FindServer will attempt to find a server that fits the given server description.
+// This method will return nil, nil if a matching server could not be found.
+func (t *Topology) FindServer(selected description.Server) (*SelectedServer, error) {
+	if atomic.LoadInt64(&t.state) != topologyConnected {
+		return nil, ErrTopologyClosed
+	}
+	t.serversLock.Lock()
+	defer t.serversLock.Unlock()
+	server, ok := t.servers[selected.Addr]
+	if !ok {
+		return nil, nil
+	}
+
+	desc := t.Description()
+	return &SelectedServer{
+		Server: server,
+		Kind:   desc.Kind,
+	}, nil
+}
+
+// selectServerFromSubscription loops until a topology description is available for server selection. It returns
+// when the given context expires, server selection timeout is reached, or a description containing a selectable
+// server is available.
+func (t *Topology) selectServerFromSubscription(ctx context.Context, subscriptionCh <-chan description.Topology,
+	selectionState serverSelectionState) ([]description.Server, error) {
+
+	current := t.Description()
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, ServerSelectionError{Wrapped: ctx.Err(), Desc: current}
+		case <-selectionState.timeoutChan:
+			return nil, ServerSelectionError{Wrapped: ErrServerSelectionTimeout, Desc: current}
+		case current = <-subscriptionCh:
+		}
+
+		suitable, err := t.selectServerFromDescription(current, selectionState)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(suitable) > 0 {
+			return suitable, nil
+		}
+		t.RequestImmediateCheck()
+	}
+}
+
+// selectServerFromDescription processes the given topology description and returns a slice of suitable servers.
+func (t *Topology) selectServerFromDescription(desc description.Topology,
+	selectionState serverSelectionState) ([]description.Server, error) {
+
+	// Unlike selectServerFromSubscription, this code path does not check ctx.Done or selectionState.timeoutChan because
+	// selecting a server from a description is not a blocking operation.
+
+	if desc.CompatibilityErr != nil {
+		return nil, desc.CompatibilityErr
+	}
+
+	// If the topology kind is LoadBalanced, the LB is the only server and it is always considered selectable. The
+	// selectors exported by the driver should already return the LB as a candidate, but this check ensures that
+	// the LB is always selectable even if a user of the low-level driver provides a custom selector.
+	if desc.Kind == description.LoadBalanced {
+		return desc.Servers, nil
+	}
+
+	allowedIndexes := make([]int, 0, len(desc.Servers))
+	for i, s := range desc.Servers {
+		if s.Kind != description.Unknown {
+			allowedIndexes = append(allowedIndexes, i)
+		}
+	}
+
+	allowed := make([]description.Server, len(allowedIndexes))
+	for i, idx := range allowedIndexes {
+		allowed[i] = desc.Servers[idx]
+	}
+
+	suitable, err := selectionState.selector.SelectServer(desc, allowed)
+	if err != nil {
+		return nil, ServerSelectionError{Wrapped: err, Desc: desc}
+	}
+	return suitable, nil
+}
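+
+// Any custom selector supplied to this path only needs to satisfy
+// description.ServerSelector; a hedged sketch (primaryOnly is hypothetical,
+// and assumes the interface shape used by the call above):
+//
+//	type primaryOnly struct{}
+//
+//	func (primaryOnly) SelectServer(_ description.Topology, candidates []description.Server) ([]description.Server, error) {
+//		out := make([]description.Server, 0, len(candidates))
+//		for _, s := range candidates {
+//			if s.Kind == description.RSPrimary {
+//				out = append(out, s)
+//			}
+//		}
+//		return out, nil
+//	}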
+
+func (t *Topology) pollSRVRecords(hosts string) {
+	defer t.pollingwg.Done()
+
+	serverConfig := newServerConfig(t.cfg.ServerOpts...)
+	heartbeatInterval := serverConfig.heartbeatInterval
+
+	pollTicker := time.NewTicker(t.rescanSRVInterval)
+	defer pollTicker.Stop()
+	t.pollHeartbeatTime.Store(false)
+	var doneOnce bool
+	defer logUnexpectedFailure(t.cfg.logger, "Encountered unexpected failure polling SRV records", func() {
+		if !doneOnce {
+			<-t.pollingDone
+		}
+	})
+
+	for {
+		select {
+		case <-pollTicker.C:
+		case <-t.pollingDone:
+			doneOnce = true
+			return
+		}
+		topoKind := t.Description().Kind
+		if !(topoKind == description.Unknown || topoKind == description.Sharded) {
+			break
+		}
+
+		parsedHosts, err := t.dnsResolver.ParseHosts(hosts, t.cfg.SRVServiceName, false)
+		// DNS problem or no verified hosts returned
+		if err != nil || len(parsedHosts) == 0 {
+			if !t.pollHeartbeatTime.Load().(bool) {
+				pollTicker.Stop()
+				pollTicker = time.NewTicker(heartbeatInterval)
+				t.pollHeartbeatTime.Store(true)
+			}
+			continue
+		}
+		if t.pollHeartbeatTime.Load().(bool) {
+			pollTicker.Stop()
+			pollTicker = time.NewTicker(t.rescanSRVInterval)
+			t.pollHeartbeatTime.Store(false)
+		}
+
+		cont := t.processSRVResults(parsedHosts)
+		if !cont {
+			break
+		}
+	}
+	<-t.pollingDone
+	doneOnce = true
+}
+
+func (t *Topology) processSRVResults(parsedHosts []string) bool {
+	t.serversLock.Lock()
+	defer t.serversLock.Unlock()
+
+	if t.serversClosed {
+		return false
+	}
+	prev := t.fsm.Topology
+	diff := diffHostList(t.fsm.Topology, parsedHosts)
+
+	if len(diff.Added) == 0 && len(diff.Removed) == 0 {
+		return true
+	}
+
+	for _, r := range diff.Removed {
+		addr := address.Address(r).Canonicalize()
+		s, ok := t.servers[addr]
+		if !ok {
+			continue
+		}
+		go func() {
+			cancelCtx, cancel := context.WithCancel(context.Background())
+			cancel()
+			_ = s.Disconnect(cancelCtx)
+		}()
+		delete(t.servers, addr)
+		t.fsm.removeServerByAddr(addr)
+		t.publishServerClosedEvent(s.address)
+	}
+
+	// Now that we've removed all the hosts that disappeared from the SRV record, we need to add any
+	// new hosts added to the SRV record. If adding all of the new hosts would increase the number
+	// of servers past srvMaxHosts, shuffle the list of added hosts.
+	if t.cfg.SRVMaxHosts > 0 && len(t.servers)+len(diff.Added) > t.cfg.SRVMaxHosts {
+		random.Shuffle(len(diff.Added), func(i, j int) {
+			diff.Added[i], diff.Added[j] = diff.Added[j], diff.Added[i]
+		})
+	}
+	// Add all added hosts until the number of servers reaches srvMaxHosts.
+	for _, a := range diff.Added {
+		if t.cfg.SRVMaxHosts > 0 && len(t.servers) >= t.cfg.SRVMaxHosts {
+			break
+		}
+		addr := address.Address(a).Canonicalize()
+		_ = t.addServer(addr)
+		t.fsm.addServer(addr)
+	}
+
+	// store new description
+	newDesc := description.Topology{
+		Kind:                     t.fsm.Kind,
+		Servers:                  t.fsm.Servers,
+		SessionTimeoutMinutesPtr: t.fsm.SessionTimeoutMinutesPtr,
+
+		// TODO(GODRIVER-2885): This field can be removed once legacy
+		// SessionTimeoutMinutes is removed.
+		SessionTimeoutMinutes: t.fsm.SessionTimeoutMinutes,
+	}
+	t.desc.Store(newDesc)
+
+	if !prev.Equal(newDesc) {
+		t.publishTopologyDescriptionChangedEvent(prev, newDesc)
+	}
+
+	t.subLock.Lock()
+	for _, ch := range t.subscribers {
+		// We drain the description if there's one in the channel
+		select {
+		case <-ch:
+		default:
+		}
+		ch <- newDesc
+	}
+	t.subLock.Unlock()
+
+	return true
+}
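+
+// The drain-then-send idiom used on the subscriber channels above relies on
+// each channel having a buffer of one (see Subscribe): a stale description,
+// if any, is discarded non-blockingly before the latest one is sent.
+//
+//	select {
+//	case <-ch: // discard a stale description, if present
+//	default: // channel already empty
+//	}
+//	ch <- newDesc // cannot block: the one-slot buffer was just emptied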
+
+// apply updates the Topology and its underlying FSM based on the provided server description and returns the server
+// description that should be stored.
+func (t *Topology) apply(ctx context.Context, desc description.Server) description.Server {
+	t.serversLock.Lock()
+	defer t.serversLock.Unlock()
+
+	ind, ok := t.fsm.findServer(desc.Addr)
+	if t.serversClosed || !ok {
+		return desc
+	}
+
+	prev := t.fsm.Topology
+	oldDesc := t.fsm.Servers[ind]
+	if oldDesc.TopologyVersion.CompareToIncoming(desc.TopologyVersion) > 0 {
+		return oldDesc
+	}
+
+	var current description.Topology
+	current, desc = t.fsm.apply(desc)
+
+	if !oldDesc.Equal(desc) {
+		t.publishServerDescriptionChangedEvent(oldDesc, desc)
+	}
+
+	diff := diffTopology(prev, current)
+
+	for _, removed := range diff.Removed {
+		if s, ok := t.servers[removed.Addr]; ok {
+			go func() {
+				cancelCtx, cancel := context.WithCancel(ctx)
+				cancel()
+				_ = s.Disconnect(cancelCtx)
+			}()
+			delete(t.servers, removed.Addr)
+			t.publishServerClosedEvent(s.address)
+		}
+	}
+
+	for _, added := range diff.Added {
+		_ = t.addServer(added.Addr)
+	}
+
+	t.desc.Store(current)
+	if !prev.Equal(current) {
+		t.publishTopologyDescriptionChangedEvent(prev, current)
+	}
+
+	t.subLock.Lock()
+	for _, ch := range t.subscribers {
+		// We drain the description if there's one in the channel
+		select {
+		case <-ch:
+		default:
+		}
+		ch <- current
+	}
+	t.subLock.Unlock()
+
+	return desc
+}
+
+func (t *Topology) addServer(addr address.Address) error {
+	if _, ok := t.servers[addr]; ok {
+		return nil
+	}
+
+	svr, err := ConnectServer(addr, t.updateCallback, t.id, t.cfg.ServerOpts...)
+	if err != nil {
+		return err
+	}
+
+	t.servers[addr] = svr
+
+	return nil
+}
+
+// String implements the Stringer interface
+func (t *Topology) String() string {
+	desc := t.Description()
+
+	serversStr := ""
+	t.serversLock.Lock()
+	defer t.serversLock.Unlock()
+	for _, s := range t.servers {
+		serversStr += "{ " + s.String() + " }, "
+	}
+	return fmt.Sprintf("Type: %s, Servers: [%s]", desc.Kind, serversStr)
+}
+
+// publishes a ServerDescriptionChangedEvent to indicate the server description has changed
+func (t *Topology) publishServerDescriptionChangedEvent(prev description.Server, current description.Server) {
+	serverDescriptionChanged := &event.ServerDescriptionChangedEvent{
+		Address:             current.Addr,
+		TopologyID:          t.id,
+		PreviousDescription: prev,
+		NewDescription:      current,
+	}
+
+	if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.ServerDescriptionChanged != nil {
+		t.cfg.ServerMonitor.ServerDescriptionChanged(serverDescriptionChanged)
+	}
+}
+
+// publishes a ServerClosedEvent to indicate the server has closed
+func (t *Topology) publishServerClosedEvent(addr address.Address) {
+	serverClosed := &event.ServerClosedEvent{
+		Address:    addr,
+		TopologyID: t.id,
+	}
+
+	if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.ServerClosed != nil {
+		t.cfg.ServerMonitor.ServerClosed(serverClosed)
+	}
+
+	if mustLogTopologyMessage(t, logger.LevelDebug) {
+		serverHost, serverPort, err := net.SplitHostPort(addr.String())
+		if err != nil {
+			serverHost = addr.String()
+			serverPort = ""
+		}
+
+		portInt64, _ := strconv.ParseInt(serverPort, 10, 32)
+
+		logTopologyMessage(t, logger.LevelDebug, logger.TopologyServerClosed,
+			logger.KeyServerHost, serverHost,
+			logger.KeyServerPort, portInt64)
+	}
+}
+
+// publishes a TopologyDescriptionChangedEvent to indicate the topology description has changed
+func (t *Topology) publishTopologyDescriptionChangedEvent(prev description.Topology, current description.Topology) {
+	topologyDescriptionChanged := &event.TopologyDescriptionChangedEvent{
+		TopologyID:          t.id,
+		PreviousDescription: prev,
+		NewDescription:      current,
+	}
+
+	if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyDescriptionChanged != nil {
+		t.cfg.ServerMonitor.TopologyDescriptionChanged(topologyDescriptionChanged)
+	}
+
+	if mustLogTopologyMessage(t, logger.LevelDebug) {
+		logTopologyMessage(t, logger.LevelDebug, logger.TopologyDescriptionChanged,
+			logger.KeyPreviousDescription, prev.String(),
+			logger.KeyNewDescription, current.String())
+	}
+}
+
+// publishes a TopologyOpeningEvent to indicate the topology is being initialized
+func (t *Topology) publishTopologyOpeningEvent() {
+	topologyOpening := &event.TopologyOpeningEvent{
+		TopologyID: t.id,
+	}
+
+	if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyOpening != nil {
+		t.cfg.ServerMonitor.TopologyOpening(topologyOpening)
+	}
+
+	if mustLogTopologyMessage(t, logger.LevelDebug) {
+		logTopologyMessage(t, logger.LevelDebug, logger.TopologyOpening)
+	}
+}
+
+// publishes a TopologyClosedEvent to indicate the topology has been closed
+func (t *Topology) publishTopologyClosedEvent() {
+	topologyClosed := &event.TopologyClosedEvent{
+		TopologyID: t.id,
+	}
+
+	if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyClosed != nil {
+		t.cfg.ServerMonitor.TopologyClosed(topologyClosed)
+	}
+
+	if mustLogTopologyMessage(t, logger.LevelDebug) {
+		logTopologyMessage(t, logger.LevelDebug, logger.TopologyClosed)
+	}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c0518a54ee3532378acaaf2d08b42b02dcc0853
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go
@@ -0,0 +1,431 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/mongo-driver/event"
+	"go.mongodb.org/mongo-driver/internal/logger"
+	"go.mongodb.org/mongo-driver/mongo/description"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/x/mongo/driver"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/auth"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/ocsp"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
+	"go.mongodb.org/mongo-driver/x/mongo/driver/session"
+)
+
+const defaultServerSelectionTimeout = 30 * time.Second
+
+// Config is used to construct a topology.
+type Config struct {
+	Mode                   MonitorMode
+	ReplicaSetName         string
+	SeedList               []string
+	ServerOpts             []ServerOption
+	URI                    string
+	ServerSelectionTimeout time.Duration
+	ServerMonitor          *event.ServerMonitor
+	SRVMaxHosts            int
+	SRVServiceName         string
+	LoadBalanced           bool
+	logger                 *logger.Logger
+}
+
+// ConvertToDriverAPIOptions converts a options.ServerAPIOptions instance to a driver.ServerAPIOptions.
+func ConvertToDriverAPIOptions(s *options.ServerAPIOptions) *driver.ServerAPIOptions {
+	driverOpts := driver.NewServerAPIOptions(string(s.ServerAPIVersion))
+	if s.Strict != nil {
+		driverOpts.SetStrict(*s.Strict)
+	}
+	if s.DeprecationErrors != nil {
+		driverOpts.SetDeprecationErrors(*s.DeprecationErrors)
+	}
+	return driverOpts
+}
+
+func newLogger(opts *options.LoggerOptions) (*logger.Logger, error) {
+	if opts == nil {
+		opts = options.Logger()
+	}
+
+	componentLevels := make(map[logger.Component]logger.Level)
+	for component, level := range opts.ComponentLevels {
+		componentLevels[logger.Component(component)] = logger.Level(level)
+	}
+
+	log, err := logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels)
+	if err != nil {
+		return nil, fmt.Errorf("error creating logger: %w", err)
+	}
+
+	return log, nil
+}
+
+// convertOIDCArgs converts the internal *driver.OIDCArgs into the equivalent
+// public type *options.OIDCArgs.
+func convertOIDCArgs(args *driver.OIDCArgs) *options.OIDCArgs {
+	if args == nil {
+		return nil
+	}
+	return &options.OIDCArgs{
+		Version:      args.Version,
+		IDPInfo:      (*options.IDPInfo)(args.IDPInfo),
+		RefreshToken: args.RefreshToken,
+	}
+}
+
+// ConvertCreds takes an [options.Credential] and returns the equivalent
+// [driver.Cred].
+func ConvertCreds(cred *options.Credential) *driver.Cred {
+	if cred == nil {
+		return nil
+	}
+
+	var oidcMachineCallback auth.OIDCCallback
+	if cred.OIDCMachineCallback != nil {
+		oidcMachineCallback = func(ctx context.Context, args *driver.OIDCArgs) (*driver.OIDCCredential, error) {
+			cred, err := cred.OIDCMachineCallback(ctx, convertOIDCArgs(args))
+			return (*driver.OIDCCredential)(cred), err
+		}
+	}
+
+	var oidcHumanCallback auth.OIDCCallback
+	if cred.OIDCHumanCallback != nil {
+		oidcHumanCallback = func(ctx context.Context, args *driver.OIDCArgs) (*driver.OIDCCredential, error) {
+			cred, err := cred.OIDCHumanCallback(ctx, convertOIDCArgs(args))
+			return (*driver.OIDCCredential)(cred), err
+		}
+	}
+
+	return &auth.Cred{
+		Source:              cred.AuthSource,
+		Username:            cred.Username,
+		Password:            cred.Password,
+		PasswordSet:         cred.PasswordSet,
+		Props:               cred.AuthMechanismProperties,
+		OIDCMachineCallback: oidcMachineCallback,
+		OIDCHumanCallback:   oidcHumanCallback,
+	}
+}
+
+// NewConfig will translate data from client options into a topology config for
+// building non-default deployments.
+func NewConfig(co *options.ClientOptions, clock *session.ClusterClock) (*Config, error) {
+	var authenticator driver.Authenticator
+	var err error
+	if co.Auth != nil {
+		authenticator, err = auth.CreateAuthenticator(
+			co.Auth.AuthMechanism,
+			ConvertCreds(co.Auth),
+			co.HTTPClient,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("error creating authenticator: %w", err)
+		}
+	}
+	return NewConfigWithAuthenticator(co, clock, authenticator)
+}
+
+// NewConfigWithAuthenticator will translate data from client options into a
+// topology config for building non-default deployments. Server and topology
+// options are not honored if a custom deployment is used. It uses the
+// passed-in authenticator to authenticate the connection.
+func NewConfigWithAuthenticator(
+	co *options.ClientOptions,
+	clock *session.ClusterClock,
+	authenticator driver.Authenticator,
+) (*Config, error) {
+	var serverAPI *driver.ServerAPIOptions
+
+	if err := co.Validate(); err != nil {
+		return nil, err
+	}
+
+	var connOpts []ConnectionOption
+	var serverOpts []ServerOption
+
+	cfgp := &Config{}
+
+	// Set the default "ServerSelectionTimeout" to 30 seconds.
+	cfgp.ServerSelectionTimeout = defaultServerSelectionTimeout
+
+	// Set the default "SeedList" to localhost.
+	cfgp.SeedList = []string{"localhost:27017"}
+
+	// TODO(GODRIVER-814): Add tests for topology, server, and connection related options.
+
+	// ServerAPIOptions must be handled early because other client and server
+	// options below reference the local serverAPI value.
+	if co.ServerAPIOptions != nil {
+		serverAPI = ConvertToDriverAPIOptions(co.ServerAPIOptions)
+		serverOpts = append(serverOpts, WithServerAPI(func(*driver.ServerAPIOptions) *driver.ServerAPIOptions {
+			return serverAPI
+		}))
+	}
+
+	cfgp.URI = co.GetURI()
+
+	if co.SRVServiceName != nil {
+		cfgp.SRVServiceName = *co.SRVServiceName
+	}
+
+	if co.SRVMaxHosts != nil {
+		cfgp.SRVMaxHosts = *co.SRVMaxHosts
+	}
+
+	// AppName
+	var appName string
+	if co.AppName != nil {
+		appName = *co.AppName
+
+		serverOpts = append(serverOpts, WithServerAppName(func(string) string {
+			return appName
+		}))
+	}
+	// Compressors & ZlibLevel
+	var comps []string
+	if len(co.Compressors) > 0 {
+		comps = co.Compressors
+
+		connOpts = append(connOpts, WithCompressors(
+			func(compressors []string) []string {
+				return append(compressors, comps...)
+			},
+		))
+
+		for _, comp := range comps {
+			switch comp {
+			case "zlib":
+				connOpts = append(connOpts, WithZlibLevel(func(*int) *int {
+					return co.ZlibLevel
+				}))
+			case "zstd":
+				connOpts = append(connOpts, WithZstdLevel(func(*int) *int {
+					return co.ZstdLevel
+				}))
+			}
+		}
+
+		serverOpts = append(serverOpts, WithCompressionOptions(
+			func(opts ...string) []string { return append(opts, comps...) },
+		))
+	}
+
+	var loadBalanced bool
+	if co.LoadBalanced != nil {
+		loadBalanced = *co.LoadBalanced
+	}
+
+	// Handshaker
+	var handshaker func(driver.Handshaker) driver.Handshaker
+	if authenticator != nil {
+		handshakeOpts := &auth.HandshakeOptions{
+			AppName:       appName,
+			Authenticator: authenticator,
+			Compressors:   comps,
+			ServerAPI:     serverAPI,
+			LoadBalanced:  loadBalanced,
+			ClusterClock:  clock,
+		}
+
+		if co.Auth.AuthMechanism == "" {
+			// Required for SASL mechanism negotiation during handshake
+			handshakeOpts.DBUser = co.Auth.AuthSource + "." + co.Auth.Username
+		}
+		if co.AuthenticateToAnything != nil && *co.AuthenticateToAnything {
+			// Authenticate arbiters
+			handshakeOpts.PerformAuthentication = func(description.Server) bool {
+				return true
+			}
+		}
+
+		handshaker = func(driver.Handshaker) driver.Handshaker {
+			return auth.Handshaker(nil, handshakeOpts)
+		}
+	} else {
+		handshaker = func(driver.Handshaker) driver.Handshaker {
+			return operation.NewHello().
+				AppName(appName).
+				Compressors(comps).
+				ClusterClock(clock).
+				ServerAPI(serverAPI).
+				LoadBalanced(loadBalanced)
+		}
+	}
+
+	connOpts = append(connOpts, WithHandshaker(handshaker))
+	// ConnectTimeout
+	if co.ConnectTimeout != nil {
+		serverOpts = append(serverOpts, WithHeartbeatTimeout(
+			func(time.Duration) time.Duration { return *co.ConnectTimeout },
+		))
+		connOpts = append(connOpts, WithConnectTimeout(
+			func(time.Duration) time.Duration { return *co.ConnectTimeout },
+		))
+	}
+	// Dialer
+	if co.Dialer != nil {
+		connOpts = append(connOpts, WithDialer(
+			func(Dialer) Dialer { return co.Dialer },
+		))
+	}
+	// Direct
+	if co.Direct != nil && *co.Direct {
+		cfgp.Mode = SingleMode
+	}
+
+	// HeartbeatInterval
+	if co.HeartbeatInterval != nil {
+		serverOpts = append(serverOpts, WithHeartbeatInterval(
+			func(time.Duration) time.Duration { return *co.HeartbeatInterval },
+		))
+	}
+	// Hosts (the default SeedList of "localhost:27017" was already set above)
+	if len(co.Hosts) > 0 {
+		cfgp.SeedList = co.Hosts
+	}
+
+	// MaxConIdleTime
+	if co.MaxConnIdleTime != nil {
+		serverOpts = append(serverOpts, WithConnectionPoolMaxIdleTime(
+			func(time.Duration) time.Duration { return *co.MaxConnIdleTime },
+		))
+	}
+	// MaxPoolSize
+	if co.MaxPoolSize != nil {
+		serverOpts = append(
+			serverOpts,
+			WithMaxConnections(func(uint64) uint64 { return *co.MaxPoolSize }),
+		)
+	}
+	// MinPoolSize
+	if co.MinPoolSize != nil {
+		serverOpts = append(
+			serverOpts,
+			WithMinConnections(func(uint64) uint64 { return *co.MinPoolSize }),
+		)
+	}
+	// MaxConnecting
+	if co.MaxConnecting != nil {
+		serverOpts = append(
+			serverOpts,
+			WithMaxConnecting(func(uint64) uint64 { return *co.MaxConnecting }),
+		)
+	}
+	// PoolMonitor
+	if co.PoolMonitor != nil {
+		serverOpts = append(
+			serverOpts,
+			WithConnectionPoolMonitor(func(*event.PoolMonitor) *event.PoolMonitor { return co.PoolMonitor }),
+		)
+	}
+	// Monitor
+	if co.Monitor != nil {
+		connOpts = append(connOpts, WithMonitor(
+			func(*event.CommandMonitor) *event.CommandMonitor { return co.Monitor },
+		))
+	}
+	// ServerMonitor
+	if co.ServerMonitor != nil {
+		serverOpts = append(
+			serverOpts,
+			WithServerMonitor(func(*event.ServerMonitor) *event.ServerMonitor { return co.ServerMonitor }),
+		)
+		cfgp.ServerMonitor = co.ServerMonitor
+	}
+	// ReplicaSet
+	if co.ReplicaSet != nil {
+		cfgp.ReplicaSetName = *co.ReplicaSet
+	}
+	// ServerSelectionTimeout
+	if co.ServerSelectionTimeout != nil {
+		cfgp.ServerSelectionTimeout = *co.ServerSelectionTimeout
+	}
+	// SocketTimeout
+	if co.SocketTimeout != nil {
+		connOpts = append(
+			connOpts,
+			WithReadTimeout(func(time.Duration) time.Duration { return *co.SocketTimeout }),
+			WithWriteTimeout(func(time.Duration) time.Duration { return *co.SocketTimeout }),
+		)
+	}
+	// TLSConfig
+	if co.TLSConfig != nil {
+		connOpts = append(connOpts, WithTLSConfig(
+			func(*tls.Config) *tls.Config {
+				return co.TLSConfig
+			},
+		))
+	}
+
+	// HTTP Client
+	if co.HTTPClient != nil {
+		connOpts = append(connOpts, WithHTTPClient(
+			func(*http.Client) *http.Client {
+				return co.HTTPClient
+			},
+		))
+	}
+
+	// OCSP cache
+	ocspCache := ocsp.NewCache()
+	connOpts = append(
+		connOpts,
+		WithOCSPCache(func(ocsp.Cache) ocsp.Cache { return ocspCache }),
+	)
+
+	// Disable communication with external OCSP responders.
+	if co.DisableOCSPEndpointCheck != nil {
+		connOpts = append(
+			connOpts,
+			WithDisableOCSPEndpointCheck(func(bool) bool { return *co.DisableOCSPEndpointCheck }),
+		)
+	}
+
+	// LoadBalanced
+	if co.LoadBalanced != nil {
+		cfgp.LoadBalanced = *co.LoadBalanced
+
+		serverOpts = append(
+			serverOpts,
+			WithServerLoadBalanced(func(bool) bool { return *co.LoadBalanced }),
+		)
+		connOpts = append(
+			connOpts,
+			WithConnectionLoadBalanced(func(bool) bool { return *co.LoadBalanced }),
+		)
+	}
+
+	lgr, err := newLogger(co.LoggerOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	serverOpts = append(
+		serverOpts,
+		withLogger(func() *logger.Logger { return lgr }),
+		withServerMonitoringMode(co.ServerMonitoringMode),
+	)
+
+	cfgp.logger = lgr
+
+	serverOpts = append(
+		serverOpts,
+		WithClock(func(*session.ClusterClock) *session.ClusterClock { return clock }),
+		WithConnectionOptions(func(...ConnectionOption) []ConnectionOption { return connOpts }))
+
+	cfgp.ServerOpts = serverOpts
+
+	return cfgp, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go
new file mode 100644
index 0000000000000000000000000000000000000000..987ae16c08e76cb11bc8c0810aeea815dc247e21
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go
@@ -0,0 +1,608 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package wiremessage is intended for internal use only. It is made available
+// to facilitate use cases that require access to internal MongoDB driver
+// functionality and state. The API of this package is not stable and there is
+// no backward compatibility guarantee.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT
+// NOTICE! USE WITH EXTREME CAUTION!
+package wiremessage
+
+import (
+	"bytes"
+	"encoding/binary"
+	"strings"
+	"sync/atomic"
+
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// WireMessage represents a MongoDB wire message in binary form.
+type WireMessage []byte
+
+var globalRequestID int32
+
+// NextRequestID returns the next request ID.
+func NextRequestID() int32 { return atomic.AddInt32(&globalRequestID, 1) }
+
+// OpCode represents a MongoDB wire protocol opcode.
+type OpCode int32
+
+// These constants are the valid opcodes for the version of the wire protocol
+// supported by this library. The skipped opcodes are historical opcodes that
+// are no longer used.
+const (
+	OpReply  OpCode = 1
+	_        OpCode = 1001
+	OpUpdate OpCode = 2001
+	OpInsert OpCode = 2002
+	_        OpCode = 2003
+	// Deprecated: Use OpMsg instead.
+	OpQuery        OpCode = 2004
+	OpGetMore      OpCode = 2005
+	OpDelete       OpCode = 2006
+	OpKillCursors  OpCode = 2007
+	OpCommand      OpCode = 2010
+	OpCommandReply OpCode = 2011
+	OpCompressed   OpCode = 2012
+	OpMsg          OpCode = 2013
+)
+
+// String implements the fmt.Stringer interface.
+func (oc OpCode) String() string {
+	switch oc {
+	case OpReply:
+		return "OP_REPLY"
+	case OpUpdate:
+		return "OP_UPDATE"
+	case OpInsert:
+		return "OP_INSERT"
+	case OpQuery:
+		return "OP_QUERY"
+	case OpGetMore:
+		return "OP_GET_MORE"
+	case OpDelete:
+		return "OP_DELETE"
+	case OpKillCursors:
+		return "OP_KILL_CURSORS"
+	case OpCommand:
+		return "OP_COMMAND"
+	case OpCommandReply:
+		return "OP_COMMANDREPLY"
+	case OpCompressed:
+		return "OP_COMPRESSED"
+	case OpMsg:
+		return "OP_MSG"
+	default:
+		return "<invalid opcode>"
+	}
+}
+
+// QueryFlag represents the flags on an OP_QUERY message.
+type QueryFlag int32
+
+// These constants represent the individual flags on an OP_QUERY message.
+const (
+	_ QueryFlag = 1 << iota
+	TailableCursor
+	SecondaryOK
+	OplogReplay
+	NoCursorTimeout
+	AwaitData
+	Exhaust
+	Partial
+)
+
+// String implements the fmt.Stringer interface.
+func (qf QueryFlag) String() string {
+	strs := make([]string, 0)
+	if qf&TailableCursor == TailableCursor {
+		strs = append(strs, "TailableCursor")
+	}
+	if qf&SecondaryOK == SecondaryOK {
+		strs = append(strs, "SecondaryOK")
+	}
+	if qf&OplogReplay == OplogReplay {
+		strs = append(strs, "OplogReplay")
+	}
+	if qf&NoCursorTimeout == NoCursorTimeout {
+		strs = append(strs, "NoCursorTimeout")
+	}
+	if qf&AwaitData == AwaitData {
+		strs = append(strs, "AwaitData")
+	}
+	if qf&Exhaust == Exhaust {
+		strs = append(strs, "Exhaust")
+	}
+	if qf&Partial == Partial {
+		strs = append(strs, "Partial")
+	}
+	str := "["
+	str += strings.Join(strs, ", ")
+	str += "]"
+	return str
+}
+
+// MsgFlag represents the flags on an OP_MSG message.
+type MsgFlag uint32
+
+// These constants represent the individual flags on an OP_MSG message.
+const (
+	ChecksumPresent MsgFlag = 1 << iota
+	MoreToCome
+
+	ExhaustAllowed MsgFlag = 1 << 16
+)
+
+// ReplyFlag represents the flags of an OP_REPLY message.
+type ReplyFlag int32
+
+// These constants represent the individual flags of an OP_REPLY message.
+const (
+	CursorNotFound ReplyFlag = 1 << iota
+	QueryFailure
+	ShardConfigStale
+	AwaitCapable
+)
+
+// String implements the fmt.Stringer interface.
+func (rf ReplyFlag) String() string {
+	strs := make([]string, 0)
+	if rf&CursorNotFound == CursorNotFound {
+		strs = append(strs, "CursorNotFound")
+	}
+	if rf&QueryFailure == QueryFailure {
+		strs = append(strs, "QueryFailure")
+	}
+	if rf&ShardConfigStale == ShardConfigStale {
+		strs = append(strs, "ShardConfigStale")
+	}
+	if rf&AwaitCapable == AwaitCapable {
+		strs = append(strs, "AwaitCapable")
+	}
+	str := "["
+	str += strings.Join(strs, ", ")
+	str += "]"
+	return str
+}
+
+// SectionType represents the type of a single section in an OP_MSG.
+type SectionType uint8
+
+// These constants represent the individual section types for a section in an OP_MSG
+const (
+	SingleDocument SectionType = iota
+	DocumentSequence
+)
+
+// CompressorID is the ID for each type of Compressor.
+type CompressorID uint8
+
+// These constants represent the individual compressor IDs for an OP_COMPRESSED.
+const (
+	CompressorNoOp CompressorID = iota
+	CompressorSnappy
+	CompressorZLib
+	CompressorZstd
+)
+
+// String implements the fmt.Stringer interface.
+func (id CompressorID) String() string {
+	switch id {
+	case CompressorNoOp:
+		return "CompressorNoOp"
+	case CompressorSnappy:
+		return "CompressorSnappy"
+	case CompressorZLib:
+		return "CompressorZLib"
+	case CompressorZstd:
+		return "CompressorZstd"
+	default:
+		return "CompressorInvalid"
+	}
+}
+
+const (
+	// DefaultZlibLevel is the default level for zlib compression
+	DefaultZlibLevel = 6
+	// DefaultZstdLevel is the default level for zstd compression.
+	// Matches https://github.com/wiredtiger/wiredtiger/blob/f08bc4b18612ef95a39b12166abcccf207f91596/ext/compressors/zstd/zstd_compress.c#L299
+	DefaultZstdLevel = 6
+)
+
+// AppendHeaderStart appends a header to the dst slice and returns the index
+// where the wire message starts in dst, along with the updated slice.
+func AppendHeaderStart(dst []byte, reqid, respto int32, opcode OpCode) (index int32, b []byte) {
+	index, dst = bsoncore.ReserveLength(dst)
+	dst = appendi32(dst, reqid)
+	dst = appendi32(dst, respto)
+	dst = appendi32(dst, int32(opcode))
+	return index, dst
+}
+
+// AppendHeader appends a header to dst.
+func AppendHeader(dst []byte, length, reqid, respto int32, opcode OpCode) []byte {
+	dst = appendi32(dst, length)
+	dst = appendi32(dst, reqid)
+	dst = appendi32(dst, respto)
+	dst = appendi32(dst, int32(opcode))
+	return dst
+}
+
+// ReadHeader reads a wire message header from src.
+func ReadHeader(src []byte) (length, requestID, responseTo int32, opcode OpCode, rem []byte, ok bool) {
+	if len(src) < 16 {
+		return 0, 0, 0, 0, src, false
+	}
+
+	length = readi32unsafe(src)
+	requestID = readi32unsafe(src[4:])
+	responseTo = readi32unsafe(src[8:])
+	opcode = OpCode(readi32unsafe(src[12:]))
+	return length, requestID, responseTo, opcode, src[16:], true
+}
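+
+// Minimal round-trip sketch (illustrative only): reserve the length slot,
+// write the header, back-fill the length, then read it back. The message
+// body is left empty here.
+//
+//	idx, wm := AppendHeaderStart(nil, NextRequestID(), 0, OpMsg)
+//	wm = bsoncore.UpdateLength(wm, idx, int32(len(wm[idx:])))
+//	length, reqID, respTo, opcode, _, ok := ReadHeader(wm)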
+
+// AppendQueryFlags appends the flags for an OP_QUERY wire message.
+func AppendQueryFlags(dst []byte, flags QueryFlag) []byte {
+	return appendi32(dst, int32(flags))
+}
+
+// AppendMsgFlags appends the flags for an OP_MSG wire message.
+func AppendMsgFlags(dst []byte, flags MsgFlag) []byte {
+	return appendi32(dst, int32(flags))
+}
+
+// AppendReplyFlags appends the flags for an OP_REPLY wire message.
+func AppendReplyFlags(dst []byte, flags ReplyFlag) []byte {
+	return appendi32(dst, int32(flags))
+}
+
+// AppendMsgSectionType appends the section type to dst.
+func AppendMsgSectionType(dst []byte, stype SectionType) []byte {
+	return append(dst, byte(stype))
+}
+
+// AppendQueryFullCollectionName appends the full collection name to dst.
+func AppendQueryFullCollectionName(dst []byte, ns string) []byte {
+	return appendCString(dst, ns)
+}
+
+// AppendQueryNumberToSkip appends the number to skip to dst.
+func AppendQueryNumberToSkip(dst []byte, skip int32) []byte {
+	return appendi32(dst, skip)
+}
+
+// AppendQueryNumberToReturn appends the number to return to dst.
+func AppendQueryNumberToReturn(dst []byte, nor int32) []byte {
+	return appendi32(dst, nor)
+}
+
+// AppendReplyCursorID appends the cursor ID to dst.
+func AppendReplyCursorID(dst []byte, id int64) []byte {
+	return appendi64(dst, id)
+}
+
+// AppendReplyStartingFrom appends the starting from field to dst.
+func AppendReplyStartingFrom(dst []byte, sf int32) []byte {
+	return appendi32(dst, sf)
+}
+
+// AppendReplyNumberReturned appends the number returned to dst.
+func AppendReplyNumberReturned(dst []byte, nr int32) []byte {
+	return appendi32(dst, nr)
+}
+
+// AppendCompressedOriginalOpCode appends the original opcode to dst.
+func AppendCompressedOriginalOpCode(dst []byte, opcode OpCode) []byte {
+	return appendi32(dst, int32(opcode))
+}
+
+// AppendCompressedUncompressedSize appends the uncompressed size of a
+// compressed wiremessage to dst.
+func AppendCompressedUncompressedSize(dst []byte, size int32) []byte { return appendi32(dst, size) }
+
+// AppendCompressedCompressorID appends the ID of the compressor to dst.
+func AppendCompressedCompressorID(dst []byte, id CompressorID) []byte {
+	return append(dst, byte(id))
+}
+
+// AppendCompressedCompressedMessage appends the compressed wiremessage to dst.
+func AppendCompressedCompressedMessage(dst []byte, msg []byte) []byte { return append(dst, msg...) }
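+
+// Hedged sketch of assembling an OP_COMPRESSED body; raw is the original
+// uncompressed message and compressed its snappy-encoded form, both assumed
+// to exist in the caller.
+//
+//	dst := AppendCompressedOriginalOpCode(nil, OpMsg)
+//	dst = AppendCompressedUncompressedSize(dst, int32(len(raw)))
+//	dst = AppendCompressedCompressorID(dst, CompressorSnappy)
+//	dst = AppendCompressedCompressedMessage(dst, compressed)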
+
+// AppendGetMoreZero appends the zero field to dst.
+func AppendGetMoreZero(dst []byte) []byte {
+	return appendi32(dst, 0)
+}
+
+// AppendGetMoreFullCollectionName appends the fullCollectionName field to dst.
+func AppendGetMoreFullCollectionName(dst []byte, ns string) []byte {
+	return appendCString(dst, ns)
+}
+
+// AppendGetMoreNumberToReturn appends the numberToReturn field to dst.
+func AppendGetMoreNumberToReturn(dst []byte, numToReturn int32) []byte {
+	return appendi32(dst, numToReturn)
+}
+
+// AppendGetMoreCursorID appends the cursorID field to dst.
+func AppendGetMoreCursorID(dst []byte, cursorID int64) []byte {
+	return appendi64(dst, cursorID)
+}
+
+// AppendKillCursorsZero appends the zero field to dst.
+func AppendKillCursorsZero(dst []byte) []byte {
+	return appendi32(dst, 0)
+}
+
+// AppendKillCursorsNumberIDs appends the numberOfCursorIDs field to dst.
+func AppendKillCursorsNumberIDs(dst []byte, numIDs int32) []byte {
+	return appendi32(dst, numIDs)
+}
+
+// AppendKillCursorsCursorIDs appends each of the given cursor IDs to dst.
+func AppendKillCursorsCursorIDs(dst []byte, cursors []int64) []byte {
+	for _, cursor := range cursors {
+		dst = appendi64(dst, cursor)
+	}
+	return dst
+}
+
+// ReadMsgFlags reads the OP_MSG flags from src.
+func ReadMsgFlags(src []byte) (flags MsgFlag, rem []byte, ok bool) {
+	i32, rem, ok := readi32(src)
+	return MsgFlag(i32), rem, ok
+}
+
+// IsMsgMoreToCome reports whether the provided wire message is an OP_MSG with the moreToCome flag set.
+func IsMsgMoreToCome(wm []byte) bool {
+	return len(wm) >= 20 &&
+		OpCode(readi32unsafe(wm[12:16])) == OpMsg &&
+		MsgFlag(readi32unsafe(wm[16:20]))&MoreToCome == MoreToCome
+}
+
+// ReadMsgSectionType reads the section type from src.
+func ReadMsgSectionType(src []byte) (stype SectionType, rem []byte, ok bool) {
+	if len(src) < 1 {
+		return 0, src, false
+	}
+	return SectionType(src[0]), src[1:], true
+}
+
+// ReadMsgSectionSingleDocument reads a single document from src.
+func ReadMsgSectionSingleDocument(src []byte) (doc bsoncore.Document, rem []byte, ok bool) {
+	return bsoncore.ReadDocument(src)
+}
+
+// ReadMsgSectionDocumentSequence reads an identifier and document sequence from src and returns the document sequence
+// data parsed into a slice of BSON documents.
+func ReadMsgSectionDocumentSequence(src []byte) (identifier string, docs []bsoncore.Document, rem []byte, ok bool) {
+	identifier, rem, ret, ok := ReadMsgSectionRawDocumentSequence(src)
+	if !ok {
+		return "", nil, src, false
+	}
+
+	docs = make([]bsoncore.Document, 0)
+	var doc bsoncore.Document
+	for {
+		doc, rem, ok = bsoncore.ReadDocument(rem)
+		if !ok {
+			break
+		}
+		docs = append(docs, doc)
+	}
+	if len(rem) > 0 {
+		return "", nil, src, false
+	}
+
+	return identifier, docs, ret, true
+}
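+
+// Sketch of consuming one OP_MSG section, assuming src points at the start
+// of a section: read the section type, then dispatch on it.
+//
+//	stype, rem, ok := ReadMsgSectionType(src)
+//	if ok && stype == DocumentSequence {
+//		identifier, docs, rest, ok := ReadMsgSectionDocumentSequence(rem)
+//		// use identifier, docs, rest, ok
+//	}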
+
+// ReadMsgSectionRawDocumentSequence reads an identifier and document sequence from src and returns the raw document
+// sequence data.
+func ReadMsgSectionRawDocumentSequence(src []byte) (identifier string, data []byte, rem []byte, ok bool) {
+	length, rem, ok := readi32(src)
+	if !ok || int(length) > len(src) || length-4 < 0 {
+		return "", nil, src, false
+	}
+
+	// After these assignments, rem will be the data containing the identifier string + the document sequence bytes and
+	// rest will be the rest of the wire message after this document sequence.
+	rem, rest := rem[:length-4], rem[length-4:]
+
+	identifier, rem, ok = readcstring(rem)
+	if !ok {
+		return "", nil, src, false
+	}
+
+	return identifier, rem, rest, true
+}
+
+// ReadMsgChecksum reads a checksum from src.
+func ReadMsgChecksum(src []byte) (checksum uint32, rem []byte, ok bool) {
+	i32, rem, ok := readi32(src)
+	return uint32(i32), rem, ok
+}
+
+// ReadQueryFlags reads OP_QUERY flags from src.
+//
+// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions
+// instead.
+func ReadQueryFlags(src []byte) (flags QueryFlag, rem []byte, ok bool) {
+	i32, rem, ok := readi32(src)
+	return QueryFlag(i32), rem, ok
+}
+
+// ReadQueryFullCollectionName reads the full collection name from src.
+//
+// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions
+// instead.
+func ReadQueryFullCollectionName(src []byte) (collname string, rem []byte, ok bool) {
+	return readcstring(src)
+}
+
+// ReadQueryNumberToSkip reads the number to skip from src.
+//
+// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions
+// instead.
+func ReadQueryNumberToSkip(src []byte) (nts int32, rem []byte, ok bool) {
+	return readi32(src)
+}
+
+// ReadQueryNumberToReturn reads the number to return from src.
+//
+// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions
+// instead.
+func ReadQueryNumberToReturn(src []byte) (ntr int32, rem []byte, ok bool) {
+	return readi32(src)
+}
+
+// ReadQueryQuery reads the query from src.
+//
+// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions
+// instead.
+func ReadQueryQuery(src []byte) (query bsoncore.Document, rem []byte, ok bool) {
+	return bsoncore.ReadDocument(src)
+}
+
+// ReadQueryReturnFieldsSelector reads a return fields selector document from src.
+//
+// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions
+// instead.
+func ReadQueryReturnFieldsSelector(src []byte) (rfs bsoncore.Document, rem []byte, ok bool) {
+	return bsoncore.ReadDocument(src)
+}
+
+// ReadReplyFlags reads OP_REPLY flags from src.
+func ReadReplyFlags(src []byte) (flags ReplyFlag, rem []byte, ok bool) {
+	i32, rem, ok := readi32(src)
+	return ReplyFlag(i32), rem, ok
+}
+
+// ReadReplyCursorID reads a cursor ID from src.
+func ReadReplyCursorID(src []byte) (cursorID int64, rem []byte, ok bool) {
+	return readi64(src)
+}
+
+// ReadReplyStartingFrom reads the startingFrom field from src.
+func ReadReplyStartingFrom(src []byte) (startingFrom int32, rem []byte, ok bool) {
+	return readi32(src)
+}
+
+// ReadReplyNumberReturned reads the number returned from src.
+func ReadReplyNumberReturned(src []byte) (numberReturned int32, rem []byte, ok bool) {
+	return readi32(src)
+}
+
+// ReadReplyDocuments reads as many documents as possible from src.
+func ReadReplyDocuments(src []byte) (docs []bsoncore.Document, rem []byte, ok bool) {
+	rem = src
+	for {
+		var doc bsoncore.Document
+		doc, rem, ok = bsoncore.ReadDocument(rem)
+		if !ok {
+			break
+		}
+
+		docs = append(docs, doc)
+	}
+
+	return docs, rem, true
+}
+
+// ReadReplyDocument reads a reply document from src.
+func ReadReplyDocument(src []byte) (doc bsoncore.Document, rem []byte, ok bool) {
+	return bsoncore.ReadDocument(src)
+}
+
+// ReadCompressedOriginalOpCode reads the original opcode from src.
+func ReadCompressedOriginalOpCode(src []byte) (opcode OpCode, rem []byte, ok bool) {
+	i32, rem, ok := readi32(src)
+	return OpCode(i32), rem, ok
+}
+
+// ReadCompressedUncompressedSize reads the uncompressed size of a
+// compressed wiremessage from src.
+func ReadCompressedUncompressedSize(src []byte) (size int32, rem []byte, ok bool) {
+	return readi32(src)
+}
+
+// ReadCompressedCompressorID reads the ID of the compressor from src.
+func ReadCompressedCompressorID(src []byte) (id CompressorID, rem []byte, ok bool) {
+	if len(src) < 1 {
+		return 0, src, false
+	}
+	return CompressorID(src[0]), src[1:], true
+}
+
+// ReadKillCursorsZero reads the zero field from src.
+func ReadKillCursorsZero(src []byte) (zero int32, rem []byte, ok bool) {
+	return readi32(src)
+}
+
+// ReadKillCursorsNumberIDs reads the numberOfCursorIDs field from src.
+func ReadKillCursorsNumberIDs(src []byte) (numIDs int32, rem []byte, ok bool) {
+	return readi32(src)
+}
+
+// ReadKillCursorsCursorIDs reads numIDs cursor IDs from src.
+func ReadKillCursorsCursorIDs(src []byte, numIDs int32) (cursorIDs []int64, rem []byte, ok bool) {
+	var i int32
+	var id int64
+	for i = 0; i < numIDs; i++ {
+		id, src, ok = readi64(src)
+		if !ok {
+			return cursorIDs, src, false
+		}
+
+		cursorIDs = append(cursorIDs, id)
+	}
+	return cursorIDs, src, true
+}
+
+func appendi32(dst []byte, x int32) []byte {
+	b := []byte{0, 0, 0, 0}
+	binary.LittleEndian.PutUint32(b, uint32(x))
+	return append(dst, b...)
+}
+
+func appendi64(dst []byte, x int64) []byte {
+	b := []byte{0, 0, 0, 0, 0, 0, 0, 0}
+	binary.LittleEndian.PutUint64(b, uint64(x))
+	return append(dst, b...)
+}
+
+func appendCString(b []byte, str string) []byte {
+	b = append(b, str...)
+	return append(b, 0x00)
+}
+
+func readi32(src []byte) (int32, []byte, bool) {
+	if len(src) < 4 {
+		return 0, src, false
+	}
+	return readi32unsafe(src), src[4:], true
+}
+
+func readi32unsafe(src []byte) int32 {
+	return int32(binary.LittleEndian.Uint32(src))
+}
+
+func readi64(src []byte) (int64, []byte, bool) {
+	if len(src) < 8 {
+		return 0, src, false
+	}
+	return int64(binary.LittleEndian.Uint64(src)), src[8:], true
+}
+
+func readcstring(src []byte) (string, []byte, bool) {
+	idx := bytes.IndexByte(src, 0x00)
+	if idx < 0 {
+		return "", src, false
+	}
+	return string(src[:idx]), src[idx+1:], true
+}
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
index 6a66aea5eafe0ca6a688840c47219556c552488e..2a7cf70da6e498df9c11ab6a5eaa2ddd7af34da4 100644
--- a/vendor/golang.org/x/crypto/LICENSE
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
 
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6c645e7ceb47b4431b346484d240291e4dd7a2d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -0,0 +1,793 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
+// are signed messages attesting to the validity of a certificate for a small
+// period of time. This is used to manage revocation for X.509 certificates.
+package ocsp
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	_ "crypto/sha1"
+	_ "crypto/sha256"
+	_ "crypto/sha512"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"math/big"
+	"strconv"
+	"time"
+)
+
+var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
+
+// ResponseStatus contains the result of an OCSP request. See
+// https://tools.ietf.org/html/rfc6960#section-2.3
+type ResponseStatus int
+
+const (
+	Success       ResponseStatus = 0
+	Malformed     ResponseStatus = 1
+	InternalError ResponseStatus = 2
+	TryLater      ResponseStatus = 3
+	// Status code four is unused in OCSP. See
+	// https://tools.ietf.org/html/rfc6960#section-4.2.1
+	SignatureRequired ResponseStatus = 5
+	Unauthorized      ResponseStatus = 6
+)
+
+func (r ResponseStatus) String() string {
+	switch r {
+	case Success:
+		return "success"
+	case Malformed:
+		return "malformed"
+	case InternalError:
+		return "internal error"
+	case TryLater:
+		return "try later"
+	case SignatureRequired:
+		return "signature required"
+	case Unauthorized:
+		return "unauthorized"
+	default:
+		return "unknown OCSP status: " + strconv.Itoa(int(r))
+	}
+}
+
+// ResponseError is an error that may be returned by ParseResponse to indicate
+// that the response itself is an error, not just that it's indicating that a
+// certificate is revoked, unknown, etc.
+type ResponseError struct {
+	Status ResponseStatus
+}
+
+func (r ResponseError) Error() string {
+	return "ocsp: error from server: " + r.Status.String()
+}
+
+// These are internal structures that reflect the ASN.1 structure of an OCSP
+// response. See RFC 2560, section 4.2.
+
+type certID struct {
+	HashAlgorithm pkix.AlgorithmIdentifier
+	NameHash      []byte
+	IssuerKeyHash []byte
+	SerialNumber  *big.Int
+}
+
+// https://tools.ietf.org/html/rfc2560#section-4.1.1
+type ocspRequest struct {
+	TBSRequest tbsRequest
+}
+
+type tbsRequest struct {
+	Version       int              `asn1:"explicit,tag:0,default:0,optional"`
+	RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
+	RequestList   []request
+}
+
+type request struct {
+	Cert certID
+}
+
+type responseASN1 struct {
+	Status   asn1.Enumerated
+	Response responseBytes `asn1:"explicit,tag:0,optional"`
+}
+
+type responseBytes struct {
+	ResponseType asn1.ObjectIdentifier
+	Response     []byte
+}
+
+type basicResponse struct {
+	TBSResponseData    responseData
+	SignatureAlgorithm pkix.AlgorithmIdentifier
+	Signature          asn1.BitString
+	Certificates       []asn1.RawValue `asn1:"explicit,tag:0,optional"`
+}
+
+type responseData struct {
+	Raw            asn1.RawContent
+	Version        int `asn1:"optional,default:0,explicit,tag:0"`
+	RawResponderID asn1.RawValue
+	ProducedAt     time.Time `asn1:"generalized"`
+	Responses      []singleResponse
+}
+
+type singleResponse struct {
+	CertID           certID
+	Good             asn1.Flag        `asn1:"tag:0,optional"`
+	Revoked          revokedInfo      `asn1:"tag:1,optional"`
+	Unknown          asn1.Flag        `asn1:"tag:2,optional"`
+	ThisUpdate       time.Time        `asn1:"generalized"`
+	NextUpdate       time.Time        `asn1:"generalized,explicit,tag:0,optional"`
+	SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"`
+}
+
+type revokedInfo struct {
+	RevocationTime time.Time       `asn1:"generalized"`
+	Reason         asn1.Enumerated `asn1:"explicit,tag:0,optional"`
+}
+
+var (
+	oidSignatureMD2WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+	oidSignatureMD5WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+	oidSignatureSHA1WithRSA     = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+	oidSignatureSHA256WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+	oidSignatureSHA384WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+	oidSignatureSHA512WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+	oidSignatureDSAWithSHA1     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+	oidSignatureDSAWithSHA256   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
+	oidSignatureECDSAWithSHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+	oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+	oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+	oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+)
+
+var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
+	crypto.SHA1:   asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
+	crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
+	crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
+	crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+var signatureAlgorithmDetails = []struct {
+	algo       x509.SignatureAlgorithm
+	oid        asn1.ObjectIdentifier
+	pubKeyAlgo x509.PublicKeyAlgorithm
+	hash       crypto.Hash
+}{
+	{x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
+	{x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
+	{x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
+	{x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
+	{x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
+	{x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
+	{x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
+	{x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
+	{x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
+	{x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
+	{x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
+	{x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+	var pubType x509.PublicKeyAlgorithm
+
+	switch pub := pub.(type) {
+	case *rsa.PublicKey:
+		pubType = x509.RSA
+		hashFunc = crypto.SHA256
+		sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+		sigAlgo.Parameters = asn1.RawValue{
+			Tag: 5,
+		}
+
+	case *ecdsa.PublicKey:
+		pubType = x509.ECDSA
+
+		switch pub.Curve {
+		case elliptic.P224(), elliptic.P256():
+			hashFunc = crypto.SHA256
+			sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+		case elliptic.P384():
+			hashFunc = crypto.SHA384
+			sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+		case elliptic.P521():
+			hashFunc = crypto.SHA512
+			sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+		default:
+			err = errors.New("x509: unknown elliptic curve")
+		}
+
+	default:
+		err = errors.New("x509: only RSA and ECDSA keys supported")
+	}
+
+	if err != nil {
+		return
+	}
+
+	if requestedSigAlgo == 0 {
+		return
+	}
+
+	found := false
+	for _, details := range signatureAlgorithmDetails {
+		if details.algo == requestedSigAlgo {
+			if details.pubKeyAlgo != pubType {
+				err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+				return
+			}
+			sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+			if hashFunc == 0 {
+				err = errors.New("x509: cannot sign with hash function requested")
+				return
+			}
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		err = errors.New("x509: unknown SignatureAlgorithm")
+	}
+
+	return
+}
+
+// TODO(agl): this is taken from crypto/x509 and so should probably be exported
+// from crypto/x509 or crypto/x509/pkix.
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm {
+	for _, details := range signatureAlgorithmDetails {
+		if oid.Equal(details.oid) {
+			return details.algo
+		}
+	}
+	return x509.UnknownSignatureAlgorithm
+}
+
+// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form.
+func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash {
+	for hash, oid := range hashOIDs {
+		if oid.Equal(target) {
+			return hash
+		}
+	}
+	return crypto.Hash(0)
+}
+
+func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier {
+	for hash, oid := range hashOIDs {
+		if hash == target {
+			return oid
+		}
+	}
+	return nil
+}
+
+// This is the exposed reflection of the internal OCSP structures.
+
+// The status values that can be expressed in OCSP. See RFC 6960.
+// These are used for the Response.Status field.
+const (
+	// Good means that the certificate is valid.
+	Good = 0
+	// Revoked means that the certificate has been deliberately revoked.
+	Revoked = 1
+	// Unknown means that the OCSP responder doesn't know about the certificate.
+	Unknown = 2
+	// ServerFailed is unused and was never used (see
+	// https://go-review.googlesource.com/#/c/18944). ParseResponse will
+	// return a ResponseError when an error response is parsed.
+	ServerFailed = 3
+)
+
+// The enumerated reasons for revoking a certificate. See RFC 5280.
+const (
+	Unspecified          = 0
+	KeyCompromise        = 1
+	CACompromise         = 2
+	AffiliationChanged   = 3
+	Superseded           = 4
+	CessationOfOperation = 5
+	CertificateHold      = 6
+
+	RemoveFromCRL      = 8
+	PrivilegeWithdrawn = 9
+	AACompromise       = 10
+)
+
+// Request represents an OCSP request. See RFC 6960.
+type Request struct {
+	HashAlgorithm  crypto.Hash
+	IssuerNameHash []byte
+	IssuerKeyHash  []byte
+	SerialNumber   *big.Int
+}
+
+// Marshal marshals the OCSP request to ASN.1 DER encoded form.
+func (req *Request) Marshal() ([]byte, error) {
+	hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm)
+	if hashAlg == nil {
+		return nil, errors.New("Unknown hash algorithm")
+	}
+	return asn1.Marshal(ocspRequest{
+		tbsRequest{
+			Version: 0,
+			RequestList: []request{
+				{
+					Cert: certID{
+						pkix.AlgorithmIdentifier{
+							Algorithm:  hashAlg,
+							Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+						},
+						req.IssuerNameHash,
+						req.IssuerKeyHash,
+						req.SerialNumber,
+					},
+				},
+			},
+		},
+	})
+}
+
+// Response represents an OCSP response containing a single SingleResponse. See
+// RFC 6960.
+type Response struct {
+	Raw []byte
+
+	// Status is one of {Good, Revoked, Unknown}
+	Status                                        int
+	SerialNumber                                  *big.Int
+	ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
+	RevocationReason                              int
+	Certificate                                   *x509.Certificate
+	// TBSResponseData contains the raw bytes of the signed response. If
+	// Certificate is nil then this can be used to verify Signature.
+	TBSResponseData    []byte
+	Signature          []byte
+	SignatureAlgorithm x509.SignatureAlgorithm
+
+	// IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash.
+	// Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512.
+	// If zero, the default is crypto.SHA1.
+	IssuerHash crypto.Hash
+
+	// RawResponderName optionally contains the DER-encoded subject of the
+	// responder certificate. Exactly one of RawResponderName and
+	// ResponderKeyHash is set.
+	RawResponderName []byte
+	// ResponderKeyHash optionally contains the SHA-1 hash of the
+	// responder's public key. Exactly one of RawResponderName and
+	// ResponderKeyHash is set.
+	ResponderKeyHash []byte
+
+	// Extensions contains raw X.509 extensions from the singleExtensions field
+	// of the OCSP response. When parsing certificates, this can be used to
+	// extract non-critical extensions that are not parsed by this package. When
+	// marshaling OCSP responses, the Extensions field is ignored, see
+	// ExtraExtensions.
+	Extensions []pkix.Extension
+
+	// ExtraExtensions contains extensions to be copied, raw, into any marshaled
+	// OCSP response (in the singleExtensions field). Values override any
+	// extensions that would otherwise be produced based on the other fields. The
+	// ExtraExtensions field is not populated when parsing certificates, see
+	// Extensions.
+	ExtraExtensions []pkix.Extension
+}
+
+// These are pre-serialized error responses for the various non-success codes
+// defined by OCSP. The Unauthorized code in particular can be used by an OCSP
+// responder that supports only pre-signed responses as a response to requests
+// for certificates with unknown status. See RFC 5019.
+var (
+	MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
+	InternalErrorErrorResponse    = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
+	TryLaterErrorResponse         = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
+	SigRequredErrorResponse       = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}
+	UnauthorizedErrorResponse     = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
+)
+
+// CheckSignatureFrom checks that the signature in resp is a valid signature
+// from issuer. This should only be used if resp.Certificate is nil. Otherwise,
+// the OCSP response contained an intermediate certificate that created the
+// signature. That signature is checked by ParseResponse and only
+// resp.Certificate remains to be validated.
+func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error {
+	return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature)
+}
+
+// ParseError results from an invalid OCSP response.
+type ParseError string
+
+func (p ParseError) Error() string {
+	return string(p)
+}
+
+// ParseRequest parses an OCSP request in DER form. It only supports
+// requests for a single certificate. Signed requests are not supported.
+// If a request includes a signature, it will result in a ParseError.
+func ParseRequest(bytes []byte) (*Request, error) {
+	var req ocspRequest
+	rest, err := asn1.Unmarshal(bytes, &req)
+	if err != nil {
+		return nil, err
+	}
+	if len(rest) > 0 {
+		return nil, ParseError("trailing data in OCSP request")
+	}
+
+	if len(req.TBSRequest.RequestList) == 0 {
+		return nil, ParseError("OCSP request contains no request body")
+	}
+	innerRequest := req.TBSRequest.RequestList[0]
+
+	hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm)
+	if hashFunc == crypto.Hash(0) {
+		return nil, ParseError("OCSP request uses unknown hash function")
+	}
+
+	return &Request{
+		HashAlgorithm:  hashFunc,
+		IssuerNameHash: innerRequest.Cert.NameHash,
+		IssuerKeyHash:  innerRequest.Cert.IssuerKeyHash,
+		SerialNumber:   innerRequest.Cert.SerialNumber,
+	}, nil
+}
+
+// ParseResponse parses an OCSP response in DER form. The response must contain
+// only one certificate status. To parse the status of a specific certificate
+// from a response which may contain multiple statuses, use ParseResponseForCert
+// instead.
+//
+// If the response contains an embedded certificate, then that certificate will
+// be used to verify the response signature. If the response contains an
+// embedded certificate and issuer is not nil, then issuer will be used to verify
+// the signature on the embedded certificate.
+//
+// If the response does not contain an embedded certificate and issuer is not
+// nil, then issuer will be used to verify the response signature.
+//
+// Invalid responses and parse failures will result in a ParseError.
+// Error responses will result in a ResponseError.
+func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) {
+	return ParseResponseForCert(bytes, nil, issuer)
+}
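+
+// Hedged usage sketch from a caller's perspective: der is a DER-encoded
+// response fetched from a responder and issuer the issuing certificate,
+// both assumed to exist.
+//
+//	resp, err := ocsp.ParseResponse(der, issuer)
+//	if err == nil && resp.Status == ocsp.Revoked {
+//		// the certificate was revoked at resp.RevokedAt
+//	}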
+
+// ParseResponseForCert acts identically to ParseResponse, except it supports
+// parsing responses that contain multiple statuses. If the response contains
+// multiple statuses and cert is not nil, then ParseResponseForCert will return
+// the first status which contains a matching serial, otherwise it will return an
+// error. If cert is nil, then the first status in the response will be returned.
+func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) {
+	var resp responseASN1
+	rest, err := asn1.Unmarshal(bytes, &resp)
+	if err != nil {
+		return nil, err
+	}
+	if len(rest) > 0 {
+		return nil, ParseError("trailing data in OCSP response")
+	}
+
+	if status := ResponseStatus(resp.Status); status != Success {
+		return nil, ResponseError{status}
+	}
+
+	if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
+		return nil, ParseError("bad OCSP response type")
+	}
+
+	var basicResp basicResponse
+	rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
+	if err != nil {
+		return nil, err
+	}
+	if len(rest) > 0 {
+		return nil, ParseError("trailing data in OCSP response")
+	}
+
+	if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 {
+		return nil, ParseError("OCSP response contains bad number of responses")
+	}
+
+	var singleResp singleResponse
+	if cert == nil {
+		singleResp = basicResp.TBSResponseData.Responses[0]
+	} else {
+		match := false
+		for _, resp := range basicResp.TBSResponseData.Responses {
+			if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 {
+				singleResp = resp
+				match = true
+				break
+			}
+		}
+		if !match {
+			return nil, ParseError("no response matching the supplied certificate")
+		}
+	}
+
+	ret := &Response{
+		Raw:                bytes,
+		TBSResponseData:    basicResp.TBSResponseData.Raw,
+		Signature:          basicResp.Signature.RightAlign(),
+		SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm),
+		Extensions:         singleResp.SingleExtensions,
+		SerialNumber:       singleResp.CertID.SerialNumber,
+		ProducedAt:         basicResp.TBSResponseData.ProducedAt,
+		ThisUpdate:         singleResp.ThisUpdate,
+		NextUpdate:         singleResp.NextUpdate,
+	}
+
+	// Handle the ResponderID CHOICE tag. ResponderID can be flattened into
+	// TBSResponseData once https://go-review.googlesource.com/34503 has been
+	// released.
+	rawResponderID := basicResp.TBSResponseData.RawResponderID
+	switch rawResponderID.Tag {
+	case 1: // Name
+		var rdn pkix.RDNSequence
+		if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 {
+			return nil, ParseError("invalid responder name")
+		}
+		ret.RawResponderName = rawResponderID.Bytes
+	case 2: // KeyHash
+		if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 {
+			return nil, ParseError("invalid responder key hash")
+		}
+	default:
+		return nil, ParseError("invalid responder id tag")
+	}
+
+	if len(basicResp.Certificates) > 0 {
+		// Responders should only send a single certificate (if they
+		// send any) that connects the responder's certificate to the
+		// original issuer. We accept responses with multiple
+		// certificates due to a number of responders sending them[1], but
+		// ignore all but the first.
+		//
+		// [1] https://github.com/golang/go/issues/21527
+		ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
+			return nil, ParseError("bad signature on embedded certificate: " + err.Error())
+		}
+
+		if issuer != nil {
+			if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
+				return nil, ParseError("bad OCSP signature: " + err.Error())
+			}
+		}
+	} else if issuer != nil {
+		if err := ret.CheckSignatureFrom(issuer); err != nil {
+			return nil, ParseError("bad OCSP signature: " + err.Error())
+		}
+	}
+
+	for _, ext := range singleResp.SingleExtensions {
+		if ext.Critical {
+			return nil, ParseError("unsupported critical extension")
+		}
+	}
+
+	for h, oid := range hashOIDs {
+		if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) {
+			ret.IssuerHash = h
+			break
+		}
+	}
+	if ret.IssuerHash == 0 {
+		return nil, ParseError("unsupported issuer hash algorithm")
+	}
+
+	switch {
+	case bool(singleResp.Good):
+		ret.Status = Good
+	case bool(singleResp.Unknown):
+		ret.Status = Unknown
+	default:
+		ret.Status = Revoked
+		ret.RevokedAt = singleResp.Revoked.RevocationTime
+		ret.RevocationReason = int(singleResp.Revoked.Reason)
+	}
+
+	return ret, nil
+}
+
+// RequestOptions contains options for constructing OCSP requests.
+type RequestOptions struct {
+	// Hash contains the hash function that should be used when
+	// constructing the OCSP request. If zero, SHA-1 will be used.
+	Hash crypto.Hash
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+	if opts == nil || opts.Hash == 0 {
+		// SHA-1 is nearly universally used in OCSP.
+		return crypto.SHA1
+	}
+	return opts.Hash
+}
+
+// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If
+// opts is nil then sensible defaults are used.
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
+	hashFunc := opts.hash()
+
+	// OCSP seems to be the only place where these raw hash identifiers are
+	// used. I took the following from
+	// http://msdn.microsoft.com/en-us/library/ff635603.aspx
+	_, ok := hashOIDs[hashFunc]
+	if !ok {
+		return nil, x509.ErrUnsupportedAlgorithm
+	}
+
+	if !hashFunc.Available() {
+		return nil, x509.ErrUnsupportedAlgorithm
+	}
+	h := opts.hash().New()
+
+	var publicKeyInfo struct {
+		Algorithm pkix.AlgorithmIdentifier
+		PublicKey asn1.BitString
+	}
+	if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+		return nil, err
+	}
+
+	h.Write(publicKeyInfo.PublicKey.RightAlign())
+	issuerKeyHash := h.Sum(nil)
+
+	h.Reset()
+	h.Write(issuer.RawSubject)
+	issuerNameHash := h.Sum(nil)
+
+	req := &Request{
+		HashAlgorithm:  hashFunc,
+		IssuerNameHash: issuerNameHash,
+		IssuerKeyHash:  issuerKeyHash,
+		SerialNumber:   cert.SerialNumber,
+	}
+	return req.Marshal()
+}
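+
+// Hedged usage sketch from a caller's perspective; cert and issuer are
+// assumed to be parsed *x509.Certificate values.
+//
+//	der, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256})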
+
+// CreateResponse returns a DER-encoded OCSP response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the responder's name field, and the
+// certificate itself is provided alongside the OCSP response signature.
+//
+// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
+//
+// The template is used to populate the SerialNumber, Status, RevokedAt,
+// RevocationReason, ThisUpdate, and NextUpdate fields.
+//
+// If template.IssuerHash is not set, SHA1 will be used.
+//
+// The ProducedAt date is automatically set to the current date, to the nearest minute.
+func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) {
+	var publicKeyInfo struct {
+		Algorithm pkix.AlgorithmIdentifier
+		PublicKey asn1.BitString
+	}
+	if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+		return nil, err
+	}
+
+	if template.IssuerHash == 0 {
+		template.IssuerHash = crypto.SHA1
+	}
+	hashOID := getOIDFromHashAlgorithm(template.IssuerHash)
+	if hashOID == nil {
+		return nil, errors.New("unsupported issuer hash algorithm")
+	}
+
+	if !template.IssuerHash.Available() {
+		return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash)
+	}
+	h := template.IssuerHash.New()
+	h.Write(publicKeyInfo.PublicKey.RightAlign())
+	issuerKeyHash := h.Sum(nil)
+
+	h.Reset()
+	h.Write(issuer.RawSubject)
+	issuerNameHash := h.Sum(nil)
+
+	innerResponse := singleResponse{
+		CertID: certID{
+			HashAlgorithm: pkix.AlgorithmIdentifier{
+				Algorithm:  hashOID,
+				Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+			},
+			NameHash:      issuerNameHash,
+			IssuerKeyHash: issuerKeyHash,
+			SerialNumber:  template.SerialNumber,
+		},
+		ThisUpdate:       template.ThisUpdate.UTC(),
+		NextUpdate:       template.NextUpdate.UTC(),
+		SingleExtensions: template.ExtraExtensions,
+	}
+
+	switch template.Status {
+	case Good:
+		innerResponse.Good = true
+	case Unknown:
+		innerResponse.Unknown = true
+	case Revoked:
+		innerResponse.Revoked = revokedInfo{
+			RevocationTime: template.RevokedAt.UTC(),
+			Reason:         asn1.Enumerated(template.RevocationReason),
+		}
+	}
+
+	rawResponderID := asn1.RawValue{
+		Class:      2, // context-specific
+		Tag:        1, // Name (explicit tag)
+		IsCompound: true,
+		Bytes:      responderCert.RawSubject,
+	}
+	tbsResponseData := responseData{
+		Version:        0,
+		RawResponderID: rawResponderID,
+		ProducedAt:     time.Now().Truncate(time.Minute).UTC(),
+		Responses:      []singleResponse{innerResponse},
+	}
+
+	tbsResponseDataDER, err := asn1.Marshal(tbsResponseData)
+	if err != nil {
+		return nil, err
+	}
+
+	hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	responseHash := hashFunc.New()
+	responseHash.Write(tbsResponseDataDER)
+	signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc)
+	if err != nil {
+		return nil, err
+	}
+
+	response := basicResponse{
+		TBSResponseData:    tbsResponseData,
+		SignatureAlgorithm: signatureAlgorithm,
+		Signature: asn1.BitString{
+			Bytes:     signature,
+			BitLength: 8 * len(signature),
+		},
+	}
+	if template.Certificate != nil {
+		response.Certificates = []asn1.RawValue{
+			{FullBytes: template.Certificate.Raw},
+		}
+	}
+	responseDER, err := asn1.Marshal(response)
+	if err != nil {
+		return nil, err
+	}
+
+	return asn1.Marshal(responseASN1{
+		Status: asn1.Enumerated(Success),
+		Response: responseBytes{
+			ResponseType: idPKIXOCSPBasic,
+			Response:     responseDER,
+		},
+	})
+}
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
index 904b57e01d7a50d12ed28c9e27c5a6d3ef3ef44f..28cd99c7f3fc50b03f3789b1013401db46d40bf9 100644
--- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
 choose, you can pass the `New` functions from the different SHA packages to
 pbkdf2.Key.
 */
-package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+package pbkdf2
 
 import (
 	"crypto/hmac"
diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go
new file mode 100644
index 0000000000000000000000000000000000000000..76fa40fb20afd69ab3d511c63963940383a795cb
--- /dev/null
+++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go
@@ -0,0 +1,212 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scrypt implements the scrypt key derivation function as defined in
+// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard
+// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf).
+package scrypt
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+	"math/bits"
+
+	"golang.org/x/crypto/pbkdf2"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// blockCopy copies n numbers from src into dst.
+func blockCopy(dst, src []uint32, n int) {
+	copy(dst, src[:n])
+}
+
+// blockXOR XORs numbers from dst with n numbers from src.
+func blockXOR(dst, src []uint32, n int) {
+	for i, v := range src[:n] {
+		dst[i] ^= v
+	}
+}
+
+// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
+// and puts the result into both tmp and out.
+func salsaXOR(tmp *[16]uint32, in, out []uint32) {
+	w0 := tmp[0] ^ in[0]
+	w1 := tmp[1] ^ in[1]
+	w2 := tmp[2] ^ in[2]
+	w3 := tmp[3] ^ in[3]
+	w4 := tmp[4] ^ in[4]
+	w5 := tmp[5] ^ in[5]
+	w6 := tmp[6] ^ in[6]
+	w7 := tmp[7] ^ in[7]
+	w8 := tmp[8] ^ in[8]
+	w9 := tmp[9] ^ in[9]
+	w10 := tmp[10] ^ in[10]
+	w11 := tmp[11] ^ in[11]
+	w12 := tmp[12] ^ in[12]
+	w13 := tmp[13] ^ in[13]
+	w14 := tmp[14] ^ in[14]
+	w15 := tmp[15] ^ in[15]
+
+	x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8
+	x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15
+
+	for i := 0; i < 8; i += 2 {
+		x4 ^= bits.RotateLeft32(x0+x12, 7)
+		x8 ^= bits.RotateLeft32(x4+x0, 9)
+		x12 ^= bits.RotateLeft32(x8+x4, 13)
+		x0 ^= bits.RotateLeft32(x12+x8, 18)
+
+		x9 ^= bits.RotateLeft32(x5+x1, 7)
+		x13 ^= bits.RotateLeft32(x9+x5, 9)
+		x1 ^= bits.RotateLeft32(x13+x9, 13)
+		x5 ^= bits.RotateLeft32(x1+x13, 18)
+
+		x14 ^= bits.RotateLeft32(x10+x6, 7)
+		x2 ^= bits.RotateLeft32(x14+x10, 9)
+		x6 ^= bits.RotateLeft32(x2+x14, 13)
+		x10 ^= bits.RotateLeft32(x6+x2, 18)
+
+		x3 ^= bits.RotateLeft32(x15+x11, 7)
+		x7 ^= bits.RotateLeft32(x3+x15, 9)
+		x11 ^= bits.RotateLeft32(x7+x3, 13)
+		x15 ^= bits.RotateLeft32(x11+x7, 18)
+
+		x1 ^= bits.RotateLeft32(x0+x3, 7)
+		x2 ^= bits.RotateLeft32(x1+x0, 9)
+		x3 ^= bits.RotateLeft32(x2+x1, 13)
+		x0 ^= bits.RotateLeft32(x3+x2, 18)
+
+		x6 ^= bits.RotateLeft32(x5+x4, 7)
+		x7 ^= bits.RotateLeft32(x6+x5, 9)
+		x4 ^= bits.RotateLeft32(x7+x6, 13)
+		x5 ^= bits.RotateLeft32(x4+x7, 18)
+
+		x11 ^= bits.RotateLeft32(x10+x9, 7)
+		x8 ^= bits.RotateLeft32(x11+x10, 9)
+		x9 ^= bits.RotateLeft32(x8+x11, 13)
+		x10 ^= bits.RotateLeft32(x9+x8, 18)
+
+		x12 ^= bits.RotateLeft32(x15+x14, 7)
+		x13 ^= bits.RotateLeft32(x12+x15, 9)
+		x14 ^= bits.RotateLeft32(x13+x12, 13)
+		x15 ^= bits.RotateLeft32(x14+x13, 18)
+	}
+	x0 += w0
+	x1 += w1
+	x2 += w2
+	x3 += w3
+	x4 += w4
+	x5 += w5
+	x6 += w6
+	x7 += w7
+	x8 += w8
+	x9 += w9
+	x10 += w10
+	x11 += w11
+	x12 += w12
+	x13 += w13
+	x14 += w14
+	x15 += w15
+
+	out[0], tmp[0] = x0, x0
+	out[1], tmp[1] = x1, x1
+	out[2], tmp[2] = x2, x2
+	out[3], tmp[3] = x3, x3
+	out[4], tmp[4] = x4, x4
+	out[5], tmp[5] = x5, x5
+	out[6], tmp[6] = x6, x6
+	out[7], tmp[7] = x7, x7
+	out[8], tmp[8] = x8, x8
+	out[9], tmp[9] = x9, x9
+	out[10], tmp[10] = x10, x10
+	out[11], tmp[11] = x11, x11
+	out[12], tmp[12] = x12, x12
+	out[13], tmp[13] = x13, x13
+	out[14], tmp[14] = x14, x14
+	out[15], tmp[15] = x15, x15
+}
+
+func blockMix(tmp *[16]uint32, in, out []uint32, r int) {
+	blockCopy(tmp[:], in[(2*r-1)*16:], 16)
+	for i := 0; i < 2*r; i += 2 {
+		salsaXOR(tmp, in[i*16:], out[i*8:])
+		salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:])
+	}
+}
+
+func integer(b []uint32, r int) uint64 {
+	j := (2*r - 1) * 16
+	return uint64(b[j]) | uint64(b[j+1])<<32
+}
+
+func smix(b []byte, r, N int, v, xy []uint32) {
+	var tmp [16]uint32
+	R := 32 * r
+	x := xy
+	y := xy[R:]
+
+	j := 0
+	for i := 0; i < R; i++ {
+		x[i] = binary.LittleEndian.Uint32(b[j:])
+		j += 4
+	}
+	for i := 0; i < N; i += 2 {
+		blockCopy(v[i*R:], x, R)
+		blockMix(&tmp, x, y, r)
+
+		blockCopy(v[(i+1)*R:], y, R)
+		blockMix(&tmp, y, x, r)
+	}
+	for i := 0; i < N; i += 2 {
+		j := int(integer(x, r) & uint64(N-1))
+		blockXOR(x, v[j*R:], R)
+		blockMix(&tmp, x, y, r)
+
+		j = int(integer(y, r) & uint64(N-1))
+		blockXOR(y, v[j*R:], R)
+		blockMix(&tmp, y, x, r)
+	}
+	j = 0
+	for _, v := range x[:R] {
+		binary.LittleEndian.PutUint32(b[j:], v)
+		j += 4
+	}
+}
+
+// Key derives a key from the password, salt, and cost parameters, returning
+// a byte slice of length keyLen that can be used as a cryptographic key.
+//
+// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
+// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
+// limits, the function returns a nil byte slice and an error.
+//
+// For example, to get a derived key suitable for AES-256 (which needs a
+// 32-byte key):
+//
+//	dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
+//
+// The recommended parameters for interactive logins as of 2017 are N=32768, r=8
+// and p=1. The parameters N, r, and p should be increased as memory latency and
+// CPU parallelism increase; consider setting N to the highest power of 2 you
+// can derive within 100 milliseconds. Remember to get a good random salt.
+func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
+	if N <= 1 || N&(N-1) != 0 {
+		return nil, errors.New("scrypt: N must be > 1 and a power of 2")
+	}
+	if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
+		return nil, errors.New("scrypt: parameters are too large")
+	}
+
+	xy := make([]uint32, 64*r)
+	v := make([]uint32, 32*N*r)
+	b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
+
+	for i := 0; i < p; i++ {
+		smix(b[i*128*r:], r, N, v, xy)
+	}
+
+	return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
+}
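
Putting the doc comment's parameters together, a minimal sketch of deriving an AES-256 key with the vendored package; the password literal and the 16-byte salt size are illustrative:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/scrypt"
)

func main() {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// N must be a power of two greater than 1; 32768/8/1 are the doc's
	// 2017 interactive-login parameters. keyLen of 32 suits AES-256.
	dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", dk)
}
```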
diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE
index 6a66aea5eafe0ca6a688840c47219556c552488e..2a7cf70da6e498df9c11ab6a5eaa2ddd7af34da4 100644
--- a/vendor/golang.org/x/sync/LICENSE
+++ b/vendor/golang.org/x/sync/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
 
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
new file mode 100644
index 0000000000000000000000000000000000000000..948a3ee63d4ffe370a795e2233634309646d69b9
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -0,0 +1,135 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errgroup provides synchronization, error propagation, and Context
+// cancelation for groups of goroutines working on subtasks of a common task.
+//
+// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
+// returning errors.
+package errgroup
+
+import (
+	"context"
+	"fmt"
+	"sync"
+)
+
+type token struct{}
+
+// A Group is a collection of goroutines working on subtasks that are part of
+// the same overall task.
+//
+// A zero Group is valid, has no limit on the number of active goroutines,
+// and does not cancel on error.
+type Group struct {
+	cancel func(error)
+
+	wg sync.WaitGroup
+
+	sem chan token
+
+	errOnce sync.Once
+	err     error
+}
+
+func (g *Group) done() {
+	if g.sem != nil {
+		<-g.sem
+	}
+	g.wg.Done()
+}
+
+// WithContext returns a new Group and an associated Context derived from ctx.
+//
+// The derived Context is canceled the first time a function passed to Go
+// returns a non-nil error or the first time Wait returns, whichever occurs
+// first.
+func WithContext(ctx context.Context) (*Group, context.Context) {
+	ctx, cancel := withCancelCause(ctx)
+	return &Group{cancel: cancel}, ctx
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the first non-nil error (if any) from them.
+func (g *Group) Wait() error {
+	g.wg.Wait()
+	if g.cancel != nil {
+		g.cancel(g.err)
+	}
+	return g.err
+}
+
+// Go calls the given function in a new goroutine.
+// It blocks until the new goroutine can be added without the number of
+// active goroutines in the group exceeding the configured limit.
+//
+// The first call to return a non-nil error cancels the group's context, if the
+// group was created by calling WithContext. The error will be returned by Wait.
+func (g *Group) Go(f func() error) {
+	if g.sem != nil {
+		g.sem <- token{}
+	}
+
+	g.wg.Add(1)
+	go func() {
+		defer g.done()
+
+		if err := f(); err != nil {
+			g.errOnce.Do(func() {
+				g.err = err
+				if g.cancel != nil {
+					g.cancel(g.err)
+				}
+			})
+		}
+	}()
+}
+
+// TryGo calls the given function in a new goroutine only if the number of
+// active goroutines in the group is currently below the configured limit.
+//
+// The return value reports whether the goroutine was started.
+func (g *Group) TryGo(f func() error) bool {
+	if g.sem != nil {
+		select {
+		case g.sem <- token{}:
+			// Note: this allows barging iff channels in general allow barging.
+		default:
+			return false
+		}
+	}
+
+	g.wg.Add(1)
+	go func() {
+		defer g.done()
+
+		if err := f(); err != nil {
+			g.errOnce.Do(func() {
+				g.err = err
+				if g.cancel != nil {
+					g.cancel(g.err)
+				}
+			})
+		}
+	}()
+	return true
+}
+
+// SetLimit limits the number of active goroutines in this group to at most n.
+// A negative value indicates no limit.
+//
+// Any subsequent call to the Go method will block until it can add an active
+// goroutine without exceeding the configured limit.
+//
+// The limit must not be modified while any goroutines in the group are active.
+func (g *Group) SetLimit(n int) {
+	if n < 0 {
+		g.sem = nil
+		return
+	}
+	if len(g.sem) != 0 {
+		panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
+	}
+	g.sem = make(chan token, n)
+}
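
A minimal sketch combining the Group API just vendored, WithContext, SetLimit, Go, and Wait; the URLs and the fetch body are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // at most 4 tasks in flight; must be set before Go

	urls := []string{"https://example.com/a", "https://example.com/b"}
	for _, url := range urls {
		url := url // capture per-iteration value (pre-Go 1.22 semantics)
		g.Go(func() error {
			// A real task would honor ctx; the first error returned
			// from any task cancels it for the whole group.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				fmt.Println("fetched", url)
				return nil
			}
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("failed:", err)
	}
}
```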
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go
new file mode 100644
index 0000000000000000000000000000000000000000..f93c740b638ca5afc20c86e22b5fefc7b68f6670
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/go120.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+
+package errgroup
+
+import "context"
+
+func withCancelCause(parent context.Context) (context.Context, func(error)) {
+	return context.WithCancelCause(parent)
+}
diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go
new file mode 100644
index 0000000000000000000000000000000000000000..88ce33434e2387b167457af3195f655e90968beb
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.20
+
+package errgroup
+
+import "context"
+
+func withCancelCause(parent context.Context) (context.Context, func(error)) {
+	ctx, cancel := context.WithCancel(parent)
+	return ctx, func(error) { cancel() }
+}
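
These two build-tagged shims select the implementation of withCancelCause: on Go 1.20+ errgroup uses context.WithCancelCause, so the first error handed to the group becomes the context's cancelation cause, while older toolchains fall back to plain WithCancel and drop the cause. A small standard-library-only sketch of what the 1.20 path makes observable:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.New("first worker failed"))

	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // first worker failed
}
```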
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
index 30f632c577bd1aa41489c813ed1cd580b6f52d45..b618162aab648f61fedf72757e0e46cca3af2c9a 100644
--- a/vendor/golang.org/x/sync/semaphore/semaphore.go
+++ b/vendor/golang.org/x/sync/semaphore/semaphore.go
@@ -35,11 +35,25 @@ type Weighted struct {
 // Acquire acquires the semaphore with a weight of n, blocking until resources
 // are available or ctx is done. On success, returns nil. On failure, returns
 // ctx.Err() and leaves the semaphore unchanged.
-//
-// If ctx is already done, Acquire may still succeed without blocking.
 func (s *Weighted) Acquire(ctx context.Context, n int64) error {
+	done := ctx.Done()
+
 	s.mu.Lock()
+	select {
+	case <-done:
+		// ctx becoming done has "happened before" acquiring the semaphore,
+		// whether it became done before the call began or while we were
+		// waiting for the mutex. We prefer to fail even if we could acquire
+		// the mutex without blocking.
+		s.mu.Unlock()
+		return ctx.Err()
+	default:
+	}
 	if s.size-s.cur >= n && s.waiters.Len() == 0 {
+		// Since we hold s.mu and haven't synchronized since checking done, if
+		// ctx becomes done before we return here, it becoming done must have
+		// "happened concurrently" with this call - it cannot "happen before"
+		// we return in this branch. So, we're ok to always acquire here.
 		s.cur += n
 		s.mu.Unlock()
 		return nil
@@ -48,7 +62,7 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
 	if n > s.size {
 		// Don't make other Acquire calls block on one that's doomed to fail.
 		s.mu.Unlock()
-		<-ctx.Done()
+		<-done
 		return ctx.Err()
 	}
 
@@ -58,14 +72,14 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
 	s.mu.Unlock()
 
 	select {
-	case <-ctx.Done():
-		err := ctx.Err()
+	case <-done:
 		s.mu.Lock()
 		select {
 		case <-ready:
-			// Acquired the semaphore after we were canceled.  Rather than trying to
-			// fix up the queue, just pretend we didn't notice the cancelation.
-			err = nil
+			// Acquired the semaphore after we were canceled.
+			// Pretend we didn't and put the tokens back.
+			s.cur -= n
+			s.notifyWaiters()
 		default:
 			isFront := s.waiters.Front() == elem
 			s.waiters.Remove(elem)
@@ -75,9 +89,19 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
 			}
 		}
 		s.mu.Unlock()
-		return err
+		return ctx.Err()
 
 	case <-ready:
+		// Acquired the semaphore. Check that ctx isn't already done.
+		// We check the done channel instead of calling ctx.Err because we
+		// already have the channel, and ctx.Err is O(n) with the nesting
+		// depth of ctx.
+		select {
+		case <-done:
+			s.Release(n)
+			return ctx.Err()
+		default:
+		}
 		return nil
 	}
 }
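
The behavioral core of this hunk: Acquire now fails when ctx is already done, even if capacity is free, and hands back units it won on a lost race with cancelation. A minimal sketch of the first property:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(2) // two units of capacity

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // ctx is already done

	// With the patched Acquire above, a done ctx fails the call even
	// though both units are free; before this change it could succeed.
	if err := sem.Acquire(ctx, 1); err != nil {
		fmt.Println("acquire failed:", err) // context canceled
	}

	// A live context acquires normally.
	if err := sem.Acquire(context.Background(), 1); err == nil {
		defer sem.Release(1)
		fmt.Println("acquired one unit")
	}
}
```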
diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go
new file mode 100644
index 0000000000000000000000000000000000000000..4051830982ad3fbeea9ba92f99d38542d8221f1f
--- /dev/null
+++ b/vendor/golang.org/x/sync/singleflight/singleflight.go
@@ -0,0 +1,214 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight // import "golang.org/x/sync/singleflight"
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates that runtime.Goexit was called in
+// the user-given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of the given function.
+type panicError struct {
+	value interface{}
+	stack []byte
+}
+
+// Error implements the error interface.
+func (p *panicError) Error() string {
+	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func (p *panicError) Unwrap() error {
+	err, ok := p.value.(error)
+	if !ok {
+		return nil
+	}
+
+	return err
+}
+
+func newPanicError(v interface{}) error {
+	stack := debug.Stack()
+
+	// The first line of the stack trace is of the form "goroutine N [status]:"
+	// but by the time the panic reaches Do the goroutine may no longer exist
+	// and its status will have changed. Trim out the misleading line.
+	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+		stack = stack[line+1:]
+	}
+	return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+	wg sync.WaitGroup
+
+	// These fields are written once before the WaitGroup is done
+	// and are only read after the WaitGroup is done.
+	val interface{}
+	err error
+
+	// These fields are read and written with the singleflight
+	// mutex held before the WaitGroup is done, and are read but
+	// not written after the WaitGroup is done.
+	dups  int
+	chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+	mu sync.Mutex       // protects m
+	m  map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+	Val    interface{}
+	Err    error
+	Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		g.mu.Unlock()
+		c.wg.Wait()
+
+		if e, ok := c.err.(*panicError); ok {
+			panic(e)
+		} else if c.err == errGoexit {
+			runtime.Goexit()
+		}
+		return c.val, c.err, true
+	}
+	c := new(call)
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	g.doCall(c, key, fn)
+	return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+	ch := make(chan Result, 1)
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		c.chans = append(c.chans, ch)
+		g.mu.Unlock()
+		return ch
+	}
+	c := &call{chans: []chan<- Result{ch}}
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	go g.doCall(c, key, fn)
+
+	return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+	normalReturn := false
+	recovered := false
+
+	// use double-defer to distinguish panic from runtime.Goexit,
+	// for more details see https://golang.org/cl/134395
+	defer func() {
+		// the given function invoked runtime.Goexit
+		if !normalReturn && !recovered {
+			c.err = errGoexit
+		}
+
+		g.mu.Lock()
+		defer g.mu.Unlock()
+		c.wg.Done()
+		if g.m[key] == c {
+			delete(g.m, key)
+		}
+
+		if e, ok := c.err.(*panicError); ok {
+			// In order to prevent the waiting channels from being blocked forever,
+			// we need to ensure that this panic cannot be recovered.
+			if len(c.chans) > 0 {
+				go panic(e)
+				select {} // Keep this goroutine around so that it will appear in the crash dump.
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key.  Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	delete(g.m, key)
+	g.mu.Unlock()
+}
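
A minimal sketch of the duplicate-suppression API just vendored; `lookup` and the key are hypothetical, and concurrent callers passing the same key share a single execution of the callback:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

// lookup stands in for an expensive fetch. While one call for a key is
// in flight, duplicate callers block and receive the same result.
func lookup(key string) (string, error) {
	v, err, shared := group.Do(key, func() (interface{}, error) {
		return "value-for-" + key, nil
	})
	if err != nil {
		return "", err
	}
	fmt.Println("shared with other callers:", shared)
	return v.(string), nil
}

func main() {
	fmt.Println(lookup("user:42"))
}
```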
diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE
index 6a66aea5eafe0ca6a688840c47219556c552488e..2a7cf70da6e498df9c11ab6a5eaa2ddd7af34da4 100644
--- a/vendor/golang.org/x/text/LICENSE
+++ b/vendor/golang.org/x/text/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 879911e03622ba463f3568268a4162c240d14908..c824999e502e602a90868ec2510c386fb636d800 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -8,9 +8,15 @@ github.com/go-chi/chi/v5/middleware
 # github.com/golang/protobuf v1.5.3
 ## explicit; go 1.9
 github.com/golang/protobuf/proto
+# github.com/golang/snappy v0.0.4
+## explicit
+github.com/golang/snappy
 # github.com/google/uuid v1.5.0
 ## explicit
 github.com/google/uuid
+# github.com/gorilla/mux v1.8.1
+## explicit; go 1.20
+github.com/gorilla/mux
 # github.com/jackc/pgpassfile v1.0.0
 ## explicit; go 1.12
 github.com/jackc/pgpassfile
@@ -35,8 +41,19 @@ github.com/jackc/pgx/v5/pgxpool
 ## explicit; go 1.19
 github.com/jackc/puddle/v2
 github.com/jackc/puddle/v2/internal/genstack
+# github.com/klauspost/compress v1.13.6
+## explicit; go 1.15
+github.com/klauspost/compress
+github.com/klauspost/compress/fse
+github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/snapref
+github.com/klauspost/compress/zstd
+github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/kr/text v0.2.0
 ## explicit
+# github.com/montanaflynn/stats v0.7.1
+## explicit; go 1.13
+github.com/montanaflynn/stats
 # github.com/pelletier/go-toml/v2 v2.1.0
 ## explicit; go 1.16
 github.com/pelletier/go-toml/v2
@@ -53,20 +70,85 @@ github.com/pmezard/go-difflib/difflib
 ## explicit; go 1.20
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# golang.org/x/crypto v0.16.0
+# github.com/xdg-go/pbkdf2 v1.0.0
+## explicit; go 1.9
+github.com/xdg-go/pbkdf2
+# github.com/xdg-go/scram v1.1.2
+## explicit; go 1.11
+github.com/xdg-go/scram
+# github.com/xdg-go/stringprep v1.0.4
+## explicit; go 1.11
+github.com/xdg-go/stringprep
+# github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78
+## explicit; go 1.17
+github.com/youmark/pkcs8
+# go.mongodb.org/mongo-driver v1.17.1
 ## explicit; go 1.18
+go.mongodb.org/mongo-driver/bson
+go.mongodb.org/mongo-driver/bson/bsoncodec
+go.mongodb.org/mongo-driver/bson/bsonoptions
+go.mongodb.org/mongo-driver/bson/bsonrw
+go.mongodb.org/mongo-driver/bson/bsontype
+go.mongodb.org/mongo-driver/bson/primitive
+go.mongodb.org/mongo-driver/event
+go.mongodb.org/mongo-driver/internal/aws
+go.mongodb.org/mongo-driver/internal/aws/awserr
+go.mongodb.org/mongo-driver/internal/aws/credentials
+go.mongodb.org/mongo-driver/internal/aws/signer/v4
+go.mongodb.org/mongo-driver/internal/bsonutil
+go.mongodb.org/mongo-driver/internal/codecutil
+go.mongodb.org/mongo-driver/internal/credproviders
+go.mongodb.org/mongo-driver/internal/csfle
+go.mongodb.org/mongo-driver/internal/csot
+go.mongodb.org/mongo-driver/internal/driverutil
+go.mongodb.org/mongo-driver/internal/handshake
+go.mongodb.org/mongo-driver/internal/httputil
+go.mongodb.org/mongo-driver/internal/logger
+go.mongodb.org/mongo-driver/internal/ptrutil
+go.mongodb.org/mongo-driver/internal/rand
+go.mongodb.org/mongo-driver/internal/randutil
+go.mongodb.org/mongo-driver/internal/uuid
+go.mongodb.org/mongo-driver/mongo
+go.mongodb.org/mongo-driver/mongo/address
+go.mongodb.org/mongo-driver/mongo/description
+go.mongodb.org/mongo-driver/mongo/options
+go.mongodb.org/mongo-driver/mongo/readconcern
+go.mongodb.org/mongo-driver/mongo/readpref
+go.mongodb.org/mongo-driver/mongo/writeconcern
+go.mongodb.org/mongo-driver/tag
+go.mongodb.org/mongo-driver/version
+go.mongodb.org/mongo-driver/x/bsonx/bsoncore
+go.mongodb.org/mongo-driver/x/mongo/driver
+go.mongodb.org/mongo-driver/x/mongo/driver/auth
+go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds
+go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi
+go.mongodb.org/mongo-driver/x/mongo/driver/connstring
+go.mongodb.org/mongo-driver/x/mongo/driver/dns
+go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt
+go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options
+go.mongodb.org/mongo-driver/x/mongo/driver/ocsp
+go.mongodb.org/mongo-driver/x/mongo/driver/operation
+go.mongodb.org/mongo-driver/x/mongo/driver/session
+go.mongodb.org/mongo-driver/x/mongo/driver/topology
+go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage
+# golang.org/x/crypto v0.26.0
+## explicit; go 1.20
+golang.org/x/crypto/ocsp
 golang.org/x/crypto/pbkdf2
-# golang.org/x/net v0.19.0
+golang.org/x/crypto/scrypt
+# golang.org/x/net v0.21.0
 ## explicit; go 1.18
 golang.org/x/net/context
 # golang.org/x/oauth2 v0.13.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.5.0
+# golang.org/x/sync v0.8.0
 ## explicit; go 1.18
+golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/text v0.14.0
+golang.org/x/sync/singleflight
+# golang.org/x/text v0.17.0
 ## explicit; go 1.18
 golang.org/x/text/cases
 golang.org/x/text/internal