Compare commits

...

219 Commits

Author SHA1 Message Date
Robin
cad243905f fix ts request cli (#4461)
Co-authored-by: robinzhang <azhangrongbing@163.com>
2024-11-21 21:40:08 +08:00
dependabot[bot]
7c8f41d577 chore(deps): bump codecov/codecov-action from 4 to 5 (#4459)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-21 21:25:53 +08:00
dependabot[bot]
cbd118d55f chore(deps): bump google.golang.org/protobuf from 1.35.1 to 1.35.2 (#4457) 2024-11-15 17:00:10 +08:00
dependabot[bot]
9d2a1b8b0a chore(deps): bump golang.org/x/time from 0.7.0 to 0.8.0 (#4454)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-11 00:24:06 +08:00
dependabot[bot]
f6ada979aa chore(deps): bump golang.org/x/net from 0.30.0 to 0.31.0 (#4452)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-11 00:13:28 +08:00
Kevin Wan
53a74759a5 feat: support json array in request body (#4444) 2024-11-05 14:09:30 +00:00
dependabot[bot]
1940f7bd58 chore(deps): bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (#4447) 2024-11-05 19:37:32 +08:00
Kevin Wan
18cb3141ba feat: support query array in httpx.Parse (#4440) 2024-11-02 13:55:37 +00:00
dependabot[bot]
f822c9a94f chore(deps): bump github.com/fatih/color from 1.17.0 to 1.18.0 (#4434)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-26 14:49:56 +08:00
Kevin Wan
1a3dc75874 chore: upgrade goctl version (#4429) 2024-10-20 10:38:04 +08:00
yangjinheng
796dd5b6e2 fix the source code directory after the soft link (#4425) 2024-10-19 15:37:44 +00:00
dependabot[bot]
94e476ade7 chore(deps): bump github.com/redis/go-redis/v9 from 9.6.1 to 9.7.0 (#4426) 2024-10-19 08:10:32 +08:00
dependabot[bot]
2a74996e1b chore(deps): bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 (#4424) 2024-10-18 11:38:41 +08:00
fishJack01
f52af1ebf9 feat:New redis method TxPipeline (#4417)
Co-authored-by: fish <fish@fishdeMac-mini.local>
2024-10-13 05:51:55 +00:00
dependabot[bot]
24450f18bb chore(deps): bump google.golang.org/protobuf from 1.34.2 to 1.35.1 (#4414)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-12 21:13:32 +08:00
dependabot[bot]
f1a45d8a23 chore(deps): bump golang.org/x/time from 0.6.0 to 0.7.0 (#4409)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-05 12:58:06 +08:00
MarkJoyMa
9aebba1566 fix/conf_multi_layer_map (#4407)
Co-authored-by: aiden.ma <Aiden.ma@yijinin.com>
2024-10-05 04:46:32 +00:00
dependabot[bot]
4998479f9a chore(deps): bump golang.org/x/net from 0.29.0 to 0.30.0 (#4410)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-05 12:27:34 +08:00
dependabot[bot]
873d1351ee chore(deps): bump golang.org/x/sys from 0.25.0 to 0.26.0 (#4408)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-05 12:04:39 +08:00
dependabot[bot]
afcbca8f24 chore(deps): bump go.mongodb.org/mongo-driver from 1.17.0 to 1.17.1 (#4402)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-03 11:16:00 +08:00
dependabot[bot]
5b8126c2cf chore(deps): bump go.uber.org/automaxprocs from 1.5.3 to 1.6.0 (#4389) 2024-09-24 09:16:20 +08:00
Kevin Wan
4dfaf35151 fix: goctl k8s autoscaling version upgrade (#4387) 2024-09-20 23:16:38 +08:00
dependabot[bot]
00cd77c92b chore(deps): bump github.com/prometheus/client_golang from 1.20.3 to 1.20.4 (#4380)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-20 10:26:52 +08:00
dependabot[bot]
2145a7a93c chore(deps): bump go.mongodb.org/mongo-driver from 1.16.1 to 1.17.0 (#4379)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-19 22:10:29 +08:00
dependabot[bot]
d5302f2dbe chore(deps): bump golang.org/x/net from 0.28.0 to 0.29.0 (#4360)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-07 21:13:42 +08:00
dependabot[bot]
6181594bc8 chore(deps): bump github.com/jhump/protoreflect from 1.16.0 to 1.17.0 (#4359)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-06 20:13:29 +08:00
dependabot[bot]
11c10e51ff chore(deps): bump github.com/prometheus/client_golang from 1.20.2 to 1.20.3 (#4361)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-06 18:52:59 +08:00
Kevin Wan
3f03126d27 chore: fix goctl Dockerfile warnings (#4358) 2024-09-05 22:13:01 +08:00
dependabot[bot]
d43adc2823 chore(deps): bump golang.org/x/sys from 0.24.0 to 0.25.0 (#4356)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-05 10:17:27 +08:00
Kevin Wan
656222b572 chore: update goctl version (#4353) 2024-09-03 09:22:04 +08:00
Kevin Wan
077b6072fa chore: coding style (#4352) 2024-09-03 09:06:50 +08:00
jursonmo
0cafb1164b should check if devServer Enabled, then call once.Do() (#4351)
Co-authored-by: william <myname@example.com>
2024-09-03 00:54:43 +00:00
MarkJoyMa
90afa08367 Revert "fix: etcd scheme on grpc resolver" (#4349) 2024-09-03 00:48:08 +00:00
Kevin Wan
c92f788292 chore: update go version in test (#4347) 2024-09-01 16:19:31 +08:00
Kevin Wan
e94be9b302 chore: update go version for building goctl releases (#4346) 2024-09-01 16:02:49 +08:00
Kevin Wan
e713d9013d chore: update goctl deps (#4345) 2024-09-01 15:42:45 +08:00
Kevin Wan
24d6150073 chore: refactor config center (#4339)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-08-28 12:02:48 +00:00
MarkJoyMa
44cddec5c3 feat: added configuration center function (#3035)
Co-authored-by: aiden.ma <Aiden.ma@yijinin.com>
2024-08-28 14:47:52 +08:00
Kevin Wan
47d13e5ef8 fix: etcd scheme on grpc resolver (#4121) 2024-08-27 23:13:37 +08:00
Kevin Wan
896e1a2abb chore: refactor logx file time format (#4335) 2024-08-27 22:01:01 +08:00
kui
075817a8dd Add custom log file name date format (#4333) 2024-08-27 20:43:25 +08:00
dependabot[bot]
29400f6814 chore(deps): bump github.com/prometheus/client_golang from 1.20.1 to 1.20.2 (#4334)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-27 19:51:54 +08:00
dependabot[bot]
34f536264f chore(deps): bump github.com/prometheus/client_golang from 1.20.0 to 1.20.1 (#4324)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-21 17:38:54 +08:00
Kevin Wan
9d9c7e0fe0 feat: support build tag to reduce binary size w/o k8s (#4323) 2024-08-20 19:53:20 +08:00
dependabot[bot]
e220d3a4cb chore(deps): bump github.com/prometheus/client_golang from 1.19.1 to 1.20.0 (#4317)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-19 12:22:06 +08:00
dependabot[bot]
193dcf90bc chore(deps): bump golang.org/x/sys from 0.23.0 to 0.24.0 (#4307)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-10 21:15:08 +08:00
featherlight
03756c9166 refactor zrpc server interceptor builder (#4300) 2024-08-08 14:37:19 +00:00
kesonan
c1f12c5784 (goctl): fix map conversion (#4306) 2024-08-08 14:19:14 +00:00
dependabot[bot]
2883111af5 chore(deps): bump go.mongodb.org/mongo-driver from 1.16.0 to 1.16.1 (#4305)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-08 22:07:18 +08:00
dependabot[bot]
2758c4e842 chore(deps): bump golang.org/x/net from 0.27.0 to 0.28.0 (#4301)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-07 20:19:47 +08:00
Kevin Wan
4196ddb3e3 Update readme-cn.md (#4294) 2024-08-06 17:54:38 +08:00
dependabot[bot]
e24d797226 chore(deps): bump golang.org/x/time from 0.5.0 to 0.6.0 (#4299)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-06 17:54:01 +08:00
dependabot[bot]
d4349fa958 chore(deps): bump golang.org/x/sys from 0.22.0 to 0.23.0 (#4298)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-06 16:39:57 +08:00
kesonan
da2c14d45f (goctl): fix quickstart error while reading go module info (#4297) 2024-08-05 15:29:06 +00:00
kesonan
64e3aeda55 Add goctl version to code header (#4293) 2024-08-03 14:22:51 +00:00
Kevin Wan
dedba17219 refactor: simplify BatchError (#4292) 2024-08-03 13:57:41 +08:00
kesonan
c6348b9855 refactor goctl-compare (#4290) 2024-08-03 04:26:24 +00:00
chentong
8689a6247e refactor(core/errorx): use errors.Join simplify error handle (#4289) 2024-08-03 03:00:59 +00:00
Rui Chen
ff6ee25d23 ci: update workflows to use go.mod instead of specifying go version (#4286)
Signed-off-by: Rui Chen <rui@chenrui.dev>
2024-08-03 02:12:28 +00:00
Kevin Wan
5213243bbb Update readme.md (#4287) 2024-08-02 18:26:54 +08:00
Kevin Wan
2588a36555 feat: support rest.WithCorsHeaders to customize cors headers (#4284) 2024-07-30 17:29:44 +08:00
Krystian Kulas
c2421beb25 fix: readme remove duplicate text (#4280) 2024-07-29 01:06:29 +00:00
kesonan
dfe8a81c76 Upgrade goctl version to 1.7.1 (#4282) 2024-07-29 00:54:21 +00:00
kesonan
ee643a945e (goctl): fix nested struct generation (#4281) 2024-07-28 15:40:25 +00:00
Kevin Wan
eeda6efae7 chore: upgrade go-zero version (#4277) 2024-07-27 17:31:32 +08:00
Kevin Wan
caf0e64beb chore: optimize lock in discov.etcd (#4275) 2024-07-27 16:27:05 +08:00
dependabot[bot]
0e61303cb0 chore(deps): bump github.com/redis/go-redis/v9 from 9.6.0 to 9.6.1 (#4274)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-26 23:17:39 +08:00
dependabot[bot]
f651d7cf6c chore(deps): bump go.etcd.io/etcd/client/v3 from 3.5.14 to 3.5.15 (#4267)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-25 19:24:07 +08:00
tsinghuacoder
05da2c560b chore: fix some comments (#4270)
Signed-off-by: tsinghuacoder <tsinghuacoder@icloud.com>
2024-07-25 19:11:56 +08:00
Kevin Wan
8ae0f287d6 chore: optimize lock in discov.etcd (#4272) 2024-07-25 17:24:05 +08:00
Kevin Wan
8f7aff558f chore: refactor BuildTypes in tsgen. (#4266) 2024-07-22 21:17:59 +08:00
jaron
6e08d478fe feat(tsgen): tsgen export buildTypes function (#4197) 2024-07-22 12:11:06 +00:00
dependabot[bot]
944ac383d2 chore(deps): bump github.com/redis/go-redis/v9 from 9.5.4 to 9.6.0 (#4262)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-20 23:18:19 +08:00
Kevin Wan
0eec33f14b chore: optimize file reading (#4264) 2024-07-20 22:44:13 +08:00
JiChen
9de04ee035 fix: handle with read the empty file (#4258) 2024-07-20 12:01:13 +00:00
kesonan
cf5b080fbe (goctl): Use .goctl as home if not exists (#4260) 2024-07-19 05:54:24 +00:00
Kevin Wan
4a14164be1 feat: handle using root as the path of file server (#4255) 2024-07-18 15:15:03 +00:00
Kevin Wan
5dd6f2a43a feat: support embed file system to serve files in rest (#4253) 2024-07-17 16:21:08 +08:00
Kevin Wan
a00c956776 chore: upgrade go version (#4248)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-07-16 11:43:25 +08:00
yonwoo9
c02fb3acab chore: initialize some slice type variables (#4249) 2024-07-15 15:50:42 +00:00
Kevin Wan
9f8455ddb3 chore: fix typo (#4246) 2024-07-14 10:52:47 +08:00
guonaihong
775b105ab2 added code comments (#4219) 2024-07-13 12:09:58 +00:00
Kevin Wan
ec86f22cd6 feat: support file server in rest (#4244) 2024-07-13 19:58:35 +08:00
dependabot[bot]
e776b5d8ab chore(deps): bump github.com/redis/go-redis/v9 from 9.5.3 to 9.5.4 (#4245)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-13 16:31:57 +08:00
Kevin Wan
2026d4410b fix: should not trigger breaker on duplicate key with mongodb (#4238) 2024-07-08 23:41:02 +08:00
MarkJoyMa
f8437e6364 feat/sqlc_partial (#4237) 2024-07-07 15:54:18 +00:00
Kevin Wan
bd2033eb35 feat: support adding more writer, easy to write to console additionally (#4234)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-07-07 23:31:27 +08:00
MarkJoyMa
fed835bc25 feat/redis_hook (#4233) 2024-07-07 04:50:30 +00:00
dependabot[bot]
c9cbd74bf3 chore(deps): bump golang.org/x/net from 0.26.0 to 0.27.0 (#4231)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-07 10:54:37 +08:00
dependabot[bot]
27ea106293 chore(deps): bump golang.org/x/sys from 0.21.0 to 0.22.0 (#4229)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-05 19:20:54 +08:00
Kevin Wan
657923b9d5 Update readme-cn.md (#4228) 2024-07-04 11:05:23 +08:00
dependabot[bot]
8dbec6a800 chore(deps): bump google.golang.org/grpc from 1.64.0 to 1.65.0 (#4227)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-03 21:03:23 +08:00
Kevin Wan
490559434a chore: remove unnecessary return (#4226) 2024-07-02 23:45:18 +08:00
kesonan
4a62d084a9 fix: disable array request body (#4220) 2024-07-02 03:55:01 +00:00
kesonan
2f9b6cf8ec disable nested struct for array and map type (#4222) 2024-06-29 05:44:46 +00:00
Kevin Wan
01bbc78bac Update FUNDING.yml (#4216) 2024-06-27 11:15:42 +08:00
kesonan
a012a9138f (goctl): support nested struct (#4211) 2024-06-25 15:18:15 +00:00
Kevin Wan
4ec9cac82b chore: update readme for goctl installation (#4206) 2024-06-23 11:47:17 +08:00
苏蓝
8d9746e794 Update readme-cn.md (#4205) 2024-06-23 03:32:57 +00:00
Kevin Wan
8f83705199 Update version.go (#4204) 2024-06-21 20:09:36 +08:00
Kevin Wan
f1ed7bd75d Update readme-cn.md (#4195) 2024-06-17 22:28:24 +08:00
Kevin Wan
7a20608756 chore: add trending badge (#4194) 2024-06-17 22:26:08 +08:00
dependabot[bot]
5cfff95e95 chore(deps): bump google.golang.org/protobuf from 1.34.1 to 1.34.2 (#4185)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-12 14:26:26 +08:00
dependabot[bot]
1e1cc1a0d9 chore(deps): bump github.com/redis/go-redis/v9 from 9.5.2 to 9.5.3 (#4183)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-08 09:28:45 +08:00
dependabot[bot]
0a1440a839 chore(deps): bump golang.org/x/net from 0.25.0 to 0.26.0 (#4180) 2024-06-05 08:01:46 +08:00
kesonan
23980d29c3 fix no such dir if not create goctl home (#4177) 2024-06-04 10:55:56 +00:00
jiz4oh
424119d796 chore: fix the confused log level in comment (#4175) 2024-06-04 10:43:26 +00:00
kesonan
97c7835d9e fix #4161 (#4176) 2024-06-04 10:26:34 +00:00
kesonan
7954ad3759 fix: fix readme (#4174) 2024-06-02 15:22:33 +00:00
dependabot[bot]
e8c9b0ddf8 chore(deps): bump go.etcd.io/etcd/client/v3 from 3.5.13 to 3.5.14 (#4169)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-02 11:17:55 +08:00
dependabot[bot]
70112e59cb chore(deps): bump github.com/redis/go-redis/v9 from 9.4.0 to 9.5.2 (#4172)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-01 13:12:33 +08:00
dependabot[bot]
7ba5ced2d9 chore(deps): bump github.com/alicebob/miniredis/v2 from 2.32.1 to 2.33.0 (#4168)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-30 11:15:59 +08:00
Kevin Wan
962b36d745 fix: log concurrency problems after calling WithXXX methods (#4164) 2024-05-26 12:52:05 +08:00
dependabot[bot]
57060cc6d7 chore(deps): bump google.golang.org/grpc from 1.63.2 to 1.64.0 (#4155) 2024-05-16 07:47:19 +08:00
Kevin Wan
e0c16059d9 optimize: simplify breaker algorithm (#4151) 2024-05-14 17:02:21 +08:00
dependabot[bot]
a0d954dfab chore(deps): bump github.com/fatih/color from 1.16.0 to 1.17.0 (#4150)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-14 10:37:25 +08:00
Alex Last
a5ece25c07 feat: add secure option for sending traces via otlphttp (#3973) 2024-05-12 17:00:54 +00:00
Kevin Wan
0cac41a38b chore: refactor mapping unmarshaler (#4145) 2024-05-12 14:37:36 +08:00
Kevin Wan
f10084a3f5 chore: refactor and coding style (#4144) 2024-05-11 23:06:59 +08:00
Leo
040fee5669 feat: httpx.Parse supports parsing structures that implement the Unmarshaler interface (#4143) 2024-05-11 22:25:10 +08:00
Kevin Wan
42b3bae65a optimize: improve breaker algorithm on recovery time (#4141) 2024-05-11 21:44:26 +08:00
guangwu
7c730b97d8 fix: make: command: Command not found (#4132)
Signed-off-by: guoguangwu <guoguangwug@gmail.com>
2024-05-10 13:33:03 +00:00
Kevin Wan
057bae92ab fix: log panic on Error() or String() panics (#4136) 2024-05-10 12:49:34 +08:00
Kevin Wan
74331a45c9 fix: log panic when use nil error or stringer with Field method (#4130) 2024-05-10 00:31:36 +08:00
chen quan
9d551d507f chore(api/maxconnshandler): add tracing information to the log (#4126) 2024-05-08 05:25:35 +00:00
Kevin Wan
02dd81c05c Update FUNDING.yml (#4128) 2024-05-08 12:52:42 +08:00
dependabot[bot]
3095ba2b1f chore(deps): bump golang.org/x/net from 0.24.0 to 0.25.0 (#4124)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 12:52:13 +08:00
dependabot[bot]
2afa60132c chore(deps): bump google.golang.org/protobuf from 1.34.0 to 1.34.1 (#4123)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 12:45:10 +08:00
Kevin Wan
e71ed7294b Update FUNDING.yml (#4127) 2024-05-08 12:26:41 +08:00
dependabot[bot]
95822281bf chore(deps): bump golang.org/x/sys from 0.19.0 to 0.20.0 (#4122)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 12:22:06 +08:00
Kevin Wan
588e10daef chore: refactor and coding style (#4120) 2024-05-06 18:16:56 +08:00
soasurs
62ba01120e fix: zrpc kube resolver builder (#4119)
Signed-off-by: soasurs <soasurs@gmail.com>
2024-05-06 14:50:35 +08:00
dependabot[bot]
527de1c50e chore(deps): bump google.golang.org/protobuf from 1.33.1-0.20240408130810-98873a205002 to 1.34.0 (#4115)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-04 23:59:47 +08:00
dependabot[bot]
abfe62a2d7 chore(deps): bump github.com/pelletier/go-toml/v2 from 2.2.1 to 2.2.2 (#4116)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-04 23:39:25 +08:00
Kevin Wan
36f4cf97ff Update FUNDING.yml (#4114) 2024-04-30 22:58:51 +08:00
Kevin Wan
b3cd8a32ed feat: trigger breaker on underlying service timeout (#4112) 2024-04-30 19:01:20 +08:00
kesonan
a9d27cda8a (goctl): fix prefix syntax (#4113) 2024-04-30 09:11:08 +00:00
kesonan
04116f647d chore(goctl): change goctl version to 1.6.5 (#4111) 2024-04-30 04:25:47 +00:00
Kevin Wan
a8ccda0c06 feat: add fx.ParallelErr (#4107) 2024-04-29 00:18:30 +08:00
Kevin Wan
bfddb9dae4 feat: add errorx.In to facility error checking (#4105) 2024-04-27 20:43:45 +08:00
Kevin Wan
b337ae36e5 Update readme-cn.md 2024-04-20 10:00:10 +08:00
Kevin Wan
5e5123caa3 chore: add more tests (#4094) 2024-04-19 11:13:23 +08:00
Kevin Wan
d371ab5479 feat: use breaker with ctx to prevent deadline exceeded (#4091)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-04-18 23:18:49 +08:00
jaron
1b9b61f505 fix(goctl): GOPROXY env should set by ourself (#4087) 2024-04-18 22:50:30 +08:00
suyhuai
e1f15efb3b add customized.tpl for model template (#4086)
Co-authored-by: sudaoxyz <sudaoxyz@gmail.com>
2024-04-18 14:40:54 +00:00
Kevin Wan
1540bdc4c9 optimize: improve breaker algorithm on recovery time (#4077) 2024-04-18 22:33:25 +08:00
Kevin Wan
95b32b5779 chore: add code coverage (#4090) 2024-04-18 20:58:36 +08:00
Kevin Wan
815a4f7eed feat: support context in breaker methods (#4088) 2024-04-18 18:00:17 +08:00
dependabot[bot]
4b0bacc9c6 chore(deps): bump k8s.io/apimachinery from 0.29.3 to 0.29.4 (#4084)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-18 11:22:49 +08:00
Kevin Wan
e9dc96af17 chore: coding style (#4082) 2024-04-17 23:37:35 +08:00
fearlessfei
62c88a84d1 feat: migrate lua script to lua file (#4069) 2024-04-17 15:20:10 +00:00
Kevin Wan
36088ea0d4 fix: avoid duplicate in logx plain mode (#4080) 2024-04-17 17:43:22 +08:00
dependabot[bot]
164f5aa86c chore(deps): bump github.com/pelletier/go-toml/v2 from 2.2.0 to 2.2.1 (#4073)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-13 12:32:56 +08:00
dependabot[bot]
07d07cdd23 chore(deps): bump github.com/fullstorydev/grpcurl from 1.8.9 to 1.9.1 (#4065)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-11 13:02:52 +08:00
dependabot[bot]
0efe99af66 chore(deps): bump github.com/jhump/protoreflect from 1.15.6 to 1.16.0 (#4064)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-11 12:39:00 +08:00
Kevin Wan
927f8bc821 fix: fix ignored context.DeadlineExceeded (#4066) 2024-04-11 11:14:20 +08:00
kesonan
2a7ada993b (goctl)feature/model config (#4062)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-04-10 15:01:59 +00:00
Kevin Wan
682460c1c8 fix: fix ignored scanner.Err() (#4063) 2024-04-10 17:28:52 +08:00
Kevin Wan
a66ae0d4c4 fix: timeout on query should return context.DeadlineExceeded (#4060) 2024-04-10 04:17:39 +00:00
dependabot[bot]
d1f24ab70f chore(deps): bump google.golang.org/grpc from 1.63.0 to 1.63.2 (#4058)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-10 10:23:33 +08:00
dependabot[bot]
d0983948b5 chore(deps): bump google.golang.org/grpc from 1.63.0 to 1.63.2 in /tools/goctl (#4059)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-10 10:14:15 +08:00
Kevin Wan
3343fc2cdb chore: update goctl version to 1.6.4 (#4057) 2024-04-09 22:59:33 +08:00
Kevin Wan
3866b5741a feat: support http stream response (#4055) 2024-04-09 20:46:44 +08:00
Kevin Wan
5fbe8ff5c4 chore: coding style (#4054) 2024-04-09 17:19:47 +08:00
jaron
6f763f71f9 chore(goctl): update readme (#4053) 2024-04-09 08:30:25 +00:00
dependabot[bot]
80377f18e7 chore(deps): bump codecov/codecov-action from 3 to 4 (#4051)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-09 15:24:24 +08:00
dependabot[bot]
8690859c7d chore(deps): bump golang.org/x/net from 0.23.0 to 0.24.0 (#4048)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-06 10:03:06 +08:00
dependabot[bot]
d744038198 chore(deps): bump google.golang.org/grpc from 1.62.1 to 1.63.0 (#4045)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-05 19:43:17 +08:00
dependabot[bot]
58ad8cac8a chore(deps): bump golang.org/x/sys from 0.18.0 to 0.19.0 (#4046)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-05 19:28:54 +08:00
dependabot[bot]
74886a151e chore(deps): bump google.golang.org/grpc from 1.62.1 to 1.63.0 in /tools/goctl (#4047)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-05 18:45:49 +08:00
Kevin Wan
c5eda1f155 chore: fix codecov (#4044) 2024-04-05 00:53:13 +08:00
Kevin Wan
b5b7c054ca chore: fix codecov (#4043) 2024-04-05 00:43:38 +08:00
Kevin Wan
6c8073b691 chore: add more tests (#4042) 2024-04-05 00:13:42 +08:00
Kevin Wan
64d430d424 fix: bug on form data with slices (#4040) 2024-04-04 20:28:54 +08:00
Jayson Wang
f138cc792e fix(goctl): multi imports the api cause redeclared error in types.go (#3988)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-04-04 11:39:24 +00:00
dependabot[bot]
b20ec8aedb chore(deps): bump golang.org/x/net from 0.22.0 to 0.23.0 (#4039)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-04 10:48:52 +08:00
Kevin Wan
a53254fa91 chore: update codecov config (#4038) 2024-04-03 23:58:02 +08:00
Kevin Wan
08563482e5 chore: coding style (#4037) 2024-04-03 22:55:52 +08:00
fearlessfei
968727412d add custom health response information (#4034)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-04-03 14:33:55 +00:00
linden-in-China
6f3d094eba opton to option (#4035) 2024-04-03 14:15:21 +00:00
kesonan
2d3ebb9b62 (goctl) fix #4027 (#4032) 2024-04-01 15:22:29 +00:00
chentong
8c0bb27136 feat: add gen api @doc comment to logic handler routes (#3790)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-03-30 11:09:54 +00:00
ak5w
cf987295df fix the usage datasource url of postgresql (#4029) (#4030) 2024-03-30 05:51:54 +00:00
dependabot[bot]
8c92b3af7d chore(deps): bump go.etcd.io/etcd/client/v3 from 3.5.12 to 3.5.13 (#4028)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-30 13:40:24 +08:00
Kevin Wan
5dd9342703 chore: fix test failure (#4031) 2024-03-30 13:29:58 +08:00
shyandsy
3ef59f6a71 fix(httpx): support array field for request dto (#4026)
Co-authored-by: yshi3 <yshi3@tesla.com>
2024-03-30 12:10:56 +08:00
dependabot[bot]
f12802abc7 chore(deps): bump github.com/go-sql-driver/mysql from 1.8.0 to 1.8.1 (#4022)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-27 18:42:35 +08:00
dependabot[bot]
6f0fe67804 chore(deps): bump github.com/go-sql-driver/mysql from 1.8.0 to 1.8.1 in /tools/goctl (#4023)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-27 18:07:00 +08:00
dependabot[bot]
f44f0e7e62 chore(deps): bump github.com/pelletier/go-toml/v2 from 2.1.1 to 2.2.0 (#4017)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-20 23:34:06 +08:00
dependabot[bot]
cdd95296db chore(deps): bump k8s.io/client-go from 0.29.2 to 0.29.3 (#4012)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-19 15:41:20 +08:00
kesonan
3e794cf991 (goctl)fix code_ql (#4009) 2024-03-17 02:21:36 +00:00
Kevin Wan
bbce95e7e1 fix: didn't count failure in allow method with breaker algorithm (#4008) 2024-03-16 22:19:36 +08:00
dependabot[bot]
0449450c64 chore(deps): bump github.com/jackc/pgx/v5 from 5.5.3 to 5.5.4 in /tools/goctl (#4007)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-15 12:55:57 +08:00
dependabot[bot]
9f9a12ea57 chore(deps): bump github.com/alicebob/miniredis/v2 from 2.31.1 to 2.32.1 (#4003)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-14 11:19:44 +08:00
Kevin Wan
cc2a7e97f9 chore: coding style, add code for prometheus (#4002) 2024-03-13 20:00:35 +08:00
dependabot[bot]
09d7af76af chore(deps): bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.0 in /tools/goctl (#3997)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-13 13:28:27 +08:00
dependabot[bot]
c233a66601 chore(deps): bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.0 (#3998)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-13 12:56:41 +08:00
dependabot[bot]
94fa12560c chore(deps): bump github.com/jackc/pgx/v5 from 5.5.4 to 5.5.5 (#3999)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-12 12:28:28 +08:00
MarkJoyMa
7d90f906f5 feat: migrate redis breaker into hook (#3982) 2024-03-12 04:21:33 +00:00
Viktor Patchev
f372b98d96 Add: Optimize the error log to be more specific (#3994) 2024-03-11 13:06:50 +08:00
mongobaba
459d3025c5 optimize: change err == xx to errors.Is(err, xx) (#3991) 2024-03-09 12:49:16 +00:00
Kevin Wan
e9e55125a9 chore: fix warnings (#3990) 2024-03-09 13:48:11 +08:00
Kevin Wan
159ecb7386 chore: fix warnings (#3989) 2024-03-08 22:35:17 +08:00
ansoda
69bb746a1d fix: StopAgent panics when trace agent disabled (#3981)
Co-authored-by: ansoda <ansoda@gmail.com>
2024-03-08 10:28:23 +00:00
Kevin Wan
d184f96b13 chore: coding style (#3987) 2024-03-08 16:11:28 +08:00
MarkJoyMa
c7dacb0146 fix: mysql WithAcceptable bug (#3986) 2024-03-08 04:23:41 +00:00
dependabot[bot]
2207477b60 chore(deps): bump google.golang.org/protobuf from 1.32.0 to 1.33.0 in /tools/goctl (#3978)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 11:15:48 +08:00
dependabot[bot]
105ab590ff chore(deps): bump google.golang.org/grpc from 1.62.0 to 1.62.1 in /tools/goctl (#3977)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 11:01:14 +08:00
dependabot[bot]
2f4c58ed73 chore(deps): bump google.golang.org/grpc from 1.62.0 to 1.62.1 (#3976)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 10:45:19 +08:00
dependabot[bot]
1631aa02ad chore(deps): bump github.com/golang/protobuf from 1.5.3 to 1.5.4 (#3984)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 10:24:51 +08:00
dependabot[bot]
4df10eef5d chore(deps): bump golang.org/x/net from 0.21.0 to 0.22.0 (#3975)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-06 23:31:02 +08:00
dependabot[bot]
3d552ea7a8 chore(deps): bump google.golang.org/protobuf from 1.32.0 to 1.33.0 (#3974)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-06 21:33:45 +08:00
Kevin Wan
74b87ac9fd chore: coding style (#3972) 2024-03-05 14:40:10 +08:00
Alex Last
ba1d6e3664 fix: only add log middleware to not found handler when enabled (#3969) 2024-03-05 04:14:54 +00:00
dependabot[bot]
2096cd5749 chore(deps): bump github.com/jackc/pgx/v5 from 5.5.3 to 5.5.4 (#3970)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-05 12:09:18 +08:00
dependabot[bot]
2eb2fa26f6 chore(deps): bump golang.org/x/sys from 0.17.0 to 0.18.0 (#3971)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-05 12:03:06 +08:00
Kevin Wan
bc4187ca90 Create SECURITY.md (#3968) 2024-03-04 23:07:54 +08:00
Kevin Wan
b7be25b98b Update readme-cn.md (#3966) 2024-03-03 14:18:27 +08:00
Kevin Wan
dd01695d45 chore: update goctl version to 1.6.3 (#3965) 2024-03-03 13:36:35 +08:00
279 changed files with 7597 additions and 3753 deletions

12
.github/FUNDING.yml vendored
View File

@@ -1,13 +1,3 @@
# These are supported funding model platforms # These are supported funding model platforms
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] github: [zeromicro]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
ethereum: # 0x5052b7f6B937B02563996D23feb69b38D06Ca150 | kevwan

View File

@@ -17,7 +17,7 @@ jobs:
- name: Set up Go 1.x - name: Set up Go 1.x
uses: actions/setup-go@v5 uses: actions/setup-go@v5
with: with:
go-version: '1.19' go-version-file: go.mod
check-latest: true check-latest: true
cache: true cache: true
id: go id: go
@@ -40,7 +40,7 @@ jobs:
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./... run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
- name: Codecov - name: Codecov
uses: codecov/codecov-action@v4 uses: codecov/codecov-action@v5
test-win: test-win:
name: Windows name: Windows
@@ -52,8 +52,8 @@ jobs:
- name: Set up Go 1.x - name: Set up Go 1.x
uses: actions/setup-go@v5 uses: actions/setup-go@v5
with: with:
# use 1.19 to guarantee Go 1.19 compatibility # make sure Go version compatible with go-zero
go-version: '1.19' go-version-file: go.mod
check-latest: true check-latest: true
cache: true cache: true

View File

@@ -22,7 +22,7 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }} goarch: ${{ matrix.goarch }}
goversion: "https://dl.google.com/go/go1.19.13.linux-amd64.tar.gz" goversion: "https://dl.google.com/go/go1.20.14.linux-amd64.tar.gz"
project_path: "tools/goctl" project_path: "tools/goctl"
binary_name: "goctl" binary_name: "goctl"
extra_files: tools/goctl/readme.md tools/goctl/readme-cn.md extra_files: tools/goctl/readme.md tools/goctl/readme-cn.md

16
SECURITY.md Normal file
View File

@@ -0,0 +1,16 @@
# Security Policy
## Supported Versions
We publish releases monthly.
| Version | Supported |
| ------- | ------------------ |
| >= 1.4.4 | :white_check_mark: |
| < 1.4.4 | :x: |
## Reporting a Vulnerability
https://github.com/zeromicro/go-zero/security/advisories
Accepted vulnerabilities are expected to be fixed within a month.

View File

@@ -2,6 +2,7 @@ package bloom
import ( import (
"context" "context"
_ "embed"
"errors" "errors"
"strconv" "strconv"
@@ -17,19 +18,13 @@ var (
// ErrTooLargeOffset indicates the offset is too large in bitset. // ErrTooLargeOffset indicates the offset is too large in bitset.
ErrTooLargeOffset = errors.New("too large offset") ErrTooLargeOffset = errors.New("too large offset")
setScript = redis.NewScript(` //go:embed setscript.lua
for _, offset in ipairs(ARGV) do setLuaScript string
redis.call("setbit", KEYS[1], offset, 1) setScript = redis.NewScript(setLuaScript)
end
`) //go:embed testscript.lua
testScript = redis.NewScript(` testLuaScript string
for _, offset in ipairs(ARGV) do testScript = redis.NewScript(testLuaScript)
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
return false
end
end
return true
`)
) )
type ( type (
@@ -110,7 +105,7 @@ func newRedisBitSet(store *redis.Redis, key string, bits uint) *redisBitSet {
} }
func (r *redisBitSet) buildOffsetArgs(offsets []uint) ([]string, error) { func (r *redisBitSet) buildOffsetArgs(offsets []uint) ([]string, error) {
var args []string args := make([]string, 0, len(offsets))
for _, offset := range offsets { for _, offset := range offsets {
if offset >= r.bits { if offset >= r.bits {
@@ -130,7 +125,7 @@ func (r *redisBitSet) check(ctx context.Context, offsets []uint) (bool, error) {
} }
resp, err := r.store.ScriptRunCtx(ctx, testScript, []string{r.key}, args) resp, err := r.store.ScriptRunCtx(ctx, testScript, []string{r.key}, args)
if err == redis.Nil { if errors.Is(err, redis.Nil) {
return false, nil return false, nil
} else if err != nil { } else if err != nil {
return false, err return false, err
@@ -162,7 +157,7 @@ func (r *redisBitSet) set(ctx context.Context, offsets []uint) error {
} }
_, err = r.store.ScriptRunCtx(ctx, setScript, []string{r.key}, args) _, err = r.store.ScriptRunCtx(ctx, setScript, []string{r.key}, args)
if err == redis.Nil { if errors.Is(err, redis.Nil) {
return nil return nil
} }

3
core/bloom/setscript.lua Normal file
View File

@@ -0,0 +1,3 @@
for _, offset in ipairs(ARGV) do
redis.call("setbit", KEYS[1], offset, 1)
end

View File

@@ -0,0 +1,6 @@
for _, offset in ipairs(ARGV) do
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
return false
end
end
return true

View File

@@ -1,6 +1,7 @@
package breaker package breaker
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
@@ -31,16 +32,21 @@ type (
Name() string Name() string
// Allow checks if the request is allowed. // Allow checks if the request is allowed.
// If allowed, a promise will be returned, the caller needs to call promise.Accept() // If allowed, a promise will be returned,
// on success, or call promise.Reject() on failure. // otherwise ErrServiceUnavailable will be returned as the error.
// If not allow, ErrServiceUnavailable will be returned. // The caller needs to call promise.Accept() on success,
// or call promise.Reject() on failure.
Allow() (Promise, error) Allow() (Promise, error)
// AllowCtx checks if the request is allowed when ctx isn't done.
AllowCtx(ctx context.Context) (Promise, error)
// Do runs the given request if the Breaker accepts it. // Do runs the given request if the Breaker accepts it.
// Do returns an error instantly if the Breaker rejects the request. // Do returns an error instantly if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error // If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again. // and causes the same panic again.
Do(req func() error) error Do(req func() error) error
// DoCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoCtx(ctx context.Context, req func() error) error
// DoWithAcceptable runs the given request if the Breaker accepts it. // DoWithAcceptable runs the given request if the Breaker accepts it.
// DoWithAcceptable returns an error instantly if the Breaker rejects the request. // DoWithAcceptable returns an error instantly if the Breaker rejects the request.
@@ -48,12 +54,16 @@ type (
// and causes the same panic again. // and causes the same panic again.
// acceptable checks if it's a successful call, even if the error is not nil. // acceptable checks if it's a successful call, even if the error is not nil.
DoWithAcceptable(req func() error, acceptable Acceptable) error DoWithAcceptable(req func() error, acceptable Acceptable) error
// DoWithAcceptableCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoWithAcceptableCtx(ctx context.Context, req func() error, acceptable Acceptable) error
// DoWithFallback runs the given request if the Breaker accepts it. // DoWithFallback runs the given request if the Breaker accepts it.
// DoWithFallback runs the fallback if the Breaker rejects the request. // DoWithFallback runs the fallback if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error // If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again. // and causes the same panic again.
DoWithFallback(req func() error, fallback Fallback) error DoWithFallback(req func() error, fallback Fallback) error
// DoWithFallbackCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoWithFallbackCtx(ctx context.Context, req func() error, fallback Fallback) error
// DoWithFallbackAcceptable runs the given request if the Breaker accepts it. // DoWithFallbackAcceptable runs the given request if the Breaker accepts it.
// DoWithFallbackAcceptable runs the fallback if the Breaker rejects the request. // DoWithFallbackAcceptable runs the fallback if the Breaker rejects the request.
@@ -61,6 +71,9 @@ type (
// and causes the same panic again. // and causes the same panic again.
// acceptable checks if it's a successful call, even if the error is not nil. // acceptable checks if it's a successful call, even if the error is not nil.
DoWithFallbackAcceptable(req func() error, fallback Fallback, acceptable Acceptable) error DoWithFallbackAcceptable(req func() error, fallback Fallback, acceptable Acceptable) error
// DoWithFallbackAcceptableCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoWithFallbackAcceptableCtx(ctx context.Context, req func() error, fallback Fallback,
acceptable Acceptable) error
} }
// Fallback is the func to be called if the request is rejected. // Fallback is the func to be called if the request is rejected.
@@ -117,23 +130,71 @@ func (cb *circuitBreaker) Allow() (Promise, error) {
return cb.throttle.allow() return cb.throttle.allow()
} }
func (cb *circuitBreaker) AllowCtx(ctx context.Context) (Promise, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
return cb.Allow()
}
}
func (cb *circuitBreaker) Do(req func() error) error { func (cb *circuitBreaker) Do(req func() error) error {
return cb.throttle.doReq(req, nil, defaultAcceptable) return cb.throttle.doReq(req, nil, defaultAcceptable)
} }
func (cb *circuitBreaker) DoCtx(ctx context.Context, req func() error) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.Do(req)
}
}
func (cb *circuitBreaker) DoWithAcceptable(req func() error, acceptable Acceptable) error { func (cb *circuitBreaker) DoWithAcceptable(req func() error, acceptable Acceptable) error {
return cb.throttle.doReq(req, nil, acceptable) return cb.throttle.doReq(req, nil, acceptable)
} }
func (cb *circuitBreaker) DoWithAcceptableCtx(ctx context.Context, req func() error,
acceptable Acceptable) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.DoWithAcceptable(req, acceptable)
}
}
func (cb *circuitBreaker) DoWithFallback(req func() error, fallback Fallback) error { func (cb *circuitBreaker) DoWithFallback(req func() error, fallback Fallback) error {
return cb.throttle.doReq(req, fallback, defaultAcceptable) return cb.throttle.doReq(req, fallback, defaultAcceptable)
} }
func (cb *circuitBreaker) DoWithFallbackCtx(ctx context.Context, req func() error,
fallback Fallback) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.DoWithFallback(req, fallback)
}
}
func (cb *circuitBreaker) DoWithFallbackAcceptable(req func() error, fallback Fallback, func (cb *circuitBreaker) DoWithFallbackAcceptable(req func() error, fallback Fallback,
acceptable Acceptable) error { acceptable Acceptable) error {
return cb.throttle.doReq(req, fallback, acceptable) return cb.throttle.doReq(req, fallback, acceptable)
} }
func (cb *circuitBreaker) DoWithFallbackAcceptableCtx(ctx context.Context, req func() error,
fallback Fallback, acceptable Acceptable) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.DoWithFallbackAcceptable(req, fallback, acceptable)
}
}
func (cb *circuitBreaker) Name() string { func (cb *circuitBreaker) Name() string {
return cb.name return cb.name
} }
@@ -208,7 +269,7 @@ func (ew *errorWindow) add(reason string) {
} }
func (ew *errorWindow) String() string { func (ew *errorWindow) String() string {
var reasons []string reasons := make([]string, 0, ew.count)
ew.lock.Lock() ew.lock.Lock()
// reverse order // reverse order

View File

@@ -1,11 +1,13 @@
package breaker package breaker
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"testing" "testing"
"time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/stat" "github.com/zeromicro/go-zero/core/stat"
@@ -16,10 +18,274 @@ func init() {
} }
func TestCircuitBreaker_Allow(t *testing.T) { func TestCircuitBreaker_Allow(t *testing.T) {
b := NewBreaker() t.Run("allow", func(t *testing.T) {
assert.True(t, len(b.Name()) > 0) b := NewBreaker()
_, err := b.Allow() assert.True(t, len(b.Name()) > 0)
assert.Nil(t, err) _, err := b.Allow()
assert.Nil(t, err)
})
t.Run("allow with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
_, err := b.AllowCtx(context.Background())
assert.Nil(t, err)
})
t.Run("allow with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
_, err := b.AllowCtx(ctx)
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("allow with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
_, err := b.AllowCtx(ctx)
assert.ErrorIs(t, err, context.Canceled)
}
_, err := b.AllowCtx(context.Background())
assert.NoError(t, err)
})
}
func TestCircuitBreaker_Do(t *testing.T) {
t.Run("do", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.Do(func() error {
return nil
})
assert.Nil(t, err)
})
t.Run("do with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoCtx(context.Background(), func() error {
return nil
})
assert.Nil(t, err)
})
t.Run("do with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoCtx(ctx, func() error {
return nil
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("do with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoCtx(ctx, func() error {
return nil
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoCtx(context.Background(), func() error {
return nil
}))
})
}
func TestCircuitBreaker_DoWithAcceptable(t *testing.T) {
t.Run("doWithAcceptable", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithAcceptable(func() error {
return nil
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithAcceptable with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithAcceptable with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoWithAcceptableCtx(ctx, func() error {
return nil
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("doWithAcceptable with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoWithAcceptableCtx(ctx, func() error {
return nil
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoWithAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) bool {
return true
}))
})
}
func TestCircuitBreaker_DoWithFallback(t *testing.T) {
t.Run("doWithFallback", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallback(func() error {
return nil
}, func(err error) error {
return err
})
assert.Nil(t, err)
})
t.Run("doWithFallback with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallbackCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
})
assert.Nil(t, err)
})
t.Run("doWithFallback with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoWithFallbackCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("doWithFallback with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoWithFallbackCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoWithFallbackCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
}))
})
}
func TestCircuitBreaker_DoWithFallbackAcceptable(t *testing.T) {
t.Run("doWithFallbackAcceptable", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallbackAcceptable(func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithFallbackAcceptable with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithFallbackAcceptable with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoWithFallbackAcceptableCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("doWithFallbackAcceptable with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoWithFallbackAcceptableCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
}))
})
} }
func TestLogReason(t *testing.T) { func TestLogReason(t *testing.T) {

View File

@@ -1,6 +1,9 @@
package breaker package breaker
import "sync" import (
"context"
"sync"
)
var ( var (
lock sync.RWMutex lock sync.RWMutex
@@ -14,6 +17,13 @@ func Do(name string, req func() error) error {
}) })
} }
// DoCtx calls Breaker.DoCtx on the Breaker with given name.
func DoCtx(ctx context.Context, name string, req func() error) error {
return do(name, func(b Breaker) error {
return b.DoCtx(ctx, req)
})
}
// DoWithAcceptable calls Breaker.DoWithAcceptable on the Breaker with given name. // DoWithAcceptable calls Breaker.DoWithAcceptable on the Breaker with given name.
func DoWithAcceptable(name string, req func() error, acceptable Acceptable) error { func DoWithAcceptable(name string, req func() error, acceptable Acceptable) error {
return do(name, func(b Breaker) error { return do(name, func(b Breaker) error {
@@ -21,6 +31,14 @@ func DoWithAcceptable(name string, req func() error, acceptable Acceptable) erro
}) })
} }
// DoWithAcceptableCtx calls Breaker.DoWithAcceptableCtx on the Breaker with given name.
func DoWithAcceptableCtx(ctx context.Context, name string, req func() error,
acceptable Acceptable) error {
return do(name, func(b Breaker) error {
return b.DoWithAcceptableCtx(ctx, req, acceptable)
})
}
// DoWithFallback calls Breaker.DoWithFallback on the Breaker with given name. // DoWithFallback calls Breaker.DoWithFallback on the Breaker with given name.
func DoWithFallback(name string, req func() error, fallback Fallback) error { func DoWithFallback(name string, req func() error, fallback Fallback) error {
return do(name, func(b Breaker) error { return do(name, func(b Breaker) error {
@@ -28,6 +46,13 @@ func DoWithFallback(name string, req func() error, fallback Fallback) error {
}) })
} }
// DoWithFallbackCtx calls Breaker.DoWithFallbackCtx on the Breaker with given name.
func DoWithFallbackCtx(ctx context.Context, name string, req func() error, fallback Fallback) error {
return do(name, func(b Breaker) error {
return b.DoWithFallbackCtx(ctx, req, fallback)
})
}
// DoWithFallbackAcceptable calls Breaker.DoWithFallbackAcceptable on the Breaker with given name. // DoWithFallbackAcceptable calls Breaker.DoWithFallbackAcceptable on the Breaker with given name.
func DoWithFallbackAcceptable(name string, req func() error, fallback Fallback, func DoWithFallbackAcceptable(name string, req func() error, fallback Fallback,
acceptable Acceptable) error { acceptable Acceptable) error {
@@ -36,6 +61,14 @@ func DoWithFallbackAcceptable(name string, req func() error, fallback Fallback,
}) })
} }
// DoWithFallbackAcceptableCtx calls Breaker.DoWithFallbackAcceptableCtx on the Breaker with given name.
func DoWithFallbackAcceptableCtx(ctx context.Context, name string, req func() error,
fallback Fallback, acceptable Acceptable) error {
return do(name, func(b Breaker) error {
return b.DoWithFallbackAcceptableCtx(ctx, req, fallback, acceptable)
})
}
// GetBreaker returns the Breaker with the given name. // GetBreaker returns the Breaker with the given name.
func GetBreaker(name string) Breaker { func GetBreaker(name string) Breaker {
lock.RLock() lock.RLock()

View File

@@ -1,6 +1,7 @@
package breaker package breaker
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"testing" "testing"
@@ -22,6 +23,9 @@ func TestBreakersDo(t *testing.T) {
assert.Equal(t, errDummy, Do("any", func() error { assert.Equal(t, errDummy, Do("any", func() error {
return errDummy return errDummy
})) }))
assert.Equal(t, errDummy, DoCtx(context.Background(), "any", func() error {
return errDummy
}))
} }
func TestBreakersDoWithAcceptable(t *testing.T) { func TestBreakersDoWithAcceptable(t *testing.T) {
@@ -38,6 +42,13 @@ func TestBreakersDoWithAcceptable(t *testing.T) {
return nil return nil
}) == nil }) == nil
}) })
verify(t, func() bool {
return DoWithAcceptableCtx(context.Background(), "anyone", func() error {
return nil
}, func(err error) bool {
return true
}) == nil
})
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {
err := DoWithAcceptable("another", func() error { err := DoWithAcceptable("another", func() error {
@@ -76,6 +87,12 @@ func TestBreakersFallback(t *testing.T) {
return nil return nil
}) })
assert.True(t, err == nil || errors.Is(err, errDummy)) assert.True(t, err == nil || errors.Is(err, errDummy))
err = DoWithFallbackCtx(context.Background(), "fallback", func() error {
return errDummy
}, func(err error) error {
return nil
})
assert.True(t, err == nil || errors.Is(err, errDummy))
} }
verify(t, func() bool { verify(t, func() bool {
return errors.Is(Do("fallback", func() error { return errors.Is(Do("fallback", func() error {
@@ -86,7 +103,7 @@ func TestBreakersFallback(t *testing.T) {
func TestBreakersAcceptableFallback(t *testing.T) { func TestBreakersAcceptableFallback(t *testing.T) {
errDummy := errors.New("any") errDummy := errors.New("any")
for i := 0; i < 10000; i++ { for i := 0; i < 5000; i++ {
err := DoWithFallbackAcceptable("acceptablefallback", func() error { err := DoWithFallbackAcceptable("acceptablefallback", func() error {
return errDummy return errDummy
}, func(err error) error { }, func(err error) error {
@@ -95,6 +112,14 @@ func TestBreakersAcceptableFallback(t *testing.T) {
return err == nil return err == nil
}) })
assert.True(t, err == nil || errors.Is(err, errDummy)) assert.True(t, err == nil || errors.Is(err, errDummy))
err = DoWithFallbackAcceptableCtx(context.Background(), "acceptablefallback", func() error {
return errDummy
}, func(err error) error {
return nil
}, func(err error) bool {
return err == nil
})
assert.True(t, err == nil || errors.Is(err, errDummy))
} }
verify(t, func() bool { verify(t, func() bool {
return errors.Is(Do("acceptablefallback", func() error { return errors.Is(Do("acceptablefallback", func() error {
@@ -110,5 +135,5 @@ func verify(t *testing.T, fn func() bool) {
count++ count++
} }
} }
assert.True(t, count >= 80, fmt.Sprintf("should be greater than 80, actual %d", count)) assert.True(t, count >= 75, fmt.Sprintf("should be greater than 75, actual %d", count))
} }

48
core/breaker/bucket.go Normal file
View File

@@ -0,0 +1,48 @@
package breaker
const (
success = iota
fail
drop
)
// bucket defines the bucket that holds sum and num of additions.
type bucket struct {
Sum int64
Success int64
Failure int64
Drop int64
}
func (b *bucket) Add(v int64) {
switch v {
case fail:
b.fail()
case drop:
b.drop()
default:
b.succeed()
}
}
func (b *bucket) Reset() {
b.Sum = 0
b.Success = 0
b.Failure = 0
b.Drop = 0
}
func (b *bucket) drop() {
b.Sum++
b.Drop++
}
func (b *bucket) fail() {
b.Sum++
b.Failure++
}
func (b *bucket) succeed() {
b.Sum++
b.Success++
}

View File

@@ -0,0 +1,43 @@
package breaker
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestBucketAdd(t *testing.T) {
b := &bucket{}
// Test succeed
b.Add(0) // Using 0 for success
assert.Equal(t, int64(1), b.Sum, "Sum should be incremented")
assert.Equal(t, int64(1), b.Success, "Success should be incremented")
assert.Equal(t, int64(0), b.Failure, "Failure should not be incremented")
assert.Equal(t, int64(0), b.Drop, "Drop should not be incremented")
// Test failure
b.Add(fail)
assert.Equal(t, int64(2), b.Sum, "Sum should be incremented")
assert.Equal(t, int64(1), b.Failure, "Failure should be incremented")
assert.Equal(t, int64(0), b.Drop, "Drop should not be incremented")
// Test drop
b.Add(drop)
assert.Equal(t, int64(3), b.Sum, "Sum should be incremented")
assert.Equal(t, int64(1), b.Drop, "Drop should be incremented")
}
func TestBucketReset(t *testing.T) {
b := &bucket{
Sum: 3,
Success: 1,
Failure: 1,
Drop: 1,
}
b.Reset()
assert.Equal(t, int64(0), b.Sum, "Sum should be reset to 0")
assert.Equal(t, int64(0), b.Success, "Success should be reset to 0")
assert.Equal(t, int64(0), b.Failure, "Failure should be reset to 0")
assert.Equal(t, int64(0), b.Drop, "Drop should be reset to 0")
}

View File

@@ -5,53 +5,83 @@ import (
"github.com/zeromicro/go-zero/core/collection" "github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/mathx" "github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
) )
const ( const (
// 250ms for bucket duration // 250ms for bucket duration
window = time.Second * 10 window = time.Second * 10
buckets = 40 buckets = 40
k = 1.5 forcePassDuration = time.Second
protection = 5 k = 1.5
minK = 1.1
protection = 5
) )
// googleBreaker is a netflixBreaker pattern from google. // googleBreaker is a netflixBreaker pattern from google.
// see Client-Side Throttling section in https://landing.google.com/sre/sre-book/chapters/handling-overload/ // see Client-Side Throttling section in https://landing.google.com/sre/sre-book/chapters/handling-overload/
type googleBreaker struct { type (
k float64 googleBreaker struct {
stat *collection.RollingWindow k float64
proba *mathx.Proba stat *collection.RollingWindow[int64, *bucket]
} proba *mathx.Proba
lastPass *syncx.AtomicDuration
}
windowResult struct {
accepts int64
total int64
failingBuckets int64
workingBuckets int64
}
)
func newGoogleBreaker() *googleBreaker { func newGoogleBreaker() *googleBreaker {
bucketDuration := time.Duration(int64(window) / int64(buckets)) bucketDuration := time.Duration(int64(window) / int64(buckets))
st := collection.NewRollingWindow(buckets, bucketDuration) st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
return new(bucket)
}, buckets, bucketDuration)
return &googleBreaker{ return &googleBreaker{
stat: st, stat: st,
k: k, k: k,
proba: mathx.NewProba(), proba: mathx.NewProba(),
lastPass: syncx.NewAtomicDuration(),
} }
} }
func (b *googleBreaker) accept() error { func (b *googleBreaker) accept() error {
accepts, total := b.history() var w float64
weightedAccepts := b.k * float64(accepts) history := b.history()
w = b.k - (b.k-minK)*float64(history.failingBuckets)/buckets
weightedAccepts := mathx.AtLeast(w, minK) * float64(history.accepts)
// https://landing.google.com/sre/sre-book/chapters/handling-overload/#eq2101 // https://landing.google.com/sre/sre-book/chapters/handling-overload/#eq2101
// for better performance, no need to care about negative ratio // for better performance, no need to care about the negative ratio
dropRatio := (float64(total-protection) - weightedAccepts) / float64(total+1) dropRatio := (float64(history.total-protection) - weightedAccepts) / float64(history.total+1)
if dropRatio <= 0 { if dropRatio <= 0 {
return nil return nil
} }
lastPass := b.lastPass.Load()
if lastPass > 0 && timex.Since(lastPass) > forcePassDuration {
b.lastPass.Set(timex.Now())
return nil
}
dropRatio *= float64(buckets-history.workingBuckets) / buckets
if b.proba.TrueOnProba(dropRatio) { if b.proba.TrueOnProba(dropRatio) {
return ErrServiceUnavailable return ErrServiceUnavailable
} }
b.lastPass.Set(timex.Now())
return nil return nil
} }
func (b *googleBreaker) allow() (internalPromise, error) { func (b *googleBreaker) allow() (internalPromise, error) {
if err := b.accept(); err != nil { if err := b.accept(); err != nil {
b.markDrop()
return nil, err return nil, err
} }
@@ -62,7 +92,7 @@ func (b *googleBreaker) allow() (internalPromise, error) {
func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Acceptable) error { func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Acceptable) error {
if err := b.accept(); err != nil { if err := b.accept(); err != nil {
b.markFailure() b.markDrop()
if fallback != nil { if fallback != nil {
return fallback(err) return fallback(err)
} }
@@ -70,10 +100,10 @@ func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Ac
return err return err
} }
var success bool var succ bool
defer func() { defer func() {
// if req() panic, success is false, mark as failure // if req() panic, success is false, mark as failure
if success { if succ {
b.markSuccess() b.markSuccess()
} else { } else {
b.markFailure() b.markFailure()
@@ -82,27 +112,43 @@ func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Ac
err := req() err := req()
if acceptable(err) { if acceptable(err) {
success = true succ = true
} }
return err return err
} }
func (b *googleBreaker) markSuccess() { func (b *googleBreaker) markDrop() {
b.stat.Add(1) b.stat.Add(drop)
} }
func (b *googleBreaker) markFailure() { func (b *googleBreaker) markFailure() {
b.stat.Add(0) b.stat.Add(fail)
} }
func (b *googleBreaker) history() (accepts, total int64) { func (b *googleBreaker) markSuccess() {
b.stat.Reduce(func(b *collection.Bucket) { b.stat.Add(success)
accepts += int64(b.Sum) }
total += b.Count
func (b *googleBreaker) history() windowResult {
var result windowResult
b.stat.Reduce(func(b *bucket) {
result.accepts += b.Success
result.total += b.Sum
if b.Failure > 0 {
result.workingBuckets = 0
} else if b.Success > 0 {
result.workingBuckets++
}
if b.Success > 0 {
result.failingBuckets = 0
} else if b.Failure > 0 {
result.failingBuckets++
}
}) })
return return result
} }
type googlePromise struct { type googlePromise struct {

View File

@@ -10,6 +10,7 @@ import (
"github.com/zeromicro/go-zero/core/collection" "github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/mathx" "github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/stat" "github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/syncx"
) )
const ( const (
@@ -22,11 +23,14 @@ func init() {
} }
func getGoogleBreaker() *googleBreaker { func getGoogleBreaker() *googleBreaker {
st := collection.NewRollingWindow(testBuckets, testInterval) st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
return new(bucket)
}, testBuckets, testInterval)
return &googleBreaker{ return &googleBreaker{
stat: st, stat: st,
k: 5, k: 5,
proba: mathx.NewProba(), proba: mathx.NewProba(),
lastPass: syncx.NewAtomicDuration(),
} }
} }
@@ -63,6 +67,33 @@ func TestGoogleBreakerOpen(t *testing.T) {
}) })
} }
func TestGoogleBreakerRecover(t *testing.T) {
st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
return new(bucket)
}, testBuckets*2, testInterval)
b := &googleBreaker{
stat: st,
k: k,
proba: mathx.NewProba(),
lastPass: syncx.NewAtomicDuration(),
}
for i := 0; i < testBuckets; i++ {
for j := 0; j < 100; j++ {
b.stat.Add(1)
}
time.Sleep(testInterval)
}
for i := 0; i < testBuckets; i++ {
for j := 0; j < 100; j++ {
b.stat.Add(0)
}
time.Sleep(testInterval)
}
verify(t, func() bool {
return b.accept() == nil
})
}
func TestGoogleBreakerFallback(t *testing.T) { func TestGoogleBreakerFallback(t *testing.T) {
b := getGoogleBreaker() b := getGoogleBreaker()
markSuccess(b, 1) markSuccess(b, 1)
@@ -89,6 +120,43 @@ func TestGoogleBreakerReject(t *testing.T) {
}, nil, defaultAcceptable)) }, nil, defaultAcceptable))
} }
func TestGoogleBreakerMoreFallingBuckets(t *testing.T) {
t.Parallel()
t.Run("more falling buckets", func(t *testing.T) {
b := getGoogleBreaker()
func() {
stopChan := time.After(testInterval * 6)
for {
time.Sleep(time.Millisecond)
select {
case <-stopChan:
return
default:
assert.Error(t, b.doReq(func() error {
return errors.New("foo")
}, func(err error) error {
return err
}, func(err error) bool {
return err == nil
}))
}
}
}()
var count int
for i := 0; i < 100; i++ {
if errors.Is(b.doReq(func() error {
return ErrServiceUnavailable
}, nil, defaultAcceptable), ErrServiceUnavailable) {
count++
}
}
assert.True(t, count > 90)
})
}
func TestGoogleBreakerAcceptable(t *testing.T) { func TestGoogleBreakerAcceptable(t *testing.T) {
b := getGoogleBreaker() b := getGoogleBreaker()
errAcceptable := errors.New("any") errAcceptable := errors.New("any")
@@ -164,41 +232,38 @@ func TestGoogleBreakerSelfProtection(t *testing.T) {
} }
func TestGoogleBreakerHistory(t *testing.T) { func TestGoogleBreakerHistory(t *testing.T) {
var b *googleBreaker
var accepts, total int64
sleep := testInterval sleep := testInterval
t.Run("accepts == total", func(t *testing.T) { t.Run("accepts == total", func(t *testing.T) {
b = getGoogleBreaker() b := getGoogleBreaker()
markSuccessWithDuration(b, 10, sleep/2) markSuccessWithDuration(b, 10, sleep/2)
accepts, total = b.history() result := b.history()
assert.Equal(t, int64(10), accepts) assert.Equal(t, int64(10), result.accepts)
assert.Equal(t, int64(10), total) assert.Equal(t, int64(10), result.total)
}) })
t.Run("fail == total", func(t *testing.T) { t.Run("fail == total", func(t *testing.T) {
b = getGoogleBreaker() b := getGoogleBreaker()
markFailedWithDuration(b, 10, sleep/2) markFailedWithDuration(b, 10, sleep/2)
accepts, total = b.history() result := b.history()
assert.Equal(t, int64(0), accepts) assert.Equal(t, int64(0), result.accepts)
assert.Equal(t, int64(10), total) assert.Equal(t, int64(10), result.total)
}) })
t.Run("accepts = 1/2 * total, fail = 1/2 * total", func(t *testing.T) { t.Run("accepts = 1/2 * total, fail = 1/2 * total", func(t *testing.T) {
b = getGoogleBreaker() b := getGoogleBreaker()
markFailedWithDuration(b, 5, sleep/2) markFailedWithDuration(b, 5, sleep/2)
markSuccessWithDuration(b, 5, sleep/2) markSuccessWithDuration(b, 5, sleep/2)
accepts, total = b.history() result := b.history()
assert.Equal(t, int64(5), accepts) assert.Equal(t, int64(5), result.accepts)
assert.Equal(t, int64(10), total) assert.Equal(t, int64(10), result.total)
}) })
t.Run("auto reset rolling counter", func(t *testing.T) { t.Run("auto reset rolling counter", func(t *testing.T) {
b = getGoogleBreaker() b := getGoogleBreaker()
time.Sleep(testInterval * testBuckets) time.Sleep(testInterval * testBuckets)
accepts, total = b.history() result := b.history()
assert.Equal(t, int64(0), accepts) assert.Equal(t, int64(0), result.accepts)
assert.Equal(t, int64(0), total) assert.Equal(t, int64(0), result.total)
}) })
} }

View File

@@ -1,5 +1,7 @@
package breaker package breaker
import "context"
const nopBreakerName = "nopBreaker" const nopBreakerName = "nopBreaker"
type nopBreaker struct{} type nopBreaker struct{}
@@ -17,22 +19,43 @@ func (b nopBreaker) Allow() (Promise, error) {
return nopPromise{}, nil return nopPromise{}, nil
} }
func (b nopBreaker) AllowCtx(_ context.Context) (Promise, error) {
return nopPromise{}, nil
}
func (b nopBreaker) Do(req func() error) error { func (b nopBreaker) Do(req func() error) error {
return req() return req()
} }
func (b nopBreaker) DoCtx(_ context.Context, req func() error) error {
return req()
}
func (b nopBreaker) DoWithAcceptable(req func() error, _ Acceptable) error { func (b nopBreaker) DoWithAcceptable(req func() error, _ Acceptable) error {
return req() return req()
} }
func (b nopBreaker) DoWithAcceptableCtx(_ context.Context, req func() error, _ Acceptable) error {
return req()
}
func (b nopBreaker) DoWithFallback(req func() error, _ Fallback) error { func (b nopBreaker) DoWithFallback(req func() error, _ Fallback) error {
return req() return req()
} }
func (b nopBreaker) DoWithFallbackCtx(_ context.Context, req func() error, _ Fallback) error {
return req()
}
func (b nopBreaker) DoWithFallbackAcceptable(req func() error, _ Fallback, _ Acceptable) error { func (b nopBreaker) DoWithFallbackAcceptable(req func() error, _ Fallback, _ Acceptable) error {
return req() return req()
} }
func (b nopBreaker) DoWithFallbackAcceptableCtx(_ context.Context, req func() error,
_ Fallback, _ Acceptable) error {
return req()
}
type nopPromise struct{} type nopPromise struct{}
func (p nopPromise) Accept() { func (p nopPromise) Accept() {

View File

@@ -1,6 +1,7 @@
package breaker package breaker
import ( import (
"context"
"errors" "errors"
"testing" "testing"
@@ -12,6 +13,8 @@ func TestNopBreaker(t *testing.T) {
assert.Equal(t, nopBreakerName, b.Name()) assert.Equal(t, nopBreakerName, b.Name())
p, err := b.Allow() p, err := b.Allow()
assert.Nil(t, err) assert.Nil(t, err)
p, err = b.AllowCtx(context.Background())
assert.Nil(t, err)
p.Accept() p.Accept()
for i := 0; i < 1000; i++ { for i := 0; i < 1000; i++ {
p, err := b.Allow() p, err := b.Allow()
@@ -21,18 +24,34 @@ func TestNopBreaker(t *testing.T) {
assert.Nil(t, b.Do(func() error { assert.Nil(t, b.Do(func() error {
return nil return nil
})) }))
assert.Nil(t, b.DoCtx(context.Background(), func() error {
return nil
}))
assert.Nil(t, b.DoWithAcceptable(func() error { assert.Nil(t, b.DoWithAcceptable(func() error {
return nil return nil
}, defaultAcceptable)) }, defaultAcceptable))
assert.Nil(t, b.DoWithAcceptableCtx(context.Background(), func() error {
return nil
}, defaultAcceptable))
errDummy := errors.New("any") errDummy := errors.New("any")
assert.Equal(t, errDummy, b.DoWithFallback(func() error { assert.Equal(t, errDummy, b.DoWithFallback(func() error {
return errDummy return errDummy
}, func(err error) error { }, func(err error) error {
return nil return nil
})) }))
assert.Equal(t, errDummy, b.DoWithFallbackCtx(context.Background(), func() error {
return errDummy
}, func(err error) error {
return nil
}))
assert.Equal(t, errDummy, b.DoWithFallbackAcceptable(func() error { assert.Equal(t, errDummy, b.DoWithFallbackAcceptable(func() error {
return errDummy return errDummy
}, func(err error) error { }, func(err error) error {
return nil return nil
}, defaultAcceptable)) }, defaultAcceptable))
assert.Equal(t, errDummy, b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
return errDummy
}, func(err error) error {
return nil
}, defaultAcceptable))
} }

View File

@@ -23,7 +23,7 @@ var (
zero = big.NewInt(0) zero = big.NewInt(0)
) )
// DhKey defines the Diffie Hellman key. // DhKey defines the Diffie-Hellman key.
type DhKey struct { type DhKey struct {
PriKey *big.Int PriKey *big.Int
PubKey *big.Int PubKey *big.Int
@@ -46,7 +46,7 @@ func ComputeKey(pubKey, priKey *big.Int) (*big.Int, error) {
return new(big.Int).Exp(pubKey, priKey, p), nil return new(big.Int).Exp(pubKey, priKey, p), nil
} }
// GenerateKey returns a Diffie Hellman key. // GenerateKey returns a Diffie-Hellman key.
func GenerateKey() (*DhKey, error) { func GenerateKey() (*DhKey, error) {
var err error var err error
var x *big.Int var x *big.Int

View File

@@ -128,8 +128,8 @@ func (c *Cache) Take(key string, fetch func() (any, error)) (any, error) {
var fresh bool var fresh bool
val, err := c.barrier.Do(key, func() (any, error) { val, err := c.barrier.Do(key, func() (any, error) {
// because O(1) on map search in memory, and fetch is an IO query // because O(1) on map search in memory, and fetch is an IO query,
// so we do double check, cache might be taken by another call // so we do double-check, cache might be taken by another call
if val, ok := c.doGet(key); ok { if val, ok := c.doGet(key); ok {
return val, nil return val, nil
} }

View File

@@ -4,18 +4,28 @@ import (
"sync" "sync"
"time" "time"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/timex" "github.com/zeromicro/go-zero/core/timex"
) )
type ( type (
// RollingWindowOption let callers customize the RollingWindow. // BucketInterface is the interface that defines the buckets.
RollingWindowOption func(rollingWindow *RollingWindow) BucketInterface[T Numerical] interface {
Add(v T)
Reset()
}
// RollingWindow defines a rolling window to calculate the events in buckets with time interval. // Numerical is the interface that restricts the numerical type.
RollingWindow struct { Numerical = mathx.Numerical
// RollingWindowOption let callers customize the RollingWindow.
RollingWindowOption[T Numerical, B BucketInterface[T]] func(rollingWindow *RollingWindow[T, B])
// RollingWindow defines a rolling window to calculate the events in buckets with the time interval.
RollingWindow[T Numerical, B BucketInterface[T]] struct {
lock sync.RWMutex lock sync.RWMutex
size int size int
win *window win *window[T, B]
interval time.Duration interval time.Duration
offset int offset int
ignoreCurrent bool ignoreCurrent bool
@@ -25,14 +35,15 @@ type (
// NewRollingWindow returns a RollingWindow that with size buckets and time interval, // NewRollingWindow returns a RollingWindow that with size buckets and time interval,
// use opts to customize the RollingWindow. // use opts to customize the RollingWindow.
func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOption) *RollingWindow { func NewRollingWindow[T Numerical, B BucketInterface[T]](newBucket func() B, size int,
interval time.Duration, opts ...RollingWindowOption[T, B]) *RollingWindow[T, B] {
if size < 1 { if size < 1 {
panic("size must be greater than 0") panic("size must be greater than 0")
} }
w := &RollingWindow{ w := &RollingWindow[T, B]{
size: size, size: size,
win: newWindow(size), win: newWindow[T, B](newBucket, size),
interval: interval, interval: interval,
lastTime: timex.Now(), lastTime: timex.Now(),
} }
@@ -43,7 +54,7 @@ func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOpt
} }
// Add adds value to current bucket. // Add adds value to current bucket.
func (rw *RollingWindow) Add(v float64) { func (rw *RollingWindow[T, B]) Add(v T) {
rw.lock.Lock() rw.lock.Lock()
defer rw.lock.Unlock() defer rw.lock.Unlock()
rw.updateOffset() rw.updateOffset()
@@ -51,13 +62,13 @@ func (rw *RollingWindow) Add(v float64) {
} }
// Reduce runs fn on all buckets, ignore current bucket if ignoreCurrent was set. // Reduce runs fn on all buckets, ignore current bucket if ignoreCurrent was set.
func (rw *RollingWindow) Reduce(fn func(b *Bucket)) { func (rw *RollingWindow[T, B]) Reduce(fn func(b B)) {
rw.lock.RLock() rw.lock.RLock()
defer rw.lock.RUnlock() defer rw.lock.RUnlock()
var diff int var diff int
span := rw.span() span := rw.span()
// ignore current bucket, because of partial data // ignore the current bucket, because of partial data
if span == 0 && rw.ignoreCurrent { if span == 0 && rw.ignoreCurrent {
diff = rw.size - 1 diff = rw.size - 1
} else { } else {
@@ -69,7 +80,7 @@ func (rw *RollingWindow) Reduce(fn func(b *Bucket)) {
} }
} }
func (rw *RollingWindow) span() int { func (rw *RollingWindow[T, B]) span() int {
offset := int(timex.Since(rw.lastTime) / rw.interval) offset := int(timex.Since(rw.lastTime) / rw.interval)
if 0 <= offset && offset < rw.size { if 0 <= offset && offset < rw.size {
return offset return offset
@@ -78,7 +89,7 @@ func (rw *RollingWindow) span() int {
return rw.size return rw.size
} }
func (rw *RollingWindow) updateOffset() { func (rw *RollingWindow[T, B]) updateOffset() {
span := rw.span() span := rw.span()
if span <= 0 { if span <= 0 {
return return
@@ -97,54 +108,54 @@ func (rw *RollingWindow) updateOffset() {
} }
// Bucket defines the bucket that holds sum and num of additions. // Bucket defines the bucket that holds sum and num of additions.
type Bucket struct { type Bucket[T Numerical] struct {
Sum float64 Sum T
Count int64 Count int64
} }
func (b *Bucket) add(v float64) { func (b *Bucket[T]) Add(v T) {
b.Sum += v b.Sum += v
b.Count++ b.Count++
} }
func (b *Bucket) reset() { func (b *Bucket[T]) Reset() {
b.Sum = 0 b.Sum = 0
b.Count = 0 b.Count = 0
} }
type window struct { type window[T Numerical, B BucketInterface[T]] struct {
buckets []*Bucket buckets []B
size int size int
} }
func newWindow(size int) *window { func newWindow[T Numerical, B BucketInterface[T]](newBucket func() B, size int) *window[T, B] {
buckets := make([]*Bucket, size) buckets := make([]B, size)
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
buckets[i] = new(Bucket) buckets[i] = newBucket()
} }
return &window{ return &window[T, B]{
buckets: buckets, buckets: buckets,
size: size, size: size,
} }
} }
func (w *window) add(offset int, v float64) { func (w *window[T, B]) add(offset int, v T) {
w.buckets[offset%w.size].add(v) w.buckets[offset%w.size].Add(v)
} }
func (w *window) reduce(start, count int, fn func(b *Bucket)) { func (w *window[T, B]) reduce(start, count int, fn func(b B)) {
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
fn(w.buckets[(start+i)%w.size]) fn(w.buckets[(start+i)%w.size])
} }
} }
func (w *window) resetBucket(offset int) { func (w *window[T, B]) resetBucket(offset int) {
w.buckets[offset%w.size].reset() w.buckets[offset%w.size].Reset()
} }
// IgnoreCurrentBucket lets the Reduce call ignore current bucket. // IgnoreCurrentBucket lets the Reduce call ignore current bucket.
func IgnoreCurrentBucket() RollingWindowOption { func IgnoreCurrentBucket[T Numerical, B BucketInterface[T]]() RollingWindowOption[T, B] {
return func(w *RollingWindow) { return func(w *RollingWindow[T, B]) {
w.ignoreCurrent = true w.ignoreCurrent = true
} }
} }

View File

@@ -12,18 +12,24 @@ import (
const duration = time.Millisecond * 50 const duration = time.Millisecond * 50
func TestNewRollingWindow(t *testing.T) { func TestNewRollingWindow(t *testing.T) {
assert.NotNil(t, NewRollingWindow(10, time.Second)) assert.NotNil(t, NewRollingWindow[int64, *Bucket[int64]](func() *Bucket[int64] {
return new(Bucket[int64])
}, 10, time.Second))
assert.Panics(t, func() { assert.Panics(t, func() {
NewRollingWindow(0, time.Second) NewRollingWindow[int64, *Bucket[int64]](func() *Bucket[int64] {
return new(Bucket[int64])
}, 0, time.Second)
}) })
} }
func TestRollingWindowAdd(t *testing.T) { func TestRollingWindowAdd(t *testing.T) {
const size = 3 const size = 3
r := NewRollingWindow(size, duration) r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration)
listBuckets := func() []float64 { listBuckets := func() []float64 {
var buckets []float64 var buckets []float64
r.Reduce(func(b *Bucket) { r.Reduce(func(b *Bucket[float64]) {
buckets = append(buckets, b.Sum) buckets = append(buckets, b.Sum)
}) })
return buckets return buckets
@@ -47,10 +53,12 @@ func TestRollingWindowAdd(t *testing.T) {
func TestRollingWindowReset(t *testing.T) { func TestRollingWindowReset(t *testing.T) {
const size = 3 const size = 3
r := NewRollingWindow(size, duration, IgnoreCurrentBucket()) r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration, IgnoreCurrentBucket[float64, *Bucket[float64]]())
listBuckets := func() []float64 { listBuckets := func() []float64 {
var buckets []float64 var buckets []float64
r.Reduce(func(b *Bucket) { r.Reduce(func(b *Bucket[float64]) {
buckets = append(buckets, b.Sum) buckets = append(buckets, b.Sum)
}) })
return buckets return buckets
@@ -72,15 +80,19 @@ func TestRollingWindowReset(t *testing.T) {
func TestRollingWindowReduce(t *testing.T) { func TestRollingWindowReduce(t *testing.T) {
const size = 4 const size = 4
tests := []struct { tests := []struct {
win *RollingWindow win *RollingWindow[float64, *Bucket[float64]]
expect float64 expect float64
}{ }{
{ {
win: NewRollingWindow(size, duration), win: NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration),
expect: 10, expect: 10,
}, },
{ {
win: NewRollingWindow(size, duration, IgnoreCurrentBucket()), win: NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration, IgnoreCurrentBucket[float64, *Bucket[float64]]()),
expect: 4, expect: 4,
}, },
} }
@@ -97,7 +109,7 @@ func TestRollingWindowReduce(t *testing.T) {
} }
} }
var result float64 var result float64
r.Reduce(func(b *Bucket) { r.Reduce(func(b *Bucket[float64]) {
result += b.Sum result += b.Sum
}) })
assert.Equal(t, test.expect, result) assert.Equal(t, test.expect, result)
@@ -108,10 +120,12 @@ func TestRollingWindowReduce(t *testing.T) {
func TestRollingWindowBucketTimeBoundary(t *testing.T) { func TestRollingWindowBucketTimeBoundary(t *testing.T) {
const size = 3 const size = 3
interval := time.Millisecond * 30 interval := time.Millisecond * 30
r := NewRollingWindow(size, interval) r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, interval)
listBuckets := func() []float64 { listBuckets := func() []float64 {
var buckets []float64 var buckets []float64
r.Reduce(func(b *Bucket) { r.Reduce(func(b *Bucket[float64]) {
buckets = append(buckets, b.Sum) buckets = append(buckets, b.Sum)
}) })
return buckets return buckets
@@ -138,7 +152,9 @@ func TestRollingWindowBucketTimeBoundary(t *testing.T) {
func TestRollingWindowDataRace(t *testing.T) { func TestRollingWindowDataRace(t *testing.T) {
const size = 3 const size = 3
r := NewRollingWindow(size, duration) r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration)
stop := make(chan bool) stop := make(chan bool)
go func() { go func() {
for { for {
@@ -157,7 +173,7 @@ func TestRollingWindowDataRace(t *testing.T) {
case <-stop: case <-stop:
return return
default: default:
r.Reduce(func(b *Bucket) {}) r.Reduce(func(b *Bucket[float64]) {})
} }
} }
}() }()

View File

@@ -133,7 +133,7 @@ func addOrMergeFields(info *fieldInfo, key string, child *fieldInfo, fullName st
return newConflictKeyError(fullName) return newConflictKeyError(fullName)
} }
if err := mergeFields(prev, key, child.children, fullName); err != nil { if err := mergeFields(prev, child.children, fullName); err != nil {
return err return err
} }
} else { } else {
@@ -189,7 +189,7 @@ func buildFieldsInfo(tp reflect.Type, fullName string) (*fieldInfo, error) {
switch tp.Kind() { switch tp.Kind() {
case reflect.Struct: case reflect.Struct:
return buildStructFieldsInfo(tp, fullName) return buildStructFieldsInfo(tp, fullName)
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice, reflect.Map:
return buildFieldsInfo(mapping.Deref(tp.Elem()), fullName) return buildFieldsInfo(mapping.Deref(tp.Elem()), fullName)
case reflect.Chan, reflect.Func: case reflect.Chan, reflect.Func:
return nil, fmt.Errorf("unsupported type: %s", tp.Kind()) return nil, fmt.Errorf("unsupported type: %s", tp.Kind())
@@ -281,7 +281,7 @@ func getTagName(field reflect.StructField) string {
return field.Name return field.Name
} }
func mergeFields(prev *fieldInfo, key string, children map[string]*fieldInfo, fullName string) error { func mergeFields(prev *fieldInfo, children map[string]*fieldInfo, fullName string) error {
if len(prev.children) == 0 || len(children) == 0 { if len(prev.children) == 0 || len(children) == 0 {
return newConflictKeyError(fullName) return newConflictKeyError(fullName)
} }
@@ -332,6 +332,8 @@ func toLowerCaseKeyMap(m map[string]any, info *fieldInfo) map[string]any {
res[lk] = toLowerCaseInterface(v, ti) res[lk] = toLowerCaseInterface(v, ti)
} else if info.mapField != nil { } else if info.mapField != nil {
res[k] = toLowerCaseInterface(v, info.mapField) res[k] = toLowerCaseInterface(v, info.mapField)
} else if vv, ok := v.(map[string]any); ok {
res[k] = toLowerCaseKeyMap(vv, info)
} else { } else {
res[k] = v res[k] = v
} }

View File

@@ -1192,6 +1192,29 @@ Email = "bar"`)
assert.Len(t, c.Value, 2) assert.Len(t, c.Value, 2)
} }
}) })
t.Run("multi layer map", func(t *testing.T) {
type Value struct {
User struct {
Name string
}
}
type Config struct {
Value map[string]map[string]Value
}
var input = []byte(`
[Value.first.User1.User]
Name = "foo"
[Value.second.User2.User]
Name = "bar"
`)
var c Config
if assert.NoError(t, LoadFromTomlBytes(input, &c)) {
assert.Len(t, c.Value, 2)
}
})
} }
func Test_getFullName(t *testing.T) { func Test_getFullName(t *testing.T) {

View File

@@ -0,0 +1,200 @@
package configurator
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
"github.com/zeromicro/go-zero/core/configcenter/subscriber"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/mapping"
"github.com/zeromicro/go-zero/core/threading"
)
var (
	// errEmptyConfig is returned by GetConfig when no configuration content is available.
	errEmptyConfig = errors.New("empty config value")
	// errMissingUnmarshalerType is returned when the target type T cannot be
	// determined (nil interface) or has no supported unmarshaler kind.
	errMissingUnmarshalerType = errors.New("missing unmarshaler type")
)
// Configurator is the interface for configuration center.
type Configurator[T any] interface {
	// GetConfig returns the subscription value, parsed into T.
	GetConfig() (T, error)
	// AddListener adds a listener that is invoked whenever the configuration changes.
	AddListener(listener func())
}
type (
	// Config is the configuration for Configurator.
	Config struct {
		// Type is the value type, yaml, json or toml.
		Type string `json:",default=yaml,options=[yaml,json,toml]"`
		// Log is the flag to control logging.
		Log bool `json:",default=true"`
	}

	// configCenter is the default Configurator implementation, backed by a
	// subscriber that delivers raw configuration content.
	configCenter[T any] struct {
		conf        Config
		unmarshaler LoaderFn              // parses raw content into T
		subscriber  subscriber.Subscriber // source of raw configuration content
		listeners   []func()              // callbacks run on each change
		lock        sync.Mutex            // guards listeners
		snapshot    atomic.Value          // holds the latest *value[T]
	}

	// value is a parsed snapshot of the raw configuration content.
	value[T any] struct {
		data        string // raw content as received from the subscriber
		marshalData T      // parsed content, valid when err is nil
		err         error  // parse error, if any
	}
)
// compile-time assertion that configCenter implements Configurator.
var _ Configurator[any] = (*configCenter[any])(nil)
// MustNewConfigCenter returns a Configurator, exits on errors.
func MustNewConfigCenter[T any](c Config, subscriber subscriber.Subscriber) Configurator[T] {
	center, err := NewConfigCenter[T](c, subscriber)
	logx.Must(err)

	return center
}
// NewConfigCenter returns a Configurator.
func NewConfigCenter[T any](c Config, subscriber subscriber.Subscriber) (Configurator[T], error) {
	// resolve the loader for the configured format (yaml/json/toml or custom).
	loader, ok := Unmarshaler(strings.ToLower(c.Type))
	if !ok {
		return nil, fmt.Errorf("unknown format: %s", c.Type)
	}

	center := &configCenter[T]{
		conf:        c,
		unmarshaler: loader,
		subscriber:  subscriber,
	}
	// take an initial snapshot before subscribing, so GetConfig works immediately.
	if err := center.loadConfig(); err != nil {
		return nil, err
	}

	if err := center.subscriber.AddListener(center.onChange); err != nil {
		return nil, err
	}

	// verify the initial content actually parses into T.
	if _, err := center.GetConfig(); err != nil {
		return nil, err
	}

	return center, nil
}
// AddListener adds listener to c; it is invoked after each configuration change.
func (c *configCenter[T]) AddListener(listener func()) {
	c.lock.Lock()
	c.listeners = append(c.listeners, listener)
	c.lock.Unlock()
}
// GetConfig returns the structured config, or an error when no content is
// available or the content failed to parse.
func (c *configCenter[T]) GetConfig() (T, error) {
	snap := c.value()
	if snap == nil || len(snap.data) == 0 {
		var zero T
		return zero, errEmptyConfig
	}

	return snap.marshalData, snap.err
}
// Value returns the raw subscription value, or empty if no snapshot exists yet.
func (c *configCenter[T]) Value() string {
	if snap := c.value(); snap != nil {
		return snap.data
	}

	return ""
}
// loadConfig pulls the latest raw value from the subscriber, parses it and
// stores the result as the current snapshot.
func (c *configCenter[T]) loadConfig() error {
	content, err := c.subscriber.Value()
	if err != nil {
		if c.conf.Log {
			logx.Errorf("ConfigCenter loads changed configuration, error: %v", err)
		}

		return err
	}

	if c.conf.Log {
		logx.Infof("ConfigCenter loads changed configuration, content [%s]", content)
	}

	c.snapshot.Store(c.genValue(content))

	return nil
}
// onChange reloads the configuration and notifies all registered listeners.
func (c *configCenter[T]) onChange() {
	if err := c.loadConfig(); err != nil {
		return
	}

	// snapshot the listener list under the lock, then run them outside it so a
	// slow listener cannot block AddListener.
	c.lock.Lock()
	watchers := append([]func()(nil), c.listeners...)
	c.lock.Unlock()

	for _, watcher := range watchers {
		threading.GoSafe(watcher)
	}
}
// value loads the current snapshot, or nil if none has been stored yet.
func (c *configCenter[T]) value() *value[T] {
	snap := c.snapshot.Load()
	if snap == nil {
		return nil
	}

	return snap.(*value[T])
}
// genValue parses the raw configuration data into a typed snapshot.
//
// The returned value always carries the raw data; marshalData and err are
// filled according to the target type T:
//   - struct/array/slice: unmarshal data with the configured unmarshaler.
//   - string: assign the raw data directly.
//   - nil type (e.g. T == any) or any other kind: errMissingUnmarshalerType.
func (c *configCenter[T]) genValue(data string) *value[T] {
	v := &value[T]{
		data: data,
	}
	// empty content: nothing to unmarshal, keep the zero value.
	if len(data) == 0 {
		return v
	}

	t := reflect.TypeOf(v.marshalData)
	// if the type is nil, it means that the user has not set the type of the configuration.
	if t == nil {
		v.err = errMissingUnmarshalerType
		return v
	}

	t = mapping.Deref(t)
	switch t.Kind() {
	case reflect.Struct, reflect.Array, reflect.Slice:
		if err := c.unmarshaler([]byte(data), &v.marshalData); err != nil {
			v.err = err
			if c.conf.Log {
				// pass err itself (not err.Error()) so %+v can render wrapped
				// error details instead of a flattened string.
				logx.Errorf("ConfigCenter unmarshal configuration failed, err: %+v, content [%s]",
					err, data)
			}
		}
	case reflect.String:
		// T is string: the raw content is the value itself.
		if str, ok := any(data).(T); ok {
			v.marshalData = str
		} else {
			v.err = errMissingUnmarshalerType
		}
	default:
		if c.conf.Log {
			logx.Errorf("ConfigCenter unmarshal configuration missing unmarshaler for type: %s, content [%s]",
				t.Kind(), data)
		}

		v.err = errMissingUnmarshalerType
	}

	return v
}

View File

@@ -0,0 +1,233 @@
package configurator
import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestNewConfigCenter verifies that construction fails for an unset format
// type and for a subscriber that yields empty content.
func TestNewConfigCenter(t *testing.T) {
	// empty Type: no unmarshaler is registered under "", so construction fails.
	_, err := NewConfigCenter[any](Config{
		Log: true,
	}, &mockSubscriber{})
	assert.Error(t, err)

	// valid type, but the mock subscriber returns empty content,
	// so the initial GetConfig check fails.
	_, err = NewConfigCenter[any](Config{
		Type: "json",
		Log:  true,
	}, &mockSubscriber{})
	assert.Error(t, err)
}
// TestConfigCenter_GetConfig covers successful struct parsing, string
// targets, and a failing listener registration. Note: each center captures
// the mock's value at construction time, so the order of mock.v assignments
// matters.
func TestConfigCenter_GetConfig(t *testing.T) {
	mock := &mockSubscriber{}
	type Data struct {
		Name string `json:"name"`
	}

	// valid JSON content is parsed into Data.
	mock.v = `{"name": "go-zero"}`
	c1, err := NewConfigCenter[Data](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)
	data, err := c1.GetConfig()
	assert.NoError(t, err)
	assert.Equal(t, "go-zero", data.Name)

	// c2 snapshots `{"name": "111"}` at construction.
	mock.v = `{"name": "111"}`
	c2, err := NewConfigCenter[Data](Config{Type: "json"}, mock)
	assert.NoError(t, err)

	// a string-typed center accepts any non-empty content verbatim.
	mock.v = `{}`
	c3, err := NewConfigCenter[string](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)
	_, err = c3.GetConfig()
	assert.NoError(t, err)

	// NOTE(review): data is reassigned here but not asserted afterwards.
	data, err = c2.GetConfig()
	assert.NoError(t, err)

	// construction fails when the subscriber cannot register a listener.
	mock.lisErr = errors.New("mock error")
	_, err = NewConfigCenter[Data](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.Error(t, err)
}
// TestConfigCenter_onChange verifies that a subscriber change refreshes the
// snapshot, and that a failing subscriber value fails construction.
func TestConfigCenter_onChange(t *testing.T) {
	mock := &mockSubscriber{}
	type Data struct {
		Name string `json:"name"`
	}
	mock.v = `{"name": "go-zero"}`
	c1, err := NewConfigCenter[Data](Config{Type: "json", Log: true}, mock)
	assert.NoError(t, err)
	data, err := c1.GetConfig()
	assert.NoError(t, err)
	assert.Equal(t, "go-zero", data.Name)

	// simulate a change notification; the center reloads from the subscriber.
	mock.v = `{"name": "go-zero2"}`
	mock.change()
	data, err = c1.GetConfig()
	assert.NoError(t, err)
	assert.Equal(t, "go-zero2", data.Name)

	// a subscriber that cannot provide a value fails construction.
	mock.valErr = errors.New("mock error")
	_, err = NewConfigCenter[Data](Config{Type: "json", Log: false}, mock)
	assert.Error(t, err)
}
// TestConfigCenter_Value verifies the raw-value accessor and that a failing
// subscriber value fails construction.
func TestConfigCenter_Value(t *testing.T) {
	mock := &mockSubscriber{}
	mock.v = "1234"
	c, err := NewConfigCenter[string](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)
	// Value is not part of the Configurator interface, so downcast.
	cc := c.(*configCenter[string])
	assert.Equal(t, cc.Value(), "1234")

	mock.valErr = errors.New("mock error")
	_, err = NewConfigCenter[any](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.Error(t, err)
}
// TestConfigCenter_AddListener verifies that all registered listeners run
// after a configuration change.
func TestConfigCenter_AddListener(t *testing.T) {
	mock := &mockSubscriber{}
	mock.v = "1234"
	c, err := NewConfigCenter[string](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)
	cc := c.(*configCenter[string])
	var a, b int
	// listeners run on separate goroutines (threading.GoSafe), so guard
	// the shared ints with a mutex.
	var mutex sync.Mutex
	cc.AddListener(func() {
		mutex.Lock()
		a = 1
		mutex.Unlock()
	})
	cc.AddListener(func() {
		mutex.Lock()
		b = 2
		mutex.Unlock()
	})

	assert.Equal(t, 2, len(cc.listeners))
	mock.change()

	// listeners are dispatched asynchronously; give them time to finish.
	time.Sleep(time.Millisecond * 100)

	mutex.Lock()
	assert.Equal(t, 1, a)
	assert.Equal(t, 2, b)
	mutex.Unlock()
}
// TestConfigCenter_genValue exercises genValue across empty input and the
// supported/unsupported target types.
func TestConfigCenter_genValue(t *testing.T) {
	t.Run("data is empty", func(t *testing.T) {
		c := &configCenter[string]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("")
		assert.Equal(t, "", v.data)
	})

	// T == any: reflect.TypeOf of the zero value is nil, so the target type
	// cannot be determined.
	t.Run("invalid template type", func(t *testing.T) {
		c := &configCenter[any]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("xxxx")
		assert.Equal(t, errMissingUnmarshalerType, v.err)
	})

	// int is neither struct/array/slice nor string, so it has no unmarshaler.
	t.Run("unsupported template type", func(t *testing.T) {
		c := &configCenter[int]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("1")
		assert.Equal(t, errMissingUnmarshalerType, v.err)
	})

	// string targets receive the raw content verbatim.
	t.Run("supported template string type", func(t *testing.T) {
		c := &configCenter[string]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("12345")
		assert.NoError(t, v.err)
		assert.Equal(t, "12345", v.data)
	})

	// malformed JSON keeps the raw data but records the parse error.
	t.Run("unmarshal fail", func(t *testing.T) {
		c := &configCenter[struct {
			Name string `json:"name"`
		}]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue(`{"name":"new name}`)
		assert.Equal(t, `{"name":"new name}`, v.data)
		assert.Error(t, v.err)
	})

	t.Run("success", func(t *testing.T) {
		c := &configCenter[struct {
			Name string `json:"name"`
		}]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue(`{"name":"new name"}`)
		assert.Equal(t, `{"name":"new name"}`, v.data)
		assert.Equal(t, "new name", v.marshalData.Name)
		assert.NoError(t, v.err)
	})
}
// mockSubscriber is a test double for subscriber.Subscriber that returns
// canned values and records the most recently registered listener.
type mockSubscriber struct {
	v              string // content returned by Value
	lisErr, valErr error  // errors returned by AddListener / Value
	listener       func() // last listener registered via AddListener
}

// AddListener records the listener and returns the configured error.
func (m *mockSubscriber) AddListener(listener func()) error {
	m.listener = listener
	return m.lisErr
}

// Value returns the canned content and the configured error.
func (m *mockSubscriber) Value() (string, error) {
	return m.v, m.valErr
}

// change simulates a configuration change by invoking the recorded listener.
func (m *mockSubscriber) change() {
	if m.listener == nil {
		return
	}

	m.listener()
}

View File

@@ -0,0 +1,67 @@
package subscriber
import (
"github.com/zeromicro/go-zero/core/discov"
"github.com/zeromicro/go-zero/core/logx"
)
type (
	// etcdSubscriber is a subscriber that subscribes to etcd.
	// It adapts discov.Subscriber to the configcenter Subscriber interface.
	etcdSubscriber struct {
		*discov.Subscriber
	}

	// EtcdConf is the configuration for etcd.
	EtcdConf = discov.EtcdConf
)
// MustNewEtcdSubscriber returns an etcd Subscriber, exits on errors.
func MustNewEtcdSubscriber(conf EtcdConf) Subscriber {
	sub, err := NewEtcdSubscriber(conf)
	logx.Must(err)

	return sub
}
// NewEtcdSubscriber returns an etcd Subscriber.
func NewEtcdSubscriber(conf EtcdConf) (Subscriber, error) {
	sub, err := discov.NewSubscriber(conf.Hosts, conf.Key, buildSubOptions(conf)...)
	if err != nil {
		return nil, err
	}

	return &etcdSubscriber{Subscriber: sub}, nil
}
// buildSubOptions constructs the options for creating a new etcd subscriber,
// adding account and TLS options only when configured.
func buildSubOptions(conf EtcdConf) []discov.SubOption {
	opts := []discov.SubOption{discov.WithExactMatch()}

	if len(conf.User) > 0 {
		opts = append(opts, discov.WithSubEtcdAccount(conf.User, conf.Pass))
	}

	if len(conf.CertFile) > 0 || len(conf.CertKeyFile) > 0 || len(conf.CACertFile) > 0 {
		opts = append(opts, discov.WithSubEtcdTLS(conf.CertFile, conf.CertKeyFile,
			conf.CACertFile, conf.InsecureSkipVerify))
	}

	return opts
}
// AddListener adds a listener to the subscriber.
// The embedded discov subscriber's AddListener does not report errors,
// so nil is always returned here to satisfy the Subscriber interface.
func (s *etcdSubscriber) AddListener(listener func()) error {
	s.Subscriber.AddListener(listener)
	return nil
}
// Value returns the latest value seen by the subscriber, or an empty
// string (with nil error) when no values are available yet.
func (s *etcdSubscriber) Value() (string, error) {
	vals := s.Subscriber.Values()
	if len(vals) == 0 {
		return "", nil
	}

	// Use the most recently reported value.
	return vals[len(vals)-1], nil
}

View File

@@ -0,0 +1,9 @@
package subscriber
// Subscriber is the interface for configcenter subscribers.
// Implementations deliver configuration content and notify listeners
// when that content changes.
type Subscriber interface {
	// AddListener adds a listener to the subscriber.
	// The listener is invoked when the subscribed value changes.
	AddListener(listener func()) error
	// Value returns the current value of the subscriber.
	Value() (string, error)
}

View File

@@ -0,0 +1,41 @@
package configurator
import (
"sync"
"github.com/zeromicro/go-zero/core/conf"
)
// registry holds the built-in unmarshalers keyed by format name.
// json/toml/yaml are preregistered; additional formats can be added
// via RegisterUnmarshaler.
var registry = &unmarshalerRegistry{
	unmarshalers: map[string]LoaderFn{
		"json": conf.LoadFromJsonBytes,
		"toml": conf.LoadFromTomlBytes,
		"yaml": conf.LoadFromYamlBytes,
	},
}
type (
	// LoaderFn is the function type for loading configuration.
	// It parses the raw bytes into the value pointed to by the second
	// argument, returning an error on malformed input.
	LoaderFn func([]byte, any) error

	// unmarshalerRegistry is the registry for unmarshalers.
	// mu guards concurrent access to the unmarshalers map.
	unmarshalerRegistry struct {
		unmarshalers map[string]LoaderFn
		mu           sync.RWMutex
	}
)
// RegisterUnmarshaler registers an unmarshaler under the given name,
// overwriting any loader previously registered with that name.
func RegisterUnmarshaler(name string, fn LoaderFn) {
	registry.mu.Lock()
	registry.unmarshalers[name] = fn
	registry.mu.Unlock()
}
// Unmarshaler returns the unmarshaler registered under name, and a
// boolean reporting whether such an unmarshaler exists.
func Unmarshaler(name string) (LoaderFn, bool) {
	registry.mu.RLock()
	loader, found := registry.unmarshalers[name]
	registry.mu.RUnlock()

	return loader, found
}

View File

@@ -0,0 +1,28 @@
package configurator
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestRegisterUnmarshaler verifies that a newly registered loader and
// the three built-in formats are all resolvable, and that an unknown
// name is not.
func TestRegisterUnmarshaler(t *testing.T) {
	RegisterUnmarshaler("test", func(data []byte, v interface{}) error {
		return nil
	})

	// The custom loader plus every built-in format must be present.
	for _, name := range []string{"test", "json", "toml", "yaml"} {
		_, ok := Unmarshaler(name)
		assert.True(t, ok)
	}

	// An unregistered name must not resolve.
	_, ok := Unmarshaler("test2")
	assert.False(t, ok)
}

View File

@@ -10,13 +10,14 @@ import (
"sync" "sync"
"time" "time"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
"github.com/zeromicro/go-zero/core/contextx" "github.com/zeromicro/go-zero/core/contextx"
"github.com/zeromicro/go-zero/core/lang" "github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx" "github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/syncx" "github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/threading" "github.com/zeromicro/go-zero/core/threading"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
) )
var ( var (
@@ -30,7 +31,7 @@ var (
// A Registry is a registry that manages the etcd client connections. // A Registry is a registry that manages the etcd client connections.
type Registry struct { type Registry struct {
clusters map[string]*cluster clusters map[string]*cluster
lock sync.Mutex lock sync.RWMutex
} }
// GetRegistry returns a global Registry. // GetRegistry returns a global Registry.
@@ -45,7 +46,7 @@ func (r *Registry) GetConn(endpoints []string) (EtcdClient, error) {
} }
// Monitor monitors the key on given etcd endpoints, notify with the given UpdateListener. // Monitor monitors the key on given etcd endpoints, notify with the given UpdateListener.
func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) error { func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener, exactMatch bool) error {
c, exists := r.getCluster(endpoints) c, exists := r.getCluster(endpoints)
// if exists, the existing values should be updated to the listener. // if exists, the existing values should be updated to the listener.
if exists { if exists {
@@ -55,17 +56,24 @@ func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) err
} }
} }
return c.monitor(key, l) return c.monitor(key, l, exactMatch)
} }
func (r *Registry) getCluster(endpoints []string) (c *cluster, exists bool) { func (r *Registry) getCluster(endpoints []string) (c *cluster, exists bool) {
clusterKey := getClusterKey(endpoints) clusterKey := getClusterKey(endpoints)
r.lock.Lock() r.lock.RLock()
defer r.lock.Unlock()
c, exists = r.clusters[clusterKey] c, exists = r.clusters[clusterKey]
r.lock.RUnlock()
if !exists { if !exists {
c = newCluster(endpoints) r.lock.Lock()
r.clusters[clusterKey] = c defer r.lock.Unlock()
// double-check locking
c, exists = r.clusters[clusterKey]
if !exists {
c = newCluster(endpoints)
r.clusters[clusterKey] = c
}
} }
return return
@@ -78,7 +86,8 @@ type cluster struct {
listeners map[string][]UpdateListener listeners map[string][]UpdateListener
watchGroup *threading.RoutineGroup watchGroup *threading.RoutineGroup
done chan lang.PlaceholderType done chan lang.PlaceholderType
lock sync.Mutex lock sync.RWMutex
exactMatch bool
} }
func newCluster(endpoints []string) *cluster { func newCluster(endpoints []string) *cluster {
@@ -108,8 +117,8 @@ func (c *cluster) getClient() (EtcdClient, error) {
} }
func (c *cluster) getCurrent(key string) []KV { func (c *cluster) getCurrent(key string) []KV {
c.lock.Lock() c.lock.RLock()
defer c.lock.Unlock() defer c.lock.RUnlock()
var kvs []KV var kvs []KV
for k, v := range c.values[key] { for k, v := range c.values[key] {
@@ -125,6 +134,7 @@ func (c *cluster) getCurrent(key string) []KV {
func (c *cluster) handleChanges(key string, kvs []KV) { func (c *cluster) handleChanges(key string, kvs []KV) {
var add []KV var add []KV
var remove []KV var remove []KV
c.lock.Lock() c.lock.Lock()
listeners := append([]UpdateListener(nil), c.listeners[key]...) listeners := append([]UpdateListener(nil), c.listeners[key]...)
vals, ok := c.values[key] vals, ok := c.values[key]
@@ -173,9 +183,9 @@ func (c *cluster) handleChanges(key string, kvs []KV) {
} }
func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) { func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) {
c.lock.Lock() c.lock.RLock()
listeners := append([]UpdateListener(nil), c.listeners[key]...) listeners := append([]UpdateListener(nil), c.listeners[key]...)
c.lock.Unlock() c.lock.RUnlock()
for _, ev := range events { for _, ev := range events {
switch ev.Type { switch ev.Type {
@@ -216,13 +226,18 @@ func (c *cluster) load(cli EtcdClient, key string) int64 {
for { for {
var err error var err error
ctx, cancel := context.WithTimeout(c.context(cli), RequestTimeout) ctx, cancel := context.WithTimeout(c.context(cli), RequestTimeout)
resp, err = cli.Get(ctx, makeKeyPrefix(key), clientv3.WithPrefix()) if c.exactMatch {
resp, err = cli.Get(ctx, key)
} else {
resp, err = cli.Get(ctx, makeKeyPrefix(key), clientv3.WithPrefix())
}
cancel() cancel()
if err == nil { if err == nil {
break break
} }
logx.Error(err) logx.Errorf("%s, key is %s", err.Error(), key)
time.Sleep(coolDownInterval) time.Sleep(coolDownInterval)
} }
@@ -239,9 +254,10 @@ func (c *cluster) load(cli EtcdClient, key string) int64 {
return resp.Header.Revision return resp.Header.Revision
} }
func (c *cluster) monitor(key string, l UpdateListener) error { func (c *cluster) monitor(key string, l UpdateListener, exactMatch bool) error {
c.lock.Lock() c.lock.Lock()
c.listeners[key] = append(c.listeners[key], l) c.listeners[key] = append(c.listeners[key], l)
c.exactMatch = exactMatch
c.lock.Unlock() c.lock.Unlock()
cli, err := c.getClient() cli, err := c.getClient()
@@ -307,14 +323,20 @@ func (c *cluster) watch(cli EtcdClient, key string, rev int64) {
} }
func (c *cluster) watchStream(cli EtcdClient, key string, rev int64) error { func (c *cluster) watchStream(cli EtcdClient, key string, rev int64) error {
var rch clientv3.WatchChan var (
if rev != 0 { rch clientv3.WatchChan
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), ops []clientv3.OpOption
clientv3.WithPrefix(), clientv3.WithRev(rev+1)) watchKey = key
} else { )
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), if !c.exactMatch {
clientv3.WithPrefix()) watchKey = makeKeyPrefix(key)
ops = append(ops, clientv3.WithPrefix())
} }
if rev != 0 {
ops = append(ops, clientv3.WithRev(rev+1))
}
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), watchKey, ops...)
for { for {
select { select {

View File

@@ -289,7 +289,7 @@ func TestRegistry_Monitor(t *testing.T) {
}, },
} }
GetRegistry().lock.Unlock() GetRegistry().lock.Unlock()
assert.Error(t, GetRegistry().Monitor(endpoints, "foo", new(mockListener))) assert.Error(t, GetRegistry().Monitor(endpoints, "foo", new(mockListener), false))
} }
type mockListener struct { type mockListener struct {

View File

@@ -15,9 +15,10 @@ type (
// A Subscriber is used to subscribe the given key on an etcd cluster. // A Subscriber is used to subscribe the given key on an etcd cluster.
Subscriber struct { Subscriber struct {
endpoints []string endpoints []string
exclusive bool exclusive bool
items *container exactMatch bool
items *container
} }
) )
@@ -34,7 +35,7 @@ func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscrib
} }
sub.items = newContainer(sub.exclusive) sub.items = newContainer(sub.exclusive)
if err := internal.GetRegistry().Monitor(endpoints, key, sub.items); err != nil { if err := internal.GetRegistry().Monitor(endpoints, key, sub.items, sub.exactMatch); err != nil {
return nil, err return nil, err
} }
@@ -59,6 +60,13 @@ func Exclusive() SubOption {
} }
} }
// WithExactMatch turn off querying using key prefixes.
func WithExactMatch() SubOption {
return func(sub *Subscriber) {
sub.exactMatch = true
}
}
// WithSubEtcdAccount provides the etcd username/password. // WithSubEtcdAccount provides the etcd username/password.
func WithSubEtcdAccount(user, pass string) SubOption { func WithSubEtcdAccount(user, pass string) SubOption {
return func(sub *Subscriber) { return func(sub *Subscriber) {

View File

@@ -1,21 +1,17 @@
package errorx package errorx
import ( import (
"bytes" "errors"
"sync" "sync"
) )
type ( // BatchError is an error that can hold multiple errors.
// A BatchError is an error that can hold multiple errors. type BatchError struct {
BatchError struct { errs []error
errs errorArray lock sync.RWMutex
lock sync.Mutex }
}
errorArray []error // Add adds one or more non-nil errors to the BatchError instance.
)
// Add adds errs to be, nil errors are ignored.
func (be *BatchError) Add(errs ...error) { func (be *BatchError) Add(errs ...error) {
be.lock.Lock() be.lock.Lock()
defer be.lock.Unlock() defer be.lock.Unlock()
@@ -27,39 +23,20 @@ func (be *BatchError) Add(errs ...error) {
} }
} }
// Err returns an error that represents all errors. // Err returns an error that represents all accumulated errors.
// It returns nil if there are no errors.
func (be *BatchError) Err() error { func (be *BatchError) Err() error {
be.lock.Lock() be.lock.RLock()
defer be.lock.Unlock() defer be.lock.RUnlock()
switch len(be.errs) { // If there are no non-nil errors, errors.Join(...) returns nil.
case 0: return errors.Join(be.errs...)
return nil
case 1:
return be.errs[0]
default:
return be.errs
}
} }
// NotNil checks if any error inside. // NotNil checks if there is at least one error inside the BatchError.
func (be *BatchError) NotNil() bool { func (be *BatchError) NotNil() bool {
be.lock.Lock() be.lock.RLock()
defer be.lock.Unlock() defer be.lock.RUnlock()
return len(be.errs) > 0 return len(be.errs) > 0
} }
// Error returns a string that represents inside errors.
func (ea errorArray) Error() string {
var buf bytes.Buffer
for i := range ea {
if i > 0 {
buf.WriteByte('\n')
}
buf.WriteString(ea[i].Error())
}
return buf.String()
}

View File

@@ -66,3 +66,82 @@ func TestBatchErrorConcurrentAdd(t *testing.T) {
assert.Equal(t, count, len(batch.errs)) assert.Equal(t, count, len(batch.errs))
assert.True(t, batch.NotNil()) assert.True(t, batch.NotNil())
} }
func TestBatchError_Unwrap(t *testing.T) {
t.Run("nil", func(t *testing.T) {
var be BatchError
assert.Nil(t, be.Err())
assert.True(t, errors.Is(be.Err(), nil))
})
t.Run("one error", func(t *testing.T) {
var errFoo = errors.New("foo")
var errBar = errors.New("bar")
var be BatchError
be.Add(errFoo)
assert.True(t, errors.Is(be.Err(), errFoo))
assert.False(t, errors.Is(be.Err(), errBar))
})
t.Run("two errors", func(t *testing.T) {
var errFoo = errors.New("foo")
var errBar = errors.New("bar")
var errBaz = errors.New("baz")
var be BatchError
be.Add(errFoo)
be.Add(errBar)
assert.True(t, errors.Is(be.Err(), errFoo))
assert.True(t, errors.Is(be.Err(), errBar))
assert.False(t, errors.Is(be.Err(), errBaz))
})
}
func TestBatchError_Add(t *testing.T) {
var be BatchError
// Test adding nil errors
be.Add(nil, nil)
assert.False(t, be.NotNil(), "Expected BatchError to be empty after adding nil errors")
// Test adding non-nil errors
err1 := errors.New("error 1")
err2 := errors.New("error 2")
be.Add(err1, err2)
assert.True(t, be.NotNil(), "Expected BatchError to be non-empty after adding errors")
// Test adding a mix of nil and non-nil errors
err3 := errors.New("error 3")
be.Add(nil, err3, nil)
assert.True(t, be.NotNil(), "Expected BatchError to be non-empty after adding a mix of nil and non-nil errors")
}
func TestBatchError_Err(t *testing.T) {
var be BatchError
// Test Err() on empty BatchError
assert.Nil(t, be.Err(), "Expected nil error for empty BatchError")
// Test Err() with multiple errors
err1 := errors.New("error 1")
err2 := errors.New("error 2")
be.Add(err1, err2)
combinedErr := be.Err()
assert.NotNil(t, combinedErr, "Expected nil error for BatchError with multiple errors")
// Check if the combined error contains both error messages
errString := combinedErr.Error()
assert.Truef(t, errors.Is(combinedErr, err1), "Combined error doesn't contain first error: %s", errString)
assert.Truef(t, errors.Is(combinedErr, err2), "Combined error doesn't contain second error: %s", errString)
}
func TestBatchError_NotNil(t *testing.T) {
var be BatchError
// Test NotNil() on empty BatchError
assert.Nil(t, be.Err(), "Expected nil error for empty BatchError")
// Test NotNil() after adding an error
be.Add(errors.New("test error"))
assert.NotNil(t, be.Err(), "Expected non-nil error after adding an error")
}

14
core/errorx/check.go Normal file
View File

@@ -0,0 +1,14 @@
package errorx
import "errors"
// In checks if the given err is one of errs.
func In(err error, errs ...error) bool {
for _, each := range errs {
if errors.Is(err, each) {
return true
}
}
return false
}

70
core/errorx/check_test.go Normal file
View File

@@ -0,0 +1,70 @@
package errorx
import (
"errors"
"testing"
)
// TestIn exercises In with matching, non-matching, empty, and
// nil-involving combinations of err and list.
func TestIn(t *testing.T) {
	err1 := errors.New("error 1")
	err2 := errors.New("error 2")
	err3 := errors.New("error 3")

	cases := []struct {
		name   string
		target error
		list   []error
		expect bool
	}{
		{
			name:   "Error matches one of the errors in the list",
			target: err1,
			list:   []error{err1, err2},
			expect: true,
		},
		{
			name:   "Error does not match any errors in the list",
			target: err3,
			list:   []error{err1, err2},
			expect: false,
		},
		{
			name:   "Empty error list",
			target: err1,
			list:   []error{},
			expect: false,
		},
		{
			name:   "Nil error with non-nil list",
			target: nil,
			list:   []error{err1, err2},
			expect: false,
		},
		{
			name:   "Non-nil error with nil in list",
			target: err1,
			list:   []error{nil, err2},
			expect: false,
		},
		{
			name:   "Error matches nil error in the list",
			target: nil,
			list:   []error{nil, err2},
			expect: true,
		},
		{
			name:   "Nil error with empty list",
			target: nil,
			list:   []error{},
			expect: false,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			if got := In(c.target, c.list...); got != c.expect {
				t.Errorf("In() = %v, want %v", got, c.expect)
			}
		})
	}
}

View File

@@ -35,6 +35,7 @@ func firstLine(file *os.File) (string, error) {
for { for {
buf := make([]byte, bufSize) buf := make([]byte, bufSize)
n, err := file.ReadAt(buf, offset) n, err := file.ReadAt(buf, offset)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return "", err return "", err
} }
@@ -45,6 +46,10 @@ func firstLine(file *os.File) (string, error) {
} }
} }
if err == io.EOF {
return string(append(first, buf[:n]...)), nil
}
first = append(first, buf[:n]...) first = append(first, buf[:n]...)
offset += bufSize offset += bufSize
} }
@@ -57,30 +62,42 @@ func lastLine(filename string, file *os.File) (string, error) {
} }
var last []byte var last []byte
bufLen := int64(bufSize)
offset := info.Size() offset := info.Size()
for {
offset -= bufSize for offset > 0 {
if offset < 0 { if offset < bufLen {
bufLen = offset
offset = 0 offset = 0
} else {
offset -= bufLen
} }
buf := make([]byte, bufSize)
buf := make([]byte, bufLen)
n, err := file.ReadAt(buf, offset) n, err := file.ReadAt(buf, offset)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return "", err return "", err
} }
if n == 0 {
break
}
if buf[n-1] == '\n' { if buf[n-1] == '\n' {
buf = buf[:n-1] buf = buf[:n-1]
n-- n--
} else { } else {
buf = buf[:n] buf = buf[:n]
} }
for n--; n >= 0; n-- {
if buf[n] == '\n' { for i := n - 1; i >= 0; i-- {
return string(append(buf[n+1:], last...)), nil if buf[i] == '\n' {
return string(append(buf[i+1:], last...)), nil
} }
} }
last = append(buf, last...) last = append(buf, last...)
} }
return string(last), nil
} }

View File

@@ -52,6 +52,7 @@ last line`
second line second line
last line last line
` `
emptyContent = ``
) )
func TestFirstLine(t *testing.T) { func TestFirstLine(t *testing.T) {
@@ -79,6 +80,26 @@ func TestFirstLineError(t *testing.T) {
assert.Error(t, err) assert.Error(t, err)
} }
func TestFirstLineEmptyFile(t *testing.T) {
filename, err := fs.TempFilenameWithText(emptyContent)
assert.Nil(t, err)
defer os.Remove(filename)
val, err := FirstLine(filename)
assert.Nil(t, err)
assert.Equal(t, "", val)
}
func TestFirstLineWithoutNewline(t *testing.T) {
filename, err := fs.TempFilenameWithText(longLine)
assert.Nil(t, err)
defer os.Remove(filename)
val, err := FirstLine(filename)
assert.Nil(t, err)
assert.Equal(t, longLine, val)
}
func TestLastLine(t *testing.T) { func TestLastLine(t *testing.T) {
filename, err := fs.TempFilenameWithText(text) filename, err := fs.TempFilenameWithText(text)
assert.Nil(t, err) assert.Nil(t, err)
@@ -99,6 +120,16 @@ func TestLastLineWithLastNewline(t *testing.T) {
assert.Equal(t, longLine, val) assert.Equal(t, longLine, val)
} }
func TestLastLineWithoutLastNewline(t *testing.T) {
filename, err := fs.TempFilenameWithText(longLine)
assert.Nil(t, err)
defer os.Remove(filename)
val, err := LastLine(filename)
assert.Nil(t, err)
assert.Equal(t, longLine, val)
}
func TestLastLineShort(t *testing.T) { func TestLastLineShort(t *testing.T) {
filename, err := fs.TempFilenameWithText(shortText) filename, err := fs.TempFilenameWithText(shortText)
assert.Nil(t, err) assert.Nil(t, err)
@@ -123,3 +154,67 @@ func TestLastLineError(t *testing.T) {
_, err := LastLine("/tmp/does-not-exist") _, err := LastLine("/tmp/does-not-exist")
assert.Error(t, err) assert.Error(t, err)
} }
func TestLastLineEmptyFile(t *testing.T) {
filename, err := fs.TempFilenameWithText(emptyContent)
assert.Nil(t, err)
defer os.Remove(filename)
val, err := LastLine(filename)
assert.Nil(t, err)
assert.Equal(t, "", val)
}
func TestFirstLineExactlyBufSize(t *testing.T) {
content := make([]byte, bufSize)
for i := range content {
content[i] = 'a'
}
content[bufSize-1] = '\n' // Ensure there is a newline at the edge
filename, err := fs.TempFilenameWithText(string(content))
assert.Nil(t, err)
defer os.Remove(filename)
val, err := FirstLine(filename)
assert.Nil(t, err)
assert.Equal(t, string(content[:bufSize-1]), val)
}
func TestLastLineExactlyBufSize(t *testing.T) {
content := make([]byte, bufSize)
for i := range content {
content[i] = 'a'
}
content[bufSize-1] = '\n' // Ensure there is a newline at the edge
filename, err := fs.TempFilenameWithText(string(content))
assert.Nil(t, err)
defer os.Remove(filename)
val, err := LastLine(filename)
assert.Nil(t, err)
assert.Equal(t, string(content[:bufSize-1]), val)
}
func TestFirstLineLargeFile(t *testing.T) {
content := text + text + text + "\n" + "extra"
filename, err := fs.TempFilenameWithText(content)
assert.Nil(t, err)
defer os.Remove(filename)
val, err := FirstLine(filename)
assert.Nil(t, err)
assert.Equal(t, "first line", val)
}
func TestLastLineLargeFile(t *testing.T) {
content := text + text + text + "\n" + "extra"
filename, err := fs.TempFilenameWithText(content)
assert.Nil(t, err)
defer os.Remove(filename)
val, err := LastLine(filename)
assert.Nil(t, err)
assert.Equal(t, "extra", val)
}

View File

@@ -5,7 +5,7 @@ import "gopkg.in/cheggaaa/pb.v1"
type ( type (
// A Scanner is used to read lines. // A Scanner is used to read lines.
Scanner interface { Scanner interface {
// Scan checks if has remaining to read. // Scan checks if it has remaining to read.
Scan() bool Scan() bool
// Text returns next line. // Text returns next line.
Text() string Text() string

View File

@@ -1,6 +1,9 @@
package fx package fx
import "github.com/zeromicro/go-zero/core/threading" import (
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/threading"
)
// Parallel runs fns parallelly and waits for done. // Parallel runs fns parallelly and waits for done.
func Parallel(fns ...func()) { func Parallel(fns ...func()) {
@@ -10,3 +13,20 @@ func Parallel(fns ...func()) {
} }
group.Wait() group.Wait()
} }
func ParallelErr(fns ...func() error) error {
var be errorx.BatchError
group := threading.NewRoutineGroup()
for _, fn := range fns {
f := fn
group.RunSafe(func() {
if err := f(); err != nil {
be.Add(err)
}
})
}
group.Wait()
return be.Err()
}

View File

@@ -1,6 +1,7 @@
package fx package fx
import ( import (
"errors"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time" "time"
@@ -22,3 +23,54 @@ func TestParallel(t *testing.T) {
}) })
assert.Equal(t, int32(6), count) assert.Equal(t, int32(6), count)
} }
func TestParallelErr(t *testing.T) {
var count int32
err := ParallelErr(
func() error {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 1)
return errors.New("failed to exec #1")
},
func() error {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 2)
return errors.New("failed to exec #2")
},
func() error {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 3)
return nil
},
)
assert.Equal(t, int32(6), count)
assert.Error(t, err)
assert.ErrorContains(t, err, "failed to exec #1", "failed to exec #2")
}
func TestParallelErrErrorNil(t *testing.T) {
var count int32
err := ParallelErr(
func() error {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 1)
return nil
},
func() error {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 2)
return nil
},
func() error {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 3)
return nil
},
)
assert.Equal(t, int32(6), count)
assert.NoError(t, err)
}

View File

@@ -84,10 +84,10 @@ func Range(source <-chan any) Stream {
} }
} }
// AllMach returns whether all elements of this stream match the provided predicate. // AllMatch returns whether all elements of this stream match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result. // May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then true is returned and the predicate is not evaluated. // If the stream is empty then true is returned and the predicate is not evaluated.
func (s Stream) AllMach(predicate func(item any) bool) bool { func (s Stream) AllMatch(predicate func(item any) bool) bool {
for item := range s.source { for item := range s.source {
if !predicate(item) { if !predicate(item) {
// make sure the former goroutine not block, and current func returns fast. // make sure the former goroutine not block, and current func returns fast.
@@ -99,10 +99,10 @@ func (s Stream) AllMach(predicate func(item any) bool) bool {
return true return true
} }
// AnyMach returns whether any elements of this stream match the provided predicate. // AnyMatch returns whether any elements of this stream match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result. // May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then false is returned and the predicate is not evaluated. // If the stream is empty then false is returned and the predicate is not evaluated.
func (s Stream) AnyMach(predicate func(item any) bool) bool { func (s Stream) AnyMatch(predicate func(item any) bool) bool {
for item := range s.source { for item := range s.source {
if predicate(item) { if predicate(item) {
// make sure the former goroutine not block, and current func returns fast. // make sure the former goroutine not block, and current func returns fast.
@@ -352,7 +352,7 @@ func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
}, opts...).Done() }, opts...).Done()
} }
// Reduce is an utility method to let the caller deal with the underlying channel. // Reduce is a utility method to let the caller deal with the underlying channel.
func (s Stream) Reduce(fn ReduceFunc) (any, error) { func (s Stream) Reduce(fn ReduceFunc) (any, error) {
return fn(s.source) return fn(s.source)
} }

View File

@@ -398,16 +398,16 @@ func TestWalk(t *testing.T) {
func TestStream_AnyMach(t *testing.T) { func TestStream_AnyMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool { assetEqual(t, false, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 4 return item.(int) == 4
})) }))
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool { assetEqual(t, false, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 0 return item.(int) == 0
})) }))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool { assetEqual(t, true, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 2 return item.(int) == 2
})) }))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool { assetEqual(t, true, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 2 return item.(int) == 2
})) }))
}) })
@@ -416,17 +416,17 @@ func TestStream_AnyMach(t *testing.T) {
func TestStream_AllMach(t *testing.T) { func TestStream_AllMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
assetEqual( assetEqual(
t, true, Just(1, 2, 3).AllMach(func(item any) bool { t, true, Just(1, 2, 3).AllMatch(func(item any) bool {
return true return true
}), }),
) )
assetEqual( assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item any) bool { t, false, Just(1, 2, 3).AllMatch(func(item any) bool {
return false return false
}), }),
) )
assetEqual( assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item any) bool { t, false, Just(1, 2, 3).AllMatch(func(item any) bool {
return item.(int) == 1 return item.(int) == 1
}), }),
) )

View File

@@ -2,7 +2,7 @@ package iox
import "os" import "os"
// RedirectInOut redirects stdin to r, stdout to w, and callers need to call restore afterwards. // RedirectInOut redirects stdin to r, stdout to w, and callers need to call restore afterward.
func RedirectInOut() (restore func(), err error) { func RedirectInOut() (restore func(), err error) {
var r, w *os.File var r, w *os.File
r, w, err = os.Pipe() r, w, err = os.Pipe()

View File

@@ -9,7 +9,7 @@ import (
const bufSize = 32 * 1024 const bufSize = 32 * 1024
// CountLines returns the number of lines in file. // CountLines returns the number of lines in the file.
func CountLines(file string) (int, error) { func CountLines(file string) (int, error) {
f, err := os.Open(file) f, err := os.Open(file)
if err != nil { if err != nil {

View File

@@ -2,11 +2,12 @@ package iox
import ( import (
"bufio" "bufio"
"errors"
"io" "io"
"strings" "strings"
) )
// A TextLineScanner is a scanner that can scan lines from given reader. // A TextLineScanner is a scanner that can scan lines from the given reader.
type TextLineScanner struct { type TextLineScanner struct {
reader *bufio.Reader reader *bufio.Reader
hasNext bool hasNext bool
@@ -14,7 +15,7 @@ type TextLineScanner struct {
err error err error
} }
// NewTextLineScanner returns a TextLineScanner with given reader. // NewTextLineScanner returns a TextLineScanner with the given reader.
func NewTextLineScanner(reader io.Reader) *TextLineScanner { func NewTextLineScanner(reader io.Reader) *TextLineScanner {
return &TextLineScanner{ return &TextLineScanner{
reader: bufio.NewReader(reader), reader: bufio.NewReader(reader),
@@ -30,7 +31,7 @@ func (scanner *TextLineScanner) Scan() bool {
line, err := scanner.reader.ReadString('\n') line, err := scanner.reader.ReadString('\n')
scanner.line = strings.TrimRight(line, "\n") scanner.line = strings.TrimRight(line, "\n")
if err == io.EOF { if errors.Is(err, io.EOF) {
scanner.hasNext = false scanner.hasNext = false
return true return true
} else if err != nil { } else if err != nil {

View File

@@ -2,6 +2,7 @@ package limit
import ( import (
"context" "context"
_ "embed"
"errors" "errors"
"strconv" "strconv"
"time" "time"
@@ -28,20 +29,9 @@ var (
// ErrUnknownCode is an error that represents unknown status code. // ErrUnknownCode is an error that represents unknown status code.
ErrUnknownCode = errors.New("unknown status code") ErrUnknownCode = errors.New("unknown status code")
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key //go:embed periodscript.lua
periodScript = redis.NewScript(`local limit = tonumber(ARGV[1]) periodLuaScript string
local window = tonumber(ARGV[2]) periodScript = redis.NewScript(periodLuaScript)
local current = redis.call("INCRBY", KEYS[1], 1)
if current == 1 then
redis.call("expire", KEYS[1], window)
end
if current < limit then
return 1
elseif current == limit then
return 2
else
return 0
end`)
) )
type ( type (

View File

@@ -0,0 +1,14 @@
-- to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
-- ARGV[1]: the request quota allowed within the window
local limit = tonumber(ARGV[1])
-- ARGV[2]: the window size in seconds
local window = tonumber(ARGV[2])
-- count this request; the first INCRBY creates the key with value 1
local current = redis.call("INCRBY", KEYS[1], 1)
if current == 1 then
-- first request of the window: start the expiry timer so the
-- counter resets when the window elapses
redis.call("expire", KEYS[1], window)
end
if current < limit then
-- 1: allowed, still under quota
return 1
elseif current == limit then
-- 2: allowed, quota exactly reached by this request
return 2
else
-- 0: rejected, quota exceeded
return 0
end

View File

@@ -2,6 +2,7 @@ package limit
import ( import (
"context" "context"
_ "embed"
"errors" "errors"
"fmt" "fmt"
"strconv" "strconv"
@@ -9,6 +10,7 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/logx" "github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stores/redis" "github.com/zeromicro/go-zero/core/stores/redis"
xrate "golang.org/x/time/rate" xrate "golang.org/x/time/rate"
@@ -20,37 +22,11 @@ const (
pingInterval = time.Millisecond * 100 pingInterval = time.Millisecond * 100
) )
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key var (
// KEYS[1] as tokens_key //go:embed tokenscript.lua
// KEYS[2] as timestamp_key tokenLuaScript string
var script = redis.NewScript(`local rate = tonumber(ARGV[1]) tokenScript = redis.NewScript(tokenLuaScript)
local capacity = tonumber(ARGV[2]) )
local now = tonumber(ARGV[3])
local requested = tonumber(ARGV[4])
local fill_time = capacity/rate
local ttl = math.floor(fill_time*2)
local last_tokens = tonumber(redis.call("get", KEYS[1]))
if last_tokens == nil then
last_tokens = capacity
end
local last_refreshed = tonumber(redis.call("get", KEYS[2]))
if last_refreshed == nil then
last_refreshed = 0
end
local delta = math.max(0, now-last_refreshed)
local filled_tokens = math.min(capacity, last_tokens+(delta*rate))
local allowed = filled_tokens >= requested
local new_tokens = filled_tokens
if allowed then
new_tokens = filled_tokens - requested
end
redis.call("setex", KEYS[1], ttl, new_tokens)
redis.call("setex", KEYS[2], ttl, now)
return allowed`)
// A TokenLimiter controls how frequently events are allowed to happen with in one second. // A TokenLimiter controls how frequently events are allowed to happen with in one second.
type TokenLimiter struct { type TokenLimiter struct {
@@ -112,7 +88,7 @@ func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) boo
} }
resp, err := lim.store.ScriptRunCtx(ctx, resp, err := lim.store.ScriptRunCtx(ctx,
script, tokenScript,
[]string{ []string{
lim.tokenKey, lim.tokenKey,
lim.timestampKey, lim.timestampKey,
@@ -125,10 +101,10 @@ func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) boo
}) })
// redis allowed == false // redis allowed == false
// Lua boolean false -> r Nil bulk reply // Lua boolean false -> r Nil bulk reply
if err == redis.Nil { if errors.Is(err, redis.Nil) {
return false return false
} }
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { if errorx.In(err, context.DeadlineExceeded, context.Canceled) {
logx.Errorf("fail to use rate limiter: %s", err) logx.Errorf("fail to use rate limiter: %s", err)
return false return false
} }

View File

@@ -0,0 +1,31 @@
-- to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
-- KEYS[1] as tokens_key
-- KEYS[2] as timestamp_key
local rate = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local requested = tonumber(ARGV[4])
local fill_time = capacity/rate
local ttl = math.floor(fill_time*2)
local last_tokens = tonumber(redis.call("get", KEYS[1]))
if last_tokens == nil then
last_tokens = capacity
end
local last_refreshed = tonumber(redis.call("get", KEYS[2]))
if last_refreshed == nil then
last_refreshed = 0
end
local delta = math.max(0, now-last_refreshed)
local filled_tokens = math.min(capacity, last_tokens+(delta*rate))
local allowed = filled_tokens >= requested
local new_tokens = filled_tokens
if allowed then
new_tokens = filled_tokens - requested
end
redis.call("setex", KEYS[1], ttl, new_tokens)
redis.call("setex", KEYS[2], ttl, now)
return allowed

View File

@@ -76,8 +76,8 @@ type (
avgFlyingLock syncx.SpinLock avgFlyingLock syncx.SpinLock
overloadTime *syncx.AtomicDuration overloadTime *syncx.AtomicDuration
droppedRecently *syncx.AtomicBool droppedRecently *syncx.AtomicBool
passCounter *collection.RollingWindow passCounter *collection.RollingWindow[int64, *collection.Bucket[int64]]
rtCounter *collection.RollingWindow rtCounter *collection.RollingWindow[int64, *collection.Bucket[int64]]
} }
) )
@@ -107,15 +107,16 @@ func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
opt(&options) opt(&options)
} }
bucketDuration := options.window / time.Duration(options.buckets) bucketDuration := options.window / time.Duration(options.buckets)
newBucket := func() *collection.Bucket[int64] {
return new(collection.Bucket[int64])
}
return &adaptiveShedder{ return &adaptiveShedder{
cpuThreshold: options.cpuThreshold, cpuThreshold: options.cpuThreshold,
windowScale: float64(time.Second) / float64(bucketDuration) / millisecondsPerSecond, windowScale: float64(time.Second) / float64(bucketDuration) / millisecondsPerSecond,
overloadTime: syncx.NewAtomicDuration(), overloadTime: syncx.NewAtomicDuration(),
droppedRecently: syncx.NewAtomicBool(), droppedRecently: syncx.NewAtomicBool(),
passCounter: collection.NewRollingWindow(options.buckets, bucketDuration, passCounter: collection.NewRollingWindow[int64, *collection.Bucket[int64]](newBucket, options.buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]()),
collection.IgnoreCurrentBucket()), rtCounter: collection.NewRollingWindow[int64, *collection.Bucket[int64]](newBucket, options.buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]()),
rtCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
collection.IgnoreCurrentBucket()),
} }
} }
@@ -138,10 +139,10 @@ func (as *adaptiveShedder) Allow() (Promise, error) {
func (as *adaptiveShedder) addFlying(delta int64) { func (as *adaptiveShedder) addFlying(delta int64) {
flying := atomic.AddInt64(&as.flying, delta) flying := atomic.AddInt64(&as.flying, delta)
// update avgFlying when the request is finished. // update avgFlying when the request is finished.
// this strategy makes avgFlying have a little bit lag against flying, and smoother. // this strategy makes avgFlying have a little bit of lag against flying, and smoother.
// when the flying requests increase rapidly, avgFlying increase slower, accept more requests. // when the flying requests increase rapidly, avgFlying increase slower, accept more requests.
// when the flying requests drop rapidly, avgFlying drop slower, accept fewer requests. // when the flying requests drop rapidly, avgFlying drop slower, accept fewer requests.
// it makes the service to serve as more requests as possible. // it makes the service to serve as many requests as possible.
if delta < 0 { if delta < 0 {
as.avgFlyingLock.Lock() as.avgFlyingLock.Lock()
as.avgFlying = as.avgFlying*flyingBeta + float64(flying)*(1-flyingBeta) as.avgFlying = as.avgFlying*flyingBeta + float64(flying)*(1-flyingBeta)
@@ -167,15 +168,15 @@ func (as *adaptiveShedder) maxFlight() float64 {
} }
func (as *adaptiveShedder) maxPass() int64 { func (as *adaptiveShedder) maxPass() int64 {
var result float64 = 1 var result int64 = 1
as.passCounter.Reduce(func(b *collection.Bucket) { as.passCounter.Reduce(func(b *collection.Bucket[int64]) {
if b.Sum > result { if b.Sum > result {
result = b.Sum result = b.Sum
} }
}) })
return int64(result) return result
} }
func (as *adaptiveShedder) minRt() float64 { func (as *adaptiveShedder) minRt() float64 {
@@ -183,12 +184,12 @@ func (as *adaptiveShedder) minRt() float64 {
// its a reasonable large value to avoid dropping requests. // its a reasonable large value to avoid dropping requests.
result := defaultMinRt result := defaultMinRt
as.rtCounter.Reduce(func(b *collection.Bucket) { as.rtCounter.Reduce(func(b *collection.Bucket[int64]) {
if b.Count <= 0 { if b.Count <= 0 {
return return
} }
avg := math.Round(b.Sum / float64(b.Count)) avg := math.Round(float64(b.Sum) / float64(b.Count))
if avg < result { if avg < result {
result = avg result = avg
} }
@@ -200,7 +201,7 @@ func (as *adaptiveShedder) minRt() float64 {
func (as *adaptiveShedder) overloadFactor() float64 { func (as *adaptiveShedder) overloadFactor() float64 {
// as.cpuThreshold must be less than cpuMax // as.cpuThreshold must be less than cpuMax
factor := (cpuMax - float64(stat.CpuUsage())) / (cpuMax - float64(as.cpuThreshold)) factor := (cpuMax - float64(stat.CpuUsage())) / (cpuMax - float64(as.cpuThreshold))
// at least accept 10% of acceptable requests even cpu is highly overloaded. // at least accept 10% of acceptable requests, even cpu is highly overloaded.
return mathx.Between(factor, overloadFactorLowerBound, 1) return mathx.Between(factor, overloadFactorLowerBound, 1)
} }
@@ -250,14 +251,14 @@ func (as *adaptiveShedder) systemOverloaded() bool {
return true return true
} }
// WithBuckets customizes the Shedder with given number of buckets. // WithBuckets customizes the Shedder with the given number of buckets.
func WithBuckets(buckets int) ShedderOption { func WithBuckets(buckets int) ShedderOption {
return func(opts *shedderOptions) { return func(opts *shedderOptions) {
opts.buckets = buckets opts.buckets = buckets
} }
} }
// WithCpuThreshold customizes the Shedder with given cpu threshold. // WithCpuThreshold customizes the Shedder with the given cpu threshold.
func WithCpuThreshold(threshold int64) ShedderOption { func WithCpuThreshold(threshold int64) ShedderOption {
return func(opts *shedderOptions) { return func(opts *shedderOptions) {
opts.cpuThreshold = threshold opts.cpuThreshold = threshold
@@ -283,6 +284,6 @@ func (p *promise) Fail() {
func (p *promise) Pass() { func (p *promise) Pass() {
rt := float64(timex.Since(p.start)) / float64(time.Millisecond) rt := float64(timex.Since(p.start)) / float64(time.Millisecond)
p.shedder.addFlying(-1) p.shedder.addFlying(-1)
p.shedder.rtCounter.Add(math.Ceil(rt)) p.shedder.rtCounter.Add(int64(math.Ceil(rt)))
p.shedder.passCounter.Add(1) p.shedder.passCounter.Add(1)
} }

View File

@@ -58,7 +58,7 @@ func TestAdaptiveShedder(t *testing.T) {
func TestAdaptiveShedderMaxPass(t *testing.T) { func TestAdaptiveShedderMaxPass(t *testing.T) {
passCounter := newRollingWindow() passCounter := newRollingWindow()
for i := 1; i <= 10; i++ { for i := 1; i <= 10; i++ {
passCounter.Add(float64(i * 100)) passCounter.Add(int64(i * 100))
time.Sleep(bucketDuration) time.Sleep(bucketDuration)
} }
shedder := &adaptiveShedder{ shedder := &adaptiveShedder{
@@ -83,7 +83,7 @@ func TestAdaptiveShedderMinRt(t *testing.T) {
time.Sleep(bucketDuration) time.Sleep(bucketDuration)
} }
for j := i*10 + 1; j <= i*10+10; j++ { for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j)) rtCounter.Add(int64(j))
} }
} }
shedder := &adaptiveShedder{ shedder := &adaptiveShedder{
@@ -107,9 +107,9 @@ func TestAdaptiveShedderMaxFlight(t *testing.T) {
if i > 0 { if i > 0 {
time.Sleep(bucketDuration) time.Sleep(bucketDuration)
} }
passCounter.Add(float64((i + 1) * 100)) passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ { for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j)) rtCounter.Add(int64(j))
} }
} }
shedder := &adaptiveShedder{ shedder := &adaptiveShedder{
@@ -129,9 +129,9 @@ func TestAdaptiveShedderShouldDrop(t *testing.T) {
if i > 0 { if i > 0 {
time.Sleep(bucketDuration) time.Sleep(bucketDuration)
} }
passCounter.Add(float64((i + 1) * 100)) passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ { for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j)) rtCounter.Add(int64(j))
} }
} }
shedder := &adaptiveShedder{ shedder := &adaptiveShedder{
@@ -184,9 +184,9 @@ func TestAdaptiveShedderStillHot(t *testing.T) {
if i > 0 { if i > 0 {
time.Sleep(bucketDuration) time.Sleep(bucketDuration)
} }
passCounter.Add(float64((i + 1) * 100)) passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ { for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j)) rtCounter.Add(int64(j))
} }
} }
shedder := &adaptiveShedder{ shedder := &adaptiveShedder{
@@ -248,9 +248,9 @@ func BenchmarkMaxFlight(b *testing.B) {
if i > 0 { if i > 0 {
time.Sleep(bucketDuration) time.Sleep(bucketDuration)
} }
passCounter.Add(float64((i + 1) * 100)) passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ { for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j)) rtCounter.Add(int64(j))
} }
} }
shedder := &adaptiveShedder{ shedder := &adaptiveShedder{
@@ -265,6 +265,8 @@ func BenchmarkMaxFlight(b *testing.B) {
} }
} }
func newRollingWindow() *collection.RollingWindow { func newRollingWindow() *collection.RollingWindow[int64, *collection.Bucket[int64]] {
return collection.NewRollingWindow(buckets, bucketDuration, collection.IgnoreCurrentBucket()) return collection.NewRollingWindow[int64, *collection.Bucket[int64]](func() *collection.Bucket[int64] {
return new(collection.Bucket[int64])
}, buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]())
} }

View File

@@ -6,7 +6,7 @@ import (
"github.com/zeromicro/go-zero/core/syncx" "github.com/zeromicro/go-zero/core/syncx"
) )
// A ShedderGroup is a manager to manage key based shedders. // A ShedderGroup is a manager to manage key-based shedders.
type ShedderGroup struct { type ShedderGroup struct {
options []ShedderOption options []ShedderOption
manager *syncx.ResourceManager manager *syncx.ResourceManager

View File

@@ -42,7 +42,7 @@ func Debugv(ctx context.Context, v interface{}) {
getLogger(ctx).Debugv(v) getLogger(ctx).Debugv(v)
} }
// Debugw writes msg along with fields into access log. // Debugw writes msg along with fields into the access log.
func Debugw(ctx context.Context, msg string, fields ...LogField) { func Debugw(ctx context.Context, msg string, fields ...LogField) {
getLogger(ctx).Debugw(msg, fields...) getLogger(ctx).Debugw(msg, fields...)
} }
@@ -63,7 +63,7 @@ func Errorv(ctx context.Context, v any) {
getLogger(ctx).Errorv(v) getLogger(ctx).Errorv(v)
} }
// Errorw writes msg along with fields into error log. // Errorw writes msg along with fields into the error log.
func Errorw(ctx context.Context, msg string, fields ...LogField) { func Errorw(ctx context.Context, msg string, fields ...LogField) {
getLogger(ctx).Errorw(msg, fields...) getLogger(ctx).Errorw(msg, fields...)
} }
@@ -88,7 +88,7 @@ func Infov(ctx context.Context, v any) {
getLogger(ctx).Infov(v) getLogger(ctx).Infov(v)
} }
// Infow writes msg along with fields into access log. // Infow writes msg along with fields into the access log.
func Infow(ctx context.Context, msg string, fields ...LogField) { func Infow(ctx context.Context, msg string, fields ...LogField) {
getLogger(ctx).Infow(msg, fields...) getLogger(ctx).Infow(msg, fields...)
} }
@@ -108,10 +108,11 @@ func SetLevel(level uint32) {
logx.SetLevel(level) logx.SetLevel(level)
} }
// SetUp sets up the logx. If already set up, just return nil. // SetUp sets up the logx.
// we allow SetUp to be called multiple times, because for example // If already set up, return nil.
// We allow SetUp to be called multiple times, because, for example,
// we need to allow different service frameworks to initialize logx respectively. // we need to allow different service frameworks to initialize logx respectively.
// the same logic for SetUp // The same logic for SetUp
func SetUp(c LogConf) error { func SetUp(c LogConf) error {
return logx.SetUp(c) return logx.SetUp(c)
} }

View File

@@ -42,4 +42,6 @@ type LogConf struct {
// daily: daily rotation. // daily: daily rotation.
// size: size limited rotation. // size: size limited rotation.
Rotation string `json:",default=daily,options=[daily,size]"` Rotation string `json:",default=daily,options=[daily,size]"`
// FileTimeFormat represents the time format for file name, default is `2006-01-02T15:04:05.000Z07:00`.
FileTimeFormat string `json:",optional"`
} }

View File

@@ -1,6 +1,6 @@
package logx package logx
// A LessLogger is a logger that control to log once during the given duration. // A LessLogger is a logger that controls to log once during the given duration.
type LessLogger struct { type LessLogger struct {
*limitedExecutor *limitedExecutor
} }

View File

@@ -7,13 +7,13 @@ import (
// A Logger represents a logger. // A Logger represents a logger.
type Logger interface { type Logger interface {
// Debug logs a message at info level. // Debug logs a message at debug level.
Debug(...any) Debug(...any)
// Debugf logs a message at info level. // Debugf logs a message at debug level.
Debugf(string, ...any) Debugf(string, ...any)
// Debugv logs a message at info level. // Debugv logs a message at debug level.
Debugv(any) Debugv(any)
// Debugw logs a message at info level. // Debugw logs a message at debug level.
Debugw(string, ...LogField) Debugw(string, ...LogField)
// Error logs a message at error level. // Error logs a message at error level.
Error(...any) Error(...any)

View File

@@ -6,6 +6,7 @@ import (
"log" "log"
"os" "os"
"path" "path"
"reflect"
"runtime/debug" "runtime/debug"
"sync" "sync"
"sync/atomic" "sync/atomic"
@@ -51,6 +52,26 @@ type (
} }
) )
// AddWriter adds a new writer.
// If there is already a writer, the new writer will be added to the writer chain.
// For example, to write logs to both file and console, if there is already a file writer,
// ```go
// logx.AddWriter(logx.NewWriter(os.Stdout))
// ```
func AddWriter(w Writer) {
ow := Reset()
if ow == nil {
SetWriter(w)
} else {
// no need to check if the existing writer is a comboWriter,
// because it is not common to add more than one writer.
// even more than one writer, the behavior is the same.
SetWriter(comboWriter{
writers: []Writer{ow, w},
})
}
}
// Alert alerts v in alert level, and the message is written to error log. // Alert alerts v in alert level, and the message is written to error log.
func Alert(v string) { func Alert(v string) {
getWriter().Alert(v) getWriter().Alert(v)
@@ -86,7 +107,7 @@ func Debugv(v any) {
} }
} }
// Debugw writes msg along with fields into access log. // Debugw writes msg along with fields into the access log.
func Debugw(msg string, fields ...LogField) { func Debugw(msg string, fields ...LogField) {
if shallLog(DebugLevel) { if shallLog(DebugLevel) {
writeDebug(msg, fields...) writeDebug(msg, fields...)
@@ -142,7 +163,7 @@ func Errorv(v any) {
} }
} }
// Errorw writes msg along with fields into error log. // Errorw writes msg along with fields into the error log.
func Errorw(msg string, fields ...LogField) { func Errorw(msg string, fields ...LogField) {
if shallLog(ErrorLevel) { if shallLog(ErrorLevel) {
writeError(msg, fields...) writeError(msg, fields...)
@@ -153,11 +174,11 @@ func Errorw(msg string, fields ...LogField) {
func Field(key string, value any) LogField { func Field(key string, value any) LogField {
switch val := value.(type) { switch val := value.(type) {
case error: case error:
return LogField{Key: key, Value: val.Error()} return LogField{Key: key, Value: encodeError(val)}
case []error: case []error:
var errs []string var errs []string
for _, err := range val { for _, err := range val {
errs = append(errs, err.Error()) errs = append(errs, encodeError(err))
} }
return LogField{Key: key, Value: errs} return LogField{Key: key, Value: errs}
case time.Duration: case time.Duration:
@@ -175,11 +196,11 @@ func Field(key string, value any) LogField {
} }
return LogField{Key: key, Value: times} return LogField{Key: key, Value: times}
case fmt.Stringer: case fmt.Stringer:
return LogField{Key: key, Value: val.String()} return LogField{Key: key, Value: encodeStringer(val)}
case []fmt.Stringer: case []fmt.Stringer:
var strs []string var strs []string
for _, str := range val { for _, str := range val {
strs = append(strs, str.String()) strs = append(strs, encodeStringer(str))
} }
return LogField{Key: key, Value: strs} return LogField{Key: key, Value: strs}
default: default:
@@ -208,7 +229,7 @@ func Infov(v any) {
} }
} }
// Infow writes msg along with fields into access log. // Infow writes msg along with fields into the access log.
func Infow(msg string, fields ...LogField) { func Infow(msg string, fields ...LogField) {
if shallLog(InfoLevel) { if shallLog(InfoLevel) {
writeInfo(msg, fields...) writeInfo(msg, fields...)
@@ -254,11 +275,12 @@ func SetWriter(w Writer) {
} }
} }
// SetUp sets up the logx. If already set up, just return nil. // SetUp sets up the logx.
// we allow SetUp to be called multiple times, because for example // If already set up, return nil.
// We allow SetUp to be called multiple times, because, for example,
// we need to allow different service frameworks to initialize logx respectively. // we need to allow different service frameworks to initialize logx respectively.
func SetUp(c LogConf) (err error) { func SetUp(c LogConf) (err error) {
// Just ignore the subsequent SetUp calls. // Ignore the later SetUp calls.
// Because multiple services in one process might call SetUp respectively. // Because multiple services in one process might call SetUp respectively.
// Need to wait for the first caller to complete the execution. // Need to wait for the first caller to complete the execution.
setupOnce.Do(func() { setupOnce.Do(func() {
@@ -272,6 +294,10 @@ func SetUp(c LogConf) (err error) {
timeFormat = c.TimeFormat timeFormat = c.TimeFormat
} }
if len(c.FileTimeFormat) > 0 {
fileTimeFormat = c.FileTimeFormat
}
atomic.StoreUint32(&maxContentLength, c.MaxContentLength) atomic.StoreUint32(&maxContentLength, c.MaxContentLength)
switch c.Encoding { switch c.Encoding {
@@ -413,6 +439,32 @@ func createOutput(path string) (io.WriteCloser, error) {
return NewLogger(path, rule, options.gzipEnabled) return NewLogger(path, rule, options.gzipEnabled)
} }
func encodeError(err error) (ret string) {
return encodeWithRecover(err, func() string {
return err.Error()
})
}
func encodeStringer(v fmt.Stringer) (ret string) {
return encodeWithRecover(v, func() string {
return v.String()
})
}
func encodeWithRecover(arg any, fn func() string) (ret string) {
defer func() {
if err := recover(); err != nil {
if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
ret = nilAngleString
} else {
ret = fmt.Sprintf("panic: %v", err)
}
}
}()
return fn()
}
func getWriter() Writer { func getWriter() Writer {
w := writer.Load() w := writer.Load()
if w == nil { if w == nil {
@@ -480,7 +532,7 @@ func writeDebug(val any, fields ...LogField) {
getWriter().Debug(val, addCaller(fields...)...) getWriter().Debug(val, addCaller(fields...)...)
} }
// writeError writes v into error log. // writeError writes v into the error log.
// Not checking shallLog here is for performance consideration. // Not checking shallLog here is for performance consideration.
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled. // If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
// The caller should check shallLog before calling this function. // The caller should check shallLog before calling this function.
@@ -520,7 +572,7 @@ func writeStack(msg string) {
getWriter().Stack(fmt.Sprintf("%s\n%s", msg, string(debug.Stack()))) getWriter().Stack(fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
} }
// writeStat writes v into stat log. // writeStat writes v into the stat log.
// Not checking shallLog here is for performance consideration. // Not checking shallLog here is for performance consideration.
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled. // If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
// The caller should check shallLog before calling this function. // The caller should check shallLog before calling this function.

View File

@@ -348,6 +348,27 @@ func TestStructedLogInfow(t *testing.T) {
}) })
} }
func TestStructedLogFieldNil(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
defer writer.Store(old)
assert.NotPanics(t, func() {
var s *string
Infow("test", Field("bb", s))
var d *nilStringer
Infow("test", Field("bb", d))
var e *nilError
Errorw("test", Field("bb", e))
})
assert.NotPanics(t, func() {
var p panicStringer
Infow("test", Field("bb", p))
var ps innerPanicStringer
Infow("test", Field("bb", ps))
})
}
func TestStructedLogInfoConsoleAny(t *testing.T) { func TestStructedLogInfoConsoleAny(t *testing.T) {
w := new(mockWriter) w := new(mockWriter)
old := writer.Swap(w) old := writer.Swap(w)
@@ -570,7 +591,7 @@ func TestErrorfWithWrappedError(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
Errorf("hello %w", errors.New(message)) Errorf("hello %s", errors.New(message))
assert.True(t, strings.Contains(w.String(), "hello there")) assert.True(t, strings.Contains(w.String(), "hello there"))
} }
@@ -658,6 +679,10 @@ func TestSetup(t *testing.T) {
func TestDisable(t *testing.T) { func TestDisable(t *testing.T) {
Disable() Disable()
defer func() {
SetLevel(InfoLevel)
atomic.StoreUint32(&encoding, jsonEncodingType)
}()
var opt logOptions var opt logOptions
WithKeepDays(1)(&opt) WithKeepDays(1)(&opt)
@@ -680,6 +705,17 @@ func TestDisableStat(t *testing.T) {
assert.Equal(t, 0, w.builder.Len()) assert.Equal(t, 0, w.builder.Len())
} }
func TestAddWriter(t *testing.T) {
const message = "hello there"
w := new(mockWriter)
AddWriter(w)
w1 := new(mockWriter)
AddWriter(w1)
Error(message)
assert.Contains(t, w.String(), message)
assert.Contains(t, w1.String(), message)
}
func TestSetWriter(t *testing.T) { func TestSetWriter(t *testing.T) {
atomic.StoreUint32(&logLevel, 0) atomic.StoreUint32(&logLevel, 0)
Reset() Reset()
@@ -814,12 +850,13 @@ func doTestStructedLogConsole(t *testing.T, w *mockWriter, write func(...any)) {
func testSetLevelTwiceWithMode(t *testing.T, mode string, w *mockWriter) { func testSetLevelTwiceWithMode(t *testing.T, mode string, w *mockWriter) {
writer.Store(nil) writer.Store(nil)
SetUp(LogConf{ SetUp(LogConf{
Mode: mode, Mode: mode,
Level: "debug", Level: "debug",
Path: "/dev/null", Path: "/dev/null",
Encoding: plainEncoding, Encoding: plainEncoding,
Stat: false, Stat: false,
TimeFormat: time.RFC3339, TimeFormat: time.RFC3339,
FileTimeFormat: time.DateTime,
}) })
SetUp(LogConf{ SetUp(LogConf{
Mode: mode, Mode: mode,
@@ -859,3 +896,36 @@ func validateFields(t *testing.T, content string, fields map[string]any) {
} }
} }
} }
type nilError struct {
Name string
}
func (e *nilError) Error() string {
return e.Name
}
type nilStringer struct {
Name string
}
func (s *nilStringer) String() string {
return s.Name
}
type innerPanicStringer struct {
Inner *struct {
Name string
}
}
func (s innerPanicStringer) String() string {
return s.Inner.Name
}
type panicStringer struct {
}
func (s panicStringer) String() string {
panic("panic")
}

View File

@@ -141,23 +141,43 @@ func (l *richLogger) WithCallerSkip(skip int) Logger {
return l return l
} }
l.callerSkip = skip return &richLogger{
return l ctx: l.ctx,
callerSkip: skip,
fields: l.fields,
}
} }
func (l *richLogger) WithContext(ctx context.Context) Logger { func (l *richLogger) WithContext(ctx context.Context) Logger {
l.ctx = ctx return &richLogger{
return l ctx: ctx,
callerSkip: l.callerSkip,
fields: l.fields,
}
} }
func (l *richLogger) WithDuration(duration time.Duration) Logger { func (l *richLogger) WithDuration(duration time.Duration) Logger {
l.fields = append(l.fields, Field(durationKey, timex.ReprOfDuration(duration))) fields := append(l.fields, Field(durationKey, timex.ReprOfDuration(duration)))
return l
return &richLogger{
ctx: l.ctx,
callerSkip: l.callerSkip,
fields: fields,
}
} }
func (l *richLogger) WithFields(fields ...LogField) Logger { func (l *richLogger) WithFields(fields ...LogField) Logger {
l.fields = append(l.fields, fields...) if len(fields) == 0 {
return l return l
}
f := append(l.fields, fields...)
return &richLogger{
ctx: l.ctx,
callerSkip: l.callerSkip,
fields: f,
}
} }
func (l *richLogger) buildFields(fields ...LogField) []LogField { func (l *richLogger) buildFields(fields ...LogField) []LogField {

View File

@@ -287,6 +287,54 @@ func TestLogWithCallerSkip(t *testing.T) {
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1))) assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
} }
func TestLogWithCallerSkipCopy(t *testing.T) {
log1 := WithCallerSkip(2)
log2 := log1.WithCallerSkip(3)
log3 := log2.WithCallerSkip(-1)
assert.Equal(t, 2, log1.(*richLogger).callerSkip)
assert.Equal(t, 3, log2.(*richLogger).callerSkip)
assert.Equal(t, 3, log3.(*richLogger).callerSkip)
}
func TestLogWithContextCopy(t *testing.T) {
c1 := context.Background()
c2 := context.WithValue(context.Background(), "foo", "bar")
log1 := WithContext(c1)
log2 := log1.WithContext(c2)
assert.Equal(t, c1, log1.(*richLogger).ctx)
assert.Equal(t, c2, log2.(*richLogger).ctx)
}
func TestLogWithDurationCopy(t *testing.T) {
log1 := WithContext(context.Background())
log2 := log1.WithDuration(time.Second)
assert.Empty(t, log1.(*richLogger).fields)
assert.Equal(t, 1, len(log2.(*richLogger).fields))
var w mockWriter
old := writer.Swap(&w)
defer writer.Store(old)
log2.Info("hello")
assert.Contains(t, w.String(), `"duration":"1000.0ms"`)
}
func TestLogWithFieldsCopy(t *testing.T) {
log1 := WithContext(context.Background())
log2 := log1.WithFields(Field("foo", "bar"))
log3 := log1.WithFields()
assert.Empty(t, log1.(*richLogger).fields)
assert.Equal(t, 1, len(log2.(*richLogger).fields))
assert.Equal(t, log1, log3)
assert.Empty(t, log3.(*richLogger).fields)
var w mockWriter
old := writer.Swap(&w)
defer writer.Store(old)
log2.Info("hello")
assert.Contains(t, w.String(), `"foo":"bar"`)
}
func TestLoggerWithFields(t *testing.T) { func TestLoggerWithFields(t *testing.T) {
w := new(mockWriter) w := new(mockWriter)
old := writer.Swap(w) old := writer.Swap(w)

View File

@@ -19,7 +19,6 @@ import (
const ( const (
dateFormat = "2006-01-02" dateFormat = "2006-01-02"
fileTimeFormat = time.RFC3339
hoursPerDay = 24 hoursPerDay = 24
bufferSize = 100 bufferSize = 100
defaultDirMode = 0o755 defaultDirMode = 0o755
@@ -28,8 +27,12 @@ const (
megaBytes = 1 << 20 megaBytes = 1 << 20
) )
// ErrLogFileClosed is an error that indicates the log file is already closed. var (
var ErrLogFileClosed = errors.New("error: log file closed") // ErrLogFileClosed is an error that indicates the log file is already closed.
ErrLogFileClosed = errors.New("error: log file closed")
fileTimeFormat = time.RFC3339
)
type ( type (
// A RotateRule interface is used to define the log rotating rules. // A RotateRule interface is used to define the log rotating rules.
@@ -319,7 +322,7 @@ func (l *RotateLogger) maybeCompressFile(file string) {
}() }()
if _, err := os.Stat(file); err != nil { if _, err := os.Stat(file); err != nil {
// file not exists or other error, ignore compression // file doesn't exist or another error, ignore compression
return return
} }

View File

@@ -48,6 +48,7 @@ const (
levelDebug = "debug" levelDebug = "debug"
backupFileDelimiter = "-" backupFileDelimiter = "-"
nilAngleString = "<nil>"
flags = 0x0 flags = 0x0
) )

View File

@@ -13,6 +13,7 @@ import (
fatihcolor "github.com/fatih/color" fatihcolor "github.com/fatih/color"
"github.com/zeromicro/go-zero/core/color" "github.com/zeromicro/go-zero/core/color"
"github.com/zeromicro/go-zero/core/errorx"
) )
type ( type (
@@ -33,6 +34,10 @@ type (
lock sync.RWMutex lock sync.RWMutex
} }
comboWriter struct {
writers []Writer
}
concreteWriter struct { concreteWriter struct {
infoLog io.WriteCloser infoLog io.WriteCloser
errorLog io.WriteCloser errorLog io.WriteCloser
@@ -88,6 +93,62 @@ func (w *atomicWriter) Swap(v Writer) Writer {
return old return old
} }
func (c comboWriter) Alert(v any) {
for _, w := range c.writers {
w.Alert(v)
}
}
func (c comboWriter) Close() error {
var be errorx.BatchError
for _, w := range c.writers {
be.Add(w.Close())
}
return be.Err()
}
func (c comboWriter) Debug(v any, fields ...LogField) {
for _, w := range c.writers {
w.Debug(v, fields...)
}
}
func (c comboWriter) Error(v any, fields ...LogField) {
for _, w := range c.writers {
w.Error(v, fields...)
}
}
func (c comboWriter) Info(v any, fields ...LogField) {
for _, w := range c.writers {
w.Info(v, fields...)
}
}
func (c comboWriter) Severe(v any) {
for _, w := range c.writers {
w.Severe(v)
}
}
func (c comboWriter) Slow(v any, fields ...LogField) {
for _, w := range c.writers {
w.Slow(v, fields...)
}
}
func (c comboWriter) Stack(v any) {
for _, w := range c.writers {
w.Stack(v)
}
}
func (c comboWriter) Stat(v any, fields ...LogField) {
for _, w := range c.writers {
w.Stat(v, fields...)
}
}
func newConsoleWriter() Writer { func newConsoleWriter() Writer {
outLog := newLogWriter(log.New(fatihcolor.Output, "", flags)) outLog := newLogWriter(log.New(fatihcolor.Output, "", flags))
errLog := newLogWriter(log.New(fatihcolor.Error, "", flags)) errLog := newLogWriter(log.New(fatihcolor.Error, "", flags))
@@ -254,11 +315,10 @@ func (n nopWriter) Stack(_ any) {
func (n nopWriter) Stat(_ any, _ ...LogField) { func (n nopWriter) Stat(_ any, _ ...LogField) {
} }
func buildPlainFields(fields ...LogField) []string { func buildPlainFields(fields logEntry) []string {
var items []string items := make([]string, 0, len(fields))
for k, v := range fields {
for _, field := range fields { items = append(items, fmt.Sprintf("%s=%+v", k, v))
items = append(items, fmt.Sprintf("%s=%+v", field.Key, field.Value))
} }
return items return items
@@ -278,6 +338,20 @@ func combineGlobalFields(fields []LogField) []LogField {
return ret return ret
} }
// marshalJson marshals t into JSON without escaping HTML characters
// (unlike json.Marshal, which escapes <, > and &). The trailing newline
// that json.Encoder.Encode appends is stripped so the output matches
// the shape callers of json.Marshal expect.
//
// Uses `any` for consistency with the rest of the file (alias of
// interface{}, so the signature is unchanged for callers).
func marshalJson(t any) ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	encoder.SetEscapeHTML(false)
	err := encoder.Encode(t)
	// go 1.5+ will append a newline to the end of the json string
	// https://github.com/golang/go/issues/13520
	if l := buf.Len(); l > 0 && buf.Bytes()[l-1] == '\n' {
		buf.Truncate(l - 1)
	}
	return buf.Bytes(), err
}
func output(writer io.Writer, level string, val any, fields ...LogField) { func output(writer io.Writer, level string, val any, fields ...LogField) {
// only truncate string content, don't know how to truncate the values of other types. // only truncate string content, don't know how to truncate the values of other types.
if v, ok := val.(string); ok { if v, ok := val.(string); ok {
@@ -289,15 +363,17 @@ func output(writer io.Writer, level string, val any, fields ...LogField) {
} }
fields = combineGlobalFields(fields) fields = combineGlobalFields(fields)
// +3 for timestamp, level and content
entry := make(logEntry, len(fields)+3)
for _, field := range fields {
entry[field.Key] = field.Value
}
switch atomic.LoadUint32(&encoding) { switch atomic.LoadUint32(&encoding) {
case plainEncodingType: case plainEncodingType:
writePlainAny(writer, level, val, buildPlainFields(fields...)...) plainFields := buildPlainFields(entry)
writePlainAny(writer, level, val, plainFields...)
default: default:
entry := make(logEntry)
for _, field := range fields {
entry[field.Key] = field.Value
}
entry[timestampKey] = getTimestamp() entry[timestampKey] = getTimestamp()
entry[levelKey] = level entry[levelKey] = level
entry[contentKey] = val entry[contentKey] = val
@@ -332,7 +408,7 @@ func wrapLevelWithColor(level string) string {
} }
func writeJson(writer io.Writer, info any) { func writeJson(writer io.Writer, info any) {
if content, err := json.Marshal(info); err != nil { if content, err := marshalJson(info); err != nil {
log.Printf("err: %s\n\n%s", err.Error(), debug.Stack()) log.Printf("err: %s\n\n%s", err.Error(), debug.Stack())
} else if writer == nil { } else if writer == nil {
log.Println(string(content)) log.Println(string(content))

View File

@@ -9,6 +9,7 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
) )
func TestNewWriter(t *testing.T) { func TestNewWriter(t *testing.T) {
@@ -189,6 +190,41 @@ func TestWritePlainAny(t *testing.T) {
assert.Contains(t, buf.String(), "runtime/debug.Stack") assert.Contains(t, buf.String(), "runtime/debug.Stack")
} }
// TestWritePlainDuplicate verifies that in plain encoding, a later field
// with the same key overrides an earlier one instead of being duplicated.
func TestWritePlainDuplicate(t *testing.T) {
	old := atomic.SwapUint32(&encoding, plainEncodingType)
	t.Cleanup(func() {
		atomic.StoreUint32(&encoding, old)
	})

	var buf bytes.Buffer
	output(&buf, levelInfo, "foo",
		LogField{Key: "first", Value: "a"},
		LogField{Key: "first", Value: "b"})
	got := buf.String()
	assert.Contains(t, got, "foo")
	assert.NotContains(t, got, "first=a")
	assert.Contains(t, got, "first=b")

	buf.Reset()
	output(&buf, levelInfo, "foo",
		LogField{Key: "first", Value: "a"},
		LogField{Key: "first", Value: "b"},
		LogField{Key: "second", Value: "c"})
	got = buf.String()
	assert.Contains(t, got, "foo")
	assert.NotContains(t, got, "first=a")
	assert.Contains(t, got, "first=b")
	assert.Contains(t, got, "second=c")
}
func TestLogWithLimitContentLength(t *testing.T) { func TestLogWithLimitContentLength(t *testing.T) {
maxLen := atomic.LoadUint32(&maxContentLength) maxLen := atomic.LoadUint32(&maxContentLength)
atomic.StoreUint32(&maxContentLength, 10) atomic.StoreUint32(&maxContentLength, 10)
@@ -219,6 +255,117 @@ func TestLogWithLimitContentLength(t *testing.T) {
}) })
} }
// TestComboWriter verifies that comboWriter fans every Writer method out
// to each of its underlying writers, using testify mocks to record and
// assert the calls. Note: the subtests share the same mock writers, so
// each subtest registers fresh Once-expectations before invoking the
// combo method.
func TestComboWriter(t *testing.T) {
	var mockWriters []Writer
	for i := 0; i < 3; i++ {
		mockWriters = append(mockWriters, new(tracedWriter))
	}

	cw := comboWriter{
		writers: mockWriters,
	}

	t.Run("Alert", func(t *testing.T) {
		// expect exactly one Alert call per underlying writer
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Alert", "test alert").Once()
		}
		cw.Alert("test alert")
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Alert", "test alert")
		}
	})

	t.Run("Close", func(t *testing.T) {
		// one writer fails on Close; the combined error must be non-nil,
		// and Close must still be invoked on every writer.
		for i := range cw.writers {
			if i == 1 {
				cw.writers[i].(*tracedWriter).On("Close").Return(errors.New("error")).Once()
			} else {
				cw.writers[i].(*tracedWriter).On("Close").Return(nil).Once()
			}
		}
		err := cw.Close()
		assert.Error(t, err)
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Close")
		}
	})

	t.Run("Debug", func(t *testing.T) {
		// variadic fields arrive at the mock as a single []LogField argument
		fields := []LogField{{Key: "key", Value: "value"}}
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Debug", "test debug", fields).Once()
		}
		cw.Debug("test debug", fields...)
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Debug", "test debug", fields)
		}
	})

	t.Run("Error", func(t *testing.T) {
		fields := []LogField{{Key: "key", Value: "value"}}
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Error", "test error", fields).Once()
		}
		cw.Error("test error", fields...)
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Error", "test error", fields)
		}
	})

	t.Run("Info", func(t *testing.T) {
		fields := []LogField{{Key: "key", Value: "value"}}
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Info", "test info", fields).Once()
		}
		cw.Info("test info", fields...)
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Info", "test info", fields)
		}
	})

	t.Run("Severe", func(t *testing.T) {
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Severe", "test severe").Once()
		}
		cw.Severe("test severe")
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Severe", "test severe")
		}
	})

	t.Run("Slow", func(t *testing.T) {
		fields := []LogField{{Key: "key", Value: "value"}}
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Slow", "test slow", fields).Once()
		}
		cw.Slow("test slow", fields...)
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Slow", "test slow", fields)
		}
	})

	t.Run("Stack", func(t *testing.T) {
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Stack", "test stack").Once()
		}
		cw.Stack("test stack")
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Stack", "test stack")
		}
	})

	t.Run("Stat", func(t *testing.T) {
		fields := []LogField{{Key: "key", Value: "value"}}
		for _, mw := range cw.writers {
			mw.(*tracedWriter).On("Stat", "test stat", fields).Once()
		}
		cw.Stat("test stat", fields...)
		for _, mw := range cw.writers {
			mw.(*tracedWriter).AssertCalled(t, "Stat", "test stat", fields)
		}
	})
}
type mockedEntry struct { type mockedEntry struct {
Level string `json:"level"` Level string `json:"level"`
Content string `json:"content"` Content string `json:"content"`
@@ -250,3 +397,44 @@ type hardToWriteWriter struct{}
func (h hardToWriteWriter) Write(_ []byte) (_ int, _ error) { func (h hardToWriteWriter) Write(_ []byte) (_ int, _ error) {
return 0, errors.New("write error") return 0, errors.New("write error")
} }
// tracedWriter is a testify-mock implementation of Writer that records
// every call so tests can assert fan-out behavior. The wrapper methods
// stay as thin one-line delegations because mock.Mock.Called resolves the
// expectation by inspecting the caller's method name on the stack.
type tracedWriter struct {
	mock.Mock
}

func (w *tracedWriter) Alert(v any) {
	w.Called(v)
}

func (w *tracedWriter) Close() error {
	args := w.Called()
	return args.Error(0)
}

// Variadic fields are forwarded to Called as one []LogField argument,
// so expectations must be registered with the slice, not spread values.
func (w *tracedWriter) Debug(v any, fields ...LogField) {
	w.Called(v, fields)
}

func (w *tracedWriter) Error(v any, fields ...LogField) {
	w.Called(v, fields)
}

func (w *tracedWriter) Info(v any, fields ...LogField) {
	w.Called(v, fields)
}

func (w *tracedWriter) Severe(v any) {
	w.Called(v)
}

func (w *tracedWriter) Slow(v any, fields ...LogField) {
	w.Called(v, fields)
}

func (w *tracedWriter) Stack(v any) {
	w.Called(v)
}

func (w *tracedWriter) Stat(v any, fields ...LogField) {
	w.Called(v, fields)
}

View File

@@ -12,7 +12,7 @@ const (
) )
// Marshal marshals the given val and returns the map that contains the fields. // Marshal marshals the given val and returns the map that contains the fields.
// optional=another is not implemented, and it's hard to implement and not common used. // optional=another is not implemented, and it's hard to implement and not commonly used.
func Marshal(val any) (map[string]map[string]any, error) { func Marshal(val any) (map[string]map[string]any, error) {
ret := make(map[string]map[string]any) ret := make(map[string]map[string]any)
tp := reflect.TypeOf(val) tp := reflect.TypeOf(val)

View File

@@ -39,7 +39,7 @@ var (
) )
type ( type (
// Unmarshaler is used to unmarshal with given tag key. // Unmarshaler is used to unmarshal with the given tag key.
Unmarshaler struct { Unmarshaler struct {
key string key string
opts unmarshalOptions opts unmarshalOptions
@@ -50,6 +50,7 @@ type (
unmarshalOptions struct { unmarshalOptions struct {
fillDefault bool fillDefault bool
fromArray bool
fromString bool fromString bool
opaqueKeys bool opaqueKeys bool
canonicalKey func(key string) string canonicalKey func(key string) string
@@ -69,7 +70,7 @@ func NewUnmarshaler(key string, opts ...UnmarshalOption) *Unmarshaler {
return &unmarshaler return &unmarshaler
} }
// UnmarshalKey unmarshals m into v with tag key. // UnmarshalKey unmarshals m into v with the tag key.
func UnmarshalKey(m map[string]any, v any) error { func UnmarshalKey(m map[string]any, v any) error {
return keyUnmarshaler.Unmarshal(m, v) return keyUnmarshaler.Unmarshal(m, v)
} }
@@ -113,7 +114,8 @@ func (u *Unmarshaler) unmarshalValuer(m Valuer, v any, fullName string) error {
return u.unmarshalWithFullName(simpleValuer{current: m}, v, fullName) return u.unmarshalWithFullName(simpleValuer{current: m}, v, fullName)
} }
func (u *Unmarshaler) fillMap(fieldType reflect.Type, value reflect.Value, mapValue any, fullName string) error { func (u *Unmarshaler) fillMap(fieldType reflect.Type, value reflect.Value,
mapValue any, fullName string) error {
if !value.CanSet() { if !value.CanSet() {
return errValueNotSettable return errValueNotSettable
} }
@@ -154,7 +156,8 @@ func (u *Unmarshaler) fillMapFromString(value reflect.Value, mapValue any) error
return nil return nil
} }
func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, mapValue any, fullName string) error { func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value,
mapValue any, fullName string) error {
if !value.CanSet() { if !value.CanSet() {
return errValueNotSettable return errValueNotSettable
} }
@@ -307,7 +310,34 @@ func (u *Unmarshaler) fillSliceWithDefault(derefedType reflect.Type, value refle
return u.fillSlice(derefedType, value, slice, fullName) return u.fillSlice(derefedType, value, slice, fullName)
} }
func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue any, fullName string) (reflect.Value, error) { func (u *Unmarshaler) fillUnmarshalerStruct(fieldType reflect.Type,
value reflect.Value, targetValue string) error {
if !value.CanSet() {
return errValueNotSettable
}
baseType := Deref(fieldType)
target := reflect.New(baseType)
switch u.key {
case jsonTagKey:
unmarshaler, ok := target.Interface().(json.Unmarshaler)
if !ok {
return errUnsupportedType
}
if err := unmarshaler.UnmarshalJSON([]byte(targetValue)); err != nil {
return err
}
default:
return errUnsupportedType
}
value.Set(target)
return nil
}
func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue any,
fullName string) (reflect.Value, error) {
mapType := reflect.MapOf(keyType, elemType) mapType := reflect.MapOf(keyType, elemType)
valueType := reflect.TypeOf(mapValue) valueType := reflect.TypeOf(mapValue)
if mapType == valueType { if mapType == valueType {
@@ -399,6 +429,15 @@ func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue any,
return targetValue, nil return targetValue, nil
} }
// implementsUnmarshaler reports whether t satisfies the unmarshaler
// interface that corresponds to the configured tag key. Only the json
// tag key has a matching interface (json.Unmarshaler); any other key
// yields false.
func (u *Unmarshaler) implementsUnmarshaler(t reflect.Type) bool {
	if u.key != jsonTagKey {
		return false
	}

	return t.Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem())
}
func (u *Unmarshaler) parseOptionsWithContext(field reflect.StructField, m Valuer, fullName string) ( func (u *Unmarshaler) parseOptionsWithContext(field reflect.StructField, m Valuer, fullName string) (
string, *fieldOptionsWithContext, error) { string, *fieldOptionsWithContext, error) {
key, options, err := parseKeyAndOptions(u.key, field) key, options, err := parseKeyAndOptions(u.key, field)
@@ -576,6 +615,8 @@ func (u *Unmarshaler) processFieldNotFromString(fieldType reflect.Type, value re
return u.fillSliceFromString(fieldType, value, mapValue, fullName) return u.fillSliceFromString(fieldType, value, mapValue, fullName)
case valueKind == reflect.String && derefedFieldType == durationType: case valueKind == reflect.String && derefedFieldType == durationType:
return fillDurationValue(fieldType, value, mapValue.(string)) return fillDurationValue(fieldType, value, mapValue.(string))
case valueKind == reflect.String && typeKind == reflect.Struct && u.implementsUnmarshaler(fieldType):
return u.fillUnmarshalerStruct(fieldType, value, mapValue.(string))
default: default:
return u.processFieldPrimitive(fieldType, value, mapValue, opts, fullName) return u.processFieldPrimitive(fieldType, value, mapValue, opts, fullName)
} }
@@ -629,7 +670,7 @@ func (u *Unmarshaler) processFieldPrimitiveWithJSONNumber(fieldType reflect.Type
return err return err
} }
// if value is a pointer, we need to check overflow with the pointer's value. // if the value is a pointer, we need to check overflow with the pointer's value.
derefedValue := value derefedValue := value
for derefedValue.Type().Kind() == reflect.Ptr { for derefedValue.Type().Kind() == reflect.Ptr {
derefedValue = derefedValue.Elem() derefedValue = derefedValue.Elem()
@@ -771,6 +812,19 @@ func (u *Unmarshaler) processNamedField(field reflect.StructField, value reflect
return u.processNamedFieldWithoutValue(field.Type, value, opts, fullName) return u.processNamedFieldWithoutValue(field.Type, value, opts, fullName)
} }
if u.opts.fromArray {
fieldKind := field.Type.Kind()
if fieldKind != reflect.Slice && fieldKind != reflect.Array {
valueKind := reflect.TypeOf(mapValue).Kind()
if valueKind == reflect.Slice || valueKind == reflect.Array {
val := reflect.ValueOf(mapValue)
if val.Len() > 0 {
mapValue = val.Index(0).Interface()
}
}
}
}
return u.processNamedFieldWithValue(field.Type, value, valueWithParent{ return u.processNamedFieldWithValue(field.Type, value, valueWithParent{
value: mapValue, value: mapValue,
parent: valuer, parent: valuer,
@@ -950,6 +1004,16 @@ func WithDefault() UnmarshalOption {
} }
} }
// WithFromArray customizes an Unmarshaler to convert array values into
// non-array fields. For example, with a value of [hello], the target
// field may be declared as `string` instead of `[]string`.
// Typically, this option is used when unmarshaling from form values.
func WithFromArray() UnmarshalOption {
	return func(o *unmarshalOptions) {
		o.fromArray = true
	}
}
// WithOpaqueKeys customizes an Unmarshaler with opaque keys. // WithOpaqueKeys customizes an Unmarshaler with opaque keys.
// Opaque keys are keys that are not processed by the unmarshaler. // Opaque keys are keys that are not processed by the unmarshaler.
func WithOpaqueKeys() UnmarshalOption { func WithOpaqueKeys() UnmarshalOption {

View File

@@ -2,6 +2,7 @@ package mapping
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"strconv" "strconv"
@@ -260,6 +261,7 @@ func TestUnmarshalInt(t *testing.T) {
Int64FromStr int64 `key:"int64str,string"` Int64FromStr int64 `key:"int64str,string"`
DefaultInt int64 `key:"defaultint,default=11"` DefaultInt int64 `key:"defaultint,default=11"`
Optional int `key:"optional,optional"` Optional int `key:"optional,optional"`
IntOptDef int `key:"intopt,optional,default=6"`
} }
m := map[string]any{ m := map[string]any{
"int": 1, "int": 1,
@@ -288,6 +290,7 @@ func TestUnmarshalInt(t *testing.T) {
ast.Equal(int64(9), in.Int64) ast.Equal(int64(9), in.Int64)
ast.Equal(int64(10), in.Int64FromStr) ast.Equal(int64(10), in.Int64FromStr)
ast.Equal(int64(11), in.DefaultInt) ast.Equal(int64(11), in.DefaultInt)
ast.Equal(6, in.IntOptDef)
} }
} }
@@ -5636,6 +5639,62 @@ func TestUnmarshalFromStringSliceForTypeMismatch(t *testing.T) {
}, &v)) }, &v))
} }
// TestUnmarshalWithFromArray covers the WithFromArray option: slice
// targets keep all elements, scalar targets take the first element of a
// single-element array, and missing values still honor the optional tag.
func TestUnmarshalWithFromArray(t *testing.T) {
	t.Run("array", func(t *testing.T) {
		// slice field: all elements are kept as-is
		var v struct {
			Value []string `key:"value"`
		}
		unmarshaler := NewUnmarshaler("key", WithFromArray())
		if assert.NoError(t, unmarshaler.Unmarshal(map[string]any{
			"value": []string{"foo", "bar"},
		}, &v)) {
			assert.ElementsMatch(t, []string{"foo", "bar"}, v.Value)
		}
	})

	t.Run("not array", func(t *testing.T) {
		// scalar field: the single array element is unwrapped
		var v struct {
			Value string `key:"value"`
		}
		unmarshaler := NewUnmarshaler("key", WithFromArray())
		if assert.NoError(t, unmarshaler.Unmarshal(map[string]any{
			"value": []string{"foo"},
		}, &v)) {
			assert.Equal(t, "foo", v.Value)
		}
	})

	t.Run("not array and empty", func(t *testing.T) {
		// empty string element unwraps to an empty scalar, not an error
		var v struct {
			Value string `key:"value"`
		}
		unmarshaler := NewUnmarshaler("key", WithFromArray())
		if assert.NoError(t, unmarshaler.Unmarshal(map[string]any{
			"value": []string{""},
		}, &v)) {
			assert.Empty(t, v.Value)
		}
	})

	t.Run("not array and no value", func(t *testing.T) {
		// missing required value still fails
		var v struct {
			Value string `key:"value"`
		}
		unmarshaler := NewUnmarshaler("key", WithFromArray())
		assert.Error(t, unmarshaler.Unmarshal(map[string]any{}, &v))
	})

	t.Run("not array and no value and optional", func(t *testing.T) {
		// optional tag keeps missing values non-fatal
		var v struct {
			Value string `key:"value,optional"`
		}
		unmarshaler := NewUnmarshaler("key", WithFromArray())
		if assert.NoError(t, unmarshaler.Unmarshal(map[string]any{}, &v)) {
			assert.Empty(t, v.Value)
		}
	})
}
func TestUnmarshalWithOpaqueKeys(t *testing.T) { func TestUnmarshalWithOpaqueKeys(t *testing.T) {
var v struct { var v struct {
Opaque string `key:"opaque.key"` Opaque string `key:"opaque.key"`
@@ -5760,6 +5819,49 @@ func TestUnmarshalWithIgnoreFields(t *testing.T) {
} }
} }
// TestUnmarshal_Unmarshaler covers unmarshaling into fields whose types
// implement json.Unmarshaler: success and failure paths under the json
// tag key, plus the behavior under a non-json tag key, where the
// interface is not consulted.
func TestUnmarshal_Unmarshaler(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		v := struct {
			Foo *mockUnmarshaler `json:"name"`
		}{}
		body := `{"name": "hello"}`
		assert.NoError(t, UnmarshalJsonBytes([]byte(body), &v))
		assert.Equal(t, "hello", v.Foo.Name)
	})

	t.Run("failure", func(t *testing.T) {
		// the custom UnmarshalJSON always errors; the error must propagate
		v := struct {
			Foo *mockUnmarshalerWithError `json:"name"`
		}{}
		body := `{"name": "hello"}`
		assert.Error(t, UnmarshalJsonBytes([]byte(body), &v))
	})

	t.Run("not json unmarshaler", func(t *testing.T) {
		// a struct field that does not implement json.Unmarshaler cannot
		// take a plain string value
		v := struct {
			Foo *struct {
				Name string
			} `key:"name"`
		}{}
		u := NewUnmarshaler(defaultKeyName)
		assert.Error(t, u.Unmarshal(map[string]any{
			"name": "hello",
		}, &v))
	})

	t.Run("not with json key", func(t *testing.T) {
		v := struct {
			Foo *mockUnmarshaler `json:"name"`
		}{}
		u := NewUnmarshaler(defaultKeyName)
		// with different key, ignore
		assert.NoError(t, u.Unmarshal(map[string]any{
			"name": "hello",
		}, &v))
		assert.Nil(t, v.Foo)
	})
}
func BenchmarkDefaultValue(b *testing.B) { func BenchmarkDefaultValue(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
var a struct { var a struct {
@@ -5866,10 +5968,27 @@ type mockValuerWithParent struct {
ok bool ok bool
} }
func (m mockValuerWithParent) Value(key string) (any, bool) { func (m mockValuerWithParent) Value(_ string) (any, bool) {
return m.value, m.ok return m.value, m.ok
} }
func (m mockValuerWithParent) Parent() valuerWithParent { func (m mockValuerWithParent) Parent() valuerWithParent {
return m.parent return m.parent
} }
// mockUnmarshaler is a test double implementing json.Unmarshaler that
// stores the raw payload bytes it receives into Name.
type mockUnmarshaler struct {
	Name string
}

// UnmarshalJSON records the raw input as the Name and never fails.
func (m *mockUnmarshaler) UnmarshalJSON(data []byte) error {
	m.Name = string(data)
	return nil
}
// mockUnmarshalerWithError is a test double implementing json.Unmarshaler
// that always fails, used to exercise error propagation paths.
type mockUnmarshalerWithError struct {
	Name string
}

// UnmarshalJSON always returns an error; the input is intentionally
// ignored, so the parameter is named _ per the file's convention for
// unused parameters.
func (m *mockUnmarshalerWithError) UnmarshalJSON(_ []byte) error {
	return errors.New("foo")
}

View File

@@ -416,7 +416,7 @@ func parseOption(fieldOpts *fieldOptions, fieldName, option string) error {
} }
// parseOptions parses the given options in tag. // parseOptions parses the given options in tag.
// for example: `json:"name,options=foo|bar"` or `json:"name,options=[foo,bar]"` // for example, `json:"name,options=foo|bar"` or `json:"name,options=[foo,bar]"`
func parseOptions(val string) []string { func parseOptions(val string) []string {
if len(val) == 0 { if len(val) == 0 {
return nil return nil

View File

@@ -26,9 +26,9 @@ type (
parent valuerWithParent parent valuerWithParent
} }
// mapValuer is a type for map to meet the Valuer interface. // mapValuer is a type for the map to meet the Valuer interface.
mapValuer map[string]any mapValuer map[string]any
// simpleValuer is a type to get value from current node. // simpleValuer is a type to get value from the current node.
simpleValuer node simpleValuer node
// recursiveValuer is a type to get the value recursively from current and parent nodes. // recursiveValuer is a type to get the value recursively from current and parent nodes.
recursiveValuer node recursiveValuer node

View File

@@ -1,13 +1,13 @@
package mathx package mathx
type numerical interface { type Numerical interface {
~int | ~int8 | ~int16 | ~int32 | ~int64 | ~int | ~int8 | ~int16 | ~int32 | ~int64 |
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 |
~float32 | ~float64 ~float32 | ~float64
} }
// AtLeast returns the greater of x or lower. // AtLeast returns the greater of x or lower.
func AtLeast[T numerical](x, lower T) T { func AtLeast[T Numerical](x, lower T) T {
if x < lower { if x < lower {
return lower return lower
} }
@@ -15,7 +15,7 @@ func AtLeast[T numerical](x, lower T) T {
} }
// AtMost returns the smaller of x or upper. // AtMost returns the smaller of x or upper.
func AtMost[T numerical](x, upper T) T { func AtMost[T Numerical](x, upper T) T {
if x > upper { if x > upper {
return upper return upper
} }
@@ -23,7 +23,7 @@ func AtMost[T numerical](x, upper T) T {
} }
// Between returns the value of x clamped to the range [lower, upper]. // Between returns the value of x clamped to the range [lower, upper].
func Between[T numerical](x, lower, upper T) T { func Between[T Numerical](x, lower, upper T) T {
if x < lower { if x < lower {
return lower return lower
} }

View File

@@ -363,9 +363,7 @@ func newGuardedWriter[T any](ctx context.Context, channel chan<- T, done <-chan
func (gw guardedWriter[T]) Write(v T) { func (gw guardedWriter[T]) Write(v T) {
select { select {
case <-gw.ctx.Done(): case <-gw.ctx.Done():
return
case <-gw.done: case <-gw.done:
return
default: default:
gw.channel <- v gw.channel <- v
} }

View File

@@ -36,6 +36,6 @@ type fakeCreator struct {
err error err error
} }
func (fc fakeCreator) Create(name string) (file *os.File, err error) { func (fc fakeCreator) Create(_ string) (file *os.File, err error) {
return fc.file, fc.err return fc.file, fc.err
} }

View File

@@ -76,7 +76,7 @@ func (q *Queue) AddListener(listener Listener) {
q.listeners = append(q.listeners, listener) q.listeners = append(q.listeners, listener)
} }
// Broadcast broadcasts message to all event channels. // Broadcast broadcasts the message to all event channels.
func (q *Queue) Broadcast(message any) { func (q *Queue) Broadcast(message any) {
go func() { go func() {
q.eventLock.Lock() q.eventLock.Lock()
@@ -202,7 +202,7 @@ func (q *Queue) produce() {
} }
func (q *Queue) produceOne(producer Producer) (string, bool) { func (q *Queue) produceOne(producer Producer) (string, bool) {
// avoid panic quit the producer, just log it and continue // avoid panic quit the producer, log it and continue
defer rescue.Recover() defer rescue.Recover()
return producer.Produce() return producer.Produce()

View File

@@ -67,7 +67,7 @@ func (p *mockedPusher) Name() string {
return p.name return p.name
} }
func (p *mockedPusher) Push(s string) error { func (p *mockedPusher) Push(_ string) error {
if proba.TrueOnProba(failProba) { if proba.TrueOnProba(failProba) {
return errors.New("dummy") return errors.New("dummy")
} }

View File

@@ -71,6 +71,6 @@ func (m *mockedWriter) Write(report *StatReport) error {
type badWriter struct{} type badWriter struct{}
func (b *badWriter) Write(report *StatReport) error { func (b *badWriter) Write(_ *StatReport) error {
return errors.New("bad") return errors.New("bad")
} }

View File

@@ -1,6 +1,7 @@
package stat package stat
import ( import (
"errors"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -28,3 +29,14 @@ func TestRemoteWriterFail(t *testing.T) {
}) })
assert.NotNil(t, err) assert.NotNil(t, err)
} }
// TestRemoteWriterError verifies that a transport-level failure from the
// remote endpoint surfaces as an error from RemoteWriter.Write.
func TestRemoteWriterError(t *testing.T) {
	defer gock.Off()
	gock.New("http://foo.com").ReplyError(errors.New("foo"))

	w := NewRemoteWriter("http://foo.com")
	assert.NotNil(t, w.Write(&StatReport{Name: "bar"}))
}

View File

@@ -2,7 +2,7 @@ package stat
import "time" import "time"
// A Task is a task that is reported to Metrics. // A Task is a task reported to Metrics.
type Task struct { type Task struct {
Drop bool Drop bool
Duration time.Duration Duration time.Duration

View File

@@ -41,7 +41,7 @@ func RawFieldNames(in any, postgreSql ...bool) []string {
out = append(out, fmt.Sprintf("`%s`", fi.Name)) out = append(out, fmt.Sprintf("`%s`", fi.Name))
} }
default: default:
// get tag name with the tag opton, e.g.: // get tag name with the tag option, e.g.:
// `db:"id"` // `db:"id"`
// `db:"id,type=char,length=16"` // `db:"id,type=char,length=16"`
// `db:",type=char,length=16"` // `db:",type=char,length=16"`

View File

@@ -8,7 +8,7 @@ const (
) )
type ( type (
// An Options is used to store the cache options. // Options is used to store the cache options.
Options struct { Options struct {
Expiry time.Duration Expiry time.Duration
NotFoundExpiry time.Duration NotFoundExpiry time.Duration

View File

@@ -7,6 +7,10 @@ import (
"go.mongodb.org/mongo-driver/mongo/integration/mtest" "go.mongodb.org/mongo-driver/mongo/integration/mtest"
) )
// init prepares the shared mtest state needed by the mongo mock tests.
func init() {
	// NOTE(review): Setup's error is deliberately discarded here; confirm
	// that a failed setup surfaces through the individual mtest-based tests.
	_ = mtest.Setup()
}
func TestClientManger_getClient(t *testing.T) { func TestClientManger_getClient(t *testing.T) {
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
mt.Run("test", func(mt *mtest.T) { mt.Run("test", func(mt *mtest.T) {

View File

@@ -6,6 +6,7 @@ import (
"time" "time"
"github.com/zeromicro/go-zero/core/breaker" "github.com/zeromicro/go-zero/core/breaker"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/timex" "github.com/zeromicro/go-zero/core/timex"
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
mopt "go.mongodb.org/mongo-driver/mongo/options" mopt "go.mongodb.org/mongo-driver/mongo/options"
@@ -15,7 +16,8 @@ import (
const ( const (
defaultSlowThreshold = time.Millisecond * 500 defaultSlowThreshold = time.Millisecond * 500
// spanName is the span name of the mongo calls. // spanName is the span name of the mongo calls.
spanName = "mongo" spanName = "mongo"
duplicateKeyCode = 11000
// mongodb method names // mongodb method names
aggregate = "Aggregate" aggregate = "Aggregate"
@@ -141,7 +143,7 @@ func (c *decoratedCollection) Aggregate(ctx context.Context, pipeline any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now() starTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, aggregate, starTime, err) c.logDurationSimple(ctx, aggregate, starTime, err)
@@ -161,7 +163,7 @@ func (c *decoratedCollection) BulkWrite(ctx context.Context, models []mongo.Writ
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, bulkWrite, startTime, err) c.logDurationSimple(ctx, bulkWrite, startTime, err)
@@ -181,7 +183,7 @@ func (c *decoratedCollection) CountDocuments(ctx context.Context, filter any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, countDocuments, startTime, err) c.logDurationSimple(ctx, countDocuments, startTime, err)
@@ -201,7 +203,7 @@ func (c *decoratedCollection) DeleteMany(ctx context.Context, filter any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, deleteMany, startTime, err) c.logDurationSimple(ctx, deleteMany, startTime, err)
@@ -221,7 +223,7 @@ func (c *decoratedCollection) DeleteOne(ctx context.Context, filter any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, deleteOne, startTime, err, filter) c.logDuration(ctx, deleteOne, startTime, err, filter)
@@ -241,7 +243,7 @@ func (c *decoratedCollection) Distinct(ctx context.Context, fieldName string, fi
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, distinct, startTime, err) c.logDurationSimple(ctx, distinct, startTime, err)
@@ -261,7 +263,7 @@ func (c *decoratedCollection) EstimatedDocumentCount(ctx context.Context,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, estimatedDocumentCount, startTime, err) c.logDurationSimple(ctx, estimatedDocumentCount, startTime, err)
@@ -281,7 +283,7 @@ func (c *decoratedCollection) Find(ctx context.Context, filter any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, find, startTime, err, filter) c.logDuration(ctx, find, startTime, err, filter)
@@ -301,7 +303,7 @@ func (c *decoratedCollection) FindOne(ctx context.Context, filter any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, findOne, startTime, err, filter) c.logDuration(ctx, findOne, startTime, err, filter)
@@ -322,7 +324,7 @@ func (c *decoratedCollection) FindOneAndDelete(ctx context.Context, filter any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, findOneAndDelete, startTime, err, filter) c.logDuration(ctx, findOneAndDelete, startTime, err, filter)
@@ -344,7 +346,7 @@ func (c *decoratedCollection) FindOneAndReplace(ctx context.Context, filter any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, findOneAndReplace, startTime, err, filter, replacement) c.logDuration(ctx, findOneAndReplace, startTime, err, filter, replacement)
@@ -365,7 +367,7 @@ func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter, upda
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, findOneAndUpdate, startTime, err, filter, update) c.logDuration(ctx, findOneAndUpdate, startTime, err, filter, update)
@@ -386,7 +388,7 @@ func (c *decoratedCollection) InsertMany(ctx context.Context, documents []any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, insertMany, startTime, err) c.logDurationSimple(ctx, insertMany, startTime, err)
@@ -406,7 +408,7 @@ func (c *decoratedCollection) InsertOne(ctx context.Context, document any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, insertOne, startTime, err, document) c.logDuration(ctx, insertOne, startTime, err, document)
@@ -426,7 +428,7 @@ func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter, replacemen
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, replaceOne, startTime, err, filter, replacement) c.logDuration(ctx, replaceOne, startTime, err, filter, replacement)
@@ -446,7 +448,7 @@ func (c *decoratedCollection) UpdateByID(ctx context.Context, id, update any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, updateByID, startTime, err, id, update) c.logDuration(ctx, updateByID, startTime, err, id, update)
@@ -466,7 +468,7 @@ func (c *decoratedCollection) UpdateMany(ctx context.Context, filter, update any
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDurationSimple(ctx, updateMany, startTime, err) c.logDurationSimple(ctx, updateMany, startTime, err)
@@ -486,7 +488,7 @@ func (c *decoratedCollection) UpdateOne(ctx context.Context, filter, update any,
endSpan(span, err) endSpan(span, err)
}() }()
err = c.brk.DoWithAcceptable(func() error { err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now() startTime := timex.Now()
defer func() { defer func() {
c.logDuration(ctx, updateOne, startTime, err, filter, update) c.logDuration(ctx, updateOne, startTime, err, filter, update)
@@ -527,19 +529,20 @@ func (p keepablePromise) keep(err error) error {
} }
func acceptable(err error) bool { func acceptable(err error) bool {
return err == nil || return err == nil || isDupKeyError(err) ||
errors.Is(err, mongo.ErrNoDocuments) || errorx.In(err, mongo.ErrNoDocuments, mongo.ErrNilValue,
errors.Is(err, mongo.ErrNilValue) || mongo.ErrNilDocument, mongo.ErrNilCursor, mongo.ErrEmptySlice,
errors.Is(err, mongo.ErrNilDocument) || // session errors
errors.Is(err, mongo.ErrNilCursor) || session.ErrSessionEnded, session.ErrNoTransactStarted, session.ErrTransactInProgress,
errors.Is(err, mongo.ErrEmptySlice) || session.ErrAbortAfterCommit, session.ErrAbortTwice, session.ErrCommitAfterAbort,
// session errors session.ErrUnackWCUnsupported, session.ErrSnapshotTransaction)
errors.Is(err, session.ErrSessionEnded) || }
errors.Is(err, session.ErrNoTransactStarted) ||
errors.Is(err, session.ErrTransactInProgress) || func isDupKeyError(err error) bool {
errors.Is(err, session.ErrAbortAfterCommit) || var e mongo.WriteException
errors.Is(err, session.ErrAbortTwice) || if !errors.As(err, &e) {
errors.Is(err, session.ErrCommitAfterAbort) || return false
errors.Is(err, session.ErrUnackWCUnsupported) || }
errors.Is(err, session.ErrSnapshotTransaction)
return e.HasErrorCode(duplicateKeyCode)
} }

View File

@@ -15,6 +15,7 @@ import (
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/integration/mtest" "go.mongodb.org/mongo-driver/mongo/integration/mtest"
mopt "go.mongodb.org/mongo-driver/mongo/options" mopt "go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
) )
var errDummy = errors.New("dummy") var errDummy = errors.New("dummy")
@@ -572,6 +573,56 @@ func TestDecoratedCollection_LogDuration(t *testing.T) {
assert.Contains(t, buf.String(), "slowcall") assert.Contains(t, buf.String(), "slowcall")
} }
func TestAcceptable(t *testing.T) {
tests := []struct {
name string
err error
want bool
}{
{"NilError", nil, true},
{"NoDocuments", mongo.ErrNoDocuments, true},
{"NilValue", mongo.ErrNilValue, true},
{"NilDocument", mongo.ErrNilDocument, true},
{"NilCursor", mongo.ErrNilCursor, true},
{"EmptySlice", mongo.ErrEmptySlice, true},
{"SessionEnded", session.ErrSessionEnded, true},
{"NoTransactStarted", session.ErrNoTransactStarted, true},
{"TransactInProgress", session.ErrTransactInProgress, true},
{"AbortAfterCommit", session.ErrAbortAfterCommit, true},
{"AbortTwice", session.ErrAbortTwice, true},
{"CommitAfterAbort", session.ErrCommitAfterAbort, true},
{"UnackWCUnsupported", session.ErrUnackWCUnsupported, true},
{"SnapshotTransaction", session.ErrSnapshotTransaction, true},
{"DuplicateKeyError", mongo.WriteException{WriteErrors: []mongo.WriteError{{Code: duplicateKeyCode}}}, true},
{"OtherError", errors.New("other error"), false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, acceptable(tt.err))
})
}
}
func TestIsDupKeyError(t *testing.T) {
tests := []struct {
name string
err error
want bool
}{
{"NilError", nil, false},
{"NonDupKeyError", errors.New("some other error"), false},
{"DupKeyError", mongo.WriteException{WriteErrors: []mongo.WriteError{{Code: duplicateKeyCode}}}, true},
{"OtherMongoError", mongo.WriteException{WriteErrors: []mongo.WriteError{{Code: 12345}}}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, isDupKeyError(tt.err))
})
}
}
type mockPromise struct { type mockPromise struct {
accepted bool accepted bool
reason string reason string
@@ -595,19 +646,40 @@ func (d *dropBreaker) Allow() (breaker.Promise, error) {
return nil, errDummy return nil, errDummy
} }
func (d *dropBreaker) AllowCtx(_ context.Context) (breaker.Promise, error) {
return nil, errDummy
}
func (d *dropBreaker) Do(_ func() error) error { func (d *dropBreaker) Do(_ func() error) error {
return nil return nil
} }
func (d *dropBreaker) DoCtx(_ context.Context, _ func() error) error {
return nil
}
func (d *dropBreaker) DoWithAcceptable(_ func() error, _ breaker.Acceptable) error { func (d *dropBreaker) DoWithAcceptable(_ func() error, _ breaker.Acceptable) error {
return errDummy return errDummy
} }
func (d *dropBreaker) DoWithAcceptableCtx(_ context.Context, _ func() error, _ breaker.Acceptable) error {
return errDummy
}
func (d *dropBreaker) DoWithFallback(_ func() error, _ breaker.Fallback) error { func (d *dropBreaker) DoWithFallback(_ func() error, _ breaker.Fallback) error {
return nil return nil
} }
func (d *dropBreaker) DoWithFallbackCtx(_ context.Context, _ func() error, _ breaker.Fallback) error {
return nil
}
func (d *dropBreaker) DoWithFallbackAcceptable(_ func() error, _ breaker.Fallback, func (d *dropBreaker) DoWithFallbackAcceptable(_ func() error, _ breaker.Fallback,
_ breaker.Acceptable) error { _ breaker.Acceptable) error {
return nil return nil
} }
func (d *dropBreaker) DoWithFallbackAcceptableCtx(_ context.Context, _ func() error,
_ breaker.Fallback, _ breaker.Acceptable) error {
return nil
}

View File

@@ -69,27 +69,21 @@ func newModel(name string, cli *mongo.Client, coll Collection, brk breaker.Break
// StartSession starts a new session. // StartSession starts a new session.
func (m *Model) StartSession(opts ...*mopt.SessionOptions) (sess mongo.Session, err error) { func (m *Model) StartSession(opts ...*mopt.SessionOptions) (sess mongo.Session, err error) {
err = m.brk.DoWithAcceptable(func() error { starTime := timex.Now()
starTime := timex.Now() defer func() {
defer func() { logDuration(context.Background(), m.name, startSession, starTime, err)
logDuration(context.Background(), m.name, startSession, starTime, err) }()
}()
session, sessionErr := m.cli.StartSession(opts...) session, sessionErr := m.cli.StartSession(opts...)
if sessionErr != nil { if sessionErr != nil {
return sessionErr return nil, sessionErr
} }
sess = &wrappedSession{ return &wrappedSession{
Session: session, Session: session,
name: m.name, name: m.name,
brk: m.brk, brk: m.brk,
} }, nil
return nil
}, acceptable)
return
} }
// Aggregate executes an aggregation pipeline. // Aggregate executes an aggregation pipeline.
@@ -184,7 +178,7 @@ func (w *wrappedSession) AbortTransaction(ctx context.Context) (err error) {
endSpan(span, err) endSpan(span, err)
}() }()
return w.brk.DoWithAcceptable(func() error { return w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now() starTime := timex.Now()
defer func() { defer func() {
logDuration(ctx, w.name, abortTransaction, starTime, err) logDuration(ctx, w.name, abortTransaction, starTime, err)
@@ -201,7 +195,7 @@ func (w *wrappedSession) CommitTransaction(ctx context.Context) (err error) {
endSpan(span, err) endSpan(span, err)
}() }()
return w.brk.DoWithAcceptable(func() error { return w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now() starTime := timex.Now()
defer func() { defer func() {
logDuration(ctx, w.name, commitTransaction, starTime, err) logDuration(ctx, w.name, commitTransaction, starTime, err)
@@ -222,7 +216,7 @@ func (w *wrappedSession) WithTransaction(
endSpan(span, err) endSpan(span, err)
}() }()
err = w.brk.DoWithAcceptable(func() error { err = w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now() starTime := timex.Now()
defer func() { defer func() {
logDuration(ctx, w.name, withTransaction, starTime, err) logDuration(ctx, w.name, withTransaction, starTime, err)
@@ -243,7 +237,7 @@ func (w *wrappedSession) EndSession(ctx context.Context) {
endSpan(span, err) endSpan(span, err)
}() }()
err = w.brk.DoWithAcceptable(func() error { err = w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now() starTime := timex.Now()
defer func() { defer func() {
logDuration(ctx, w.name, endSession, starTime, err) logDuration(ctx, w.name, endSession, starTime, err)

View File

@@ -2,8 +2,8 @@ package mon
import ( import (
"context" "context"
"errors"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/trace" "github.com/zeromicro/go-zero/core/trace"
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
@@ -24,8 +24,7 @@ func startSpan(ctx context.Context, cmd string) (context.Context, oteltrace.Span
func endSpan(span oteltrace.Span, err error) { func endSpan(span oteltrace.Span, err error) {
defer span.End() defer span.End()
if err == nil || errors.Is(err, mongo.ErrNoDocuments) || if err == nil || errorx.In(err, mongo.ErrNoDocuments, mongo.ErrNilValue, mongo.ErrNilDocument) {
errors.Is(err, mongo.ErrNilValue) || errors.Is(err, mongo.ErrNilDocument) {
span.SetStatus(codes.Ok, "") span.SetStatus(codes.Ok, "")
return return
} }

View File

@@ -0,0 +1,41 @@
package redis
import (
"context"
red "github.com/redis/go-redis/v9"
"github.com/zeromicro/go-zero/core/breaker"
"github.com/zeromicro/go-zero/core/lang"
)
var ignoreCmds = map[string]lang.PlaceholderType{
"blpop": {},
}
type breakerHook struct {
brk breaker.Breaker
}
func (h breakerHook) DialHook(next red.DialHook) red.DialHook {
return next
}
func (h breakerHook) ProcessHook(next red.ProcessHook) red.ProcessHook {
return func(ctx context.Context, cmd red.Cmder) error {
if _, ok := ignoreCmds[cmd.Name()]; ok {
return next(ctx, cmd)
}
return h.brk.DoWithAcceptableCtx(ctx, func() error {
return next(ctx, cmd)
}, acceptable)
}
}
func (h breakerHook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipelineHook {
return func(ctx context.Context, cmds []red.Cmder) error {
return h.brk.DoWithAcceptableCtx(ctx, func() error {
return next(ctx, cmds)
}, acceptable)
}
}

View File

@@ -0,0 +1,135 @@
package redis
import (
"context"
"errors"
"testing"
"time"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/breaker"
)
func TestBreakerHook_ProcessHook(t *testing.T) {
t.Run("breakerHookOpen", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
someError := errors.New("ERR some error")
s.SetError(someError.Error())
var err error
for i := 0; i < 1000; i++ {
_, err = rds.Get("key")
if err != nil && err.Error() != someError.Error() {
break
}
}
assert.Equal(t, breaker.ErrServiceUnavailable, err)
})
t.Run("breakerHookClose", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
var err error
for i := 0; i < 1000; i++ {
_, err = rds.Get("key")
if err != nil {
break
}
}
assert.NotEqual(t, breaker.ErrServiceUnavailable, err)
})
t.Run("breakerHook_ignoreCmd", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
someError := errors.New("ERR some error")
s.SetError(someError.Error())
var err error
node, err := getRedis(rds)
assert.NoError(t, err)
for i := 0; i < 1000; i++ {
_, err = rds.Blpop(node, "key")
if err != nil && err.Error() != someError.Error() {
break
}
}
assert.Equal(t, someError.Error(), err.Error())
})
}
func TestBreakerHook_ProcessPipelineHook(t *testing.T) {
t.Run("breakerPipelineHookOpen", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
someError := errors.New("ERR some error")
s.SetError(someError.Error())
var err error
for i := 0; i < 1000; i++ {
err = rds.Pipelined(
func(pipe Pipeliner) error {
pipe.Incr(context.Background(), "pipelined_counter")
pipe.Expire(context.Background(), "pipelined_counter", time.Hour)
pipe.ZAdd(context.Background(), "zadd", Z{Score: 12, Member: "zadd"})
return nil
},
)
if err != nil && err.Error() != someError.Error() {
break
}
}
assert.Equal(t, breaker.ErrServiceUnavailable, err)
})
t.Run("breakerPipelineHookClose", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
var err error
for i := 0; i < 1000; i++ {
err = rds.Pipelined(
func(pipe Pipeliner) error {
pipe.Incr(context.Background(), "pipelined_counter")
pipe.Expire(context.Background(), "pipelined_counter", time.Hour)
pipe.ZAdd(context.Background(), "zadd", Z{Score: 12, Member: "zadd"})
return nil
},
)
if err != nil {
break
}
}
assert.NotEqual(t, breaker.ErrServiceUnavailable, err)
})
}

View File

@@ -47,7 +47,7 @@ func (rc RedisConf) NewRedis() *Redis {
opts = append(opts, WithTLS()) opts = append(opts, WithTLS())
} }
return New(rc.Host, opts...) return newRedis(rc.Host, opts...)
} }
// Validate validates the RedisConf. // Validate validates the RedisConf.

View File

@@ -0,0 +1,5 @@
if redis.call("GET", KEYS[1]) == ARGV[1] then
return redis.call("DEL", KEYS[1])
else
return 0
end

View File

@@ -23,17 +23,18 @@ import (
const spanName = "redis" const spanName = "redis"
var ( var (
durationHook = hook{} defaultDurationHook = durationHook{}
redisCmdsAttributeKey = attribute.Key("redis.cmds") redisCmdsAttributeKey = attribute.Key("redis.cmds")
) )
type hook struct{} type durationHook struct {
}
func (h hook) DialHook(next red.DialHook) red.DialHook { func (h durationHook) DialHook(next red.DialHook) red.DialHook {
return next return next
} }
func (h hook) ProcessHook(next red.ProcessHook) red.ProcessHook { func (h durationHook) ProcessHook(next red.ProcessHook) red.ProcessHook {
return func(ctx context.Context, cmd red.Cmder) error { return func(ctx context.Context, cmd red.Cmder) error {
start := timex.Now() start := timex.Now()
ctx, endSpan := h.startSpan(ctx, cmd) ctx, endSpan := h.startSpan(ctx, cmd)
@@ -57,7 +58,7 @@ func (h hook) ProcessHook(next red.ProcessHook) red.ProcessHook {
} }
} }
func (h hook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipelineHook { func (h durationHook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipelineHook {
return func(ctx context.Context, cmds []red.Cmder) error { return func(ctx context.Context, cmds []red.Cmder) error {
if len(cmds) == 0 { if len(cmds) == 0 {
return next(ctx, cmds) return next(ctx, cmds)
@@ -83,6 +84,33 @@ func (h hook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipel
} }
} }
func (h durationHook) startSpan(ctx context.Context, cmds ...red.Cmder) (context.Context, func(err error)) {
tracer := trace.TracerFromContext(ctx)
ctx, span := tracer.Start(ctx,
spanName,
oteltrace.WithSpanKind(oteltrace.SpanKindClient),
)
cmdStrs := make([]string, 0, len(cmds))
for _, cmd := range cmds {
cmdStrs = append(cmdStrs, cmd.Name())
}
span.SetAttributes(redisCmdsAttributeKey.StringSlice(cmdStrs))
return ctx, func(err error) {
defer span.End()
if err == nil || errors.Is(err, red.Nil) {
span.SetStatus(codes.Ok, "")
return
}
span.SetStatus(codes.Error, err.Error())
span.RecordError(err)
}
}
func formatError(err error) string { func formatError(err error) string {
if err == nil || errors.Is(err, red.Nil) { if err == nil || errors.Is(err, red.Nil) {
return "" return ""
@@ -95,7 +123,7 @@ func formatError(err error) string {
} }
switch { switch {
case err == io.EOF: case errors.Is(err, io.EOF):
return "eof" return "eof"
case errors.Is(err, context.DeadlineExceeded): case errors.Is(err, context.DeadlineExceeded):
return "context deadline" return "context deadline"
@@ -123,30 +151,3 @@ func logDuration(ctx context.Context, cmds []red.Cmder, duration time.Duration)
} }
logx.WithContext(ctx).WithDuration(duration).Slowf("[REDIS] slowcall on executing: %s", buf.String()) logx.WithContext(ctx).WithDuration(duration).Slowf("[REDIS] slowcall on executing: %s", buf.String())
} }
func (h hook) startSpan(ctx context.Context, cmds ...red.Cmder) (context.Context, func(err error)) {
tracer := trace.TracerFromContext(ctx)
ctx, span := tracer.Start(ctx,
spanName,
oteltrace.WithSpanKind(oteltrace.SpanKindClient),
)
cmdStrs := make([]string, 0, len(cmds))
for _, cmd := range cmds {
cmdStrs = append(cmdStrs, cmd.Name())
}
span.SetAttributes(redisCmdsAttributeKey.StringSlice(cmdStrs))
return ctx, func(err error) {
defer span.End()
if err == nil || errors.Is(err, red.Nil) {
span.SetStatus(codes.Ok, "")
return
}
span.SetStatus(codes.Error, err.Error())
span.RecordError(err)
}
}

View File

@@ -21,7 +21,7 @@ func TestHookProcessCase1(t *testing.T) {
tracetest.NewInMemoryExporter(t) tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t) w := logtest.NewCollector(t)
err := durationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error { err := defaultDurationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name()) assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
return nil return nil
})(context.Background(), red.NewCmd(context.Background())) })(context.Background(), red.NewCmd(context.Background()))
@@ -36,7 +36,7 @@ func TestHookProcessCase2(t *testing.T) {
tracetest.NewInMemoryExporter(t) tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t) w := logtest.NewCollector(t)
err := durationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error { err := defaultDurationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name()) assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
time.Sleep(slowThreshold.Load() + time.Millisecond) time.Sleep(slowThreshold.Load() + time.Millisecond)
return nil return nil
@@ -54,12 +54,12 @@ func TestHookProcessPipelineCase1(t *testing.T) {
tracetest.NewInMemoryExporter(t) tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t) w := logtest.NewCollector(t)
err := durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error { err := defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
return nil return nil
})(context.Background(), nil) })(context.Background(), nil)
assert.NoError(t, err) assert.NoError(t, err)
err = durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error { err = defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name()) assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
return nil return nil
})(context.Background(), []red.Cmder{ })(context.Background(), []red.Cmder{
@@ -74,7 +74,7 @@ func TestHookProcessPipelineCase2(t *testing.T) {
tracetest.NewInMemoryExporter(t) tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t) w := logtest.NewCollector(t)
err := durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error { err := defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name()) assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
time.Sleep(slowThreshold.Load() + time.Millisecond) time.Sleep(slowThreshold.Load() + time.Millisecond)
return nil return nil
@@ -91,7 +91,7 @@ func TestHookProcessPipelineCase2(t *testing.T) {
func TestHookProcessPipelineCase3(t *testing.T) { func TestHookProcessPipelineCase3(t *testing.T) {
te := tracetest.NewInMemoryExporter(t) te := tracetest.NewInMemoryExporter(t)
err := durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error { err := defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name()) assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
return assert.AnError return assert.AnError
})(context.Background(), []red.Cmder{ })(context.Background(), []red.Cmder{

View File

@@ -0,0 +1,6 @@
if redis.call("GET", KEYS[1]) == ARGV[1] then
redis.call("SET", KEYS[1], ARGV[1], "PX", ARGV[2])
return "OK"
else
return redis.call("SET", KEYS[1], ARGV[1], "NX", "PX", ARGV[2])
end

View File

@@ -19,7 +19,7 @@ func TestRedisMetric(t *testing.T) {
cfg := devserver.Config{} cfg := devserver.Config{}
_ = conf.FillDefault(&cfg) _ = conf.FillDefault(&cfg)
server := devserver.NewServer(cfg) server := devserver.NewServer(cfg)
server.StartAsync() server.StartAsync(cfg)
time.Sleep(time.Second) time.Sleep(time.Second)
metricReqDur.Observe(8, "test-cmd") metricReqDur.Observe(8, "test-cmd")

File diff suppressed because it is too large Load Diff

View File

@@ -36,7 +36,7 @@ func (m myHook) ProcessHook(next red.ProcessHook) red.ProcessHook {
if cmd.Name() == "ping" && !m.includePing { if cmd.Name() == "ping" && !m.includePing {
return next(ctx, cmd) return next(ctx, cmd)
} }
return errors.New("hook error") return errors.New("durationHook error")
} }
} }
@@ -155,12 +155,12 @@ func TestRedis_NonBlock(t *testing.T) {
t.Run("nonBlock true", func(t *testing.T) { t.Run("nonBlock true", func(t *testing.T) {
s := miniredis.RunT(t) s := miniredis.RunT(t)
// use hook to simulate redis ping error // use durationHook to simulate redis ping error
_, err := NewRedis(RedisConf{ _, err := NewRedis(RedisConf{
Host: s.Addr(), Host: s.Addr(),
NonBlock: true, NonBlock: true,
Type: NodeType, Type: NodeType,
}, withHook(myHook{includePing: true})) }, WithHook(myHook{includePing: true}))
assert.NoError(t, err) assert.NoError(t, err)
}) })
@@ -170,7 +170,7 @@ func TestRedis_NonBlock(t *testing.T) {
Host: s.Addr(), Host: s.Addr(),
NonBlock: false, NonBlock: false,
Type: NodeType, Type: NodeType,
}, withHook(myHook{includePing: true})) }, WithHook(myHook{includePing: true}))
assert.ErrorContains(t, err, "redis connect error") assert.ErrorContains(t, err, "redis connect error")
}) })
} }
@@ -2080,3 +2080,70 @@ func (n mockedNode) BLPop(_ context.Context, _ time.Duration, _ ...string) *red.
return cmd return cmd
} }
func TestRedisPublish(t *testing.T) {
runOnRedis(t, func(client *Redis) {
_, err := newRedis(client.Addr, badType()).Publish("Test", "message")
assert.NotNil(t, err)
_, err = client.Publish("Test", "message")
assert.Nil(t, err)
})
}
func TestRedisRPopLPush(t *testing.T) {
runOnRedis(t, func(client *Redis) {
_, err := newRedis(client.Addr, badType()).RPopLPush("Source", "Destination")
assert.NotNil(t, err)
_, err = client.Rpush("Source", "Destination")
assert.Nil(t, err)
_, err = client.RPopLPush("Source", "Destination")
assert.Nil(t, err)
})
}
func TestRedisUnlink(t *testing.T) {
runOnRedis(t, func(client *Redis) {
_, err := newRedis(client.Addr, badType()).Unlink("Key1", "Key2")
assert.NotNil(t, err)
err = client.Set("Key1", "Key2")
assert.Nil(t, err)
get, err := client.Get("Key1")
assert.Nil(t, err)
assert.Equal(t, "Key2", get)
res, err := client.Unlink("Key1")
assert.Nil(t, err)
assert.Equal(t, int64(1), res)
})
}
func TestRedisTxPipeline(t *testing.T) {
runOnRedis(t, func(client *Redis) {
ctx := context.Background()
pipe, err := newRedis(client.Addr, badType()).TxPipeline()
assert.NotNil(t, err)
pipe, err = client.TxPipeline()
assert.Nil(t, err)
key := "key"
hashKey := "field"
hashValue := "value"
// setting value
pipe.HSet(ctx, key, hashKey, hashValue)
existsCmd := pipe.Exists(ctx, key)
getCmd := pipe.HGet(ctx, key, hashKey)
// execution
_, err = pipe.Exec(ctx)
assert.Nil(t, err)
// verification results
exists, err := existsCmd.Result()
assert.Nil(t, err)
assert.Equal(t, int64(1), exists)
value, err := getCmd.Result()
assert.Nil(t, err)
assert.Equal(t, hashValue, value)
})
}

View File

@@ -37,8 +37,11 @@ func getClient(r *Redis) (*red.Client, error) {
MinIdleConns: idleConns, MinIdleConns: idleConns,
TLSConfig: tlsConfig, TLSConfig: tlsConfig,
}) })
store.AddHook(durationHook)
for _, hook := range r.hooks { hooks := append([]red.Hook{defaultDurationHook, breakerHook{
brk: r.brk,
}}, r.hooks...)
for _, hook := range hooks {
store.AddHook(hook) store.AddHook(hook)
} }

View File

@@ -33,8 +33,11 @@ func getCluster(r *Redis) (*red.ClusterClient, error) {
MinIdleConns: idleConns, MinIdleConns: idleConns,
TLSConfig: tlsConfig, TLSConfig: tlsConfig,
}) })
store.AddHook(durationHook)
for _, hook := range r.hooks { hooks := append([]red.Hook{defaultDurationHook, breakerHook{
brk: r.brk,
}}, r.hooks...)
for _, hook := range hooks {
store.AddHook(hook) store.AddHook(hook)
} }

View File

@@ -51,7 +51,7 @@ func TestGetCluster(t *testing.T) {
Addr: r.Addr(), Addr: r.Addr(),
Type: ClusterType, Type: ClusterType,
tls: true, tls: true,
hooks: []red.Hook{durationHook}, hooks: []red.Hook{defaultDurationHook},
}) })
if assert.NoError(t, err) { if assert.NoError(t, err) {
assert.NotNil(t, c) assert.NotNil(t, c)

View File

@@ -2,6 +2,8 @@ package redis
import ( import (
"context" "context"
_ "embed"
"errors"
"math/rand" "math/rand"
"strconv" "strconv"
"sync/atomic" "sync/atomic"
@@ -19,17 +21,13 @@ const (
) )
var ( var (
lockScript = NewScript(`if redis.call("GET", KEYS[1]) == ARGV[1] then //go:embed lockscript.lua
redis.call("SET", KEYS[1], ARGV[1], "PX", ARGV[2]) lockLuaScript string
return "OK" lockScript = NewScript(lockLuaScript)
else
return redis.call("SET", KEYS[1], ARGV[1], "NX", "PX", ARGV[2]) //go:embed delscript.lua
end`) delLuaScript string
delScript = NewScript(`if redis.call("GET", KEYS[1]) == ARGV[1] then delScript = NewScript(delLuaScript)
return redis.call("DEL", KEYS[1])
else
return 0
end`)
) )
// A RedisLock is a redis lock. // A RedisLock is a redis lock.
@@ -64,7 +62,7 @@ func (rl *RedisLock) AcquireCtx(ctx context.Context) (bool, error) {
resp, err := rl.store.ScriptRunCtx(ctx, lockScript, []string{rl.key}, []string{ resp, err := rl.store.ScriptRunCtx(ctx, lockScript, []string{rl.key}, []string{
rl.id, strconv.Itoa(int(seconds)*millisPerSecond + tolerance), rl.id, strconv.Itoa(int(seconds)*millisPerSecond + tolerance),
}) })
if err == red.Nil { if errors.Is(err, red.Nil) {
return false, nil return false, nil
} else if err != nil { } else if err != nil {
logx.Errorf("Error on acquiring lock for %s, %s", rl.key, err.Error()) logx.Errorf("Error on acquiring lock for %s, %s", rl.key, err.Error())

View File

@@ -190,6 +190,17 @@ func (cc CachedConn) QueryRowNoCacheCtx(ctx context.Context, v any, q string,
return cc.db.QueryRowCtx(ctx, v, q, args...) return cc.db.QueryRowCtx(ctx, v, q, args...)
} }
// QueryRowPartialNoCache unmarshals into v with given statement.
func (cc CachedConn) QueryRowPartialNoCache(v any, q string, args ...any) error {
return cc.QueryRowPartialNoCacheCtx(context.Background(), v, q, args...)
}
// QueryRowPartialNoCacheCtx unmarshals into v with given statement.
func (cc CachedConn) QueryRowPartialNoCacheCtx(ctx context.Context, v any, q string,
args ...any) error {
return cc.db.QueryRowPartialCtx(ctx, v, q, args...)
}
// QueryRowsNoCache unmarshals into v with given statement. // QueryRowsNoCache unmarshals into v with given statement.
// It doesn't use cache, because it might cause consistency problem. // It doesn't use cache, because it might cause consistency problem.
func (cc CachedConn) QueryRowsNoCache(v any, q string, args ...any) error { func (cc CachedConn) QueryRowsNoCache(v any, q string, args ...any) error {
@@ -203,6 +214,19 @@ func (cc CachedConn) QueryRowsNoCacheCtx(ctx context.Context, v any, q string,
return cc.db.QueryRowsCtx(ctx, v, q, args...) return cc.db.QueryRowsCtx(ctx, v, q, args...)
} }
// QueryRowsPartialNoCache unmarshals into v with given statement.
// It doesn't use cache, because it might cause consistency problem.
func (cc CachedConn) QueryRowsPartialNoCache(v any, q string, args ...any) error {
return cc.QueryRowsPartialNoCacheCtx(context.Background(), v, q, args...)
}
// QueryRowsPartialNoCacheCtx unmarshals into v with given statement.
// It doesn't use cache, because it might cause consistency problem.
func (cc CachedConn) QueryRowsPartialNoCacheCtx(ctx context.Context, v any, q string,
args ...any) error {
return cc.db.QueryRowsPartialCtx(ctx, v, q, args...)
}
// SetCache sets v into cache with given key. // SetCache sets v into cache with given key.
func (cc CachedConn) SetCache(key string, val any) error { func (cc CachedConn) SetCache(key string, val any) error {
return cc.SetCacheCtx(context.Background(), key, val) return cc.SetCacheCtx(context.Background(), key, val)

Some files were not shown because too many files have changed in this diff Show More