Compare commits

...

251 Commits

Author SHA1 Message Date
Kevin Wan
cfe03ea9e1 chore: update goctl deps (#4535) 2025-01-02 00:26:08 +08:00
Kevin Wan
48d21ef8ad chore: add comments (#4534) 2025-01-01 20:49:45 +08:00
Kevin Wan
28a001c5f9 chore: refactor shutdown config, to prevent setting zero values (#4533)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2025-01-01 20:46:51 +08:00
Qiying Wang
22a41cacc7 feat: add ProcConf to make SetTimeToForceQuit configurable (#4446) 2025-01-01 11:48:53 +00:00
Kevin Wan
fcc246933c fix: service group not working well when callback takes long time (#4531)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2025-01-01 07:06:50 +00:00
dependabot[bot]
5c3679ffe7 chore(deps): bump google.golang.org/protobuf from 1.36.0 to 1.36.1 in /tools/goctl (#4521) 2024-12-28 12:05:03 +08:00
Kevin Wan
eaa01ccb9f Update readme-cn.md (#4525) 2024-12-28 09:59:00 +08:00
dependabot[bot]
b8206fb46a chore(deps): bump google.golang.org/protobuf from 1.36.0 to 1.36.1 (#4523)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-24 09:58:03 +08:00
kesonan
1c3876810e fix command goctl bug invalid (#4520) 2024-12-23 16:00:50 +00:00
Kevin Wan
1d9159ea39 feat: support form array in three notations (#4498)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-12-23 00:56:20 +08:00
Kevin Wan
2159d112c3 chore: format the code (#4518) 2024-12-22 15:10:44 +08:00
不插电
f57874a51f fix: DetailedLog format. (#4467) 2024-12-21 03:48:27 +00:00
Xingchen Wang
8625864d43 bugfix:SetSlowThreshold not effective in function logDetails (#4511)
Co-authored-by: Star Wang <starwang@Star-Wangs-MacBook-Pro.local>
2024-12-21 03:22:54 +00:00
dependabot[bot]
8f9ba3ec11 chore(deps): bump golang.org/x/net from 0.32.0 to 0.33.0 (#4516)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-20 12:04:16 +08:00
dependabot[bot]
a1d9bc08f0 chore(deps): bump github.com/alicebob/miniredis/v2 from 2.33.0 to 2.34.0 (#4509)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-19 18:13:12 +08:00
dependabot[bot]
b9d7f1cc77 chore(deps): bump github.com/emicklei/proto from 1.13.4 to 1.14.0 in /tools/goctl (#4510)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-19 17:54:44 +08:00
kesonan
6700910f64 (goctl)fix: api timeout limited during api generation (#4513) 2024-12-19 08:19:10 +00:00
godLei6
9c4ed394a7 fix: go work duplicate prefix get err (#4487) 2024-12-19 01:17:50 +00:00
dependabot[bot]
fd07a9c6e4 chore(deps): bump google.golang.org/protobuf from 1.35.1 to 1.36.0 in /tools/goctl (#4504)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-18 16:01:36 +08:00
dependabot[bot]
b8c239630c chore(deps): bump github.com/emicklei/proto from 1.13.2 to 1.13.4 in /tools/goctl (#4505)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-18 15:30:04 +08:00
dependabot[bot]
672ea55736 chore(deps): bump github.com/stretchr/testify from 1.9.0 to 1.10.0 in /tools/goctl (#4506)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-18 15:16:00 +08:00
dependabot[bot]
f7097866bf chore(deps): bump github.com/zeromicro/go-zero from 1.7.3 to 1.7.4 in /tools/goctl (#4507)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-18 11:27:55 +08:00
dependabot[bot]
796b2bd1b0 chore(deps): bump golang.org/x/crypto from 0.30.0 to 0.31.0 in the go_modules group (#4501)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-18 00:37:32 +08:00
dependabot[bot]
e1e5fb2071 chore(deps): bump golang.org/x/crypto from 0.28.0 to 0.31.0 in /tools/goctl in the go_modules group (#4502)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-17 17:20:57 +08:00
kesonan
89ecb50005 remove string restriction on atserver (#4499) 2024-12-17 03:59:30 +00:00
dependabot[bot]
dbed1ea042 chore(deps): bump google.golang.org/protobuf from 1.35.2 to 1.36.0 (#4500) 2024-12-17 11:58:22 +08:00
Kevin Wan
ad291daf78 chore: format Dockerfile template (#4496) 2024-12-14 12:38:33 +08:00
lascyb
13746a3706 Update docker.tpl (#4495) 2024-12-13 16:08:38 +00:00
dependabot[bot]
f03b13f632 chore(deps): bump github.com/fullstorydev/grpcurl from 1.9.1 to 1.9.2 (#4473)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-08 10:27:42 +08:00
dependabot[bot]
f6f64b1286 chore(deps): bump golang.org/x/net from 0.31.0 to 0.32.0 (#4481)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-08 10:06:05 +08:00
Kevin Wan
300a415f5d Add go-zero users (#4475) 2024-12-05 00:09:44 +08:00
dependabot[bot]
c5de546f8a chore(deps): bump github.com/stretchr/testify from 1.9.0 to 1.10.0 (#4470)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-26 21:46:28 +08:00
Robin
cad243905f fix ts request cli (#4461)
Co-authored-by: robinzhang <azhangrongbing@163.com>
2024-11-21 21:40:08 +08:00
dependabot[bot]
7c8f41d577 chore(deps): bump codecov/codecov-action from 4 to 5 (#4459)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-21 21:25:53 +08:00
dependabot[bot]
cbd118d55f chore(deps): bump google.golang.org/protobuf from 1.35.1 to 1.35.2 (#4457) 2024-11-15 17:00:10 +08:00
dependabot[bot]
9d2a1b8b0a chore(deps): bump golang.org/x/time from 0.7.0 to 0.8.0 (#4454)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-11 00:24:06 +08:00
dependabot[bot]
f6ada979aa chore(deps): bump golang.org/x/net from 0.30.0 to 0.31.0 (#4452)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-11 00:13:28 +08:00
Kevin Wan
53a74759a5 feat: support json array in request body (#4444) 2024-11-05 14:09:30 +00:00
dependabot[bot]
1940f7bd58 chore(deps): bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (#4447) 2024-11-05 19:37:32 +08:00
Kevin Wan
18cb3141ba feat: support query array in httpx.Parse (#4440) 2024-11-02 13:55:37 +00:00
dependabot[bot]
f822c9a94f chore(deps): bump github.com/fatih/color from 1.17.0 to 1.18.0 (#4434)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-26 14:49:56 +08:00
Kevin Wan
1a3dc75874 chore: upgrade goctl version (#4429) 2024-10-20 10:38:04 +08:00
yangjinheng
796dd5b6e2 fix the source code directory after the soft link (#4425) 2024-10-19 15:37:44 +00:00
dependabot[bot]
94e476ade7 chore(deps): bump github.com/redis/go-redis/v9 from 9.6.1 to 9.7.0 (#4426) 2024-10-19 08:10:32 +08:00
dependabot[bot]
2a74996e1b chore(deps): bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 (#4424) 2024-10-18 11:38:41 +08:00
fishJack01
f52af1ebf9 feat:New redis method TxPipeline (#4417)
Co-authored-by: fish <fish@fishdeMac-mini.local>
2024-10-13 05:51:55 +00:00
dependabot[bot]
24450f18bb chore(deps): bump google.golang.org/protobuf from 1.34.2 to 1.35.1 (#4414)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-12 21:13:32 +08:00
dependabot[bot]
f1a45d8a23 chore(deps): bump golang.org/x/time from 0.6.0 to 0.7.0 (#4409)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-05 12:58:06 +08:00
MarkJoyMa
9aebba1566 fix/conf_multi_layer_map (#4407)
Co-authored-by: aiden.ma <Aiden.ma@yijinin.com>
2024-10-05 04:46:32 +00:00
dependabot[bot]
4998479f9a chore(deps): bump golang.org/x/net from 0.29.0 to 0.30.0 (#4410)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-05 12:27:34 +08:00
dependabot[bot]
873d1351ee chore(deps): bump golang.org/x/sys from 0.25.0 to 0.26.0 (#4408)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-05 12:04:39 +08:00
dependabot[bot]
afcbca8f24 chore(deps): bump go.mongodb.org/mongo-driver from 1.17.0 to 1.17.1 (#4402)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-03 11:16:00 +08:00
dependabot[bot]
5b8126c2cf chore(deps): bump go.uber.org/automaxprocs from 1.5.3 to 1.6.0 (#4389) 2024-09-24 09:16:20 +08:00
Kevin Wan
4dfaf35151 fix: goctl k8s autoscaling version upgrade (#4387) 2024-09-20 23:16:38 +08:00
dependabot[bot]
00cd77c92b chore(deps): bump github.com/prometheus/client_golang from 1.20.3 to 1.20.4 (#4380)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-20 10:26:52 +08:00
dependabot[bot]
2145a7a93c chore(deps): bump go.mongodb.org/mongo-driver from 1.16.1 to 1.17.0 (#4379)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-19 22:10:29 +08:00
dependabot[bot]
d5302f2dbe chore(deps): bump golang.org/x/net from 0.28.0 to 0.29.0 (#4360)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-07 21:13:42 +08:00
dependabot[bot]
6181594bc8 chore(deps): bump github.com/jhump/protoreflect from 1.16.0 to 1.17.0 (#4359)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-06 20:13:29 +08:00
dependabot[bot]
11c10e51ff chore(deps): bump github.com/prometheus/client_golang from 1.20.2 to 1.20.3 (#4361)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-06 18:52:59 +08:00
Kevin Wan
3f03126d27 chore: fix goctl Dockerfile warnings (#4358) 2024-09-05 22:13:01 +08:00
dependabot[bot]
d43adc2823 chore(deps): bump golang.org/x/sys from 0.24.0 to 0.25.0 (#4356)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-05 10:17:27 +08:00
Kevin Wan
656222b572 chore: update goctl version (#4353) 2024-09-03 09:22:04 +08:00
Kevin Wan
077b6072fa chore: coding style (#4352) 2024-09-03 09:06:50 +08:00
jursonmo
0cafb1164b should check if devServer Enabled, then call once.Do() (#4351)
Co-authored-by: william <myname@example.com>
2024-09-03 00:54:43 +00:00
MarkJoyMa
90afa08367 Revert "fix: etcd scheme on grpc resolver" (#4349) 2024-09-03 00:48:08 +00:00
Kevin Wan
c92f788292 chore: update go version in test (#4347) 2024-09-01 16:19:31 +08:00
Kevin Wan
e94be9b302 chore: update go version for building goctl releases (#4346) 2024-09-01 16:02:49 +08:00
Kevin Wan
e713d9013d chore: update goctl deps (#4345) 2024-09-01 15:42:45 +08:00
Kevin Wan
24d6150073 chore: refactor config center (#4339)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-08-28 12:02:48 +00:00
MarkJoyMa
44cddec5c3 feat: added configuration center function (#3035)
Co-authored-by: aiden.ma <Aiden.ma@yijinin.com>
2024-08-28 14:47:52 +08:00
Kevin Wan
47d13e5ef8 fix: etcd scheme on grpc resolver (#4121) 2024-08-27 23:13:37 +08:00
Kevin Wan
896e1a2abb chore: refactor logx file time format (#4335) 2024-08-27 22:01:01 +08:00
kui
075817a8dd Add custom log file name date format (#4333) 2024-08-27 20:43:25 +08:00
dependabot[bot]
29400f6814 chore(deps): bump github.com/prometheus/client_golang from 1.20.1 to 1.20.2 (#4334)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-27 19:51:54 +08:00
dependabot[bot]
34f536264f chore(deps): bump github.com/prometheus/client_golang from 1.20.0 to 1.20.1 (#4324)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-21 17:38:54 +08:00
Kevin Wan
9d9c7e0fe0 feat: support build tag to reduce binary size w/o k8s (#4323) 2024-08-20 19:53:20 +08:00
dependabot[bot]
e220d3a4cb chore(deps): bump github.com/prometheus/client_golang from 1.19.1 to 1.20.0 (#4317)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-19 12:22:06 +08:00
dependabot[bot]
193dcf90bc chore(deps): bump golang.org/x/sys from 0.23.0 to 0.24.0 (#4307)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-10 21:15:08 +08:00
featherlight
03756c9166 refactor zrpc server interceptor builder (#4300) 2024-08-08 14:37:19 +00:00
kesonan
c1f12c5784 (goctl): fix map conversion (#4306) 2024-08-08 14:19:14 +00:00
dependabot[bot]
2883111af5 chore(deps): bump go.mongodb.org/mongo-driver from 1.16.0 to 1.16.1 (#4305)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-08 22:07:18 +08:00
dependabot[bot]
2758c4e842 chore(deps): bump golang.org/x/net from 0.27.0 to 0.28.0 (#4301)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-07 20:19:47 +08:00
Kevin Wan
4196ddb3e3 Update readme-cn.md (#4294) 2024-08-06 17:54:38 +08:00
dependabot[bot]
e24d797226 chore(deps): bump golang.org/x/time from 0.5.0 to 0.6.0 (#4299)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-06 17:54:01 +08:00
dependabot[bot]
d4349fa958 chore(deps): bump golang.org/x/sys from 0.22.0 to 0.23.0 (#4298)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-06 16:39:57 +08:00
kesonan
da2c14d45f (goctl): fix quickstart error while reading go module info (#4297) 2024-08-05 15:29:06 +00:00
kesonan
64e3aeda55 Add goctl version to code header (#4293) 2024-08-03 14:22:51 +00:00
Kevin Wan
dedba17219 refactor: simplify BatchError (#4292) 2024-08-03 13:57:41 +08:00
kesonan
c6348b9855 refactor goctl-compare (#4290) 2024-08-03 04:26:24 +00:00
chentong
8689a6247e refactor(core/errorx): use errors.Join simplify error handle (#4289) 2024-08-03 03:00:59 +00:00
Rui Chen
ff6ee25d23 ci: update workflows to use go.mod instead of specifying go version (#4286)
Signed-off-by: Rui Chen <rui@chenrui.dev>
2024-08-03 02:12:28 +00:00
Kevin Wan
5213243bbb Update readme.md (#4287) 2024-08-02 18:26:54 +08:00
Kevin Wan
2588a36555 feat: support rest.WithCorsHeaders to customize cors headers (#4284) 2024-07-30 17:29:44 +08:00
Krystian Kulas
c2421beb25 fix: readme remove duplicate text (#4280) 2024-07-29 01:06:29 +00:00
kesonan
dfe8a81c76 Upgrade goctl version to 1.7.1 (#4282) 2024-07-29 00:54:21 +00:00
kesonan
ee643a945e (goctl): fix nested struct generation (#4281) 2024-07-28 15:40:25 +00:00
Kevin Wan
eeda6efae7 chore: upgrade go-zero version (#4277) 2024-07-27 17:31:32 +08:00
Kevin Wan
caf0e64beb chore: optimize lock in discov.etcd (#4275) 2024-07-27 16:27:05 +08:00
dependabot[bot]
0e61303cb0 chore(deps): bump github.com/redis/go-redis/v9 from 9.6.0 to 9.6.1 (#4274)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-26 23:17:39 +08:00
dependabot[bot]
f651d7cf6c chore(deps): bump go.etcd.io/etcd/client/v3 from 3.5.14 to 3.5.15 (#4267)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-25 19:24:07 +08:00
tsinghuacoder
05da2c560b chore: fix some comments (#4270)
Signed-off-by: tsinghuacoder <tsinghuacoder@icloud.com>
2024-07-25 19:11:56 +08:00
Kevin Wan
8ae0f287d6 chore: optimize lock in discov.etcd (#4272) 2024-07-25 17:24:05 +08:00
Kevin Wan
8f7aff558f chore: refactor BuildTypes in tsgen. (#4266) 2024-07-22 21:17:59 +08:00
jaron
6e08d478fe feat(tsgen): tsgen export buildTypes function (#4197) 2024-07-22 12:11:06 +00:00
dependabot[bot]
944ac383d2 chore(deps): bump github.com/redis/go-redis/v9 from 9.5.4 to 9.6.0 (#4262)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-20 23:18:19 +08:00
Kevin Wan
0eec33f14b chore: optimize file reading (#4264) 2024-07-20 22:44:13 +08:00
JiChen
9de04ee035 fix: handle with read the empty file (#4258) 2024-07-20 12:01:13 +00:00
kesonan
cf5b080fbe (goctl): Use .goctl as home if not exists (#4260) 2024-07-19 05:54:24 +00:00
Kevin Wan
4a14164be1 feat: handle using root as the path of file server (#4255) 2024-07-18 15:15:03 +00:00
Kevin Wan
5dd6f2a43a feat: support embed file system to serve files in rest (#4253) 2024-07-17 16:21:08 +08:00
Kevin Wan
a00c956776 chore: upgrade go version (#4248)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-07-16 11:43:25 +08:00
yonwoo9
c02fb3acab chore: initialize some slice type variables (#4249) 2024-07-15 15:50:42 +00:00
Kevin Wan
9f8455ddb3 chore: fix typo (#4246) 2024-07-14 10:52:47 +08:00
guonaihong
775b105ab2 added code comments (#4219) 2024-07-13 12:09:58 +00:00
Kevin Wan
ec86f22cd6 feat: support file server in rest (#4244) 2024-07-13 19:58:35 +08:00
dependabot[bot]
e776b5d8ab chore(deps): bump github.com/redis/go-redis/v9 from 9.5.3 to 9.5.4 (#4245)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-13 16:31:57 +08:00
Kevin Wan
2026d4410b fix: should not trigger breaker on duplicate key with mongodb (#4238) 2024-07-08 23:41:02 +08:00
MarkJoyMa
f8437e6364 feat/sqlc_partial (#4237) 2024-07-07 15:54:18 +00:00
Kevin Wan
bd2033eb35 feat: support adding more writer, easy to write to console additionally (#4234)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-07-07 23:31:27 +08:00
MarkJoyMa
fed835bc25 feat/redis_hook (#4233) 2024-07-07 04:50:30 +00:00
dependabot[bot]
c9cbd74bf3 chore(deps): bump golang.org/x/net from 0.26.0 to 0.27.0 (#4231)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-07 10:54:37 +08:00
dependabot[bot]
27ea106293 chore(deps): bump golang.org/x/sys from 0.21.0 to 0.22.0 (#4229)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-05 19:20:54 +08:00
Kevin Wan
657923b9d5 Update readme-cn.md (#4228) 2024-07-04 11:05:23 +08:00
dependabot[bot]
8dbec6a800 chore(deps): bump google.golang.org/grpc from 1.64.0 to 1.65.0 (#4227)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-03 21:03:23 +08:00
Kevin Wan
490559434a chore: remove unnecessary return (#4226) 2024-07-02 23:45:18 +08:00
kesonan
4a62d084a9 fix: disable array request body (#4220) 2024-07-02 03:55:01 +00:00
kesonan
2f9b6cf8ec disable nested struct for array and map type (#4222) 2024-06-29 05:44:46 +00:00
Kevin Wan
01bbc78bac Update FUNDING.yml (#4216) 2024-06-27 11:15:42 +08:00
kesonan
a012a9138f (goctl): support nested struct (#4211) 2024-06-25 15:18:15 +00:00
Kevin Wan
4ec9cac82b chore: update readme for goctl installation (#4206) 2024-06-23 11:47:17 +08:00
苏蓝
8d9746e794 Update readme-cn.md (#4205) 2024-06-23 03:32:57 +00:00
Kevin Wan
8f83705199 Update version.go (#4204) 2024-06-21 20:09:36 +08:00
Kevin Wan
f1ed7bd75d Update readme-cn.md (#4195) 2024-06-17 22:28:24 +08:00
Kevin Wan
7a20608756 chore: add trending badge (#4194) 2024-06-17 22:26:08 +08:00
dependabot[bot]
5cfff95e95 chore(deps): bump google.golang.org/protobuf from 1.34.1 to 1.34.2 (#4185)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-12 14:26:26 +08:00
dependabot[bot]
1e1cc1a0d9 chore(deps): bump github.com/redis/go-redis/v9 from 9.5.2 to 9.5.3 (#4183)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-08 09:28:45 +08:00
dependabot[bot]
0a1440a839 chore(deps): bump golang.org/x/net from 0.25.0 to 0.26.0 (#4180) 2024-06-05 08:01:46 +08:00
kesonan
23980d29c3 fix no such dir if not create goctl home (#4177) 2024-06-04 10:55:56 +00:00
jiz4oh
424119d796 chore: fix the confused log level in comment (#4175) 2024-06-04 10:43:26 +00:00
kesonan
97c7835d9e fix #4161 (#4176) 2024-06-04 10:26:34 +00:00
kesonan
7954ad3759 fix: fix readme (#4174) 2024-06-02 15:22:33 +00:00
dependabot[bot]
e8c9b0ddf8 chore(deps): bump go.etcd.io/etcd/client/v3 from 3.5.13 to 3.5.14 (#4169)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-02 11:17:55 +08:00
dependabot[bot]
70112e59cb chore(deps): bump github.com/redis/go-redis/v9 from 9.4.0 to 9.5.2 (#4172)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-01 13:12:33 +08:00
dependabot[bot]
7ba5ced2d9 chore(deps): bump github.com/alicebob/miniredis/v2 from 2.32.1 to 2.33.0 (#4168)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-30 11:15:59 +08:00
Kevin Wan
962b36d745 fix: log concurrency problems after calling WithXXX methods (#4164) 2024-05-26 12:52:05 +08:00
dependabot[bot]
57060cc6d7 chore(deps): bump google.golang.org/grpc from 1.63.2 to 1.64.0 (#4155) 2024-05-16 07:47:19 +08:00
Kevin Wan
e0c16059d9 optimize: simplify breaker algorithm (#4151) 2024-05-14 17:02:21 +08:00
dependabot[bot]
a0d954dfab chore(deps): bump github.com/fatih/color from 1.16.0 to 1.17.0 (#4150)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-14 10:37:25 +08:00
Alex Last
a5ece25c07 feat: add secure option for sending traces via otlphttp (#3973) 2024-05-12 17:00:54 +00:00
Kevin Wan
0cac41a38b chore: refactor mapping unmarshaler (#4145) 2024-05-12 14:37:36 +08:00
Kevin Wan
f10084a3f5 chore: refactor and coding style (#4144) 2024-05-11 23:06:59 +08:00
Leo
040fee5669 feat: httpx.Parse supports parsing structures that implement the Unmarshaler interface (#4143) 2024-05-11 22:25:10 +08:00
Kevin Wan
42b3bae65a optimize: improve breaker algorithm on recovery time (#4141) 2024-05-11 21:44:26 +08:00
guangwu
7c730b97d8 fix: make: command: Command not found (#4132)
Signed-off-by: guoguangwu <guoguangwug@gmail.com>
2024-05-10 13:33:03 +00:00
Kevin Wan
057bae92ab fix: log panic on Error() or String() panics (#4136) 2024-05-10 12:49:34 +08:00
Kevin Wan
74331a45c9 fix: log panic when use nil error or stringer with Field method (#4130) 2024-05-10 00:31:36 +08:00
chen quan
9d551d507f chore(api/maxconnshandler): add tracing information to the log (#4126) 2024-05-08 05:25:35 +00:00
Kevin Wan
02dd81c05c Update FUNDING.yml (#4128) 2024-05-08 12:52:42 +08:00
dependabot[bot]
3095ba2b1f chore(deps): bump golang.org/x/net from 0.24.0 to 0.25.0 (#4124)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 12:52:13 +08:00
dependabot[bot]
2afa60132c chore(deps): bump google.golang.org/protobuf from 1.34.0 to 1.34.1 (#4123)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 12:45:10 +08:00
Kevin Wan
e71ed7294b Update FUNDING.yml (#4127) 2024-05-08 12:26:41 +08:00
dependabot[bot]
95822281bf chore(deps): bump golang.org/x/sys from 0.19.0 to 0.20.0 (#4122)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 12:22:06 +08:00
Kevin Wan
588e10daef chore: refactor and coding style (#4120) 2024-05-06 18:16:56 +08:00
soasurs
62ba01120e fix: zrpc kube resolver builder (#4119)
Signed-off-by: soasurs <soasurs@gmail.com>
2024-05-06 14:50:35 +08:00
dependabot[bot]
527de1c50e chore(deps): bump google.golang.org/protobuf from 1.33.1-0.20240408130810-98873a205002 to 1.34.0 (#4115)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-04 23:59:47 +08:00
dependabot[bot]
abfe62a2d7 chore(deps): bump github.com/pelletier/go-toml/v2 from 2.2.1 to 2.2.2 (#4116)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-04 23:39:25 +08:00
Kevin Wan
36f4cf97ff Update FUNDING.yml (#4114) 2024-04-30 22:58:51 +08:00
Kevin Wan
b3cd8a32ed feat: trigger breaker on underlying service timeout (#4112) 2024-04-30 19:01:20 +08:00
kesonan
a9d27cda8a (goctl): fix prefix syntax (#4113) 2024-04-30 09:11:08 +00:00
kesonan
04116f647d chore(goctl): change goctl version to 1.6.5 (#4111) 2024-04-30 04:25:47 +00:00
Kevin Wan
a8ccda0c06 feat: add fx.ParallelErr (#4107) 2024-04-29 00:18:30 +08:00
Kevin Wan
bfddb9dae4 feat: add errorx.In to facility error checking (#4105) 2024-04-27 20:43:45 +08:00
Kevin Wan
b337ae36e5 Update readme-cn.md 2024-04-20 10:00:10 +08:00
Kevin Wan
5e5123caa3 chore: add more tests (#4094) 2024-04-19 11:13:23 +08:00
Kevin Wan
d371ab5479 feat: use breaker with ctx to prevent deadline exceeded (#4091)
Signed-off-by: kevin <wanjunfeng@gmail.com>
2024-04-18 23:18:49 +08:00
jaron
1b9b61f505 fix(goctl): GOPROXY env should set by ourself (#4087) 2024-04-18 22:50:30 +08:00
suyhuai
e1f15efb3b add customized.tpl for model template (#4086)
Co-authored-by: sudaoxyz <sudaoxyz@gmail.com>
2024-04-18 14:40:54 +00:00
Kevin Wan
1540bdc4c9 optimize: improve breaker algorithm on recovery time (#4077) 2024-04-18 22:33:25 +08:00
Kevin Wan
95b32b5779 chore: add code coverage (#4090) 2024-04-18 20:58:36 +08:00
Kevin Wan
815a4f7eed feat: support context in breaker methods (#4088) 2024-04-18 18:00:17 +08:00
dependabot[bot]
4b0bacc9c6 chore(deps): bump k8s.io/apimachinery from 0.29.3 to 0.29.4 (#4084)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-18 11:22:49 +08:00
Kevin Wan
e9dc96af17 chore: coding style (#4082) 2024-04-17 23:37:35 +08:00
fearlessfei
62c88a84d1 feat: migrate lua script to lua file (#4069) 2024-04-17 15:20:10 +00:00
Kevin Wan
36088ea0d4 fix: avoid duplicate in logx plain mode (#4080) 2024-04-17 17:43:22 +08:00
dependabot[bot]
164f5aa86c chore(deps): bump github.com/pelletier/go-toml/v2 from 2.2.0 to 2.2.1 (#4073)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-13 12:32:56 +08:00
dependabot[bot]
07d07cdd23 chore(deps): bump github.com/fullstorydev/grpcurl from 1.8.9 to 1.9.1 (#4065)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-11 13:02:52 +08:00
dependabot[bot]
0efe99af66 chore(deps): bump github.com/jhump/protoreflect from 1.15.6 to 1.16.0 (#4064)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-11 12:39:00 +08:00
Kevin Wan
927f8bc821 fix: fix ignored context.DeadlineExceeded (#4066) 2024-04-11 11:14:20 +08:00
kesonan
2a7ada993b (goctl)feature/model config (#4062)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-04-10 15:01:59 +00:00
Kevin Wan
682460c1c8 fix: fix ignored scanner.Err() (#4063) 2024-04-10 17:28:52 +08:00
Kevin Wan
a66ae0d4c4 fix: timeout on query should return context.DeadlineExceeded (#4060) 2024-04-10 04:17:39 +00:00
dependabot[bot]
d1f24ab70f chore(deps): bump google.golang.org/grpc from 1.63.0 to 1.63.2 (#4058)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-10 10:23:33 +08:00
dependabot[bot]
d0983948b5 chore(deps): bump google.golang.org/grpc from 1.63.0 to 1.63.2 in /tools/goctl (#4059)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-10 10:14:15 +08:00
Kevin Wan
3343fc2cdb chore: update goctl version to 1.6.4 (#4057) 2024-04-09 22:59:33 +08:00
Kevin Wan
3866b5741a feat: support http stream response (#4055) 2024-04-09 20:46:44 +08:00
Kevin Wan
5fbe8ff5c4 chore: coding style (#4054) 2024-04-09 17:19:47 +08:00
jaron
6f763f71f9 chore(goctl): update readme (#4053) 2024-04-09 08:30:25 +00:00
dependabot[bot]
80377f18e7 chore(deps): bump codecov/codecov-action from 3 to 4 (#4051)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-09 15:24:24 +08:00
dependabot[bot]
8690859c7d chore(deps): bump golang.org/x/net from 0.23.0 to 0.24.0 (#4048)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-06 10:03:06 +08:00
dependabot[bot]
d744038198 chore(deps): bump google.golang.org/grpc from 1.62.1 to 1.63.0 (#4045)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-05 19:43:17 +08:00
dependabot[bot]
58ad8cac8a chore(deps): bump golang.org/x/sys from 0.18.0 to 0.19.0 (#4046)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-05 19:28:54 +08:00
dependabot[bot]
74886a151e chore(deps): bump google.golang.org/grpc from 1.62.1 to 1.63.0 in /tools/goctl (#4047)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-05 18:45:49 +08:00
Kevin Wan
c5eda1f155 chore: fix codecov (#4044) 2024-04-05 00:53:13 +08:00
Kevin Wan
b5b7c054ca chore: fix codecov (#4043) 2024-04-05 00:43:38 +08:00
Kevin Wan
6c8073b691 chore: add more tests (#4042) 2024-04-05 00:13:42 +08:00
Kevin Wan
64d430d424 fix: bug on form data with slices (#4040) 2024-04-04 20:28:54 +08:00
Jayson Wang
f138cc792e fix(goctl): multi imports the api cause redeclared error in types.go (#3988)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-04-04 11:39:24 +00:00
dependabot[bot]
b20ec8aedb chore(deps): bump golang.org/x/net from 0.22.0 to 0.23.0 (#4039)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-04 10:48:52 +08:00
Kevin Wan
a53254fa91 chore: update codecov config (#4038) 2024-04-03 23:58:02 +08:00
Kevin Wan
08563482e5 chore: coding style (#4037) 2024-04-03 22:55:52 +08:00
fearlessfei
968727412d add custom health response information (#4034)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-04-03 14:33:55 +00:00
linden-in-China
6f3d094eba opton to option (#4035) 2024-04-03 14:15:21 +00:00
kesonan
2d3ebb9b62 (goctl) fix #4027 (#4032) 2024-04-01 15:22:29 +00:00
chentong
8c0bb27136 feat: add gen api @doc comment to logic handler routes (#3790)
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2024-03-30 11:09:54 +00:00
ak5w
cf987295df fix the usage datasource url of postgresql (#4029) (#4030) 2024-03-30 05:51:54 +00:00
dependabot[bot]
8c92b3af7d chore(deps): bump go.etcd.io/etcd/client/v3 from 3.5.12 to 3.5.13 (#4028)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-30 13:40:24 +08:00
Kevin Wan
5dd9342703 chore: fix test failure (#4031) 2024-03-30 13:29:58 +08:00
shyandsy
3ef59f6a71 fix(httpx): support array field for request dto (#4026)
Co-authored-by: yshi3 <yshi3@tesla.com>
2024-03-30 12:10:56 +08:00
dependabot[bot]
f12802abc7 chore(deps): bump github.com/go-sql-driver/mysql from 1.8.0 to 1.8.1 (#4022)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-27 18:42:35 +08:00
dependabot[bot]
6f0fe67804 chore(deps): bump github.com/go-sql-driver/mysql from 1.8.0 to 1.8.1 in /tools/goctl (#4023)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-27 18:07:00 +08:00
dependabot[bot]
f44f0e7e62 chore(deps): bump github.com/pelletier/go-toml/v2 from 2.1.1 to 2.2.0 (#4017)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-20 23:34:06 +08:00
dependabot[bot]
cdd95296db chore(deps): bump k8s.io/client-go from 0.29.2 to 0.29.3 (#4012)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-19 15:41:20 +08:00
kesonan
3e794cf991 (goctl)fix code_ql (#4009) 2024-03-17 02:21:36 +00:00
Kevin Wan
bbce95e7e1 fix: didn't count failure in allow method with breaker algorithm (#4008) 2024-03-16 22:19:36 +08:00
dependabot[bot]
0449450c64 chore(deps): bump github.com/jackc/pgx/v5 from 5.5.3 to 5.5.4 in /tools/goctl (#4007)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-15 12:55:57 +08:00
dependabot[bot]
9f9a12ea57 chore(deps): bump github.com/alicebob/miniredis/v2 from 2.31.1 to 2.32.1 (#4003)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-14 11:19:44 +08:00
Kevin Wan
cc2a7e97f9 chore: coding style, add code for prometheus (#4002) 2024-03-13 20:00:35 +08:00
dependabot[bot]
09d7af76af chore(deps): bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.0 in /tools/goctl (#3997)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-13 13:28:27 +08:00
dependabot[bot]
c233a66601 chore(deps): bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.0 (#3998)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-13 12:56:41 +08:00
dependabot[bot]
94fa12560c chore(deps): bump github.com/jackc/pgx/v5 from 5.5.4 to 5.5.5 (#3999)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-12 12:28:28 +08:00
MarkJoyMa
7d90f906f5 feat: migrate redis breaker into hook (#3982) 2024-03-12 04:21:33 +00:00
Viktor Patchev
f372b98d96 Add: Optimize the error log to be more specific (#3994) 2024-03-11 13:06:50 +08:00
mongobaba
459d3025c5 optimize: change err == xx to errors.Is(err, xx) (#3991) 2024-03-09 12:49:16 +00:00
Kevin Wan
e9e55125a9 chore: fix warnings (#3990) 2024-03-09 13:48:11 +08:00
Kevin Wan
159ecb7386 chore: fix warnings (#3989) 2024-03-08 22:35:17 +08:00
ansoda
69bb746a1d fix: StopAgent panics when trace agent disabled (#3981)
Co-authored-by: ansoda <ansoda@gmail.com>
2024-03-08 10:28:23 +00:00
Kevin Wan
d184f96b13 chore: coding style (#3987) 2024-03-08 16:11:28 +08:00
MarkJoyMa
c7dacb0146 fix: mysql WithAcceptable bug (#3986) 2024-03-08 04:23:41 +00:00
dependabot[bot]
2207477b60 chore(deps): bump google.golang.org/protobuf from 1.32.0 to 1.33.0 in /tools/goctl (#3978)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 11:15:48 +08:00
dependabot[bot]
105ab590ff chore(deps): bump google.golang.org/grpc from 1.62.0 to 1.62.1 in /tools/goctl (#3977)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 11:01:14 +08:00
dependabot[bot]
2f4c58ed73 chore(deps): bump google.golang.org/grpc from 1.62.0 to 1.62.1 (#3976)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 10:45:19 +08:00
dependabot[bot]
1631aa02ad chore(deps): bump github.com/golang/protobuf from 1.5.3 to 1.5.4 (#3984)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-07 10:24:51 +08:00
dependabot[bot]
4df10eef5d chore(deps): bump golang.org/x/net from 0.21.0 to 0.22.0 (#3975)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-06 23:31:02 +08:00
dependabot[bot]
3d552ea7a8 chore(deps): bump google.golang.org/protobuf from 1.32.0 to 1.33.0 (#3974)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-06 21:33:45 +08:00
Kevin Wan
74b87ac9fd chore: coding style (#3972) 2024-03-05 14:40:10 +08:00
Alex Last
ba1d6e3664 fix: only add log middleware to not found handler when enabled (#3969) 2024-03-05 04:14:54 +00:00
dependabot[bot]
2096cd5749 chore(deps): bump github.com/jackc/pgx/v5 from 5.5.3 to 5.5.4 (#3970)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-05 12:09:18 +08:00
dependabot[bot]
2eb2fa26f6 chore(deps): bump golang.org/x/sys from 0.17.0 to 0.18.0 (#3971)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-05 12:03:06 +08:00
Kevin Wan
bc4187ca90 Create SECURITY.md (#3968) 2024-03-04 23:07:54 +08:00
Kevin Wan
b7be25b98b Update readme-cn.md (#3966) 2024-03-03 14:18:27 +08:00
Kevin Wan
dd01695d45 chore: update goctl version to 1.6.3 (#3965) 2024-03-03 13:36:35 +08:00
294 changed files with 8340 additions and 3894 deletions

12
.github/FUNDING.yml vendored
View File

@@ -1,13 +1,3 @@
# These are supported funding model platforms
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
ethereum: # 0x5052b7f6B937B02563996D23feb69b38D06Ca150 | kevwan
github: [zeromicro]

View File

@@ -17,7 +17,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v5
with:
go-version: '1.19'
go-version-file: go.mod
check-latest: true
cache: true
id: go
@@ -40,7 +40,7 @@ jobs:
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
- name: Codecov
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
test-win:
name: Windows
@@ -52,8 +52,8 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v5
with:
# use 1.19 to guarantee Go 1.19 compatibility
go-version: '1.19'
# make sure Go version compatible with go-zero
go-version-file: go.mod
check-latest: true
cache: true

View File

@@ -22,7 +22,7 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
goversion: "https://dl.google.com/go/go1.19.13.linux-amd64.tar.gz"
goversion: "https://dl.google.com/go/go1.20.14.linux-amd64.tar.gz"
project_path: "tools/goctl"
binary_name: "goctl"
extra_files: tools/goctl/readme.md tools/goctl/readme-cn.md

16
SECURITY.md Normal file
View File

@@ -0,0 +1,16 @@
# Security Policy
## Supported Versions
We publish releases monthly.
| Version | Supported |
| ------- | ------------------ |
| >= 1.4.4 | :white_check_mark: |
| < 1.4.4 | :x: |
## Reporting a Vulnerability
https://github.com/zeromicro/go-zero/security/advisories
Accepted vulnerabilities are expected to be fixed within a month.

View File

@@ -2,6 +2,7 @@ package bloom
import (
"context"
_ "embed"
"errors"
"strconv"
@@ -17,19 +18,13 @@ var (
// ErrTooLargeOffset indicates the offset is too large in bitset.
ErrTooLargeOffset = errors.New("too large offset")
setScript = redis.NewScript(`
for _, offset in ipairs(ARGV) do
redis.call("setbit", KEYS[1], offset, 1)
end
`)
testScript = redis.NewScript(`
for _, offset in ipairs(ARGV) do
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
return false
end
end
return true
`)
//go:embed setscript.lua
setLuaScript string
setScript = redis.NewScript(setLuaScript)
//go:embed testscript.lua
testLuaScript string
testScript = redis.NewScript(testLuaScript)
)
type (
@@ -110,7 +105,7 @@ func newRedisBitSet(store *redis.Redis, key string, bits uint) *redisBitSet {
}
func (r *redisBitSet) buildOffsetArgs(offsets []uint) ([]string, error) {
var args []string
args := make([]string, 0, len(offsets))
for _, offset := range offsets {
if offset >= r.bits {
@@ -130,7 +125,7 @@ func (r *redisBitSet) check(ctx context.Context, offsets []uint) (bool, error) {
}
resp, err := r.store.ScriptRunCtx(ctx, testScript, []string{r.key}, args)
if err == redis.Nil {
if errors.Is(err, redis.Nil) {
return false, nil
} else if err != nil {
return false, err
@@ -162,7 +157,7 @@ func (r *redisBitSet) set(ctx context.Context, offsets []uint) error {
}
_, err = r.store.ScriptRunCtx(ctx, setScript, []string{r.key}, args)
if err == redis.Nil {
if errors.Is(err, redis.Nil) {
return nil
}

3
core/bloom/setscript.lua Normal file
View File

@@ -0,0 +1,3 @@
-- Set every bit offset passed in ARGV to 1 on the bitmap stored at KEYS[1].
local key = KEYS[1]
for i = 1, #ARGV do
    redis.call("setbit", key, ARGV[i], 1)
end

View File

@@ -0,0 +1,6 @@
-- Return true only when every bit offset passed in ARGV is set to 1
-- on the bitmap stored at KEYS[1]; bail out false on the first unset bit.
local key = KEYS[1]
for i = 1, #ARGV do
    if tonumber(redis.call("getbit", key, ARGV[i])) == 0 then
        return false
    end
end
return true

View File

@@ -1,6 +1,7 @@
package breaker
import (
"context"
"errors"
"fmt"
"strings"
@@ -31,16 +32,21 @@ type (
Name() string
// Allow checks if the request is allowed.
// If allowed, a promise will be returned, the caller needs to call promise.Accept()
// on success, or call promise.Reject() on failure.
// If not allow, ErrServiceUnavailable will be returned.
// If allowed, a promise will be returned,
// otherwise ErrServiceUnavailable will be returned as the error.
// The caller needs to call promise.Accept() on success,
// or call promise.Reject() on failure.
Allow() (Promise, error)
// AllowCtx checks if the request is allowed when ctx isn't done.
AllowCtx(ctx context.Context) (Promise, error)
// Do runs the given request if the Breaker accepts it.
// Do returns an error instantly if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again.
Do(req func() error) error
// DoCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoCtx(ctx context.Context, req func() error) error
// DoWithAcceptable runs the given request if the Breaker accepts it.
// DoWithAcceptable returns an error instantly if the Breaker rejects the request.
@@ -48,12 +54,16 @@ type (
// and causes the same panic again.
// acceptable checks if it's a successful call, even if the error is not nil.
DoWithAcceptable(req func() error, acceptable Acceptable) error
// DoWithAcceptableCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoWithAcceptableCtx(ctx context.Context, req func() error, acceptable Acceptable) error
// DoWithFallback runs the given request if the Breaker accepts it.
// DoWithFallback runs the fallback if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again.
DoWithFallback(req func() error, fallback Fallback) error
// DoWithFallbackCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoWithFallbackCtx(ctx context.Context, req func() error, fallback Fallback) error
// DoWithFallbackAcceptable runs the given request if the Breaker accepts it.
// DoWithFallbackAcceptable runs the fallback if the Breaker rejects the request.
@@ -61,6 +71,9 @@ type (
// and causes the same panic again.
// acceptable checks if it's a successful call, even if the error is not nil.
DoWithFallbackAcceptable(req func() error, fallback Fallback, acceptable Acceptable) error
// DoWithFallbackAcceptableCtx runs the given request if the Breaker accepts it when ctx isn't done.
DoWithFallbackAcceptableCtx(ctx context.Context, req func() error, fallback Fallback,
acceptable Acceptable) error
}
// Fallback is the func to be called if the request is rejected.
@@ -117,23 +130,71 @@ func (cb *circuitBreaker) Allow() (Promise, error) {
return cb.throttle.allow()
}
func (cb *circuitBreaker) AllowCtx(ctx context.Context) (Promise, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
return cb.Allow()
}
}
func (cb *circuitBreaker) Do(req func() error) error {
return cb.throttle.doReq(req, nil, defaultAcceptable)
}
func (cb *circuitBreaker) DoCtx(ctx context.Context, req func() error) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.Do(req)
}
}
func (cb *circuitBreaker) DoWithAcceptable(req func() error, acceptable Acceptable) error {
return cb.throttle.doReq(req, nil, acceptable)
}
func (cb *circuitBreaker) DoWithAcceptableCtx(ctx context.Context, req func() error,
acceptable Acceptable) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.DoWithAcceptable(req, acceptable)
}
}
func (cb *circuitBreaker) DoWithFallback(req func() error, fallback Fallback) error {
return cb.throttle.doReq(req, fallback, defaultAcceptable)
}
func (cb *circuitBreaker) DoWithFallbackCtx(ctx context.Context, req func() error,
fallback Fallback) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.DoWithFallback(req, fallback)
}
}
func (cb *circuitBreaker) DoWithFallbackAcceptable(req func() error, fallback Fallback,
acceptable Acceptable) error {
return cb.throttle.doReq(req, fallback, acceptable)
}
func (cb *circuitBreaker) DoWithFallbackAcceptableCtx(ctx context.Context, req func() error,
fallback Fallback, acceptable Acceptable) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return cb.DoWithFallbackAcceptable(req, fallback, acceptable)
}
}
func (cb *circuitBreaker) Name() string {
return cb.name
}
@@ -208,7 +269,7 @@ func (ew *errorWindow) add(reason string) {
}
func (ew *errorWindow) String() string {
var reasons []string
reasons := make([]string, 0, ew.count)
ew.lock.Lock()
// reverse order

View File

@@ -1,11 +1,13 @@
package breaker
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/stat"
@@ -16,10 +18,274 @@ func init() {
}
func TestCircuitBreaker_Allow(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
_, err := b.Allow()
assert.Nil(t, err)
t.Run("allow", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
_, err := b.Allow()
assert.Nil(t, err)
})
t.Run("allow with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
_, err := b.AllowCtx(context.Background())
assert.Nil(t, err)
})
t.Run("allow with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
_, err := b.AllowCtx(ctx)
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("allow with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
_, err := b.AllowCtx(ctx)
assert.ErrorIs(t, err, context.Canceled)
}
_, err := b.AllowCtx(context.Background())
assert.NoError(t, err)
})
}
func TestCircuitBreaker_Do(t *testing.T) {
t.Run("do", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.Do(func() error {
return nil
})
assert.Nil(t, err)
})
t.Run("do with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoCtx(context.Background(), func() error {
return nil
})
assert.Nil(t, err)
})
t.Run("do with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoCtx(ctx, func() error {
return nil
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("do with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoCtx(ctx, func() error {
return nil
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoCtx(context.Background(), func() error {
return nil
}))
})
}
func TestCircuitBreaker_DoWithAcceptable(t *testing.T) {
t.Run("doWithAcceptable", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithAcceptable(func() error {
return nil
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithAcceptable with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithAcceptable with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoWithAcceptableCtx(ctx, func() error {
return nil
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("doWithAcceptable with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoWithAcceptableCtx(ctx, func() error {
return nil
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoWithAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) bool {
return true
}))
})
}
func TestCircuitBreaker_DoWithFallback(t *testing.T) {
t.Run("doWithFallback", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallback(func() error {
return nil
}, func(err error) error {
return err
})
assert.Nil(t, err)
})
t.Run("doWithFallback with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallbackCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
})
assert.Nil(t, err)
})
t.Run("doWithFallback with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoWithFallbackCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("doWithFallback with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoWithFallbackCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoWithFallbackCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
}))
})
}
func TestCircuitBreaker_DoWithFallbackAcceptable(t *testing.T) {
t.Run("doWithFallbackAcceptable", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallbackAcceptable(func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithFallbackAcceptable with ctx", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
err := b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.Nil(t, err)
})
t.Run("doWithFallbackAcceptable with ctx timeout", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
defer cancel()
time.Sleep(time.Millisecond)
err := b.DoWithFallbackAcceptableCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("doWithFallbackAcceptable with ctx cancel", func(t *testing.T) {
b := NewBreaker()
assert.True(t, len(b.Name()) > 0)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cancel()
err := b.DoWithFallbackAcceptableCtx(ctx, func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
})
assert.ErrorIs(t, err, context.Canceled)
}
assert.NoError(t, b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
return nil
}, func(err error) error {
return err
}, func(err error) bool {
return true
}))
})
}
func TestLogReason(t *testing.T) {

View File

@@ -1,6 +1,9 @@
package breaker
import "sync"
import (
"context"
"sync"
)
var (
lock sync.RWMutex
@@ -14,6 +17,13 @@ func Do(name string, req func() error) error {
})
}
// DoCtx calls Breaker.DoCtx on the Breaker with given name.
func DoCtx(ctx context.Context, name string, req func() error) error {
return do(name, func(b Breaker) error {
return b.DoCtx(ctx, req)
})
}
// DoWithAcceptable calls Breaker.DoWithAcceptable on the Breaker with given name.
func DoWithAcceptable(name string, req func() error, acceptable Acceptable) error {
return do(name, func(b Breaker) error {
@@ -21,6 +31,14 @@ func DoWithAcceptable(name string, req func() error, acceptable Acceptable) erro
})
}
// DoWithAcceptableCtx calls Breaker.DoWithAcceptableCtx on the Breaker with given name.
func DoWithAcceptableCtx(ctx context.Context, name string, req func() error,
acceptable Acceptable) error {
return do(name, func(b Breaker) error {
return b.DoWithAcceptableCtx(ctx, req, acceptable)
})
}
// DoWithFallback calls Breaker.DoWithFallback on the Breaker with given name.
func DoWithFallback(name string, req func() error, fallback Fallback) error {
return do(name, func(b Breaker) error {
@@ -28,6 +46,13 @@ func DoWithFallback(name string, req func() error, fallback Fallback) error {
})
}
// DoWithFallbackCtx calls Breaker.DoWithFallbackCtx on the Breaker with given name.
func DoWithFallbackCtx(ctx context.Context, name string, req func() error, fallback Fallback) error {
return do(name, func(b Breaker) error {
return b.DoWithFallbackCtx(ctx, req, fallback)
})
}
// DoWithFallbackAcceptable calls Breaker.DoWithFallbackAcceptable on the Breaker with given name.
func DoWithFallbackAcceptable(name string, req func() error, fallback Fallback,
acceptable Acceptable) error {
@@ -36,6 +61,14 @@ func DoWithFallbackAcceptable(name string, req func() error, fallback Fallback,
})
}
// DoWithFallbackAcceptableCtx calls Breaker.DoWithFallbackAcceptableCtx on the Breaker with given name.
func DoWithFallbackAcceptableCtx(ctx context.Context, name string, req func() error,
fallback Fallback, acceptable Acceptable) error {
return do(name, func(b Breaker) error {
return b.DoWithFallbackAcceptableCtx(ctx, req, fallback, acceptable)
})
}
// GetBreaker returns the Breaker with the given name.
func GetBreaker(name string) Breaker {
lock.RLock()

View File

@@ -1,6 +1,7 @@
package breaker
import (
"context"
"errors"
"fmt"
"testing"
@@ -22,6 +23,9 @@ func TestBreakersDo(t *testing.T) {
assert.Equal(t, errDummy, Do("any", func() error {
return errDummy
}))
assert.Equal(t, errDummy, DoCtx(context.Background(), "any", func() error {
return errDummy
}))
}
func TestBreakersDoWithAcceptable(t *testing.T) {
@@ -38,6 +42,13 @@ func TestBreakersDoWithAcceptable(t *testing.T) {
return nil
}) == nil
})
verify(t, func() bool {
return DoWithAcceptableCtx(context.Background(), "anyone", func() error {
return nil
}, func(err error) bool {
return true
}) == nil
})
for i := 0; i < 10000; i++ {
err := DoWithAcceptable("another", func() error {
@@ -76,6 +87,12 @@ func TestBreakersFallback(t *testing.T) {
return nil
})
assert.True(t, err == nil || errors.Is(err, errDummy))
err = DoWithFallbackCtx(context.Background(), "fallback", func() error {
return errDummy
}, func(err error) error {
return nil
})
assert.True(t, err == nil || errors.Is(err, errDummy))
}
verify(t, func() bool {
return errors.Is(Do("fallback", func() error {
@@ -86,7 +103,7 @@ func TestBreakersFallback(t *testing.T) {
func TestBreakersAcceptableFallback(t *testing.T) {
errDummy := errors.New("any")
for i := 0; i < 10000; i++ {
for i := 0; i < 5000; i++ {
err := DoWithFallbackAcceptable("acceptablefallback", func() error {
return errDummy
}, func(err error) error {
@@ -95,6 +112,14 @@ func TestBreakersAcceptableFallback(t *testing.T) {
return err == nil
})
assert.True(t, err == nil || errors.Is(err, errDummy))
err = DoWithFallbackAcceptableCtx(context.Background(), "acceptablefallback", func() error {
return errDummy
}, func(err error) error {
return nil
}, func(err error) bool {
return err == nil
})
assert.True(t, err == nil || errors.Is(err, errDummy))
}
verify(t, func() bool {
return errors.Is(Do("acceptablefallback", func() error {
@@ -110,5 +135,5 @@ func verify(t *testing.T, fn func() bool) {
count++
}
}
assert.True(t, count >= 80, fmt.Sprintf("should be greater than 80, actual %d", count))
assert.True(t, count >= 75, fmt.Sprintf("should be greater than 75, actual %d", count))
}

48
core/breaker/bucket.go Normal file
View File

@@ -0,0 +1,48 @@
package breaker
// Outcome values recorded into a bucket through Add.
const (
	success = iota
	fail
	drop
)

// bucket accumulates call outcomes for one rolling-window slot.
// Sum counts every addition; Success, Failure and Drop count each outcome kind.
type bucket struct {
	Sum     int64
	Success int64
	Failure int64
	Drop    int64
}

// Add records one call outcome. fail and drop are counted explicitly;
// any other value is treated as a success.
func (b *bucket) Add(outcome int64) {
	switch outcome {
	case drop:
		b.drop()
	case fail:
		b.fail()
	default:
		b.succeed()
	}
}

// Reset zeroes all counters so the bucket can be reused for a new slot.
func (b *bucket) Reset() {
	*b = bucket{}
}

// drop records one dropped (rejected) call.
func (b *bucket) drop() {
	b.Drop++
	b.Sum++
}

// fail records one failed call.
func (b *bucket) fail() {
	b.Failure++
	b.Sum++
}

// succeed records one successful call.
func (b *bucket) succeed() {
	b.Success++
	b.Sum++
}

View File

@@ -0,0 +1,43 @@
package breaker
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestBucketAdd verifies that Add routes each outcome value to the right counter
// and bumps Sum on every addition.
func TestBucketAdd(t *testing.T) {
	b := &bucket{}

	// Test succeed: use the named success constant instead of a magic 0
	// (any value other than fail/drop is counted as a success).
	b.Add(success)
	assert.Equal(t, int64(1), b.Sum, "Sum should be incremented")
	assert.Equal(t, int64(1), b.Success, "Success should be incremented")
	assert.Equal(t, int64(0), b.Failure, "Failure should not be incremented")
	assert.Equal(t, int64(0), b.Drop, "Drop should not be incremented")

	// Test failure
	b.Add(fail)
	assert.Equal(t, int64(2), b.Sum, "Sum should be incremented")
	assert.Equal(t, int64(1), b.Failure, "Failure should be incremented")
	assert.Equal(t, int64(0), b.Drop, "Drop should not be incremented")

	// Test drop
	b.Add(drop)
	assert.Equal(t, int64(3), b.Sum, "Sum should be incremented")
	assert.Equal(t, int64(1), b.Drop, "Drop should be incremented")
}
// TestBucketReset verifies that Reset zeroes every counter.
func TestBucketReset(t *testing.T) {
	var b bucket
	b.Sum, b.Success, b.Failure, b.Drop = 3, 1, 1, 1

	b.Reset()

	assert.Equal(t, int64(0), b.Sum, "Sum should be reset to 0")
	assert.Equal(t, int64(0), b.Success, "Success should be reset to 0")
	assert.Equal(t, int64(0), b.Failure, "Failure should be reset to 0")
	assert.Equal(t, int64(0), b.Drop, "Drop should be reset to 0")
}

View File

@@ -5,53 +5,83 @@ import (
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
)
const (
// 250ms for bucket duration
window = time.Second * 10
buckets = 40
k = 1.5
protection = 5
window = time.Second * 10
buckets = 40
forcePassDuration = time.Second
k = 1.5
minK = 1.1
protection = 5
)
// googleBreaker is a netflixBreaker pattern from google.
// see Client-Side Throttling section in https://landing.google.com/sre/sre-book/chapters/handling-overload/
type googleBreaker struct {
k float64
stat *collection.RollingWindow
proba *mathx.Proba
}
type (
googleBreaker struct {
k float64
stat *collection.RollingWindow[int64, *bucket]
proba *mathx.Proba
lastPass *syncx.AtomicDuration
}
windowResult struct {
accepts int64
total int64
failingBuckets int64
workingBuckets int64
}
)
func newGoogleBreaker() *googleBreaker {
bucketDuration := time.Duration(int64(window) / int64(buckets))
st := collection.NewRollingWindow(buckets, bucketDuration)
st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
return new(bucket)
}, buckets, bucketDuration)
return &googleBreaker{
stat: st,
k: k,
proba: mathx.NewProba(),
stat: st,
k: k,
proba: mathx.NewProba(),
lastPass: syncx.NewAtomicDuration(),
}
}
func (b *googleBreaker) accept() error {
accepts, total := b.history()
weightedAccepts := b.k * float64(accepts)
var w float64
history := b.history()
w = b.k - (b.k-minK)*float64(history.failingBuckets)/buckets
weightedAccepts := mathx.AtLeast(w, minK) * float64(history.accepts)
// https://landing.google.com/sre/sre-book/chapters/handling-overload/#eq2101
// for better performance, no need to care about negative ratio
dropRatio := (float64(total-protection) - weightedAccepts) / float64(total+1)
// for better performance, no need to care about the negative ratio
dropRatio := (float64(history.total-protection) - weightedAccepts) / float64(history.total+1)
if dropRatio <= 0 {
return nil
}
lastPass := b.lastPass.Load()
if lastPass > 0 && timex.Since(lastPass) > forcePassDuration {
b.lastPass.Set(timex.Now())
return nil
}
dropRatio *= float64(buckets-history.workingBuckets) / buckets
if b.proba.TrueOnProba(dropRatio) {
return ErrServiceUnavailable
}
b.lastPass.Set(timex.Now())
return nil
}
func (b *googleBreaker) allow() (internalPromise, error) {
if err := b.accept(); err != nil {
b.markDrop()
return nil, err
}
@@ -62,7 +92,7 @@ func (b *googleBreaker) allow() (internalPromise, error) {
func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Acceptable) error {
if err := b.accept(); err != nil {
b.markFailure()
b.markDrop()
if fallback != nil {
return fallback(err)
}
@@ -70,10 +100,10 @@ func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Ac
return err
}
var success bool
var succ bool
defer func() {
// if req() panic, success is false, mark as failure
if success {
if succ {
b.markSuccess()
} else {
b.markFailure()
@@ -82,27 +112,43 @@ func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Ac
err := req()
if acceptable(err) {
success = true
succ = true
}
return err
}
func (b *googleBreaker) markSuccess() {
b.stat.Add(1)
func (b *googleBreaker) markDrop() {
b.stat.Add(drop)
}
func (b *googleBreaker) markFailure() {
b.stat.Add(0)
b.stat.Add(fail)
}
func (b *googleBreaker) history() (accepts, total int64) {
b.stat.Reduce(func(b *collection.Bucket) {
accepts += int64(b.Sum)
total += b.Count
func (b *googleBreaker) markSuccess() {
b.stat.Add(success)
}
func (b *googleBreaker) history() windowResult {
var result windowResult
b.stat.Reduce(func(b *bucket) {
result.accepts += b.Success
result.total += b.Sum
if b.Failure > 0 {
result.workingBuckets = 0
} else if b.Success > 0 {
result.workingBuckets++
}
if b.Success > 0 {
result.failingBuckets = 0
} else if b.Failure > 0 {
result.failingBuckets++
}
})
return
return result
}
type googlePromise struct {

View File

@@ -10,6 +10,7 @@ import (
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/syncx"
)
const (
@@ -22,11 +23,14 @@ func init() {
}
func getGoogleBreaker() *googleBreaker {
st := collection.NewRollingWindow(testBuckets, testInterval)
st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
return new(bucket)
}, testBuckets, testInterval)
return &googleBreaker{
stat: st,
k: 5,
proba: mathx.NewProba(),
stat: st,
k: 5,
proba: mathx.NewProba(),
lastPass: syncx.NewAtomicDuration(),
}
}
@@ -63,6 +67,33 @@ func TestGoogleBreakerOpen(t *testing.T) {
})
}
// TestGoogleBreakerRecover verifies that a breaker whose window contains a
// half of successes and a half of failures still accepts requests, i.e. the
// breaker recovers instead of staying open.
func TestGoogleBreakerRecover(t *testing.T) {
	// window twice as large as the default test window
	st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
		return new(bucket)
	}, testBuckets*2, testInterval)
	b := &googleBreaker{
		stat:     st,
		k:        k,
		proba:    mathx.NewProba(),
		lastPass: syncx.NewAtomicDuration(),
	}
	// fill the first half of the window with successes
	// (Add(1) mirrors the old markSuccess encoding — see markSuccess/markFailure)
	for i := 0; i < testBuckets; i++ {
		for j := 0; j < 100; j++ {
			b.stat.Add(1)
		}
		time.Sleep(testInterval)
	}
	// then fill the second half with failures
	for i := 0; i < testBuckets; i++ {
		for j := 0; j < 100; j++ {
			b.stat.Add(0)
		}
		time.Sleep(testInterval)
	}
	// with half of the window accepted, weighted accepts exceed the total,
	// so accept() should let requests through
	verify(t, func() bool {
		return b.accept() == nil
	})
}
func TestGoogleBreakerFallback(t *testing.T) {
b := getGoogleBreaker()
markSuccess(b, 1)
@@ -89,6 +120,43 @@ func TestGoogleBreakerReject(t *testing.T) {
}, nil, defaultAcceptable))
}
// TestGoogleBreakerMoreFallingBuckets drives the breaker with a continuous
// stream of failures, then checks that the vast majority of the following
// requests are rejected with ErrServiceUnavailable.
func TestGoogleBreakerMoreFallingBuckets(t *testing.T) {
	t.Parallel()

	t.Run("more falling buckets", func(t *testing.T) {
		b := getGoogleBreaker()

		// keep failing for roughly 6 bucket intervals
		func() {
			stopChan := time.After(testInterval * 6)
			for {
				time.Sleep(time.Millisecond)
				select {
				case <-stopChan:
					return
				default:
					// the acceptable callback reports success only on nil error,
					// so every call here is recorded as a failure
					assert.Error(t, b.doReq(func() error {
						return errors.New("foo")
					}, func(err error) error {
						return err
					}, func(err error) bool {
						return err == nil
					}))
				}
			}
		}()

		// with the window dominated by failing buckets, most requests
		// should now be dropped by the breaker itself
		var count int
		for i := 0; i < 100; i++ {
			if errors.Is(b.doReq(func() error {
				return ErrServiceUnavailable
			}, nil, defaultAcceptable), ErrServiceUnavailable) {
				count++
			}
		}
		assert.True(t, count > 90)
	})
}
func TestGoogleBreakerAcceptable(t *testing.T) {
b := getGoogleBreaker()
errAcceptable := errors.New("any")
@@ -164,41 +232,38 @@ func TestGoogleBreakerSelfProtection(t *testing.T) {
}
func TestGoogleBreakerHistory(t *testing.T) {
var b *googleBreaker
var accepts, total int64
sleep := testInterval
t.Run("accepts == total", func(t *testing.T) {
b = getGoogleBreaker()
b := getGoogleBreaker()
markSuccessWithDuration(b, 10, sleep/2)
accepts, total = b.history()
assert.Equal(t, int64(10), accepts)
assert.Equal(t, int64(10), total)
result := b.history()
assert.Equal(t, int64(10), result.accepts)
assert.Equal(t, int64(10), result.total)
})
t.Run("fail == total", func(t *testing.T) {
b = getGoogleBreaker()
b := getGoogleBreaker()
markFailedWithDuration(b, 10, sleep/2)
accepts, total = b.history()
assert.Equal(t, int64(0), accepts)
assert.Equal(t, int64(10), total)
result := b.history()
assert.Equal(t, int64(0), result.accepts)
assert.Equal(t, int64(10), result.total)
})
t.Run("accepts = 1/2 * total, fail = 1/2 * total", func(t *testing.T) {
b = getGoogleBreaker()
b := getGoogleBreaker()
markFailedWithDuration(b, 5, sleep/2)
markSuccessWithDuration(b, 5, sleep/2)
accepts, total = b.history()
assert.Equal(t, int64(5), accepts)
assert.Equal(t, int64(10), total)
result := b.history()
assert.Equal(t, int64(5), result.accepts)
assert.Equal(t, int64(10), result.total)
})
t.Run("auto reset rolling counter", func(t *testing.T) {
b = getGoogleBreaker()
b := getGoogleBreaker()
time.Sleep(testInterval * testBuckets)
accepts, total = b.history()
assert.Equal(t, int64(0), accepts)
assert.Equal(t, int64(0), total)
result := b.history()
assert.Equal(t, int64(0), result.accepts)
assert.Equal(t, int64(0), result.total)
})
}

View File

@@ -1,5 +1,7 @@
package breaker
import "context"
const nopBreakerName = "nopBreaker"
type nopBreaker struct{}
@@ -17,22 +19,43 @@ func (b nopBreaker) Allow() (Promise, error) {
return nopPromise{}, nil
}
// AllowCtx always allows the request; the context is ignored.
func (b nopBreaker) AllowCtx(_ context.Context) (Promise, error) {
	return nopPromise{}, nil
}

// Do runs req directly, without any breaker protection.
func (b nopBreaker) Do(req func() error) error {
	return req()
}

// DoCtx runs req directly; the context is ignored.
func (b nopBreaker) DoCtx(_ context.Context, req func() error) error {
	return req()
}

// DoWithAcceptable runs req directly; the acceptable check is ignored.
func (b nopBreaker) DoWithAcceptable(req func() error, _ Acceptable) error {
	return req()
}

// DoWithAcceptableCtx runs req directly; context and acceptable are ignored.
func (b nopBreaker) DoWithAcceptableCtx(_ context.Context, req func() error, _ Acceptable) error {
	return req()
}

// DoWithFallback runs req directly; the fallback is never invoked.
func (b nopBreaker) DoWithFallback(req func() error, _ Fallback) error {
	return req()
}

// DoWithFallbackCtx runs req directly; context and fallback are ignored.
func (b nopBreaker) DoWithFallbackCtx(_ context.Context, req func() error, _ Fallback) error {
	return req()
}

// DoWithFallbackAcceptable runs req directly; fallback and acceptable are ignored.
func (b nopBreaker) DoWithFallbackAcceptable(req func() error, _ Fallback, _ Acceptable) error {
	return req()
}

// DoWithFallbackAcceptableCtx runs req directly; all extras are ignored.
func (b nopBreaker) DoWithFallbackAcceptableCtx(_ context.Context, req func() error,
	_ Fallback, _ Acceptable) error {
	return req()
}
type nopPromise struct{}
func (p nopPromise) Accept() {

View File

@@ -1,6 +1,7 @@
package breaker
import (
"context"
"errors"
"testing"
@@ -12,6 +13,8 @@ func TestNopBreaker(t *testing.T) {
assert.Equal(t, nopBreakerName, b.Name())
p, err := b.Allow()
assert.Nil(t, err)
p, err = b.AllowCtx(context.Background())
assert.Nil(t, err)
p.Accept()
for i := 0; i < 1000; i++ {
p, err := b.Allow()
@@ -21,18 +24,34 @@ func TestNopBreaker(t *testing.T) {
assert.Nil(t, b.Do(func() error {
return nil
}))
assert.Nil(t, b.DoCtx(context.Background(), func() error {
return nil
}))
assert.Nil(t, b.DoWithAcceptable(func() error {
return nil
}, defaultAcceptable))
assert.Nil(t, b.DoWithAcceptableCtx(context.Background(), func() error {
return nil
}, defaultAcceptable))
errDummy := errors.New("any")
assert.Equal(t, errDummy, b.DoWithFallback(func() error {
return errDummy
}, func(err error) error {
return nil
}))
assert.Equal(t, errDummy, b.DoWithFallbackCtx(context.Background(), func() error {
return errDummy
}, func(err error) error {
return nil
}))
assert.Equal(t, errDummy, b.DoWithFallbackAcceptable(func() error {
return errDummy
}, func(err error) error {
return nil
}, defaultAcceptable))
assert.Equal(t, errDummy, b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
return errDummy
}, func(err error) error {
return nil
}, defaultAcceptable))
}

View File

@@ -23,7 +23,7 @@ var (
zero = big.NewInt(0)
)
// DhKey defines the Diffie Hellman key.
// DhKey defines the Diffie-Hellman key.
type DhKey struct {
PriKey *big.Int
PubKey *big.Int
@@ -46,7 +46,7 @@ func ComputeKey(pubKey, priKey *big.Int) (*big.Int, error) {
return new(big.Int).Exp(pubKey, priKey, p), nil
}
// GenerateKey returns a Diffie Hellman key.
// GenerateKey returns a Diffie-Hellman key.
func GenerateKey() (*DhKey, error) {
var err error
var x *big.Int

View File

@@ -128,8 +128,8 @@ func (c *Cache) Take(key string, fetch func() (any, error)) (any, error) {
var fresh bool
val, err := c.barrier.Do(key, func() (any, error) {
// because O(1) on map search in memory, and fetch is an IO query
// so we do double check, cache might be taken by another call
// because O(1) on map search in memory, and fetch is an IO query,
// so we do double-check, cache might be taken by another call
if val, ok := c.doGet(key); ok {
return val, nil
}

View File

@@ -4,18 +4,28 @@ import (
"sync"
"time"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/timex"
)
type (
// RollingWindowOption let callers customize the RollingWindow.
RollingWindowOption func(rollingWindow *RollingWindow)
// BucketInterface is the interface that defines the buckets.
BucketInterface[T Numerical] interface {
Add(v T)
Reset()
}
// RollingWindow defines a rolling window to calculate the events in buckets with time interval.
RollingWindow struct {
// Numerical is the interface that restricts the numerical type.
Numerical = mathx.Numerical
// RollingWindowOption let callers customize the RollingWindow.
RollingWindowOption[T Numerical, B BucketInterface[T]] func(rollingWindow *RollingWindow[T, B])
// RollingWindow defines a rolling window to calculate the events in buckets with the time interval.
RollingWindow[T Numerical, B BucketInterface[T]] struct {
lock sync.RWMutex
size int
win *window
win *window[T, B]
interval time.Duration
offset int
ignoreCurrent bool
@@ -25,14 +35,15 @@ type (
// NewRollingWindow returns a RollingWindow that with size buckets and time interval,
// use opts to customize the RollingWindow.
func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOption) *RollingWindow {
func NewRollingWindow[T Numerical, B BucketInterface[T]](newBucket func() B, size int,
interval time.Duration, opts ...RollingWindowOption[T, B]) *RollingWindow[T, B] {
if size < 1 {
panic("size must be greater than 0")
}
w := &RollingWindow{
w := &RollingWindow[T, B]{
size: size,
win: newWindow(size),
win: newWindow[T, B](newBucket, size),
interval: interval,
lastTime: timex.Now(),
}
@@ -43,7 +54,7 @@ func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOpt
}
// Add adds value to current bucket.
func (rw *RollingWindow) Add(v float64) {
func (rw *RollingWindow[T, B]) Add(v T) {
rw.lock.Lock()
defer rw.lock.Unlock()
rw.updateOffset()
@@ -51,13 +62,13 @@ func (rw *RollingWindow) Add(v float64) {
}
// Reduce runs fn on all buckets, ignore current bucket if ignoreCurrent was set.
func (rw *RollingWindow) Reduce(fn func(b *Bucket)) {
func (rw *RollingWindow[T, B]) Reduce(fn func(b B)) {
rw.lock.RLock()
defer rw.lock.RUnlock()
var diff int
span := rw.span()
// ignore current bucket, because of partial data
// ignore the current bucket, because of partial data
if span == 0 && rw.ignoreCurrent {
diff = rw.size - 1
} else {
@@ -69,7 +80,7 @@ func (rw *RollingWindow) Reduce(fn func(b *Bucket)) {
}
}
func (rw *RollingWindow) span() int {
func (rw *RollingWindow[T, B]) span() int {
offset := int(timex.Since(rw.lastTime) / rw.interval)
if 0 <= offset && offset < rw.size {
return offset
@@ -78,7 +89,7 @@ func (rw *RollingWindow) span() int {
return rw.size
}
func (rw *RollingWindow) updateOffset() {
func (rw *RollingWindow[T, B]) updateOffset() {
span := rw.span()
if span <= 0 {
return
@@ -97,54 +108,54 @@ func (rw *RollingWindow) updateOffset() {
}
// Bucket defines the bucket that holds sum and num of additions.
type Bucket struct {
Sum float64
type Bucket[T Numerical] struct {
Sum T
Count int64
}
func (b *Bucket) add(v float64) {
func (b *Bucket[T]) Add(v T) {
b.Sum += v
b.Count++
}
func (b *Bucket) reset() {
func (b *Bucket[T]) Reset() {
b.Sum = 0
b.Count = 0
}
type window struct {
buckets []*Bucket
type window[T Numerical, B BucketInterface[T]] struct {
buckets []B
size int
}
func newWindow(size int) *window {
buckets := make([]*Bucket, size)
func newWindow[T Numerical, B BucketInterface[T]](newBucket func() B, size int) *window[T, B] {
buckets := make([]B, size)
for i := 0; i < size; i++ {
buckets[i] = new(Bucket)
buckets[i] = newBucket()
}
return &window{
return &window[T, B]{
buckets: buckets,
size: size,
}
}
func (w *window) add(offset int, v float64) {
w.buckets[offset%w.size].add(v)
func (w *window[T, B]) add(offset int, v T) {
w.buckets[offset%w.size].Add(v)
}
func (w *window) reduce(start, count int, fn func(b *Bucket)) {
func (w *window[T, B]) reduce(start, count int, fn func(b B)) {
for i := 0; i < count; i++ {
fn(w.buckets[(start+i)%w.size])
}
}
func (w *window) resetBucket(offset int) {
w.buckets[offset%w.size].reset()
func (w *window[T, B]) resetBucket(offset int) {
w.buckets[offset%w.size].Reset()
}
// IgnoreCurrentBucket lets the Reduce call ignore current bucket.
func IgnoreCurrentBucket() RollingWindowOption {
return func(w *RollingWindow) {
func IgnoreCurrentBucket[T Numerical, B BucketInterface[T]]() RollingWindowOption[T, B] {
return func(w *RollingWindow[T, B]) {
w.ignoreCurrent = true
}
}

View File

@@ -12,18 +12,24 @@ import (
const duration = time.Millisecond * 50
func TestNewRollingWindow(t *testing.T) {
assert.NotNil(t, NewRollingWindow(10, time.Second))
assert.NotNil(t, NewRollingWindow[int64, *Bucket[int64]](func() *Bucket[int64] {
return new(Bucket[int64])
}, 10, time.Second))
assert.Panics(t, func() {
NewRollingWindow(0, time.Second)
NewRollingWindow[int64, *Bucket[int64]](func() *Bucket[int64] {
return new(Bucket[int64])
}, 0, time.Second)
})
}
func TestRollingWindowAdd(t *testing.T) {
const size = 3
r := NewRollingWindow(size, duration)
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration)
listBuckets := func() []float64 {
var buckets []float64
r.Reduce(func(b *Bucket) {
r.Reduce(func(b *Bucket[float64]) {
buckets = append(buckets, b.Sum)
})
return buckets
@@ -47,10 +53,12 @@ func TestRollingWindowAdd(t *testing.T) {
func TestRollingWindowReset(t *testing.T) {
const size = 3
r := NewRollingWindow(size, duration, IgnoreCurrentBucket())
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration, IgnoreCurrentBucket[float64, *Bucket[float64]]())
listBuckets := func() []float64 {
var buckets []float64
r.Reduce(func(b *Bucket) {
r.Reduce(func(b *Bucket[float64]) {
buckets = append(buckets, b.Sum)
})
return buckets
@@ -72,15 +80,19 @@ func TestRollingWindowReset(t *testing.T) {
func TestRollingWindowReduce(t *testing.T) {
const size = 4
tests := []struct {
win *RollingWindow
win *RollingWindow[float64, *Bucket[float64]]
expect float64
}{
{
win: NewRollingWindow(size, duration),
win: NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration),
expect: 10,
},
{
win: NewRollingWindow(size, duration, IgnoreCurrentBucket()),
win: NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration, IgnoreCurrentBucket[float64, *Bucket[float64]]()),
expect: 4,
},
}
@@ -97,7 +109,7 @@ func TestRollingWindowReduce(t *testing.T) {
}
}
var result float64
r.Reduce(func(b *Bucket) {
r.Reduce(func(b *Bucket[float64]) {
result += b.Sum
})
assert.Equal(t, test.expect, result)
@@ -108,10 +120,12 @@ func TestRollingWindowReduce(t *testing.T) {
func TestRollingWindowBucketTimeBoundary(t *testing.T) {
const size = 3
interval := time.Millisecond * 30
r := NewRollingWindow(size, interval)
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, interval)
listBuckets := func() []float64 {
var buckets []float64
r.Reduce(func(b *Bucket) {
r.Reduce(func(b *Bucket[float64]) {
buckets = append(buckets, b.Sum)
})
return buckets
@@ -138,7 +152,9 @@ func TestRollingWindowBucketTimeBoundary(t *testing.T) {
func TestRollingWindowDataRace(t *testing.T) {
const size = 3
r := NewRollingWindow(size, duration)
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
return new(Bucket[float64])
}, size, duration)
stop := make(chan bool)
go func() {
for {
@@ -157,7 +173,7 @@ func TestRollingWindowDataRace(t *testing.T) {
case <-stop:
return
default:
r.Reduce(func(b *Bucket) {})
r.Reduce(func(b *Bucket[float64]) {})
}
}
}()

View File

@@ -133,7 +133,7 @@ func addOrMergeFields(info *fieldInfo, key string, child *fieldInfo, fullName st
return newConflictKeyError(fullName)
}
if err := mergeFields(prev, key, child.children, fullName); err != nil {
if err := mergeFields(prev, child.children, fullName); err != nil {
return err
}
} else {
@@ -189,7 +189,7 @@ func buildFieldsInfo(tp reflect.Type, fullName string) (*fieldInfo, error) {
switch tp.Kind() {
case reflect.Struct:
return buildStructFieldsInfo(tp, fullName)
case reflect.Array, reflect.Slice:
case reflect.Array, reflect.Slice, reflect.Map:
return buildFieldsInfo(mapping.Deref(tp.Elem()), fullName)
case reflect.Chan, reflect.Func:
return nil, fmt.Errorf("unsupported type: %s", tp.Kind())
@@ -281,7 +281,7 @@ func getTagName(field reflect.StructField) string {
return field.Name
}
func mergeFields(prev *fieldInfo, key string, children map[string]*fieldInfo, fullName string) error {
func mergeFields(prev *fieldInfo, children map[string]*fieldInfo, fullName string) error {
if len(prev.children) == 0 || len(children) == 0 {
return newConflictKeyError(fullName)
}
@@ -332,6 +332,8 @@ func toLowerCaseKeyMap(m map[string]any, info *fieldInfo) map[string]any {
res[lk] = toLowerCaseInterface(v, ti)
} else if info.mapField != nil {
res[k] = toLowerCaseInterface(v, info.mapField)
} else if vv, ok := v.(map[string]any); ok {
res[k] = toLowerCaseKeyMap(vv, info)
} else {
res[k] = v
}

View File

@@ -1192,6 +1192,29 @@ Email = "bar"`)
assert.Len(t, c.Value, 2)
}
})
t.Run("multi layer map", func(t *testing.T) {
type Value struct {
User struct {
Name string
}
}
type Config struct {
Value map[string]map[string]Value
}
var input = []byte(`
[Value.first.User1.User]
Name = "foo"
[Value.second.User2.User]
Name = "bar"
`)
var c Config
if assert.NoError(t, LoadFromTomlBytes(input, &c)) {
assert.Len(t, c.Value, 2)
}
})
}
func Test_getFullName(t *testing.T) {

View File

@@ -0,0 +1,200 @@
package configurator
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
"github.com/zeromicro/go-zero/core/configcenter/subscriber"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/mapping"
"github.com/zeromicro/go-zero/core/threading"
)
var (
	// errEmptyConfig is returned when the subscriber holds no configuration data.
	errEmptyConfig = errors.New("empty config value")
	// errMissingUnmarshalerType is returned when the target type T cannot be
	// determined, or is a kind the configured unmarshaler does not support.
	errMissingUnmarshalerType = errors.New("missing unmarshaler type")
)

// Configurator is the interface for configuration center.
type Configurator[T any] interface {
	// GetConfig returns the subscription value.
	GetConfig() (T, error)
	// AddListener adds a listener to the subscriber.
	AddListener(listener func())
}

type (
	// Config is the configuration for Configurator.
	Config struct {
		// Type is the value type, yaml, json or toml.
		Type string `json:",default=yaml,options=[yaml,json,toml]"`
		// Log is the flag to control logging.
		Log bool `json:",default=true"`
	}

	// configCenter is the default Configurator implementation.
	// It keeps the latest parsed value in snapshot and re-parses on change.
	configCenter[T any] struct {
		conf        Config
		unmarshaler LoaderFn
		subscriber  subscriber.Subscriber
		listeners   []func()     // guarded by lock
		lock        sync.Mutex   // protects listeners
		snapshot    atomic.Value // holds *value[T]
	}

	// value is one parsed snapshot of the raw configuration string.
	value[T any] struct {
		data        string // raw content as received from the subscriber
		marshalData T      // parsed content; zero value when err is set
		err         error  // parse error, if any
	}
)

// compile-time check that configCenter implements Configurator.
var _ Configurator[any] = (*configCenter[any])(nil)
// MustNewConfigCenter returns a Configurator, exits on errors.
func MustNewConfigCenter[T any](c Config, subscriber subscriber.Subscriber) Configurator[T] {
	center, err := NewConfigCenter[T](c, subscriber)
	logx.Must(err)

	return center
}
// NewConfigCenter returns a Configurator backed by the given subscriber,
// or an error if the format is unknown or the initial load fails.
func NewConfigCenter[T any](c Config, subscriber subscriber.Subscriber) (Configurator[T], error) {
	loader, ok := Unmarshaler(strings.ToLower(c.Type))
	if !ok {
		return nil, fmt.Errorf("unknown format: %s", c.Type)
	}

	center := &configCenter[T]{
		conf:        c,
		unmarshaler: loader,
		subscriber:  subscriber,
	}

	// load once up front so GetConfig has a snapshot to serve
	if err := center.loadConfig(); err != nil {
		return nil, err
	}

	// watch for subsequent changes
	if err := center.subscriber.AddListener(center.onChange); err != nil {
		return nil, err
	}

	// surface parse errors of the initial content at construction time
	if _, err := center.GetConfig(); err != nil {
		return nil, err
	}

	return center, nil
}
// AddListener registers listener to be invoked after each successful reload.
func (c *configCenter[T]) AddListener(listener func()) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.listeners = append(c.listeners, listener)
}
// GetConfig returns the structured config parsed from the latest snapshot.
func (c *configCenter[T]) GetConfig() (T, error) {
	snap := c.value()
	if snap == nil || snap.data == "" {
		var zero T
		return zero, errEmptyConfig
	}

	return snap.marshalData, snap.err
}
// Value returns the raw subscription content of the latest snapshot,
// or the empty string when nothing was loaded yet.
func (c *configCenter[T]) Value() string {
	if snap := c.value(); snap != nil {
		return snap.data
	}

	return ""
}
// loadConfig pulls the current value from the subscriber, parses it,
// and stores the result as the new snapshot.
func (c *configCenter[T]) loadConfig() error {
	content, err := c.subscriber.Value()
	if err != nil {
		if c.conf.Log {
			logx.Errorf("ConfigCenter loads changed configuration, error: %v", err)
		}

		return err
	}

	if c.conf.Log {
		logx.Infof("ConfigCenter loads changed configuration, content [%s]", content)
	}

	c.snapshot.Store(c.genValue(content))

	return nil
}
// onChange reloads the configuration and, on success, notifies all
// registered listeners asynchronously.
func (c *configCenter[T]) onChange() {
	if err := c.loadConfig(); err != nil {
		return
	}

	// snapshot the listener slice so the callbacks run outside the lock
	c.lock.Lock()
	callbacks := append([]func()(nil), c.listeners...)
	c.lock.Unlock()

	for _, callback := range callbacks {
		threading.GoSafe(callback)
	}
}
// value returns the current snapshot, or nil if nothing was stored yet.
func (c *configCenter[T]) value() *value[T] {
	if stored := c.snapshot.Load(); stored != nil {
		return stored.(*value[T])
	}

	return nil
}
// genValue parses data into a value[T] snapshot. Struct, array and slice
// targets go through the configured unmarshaler; string targets take the
// raw content as-is; anything else yields errMissingUnmarshalerType.
// Parse errors are recorded on the returned value, never returned directly.
func (c *configCenter[T]) genValue(data string) *value[T] {
	v := &value[T]{
		data: data,
	}
	// empty content: keep the zero value, no error — GetConfig treats
	// empty data as errEmptyConfig on its own
	if len(data) == 0 {
		return v
	}

	t := reflect.TypeOf(v.marshalData)
	// if the type is nil, it means that the user has not set the type of the configuration.
	if t == nil {
		v.err = errMissingUnmarshalerType
		return v
	}

	// dereference pointer types so the kind switch sees the element type
	t = mapping.Deref(t)
	switch t.Kind() {
	case reflect.Struct, reflect.Array, reflect.Slice:
		if err := c.unmarshaler([]byte(data), &v.marshalData); err != nil {
			v.err = err
			if c.conf.Log {
				logx.Errorf("ConfigCenter unmarshal configuration failed, err: %+v, content [%s]",
					err.Error(), data)
			}
		}
	case reflect.String:
		// T is (or derefs to) string: store the raw content directly
		if str, ok := any(data).(T); ok {
			v.marshalData = str
		} else {
			v.err = errMissingUnmarshalerType
		}
	default:
		if c.conf.Log {
			logx.Errorf("ConfigCenter unmarshal configuration missing unmarshaler for type: %s, content [%s]",
				t.Kind(), data)
		}

		v.err = errMissingUnmarshalerType
	}

	return v
}

View File

@@ -0,0 +1,233 @@
package configurator
import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestNewConfigCenter covers the constructor error paths: an unknown
// format, and a subscriber that yields no usable configuration.
func TestNewConfigCenter(t *testing.T) {
	// empty Type is not a registered format
	_, err := NewConfigCenter[any](Config{
		Log: true,
	}, &mockSubscriber{})
	assert.Error(t, err)

	// valid format, but the subscriber returns an empty value
	_, err = NewConfigCenter[any](Config{
		Type: "json",
		Log:  true,
	}, &mockSubscriber{})
	assert.Error(t, err)
}
// TestConfigCenter_GetConfig verifies that GetConfig returns the value
// parsed at construction time, for struct and string targets alike, and
// that a failing AddListener surfaces as a constructor error.
func TestConfigCenter_GetConfig(t *testing.T) {
	mock := &mockSubscriber{}
	type Data struct {
		Name string `json:"name"`
	}

	mock.v = `{"name": "go-zero"}`
	c1, err := NewConfigCenter[Data](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)
	data, err := c1.GetConfig()
	assert.NoError(t, err)
	assert.Equal(t, "go-zero", data.Name)

	mock.v = `{"name": "111"}`
	c2, err := NewConfigCenter[Data](Config{Type: "json"}, mock)
	assert.NoError(t, err)

	mock.v = `{}`
	c3, err := NewConfigCenter[string](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)
	_, err = c3.GetConfig()
	assert.NoError(t, err)

	// c2 keeps the snapshot taken at its construction time, so the later
	// changes to mock.v (without a change notification) must not affect it.
	// The original test fetched this value but never asserted it.
	data, err = c2.GetConfig()
	assert.NoError(t, err)
	assert.Equal(t, "111", data.Name)

	mock.lisErr = errors.New("mock error")
	_, err = NewConfigCenter[Data](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.Error(t, err)
}
// TestConfigCenter_onChange verifies that a change notification from the
// subscriber triggers a reload and that GetConfig sees the new value.
func TestConfigCenter_onChange(t *testing.T) {
	mock := &mockSubscriber{}
	type Data struct {
		Name string `json:"name"`
	}

	mock.v = `{"name": "go-zero"}`
	c1, err := NewConfigCenter[Data](Config{Type: "json", Log: true}, mock)
	assert.NoError(t, err)
	data, err := c1.GetConfig()
	assert.NoError(t, err)
	assert.Equal(t, "go-zero", data.Name)

	// simulate a value change pushed by the subscriber
	mock.v = `{"name": "go-zero2"}`
	mock.change()
	data, err = c1.GetConfig()
	assert.NoError(t, err)
	assert.Equal(t, "go-zero2", data.Name)

	// a failing subscriber must surface as a construction error
	mock.valErr = errors.New("mock error")
	_, err = NewConfigCenter[Data](Config{Type: "json", Log: false}, mock)
	assert.Error(t, err)
}
// TestConfigCenter_Value checks the raw Value accessor and the
// constructor failure path when the subscriber errors.
func TestConfigCenter_Value(t *testing.T) {
	mock := &mockSubscriber{}
	mock.v = "1234"
	c, err := NewConfigCenter[string](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)

	center := c.(*configCenter[string])
	assert.Equal(t, center.Value(), "1234")

	mock.valErr = errors.New("mock error")
	_, err = NewConfigCenter[any](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.Error(t, err)
}
// TestConfigCenter_AddListener verifies that all registered listeners are
// invoked (asynchronously) after a change notification.
func TestConfigCenter_AddListener(t *testing.T) {
	mock := &mockSubscriber{}
	mock.v = "1234"
	c, err := NewConfigCenter[string](Config{
		Type: "json",
		Log:  true,
	}, mock)
	assert.NoError(t, err)

	cc := c.(*configCenter[string])
	var a, b int
	var mutex sync.Mutex
	cc.AddListener(func() {
		mutex.Lock()
		a = 1
		mutex.Unlock()
	})
	cc.AddListener(func() {
		mutex.Lock()
		b = 2
		mutex.Unlock()
	})
	assert.Equal(t, 2, len(cc.listeners))

	mock.change()
	// listeners run in goroutines; give them a moment to finish
	time.Sleep(time.Millisecond * 100)

	mutex.Lock()
	assert.Equal(t, 1, a)
	assert.Equal(t, 2, b)
	mutex.Unlock()
}
// TestConfigCenter_genValue exercises genValue for each supported target
// kind: empty input, untyped any, unsupported int, plain string, and a
// struct target with both malformed and valid JSON payloads.
func TestConfigCenter_genValue(t *testing.T) {
	t.Run("data is empty", func(t *testing.T) {
		c := &configCenter[string]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("")
		assert.Equal(t, "", v.data)
	})

	t.Run("invalid template type", func(t *testing.T) {
		c := &configCenter[any]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("xxxx")
		assert.Equal(t, errMissingUnmarshalerType, v.err)
	})

	t.Run("unsupported template type", func(t *testing.T) {
		c := &configCenter[int]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("1")
		assert.Equal(t, errMissingUnmarshalerType, v.err)
	})

	t.Run("supported template string type", func(t *testing.T) {
		c := &configCenter[string]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue("12345")
		assert.NoError(t, v.err)
		assert.Equal(t, "12345", v.data)
	})

	t.Run("unmarshal fail", func(t *testing.T) {
		// note the missing closing quote in the payload below
		c := &configCenter[struct {
			Name string `json:"name"`
		}]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue(`{"name":"new name}`)
		assert.Equal(t, `{"name":"new name}`, v.data)
		assert.Error(t, v.err)
	})

	t.Run("success", func(t *testing.T) {
		c := &configCenter[struct {
			Name string `json:"name"`
		}]{
			unmarshaler: registry.unmarshalers["json"],
			conf:        Config{Log: true},
		}
		v := c.genValue(`{"name":"new name"}`)
		assert.Equal(t, `{"name":"new name"}`, v.data)
		assert.Equal(t, "new name", v.marshalData.Name)
		assert.NoError(t, v.err)
	})
}
// mockSubscriber is a test double for subscriber.Subscriber with
// programmable value and error responses.
type mockSubscriber struct {
	v              string // value returned by Value
	lisErr, valErr error  // errors returned by AddListener / Value
	listener       func() // last listener registered via AddListener
}

// AddListener records the listener and returns the configured error.
func (m *mockSubscriber) AddListener(listener func()) error {
	m.listener = listener
	return m.lisErr
}

// Value returns the configured value and error.
func (m *mockSubscriber) Value() (string, error) {
	return m.v, m.valErr
}

// change simulates a value-change notification from the subscriber.
func (m *mockSubscriber) change() {
	if m.listener != nil {
		m.listener()
	}
}

View File

@@ -0,0 +1,67 @@
package subscriber
import (
"github.com/zeromicro/go-zero/core/discov"
"github.com/zeromicro/go-zero/core/logx"
)
type (
	// etcdSubscriber is a subscriber that subscribes to etcd.
	// It adapts discov.Subscriber to the configcenter Subscriber interface.
	etcdSubscriber struct {
		*discov.Subscriber
	}

	// EtcdConf is the configuration for etcd.
	EtcdConf = discov.EtcdConf
)
// MustNewEtcdSubscriber returns an etcd Subscriber, exits on errors.
func MustNewEtcdSubscriber(conf EtcdConf) Subscriber {
	sub, err := NewEtcdSubscriber(conf)
	logx.Must(err)

	return sub
}
// NewEtcdSubscriber returns an etcd Subscriber watching conf.Key on conf.Hosts.
func NewEtcdSubscriber(conf EtcdConf) (Subscriber, error) {
	sub, err := discov.NewSubscriber(conf.Hosts, conf.Key, buildSubOptions(conf)...)
	if err != nil {
		return nil, err
	}

	return &etcdSubscriber{Subscriber: sub}, nil
}
// buildSubOptions constructs the options for creating a new etcd subscriber:
// exact-match key watching, plus optional account and TLS settings.
func buildSubOptions(conf EtcdConf) []discov.SubOption {
	opts := []discov.SubOption{discov.WithExactMatch()}

	if len(conf.User) > 0 {
		opts = append(opts, discov.WithSubEtcdAccount(conf.User, conf.Pass))
	}

	useTLS := len(conf.CertFile) > 0 || len(conf.CertKeyFile) > 0 || len(conf.CACertFile) > 0
	if useTLS {
		opts = append(opts, discov.WithSubEtcdTLS(conf.CertFile, conf.CertKeyFile,
			conf.CACertFile, conf.InsecureSkipVerify))
	}

	return opts
}
// AddListener adds a listener to the subscriber.
// It always returns nil: the underlying discov AddListener reports no error.
func (s *etcdSubscriber) AddListener(listener func()) error {
	s.Subscriber.AddListener(listener)
	return nil
}
// Value returns the value of the subscriber — the last one when the
// underlying subscriber holds several — or the empty string if none.
func (s *etcdSubscriber) Value() (string, error) {
	values := s.Subscriber.Values()
	if len(values) == 0 {
		return "", nil
	}

	return values[len(values)-1], nil
}

View File

@@ -0,0 +1,9 @@
package subscriber
// Subscriber is the interface for configcenter subscribers.
type Subscriber interface {
	// AddListener adds a listener to the subscriber.
	// The listener is invoked whenever the watched value changes.
	AddListener(listener func()) error
	// Value returns the value of the subscriber.
	Value() (string, error)
}

View File

@@ -0,0 +1,41 @@
package configurator
import (
"sync"
"github.com/zeromicro/go-zero/core/conf"
)
// registry is the package-level store of named unmarshalers,
// pre-populated with the built-in json, toml and yaml loaders.
var registry = &unmarshalerRegistry{
	unmarshalers: map[string]LoaderFn{
		"json": conf.LoadFromJsonBytes,
		"toml": conf.LoadFromTomlBytes,
		"yaml": conf.LoadFromYamlBytes,
	},
}

type (
	// LoaderFn is the function type for loading configuration.
	LoaderFn func([]byte, any) error

	// unmarshalerRegistry is the registry for unmarshalers.
	unmarshalerRegistry struct {
		unmarshalers map[string]LoaderFn // keyed by format name
		mu           sync.RWMutex        // protects unmarshalers
	}
)
// RegisterUnmarshaler registers an unmarshaler under the given name,
// replacing any previous registration for that name.
func RegisterUnmarshaler(name string, fn LoaderFn) {
	registry.mu.Lock()
	defer registry.mu.Unlock()

	registry.unmarshalers[name] = fn
}
// Unmarshaler returns the unmarshaler registered under name,
// and whether it exists.
func Unmarshaler(name string) (LoaderFn, bool) {
	registry.mu.RLock()
	defer registry.mu.RUnlock()

	loader, ok := registry.unmarshalers[name]

	return loader, ok
}

View File

@@ -0,0 +1,28 @@
package configurator
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestRegisterUnmarshaler checks that a registered loader can be looked up,
// that the built-in formats are present, and that unknown names miss.
func TestRegisterUnmarshaler(t *testing.T) {
	RegisterUnmarshaler("test", func(data []byte, v interface{}) error {
		return nil
	})

	for _, name := range []string{"test", "json", "toml", "yaml"} {
		_, ok := Unmarshaler(name)
		assert.True(t, ok)
	}

	_, ok := Unmarshaler("test2")
	assert.False(t, ok)
}

View File

@@ -10,13 +10,14 @@ import (
"sync"
"time"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
"github.com/zeromicro/go-zero/core/contextx"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/threading"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
)
var (
@@ -30,7 +31,7 @@ var (
// A Registry is a registry that manages the etcd client connections.
type Registry struct {
clusters map[string]*cluster
lock sync.Mutex
lock sync.RWMutex
}
// GetRegistry returns a global Registry.
@@ -45,7 +46,7 @@ func (r *Registry) GetConn(endpoints []string) (EtcdClient, error) {
}
// Monitor monitors the key on given etcd endpoints, notify with the given UpdateListener.
func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) error {
func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener, exactMatch bool) error {
c, exists := r.getCluster(endpoints)
// if exists, the existing values should be updated to the listener.
if exists {
@@ -55,17 +56,24 @@ func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) err
}
}
return c.monitor(key, l)
return c.monitor(key, l, exactMatch)
}
func (r *Registry) getCluster(endpoints []string) (c *cluster, exists bool) {
clusterKey := getClusterKey(endpoints)
r.lock.Lock()
defer r.lock.Unlock()
r.lock.RLock()
c, exists = r.clusters[clusterKey]
r.lock.RUnlock()
if !exists {
c = newCluster(endpoints)
r.clusters[clusterKey] = c
r.lock.Lock()
defer r.lock.Unlock()
// double-check locking
c, exists = r.clusters[clusterKey]
if !exists {
c = newCluster(endpoints)
r.clusters[clusterKey] = c
}
}
return
@@ -78,7 +86,8 @@ type cluster struct {
listeners map[string][]UpdateListener
watchGroup *threading.RoutineGroup
done chan lang.PlaceholderType
lock sync.Mutex
lock sync.RWMutex
exactMatch bool
}
func newCluster(endpoints []string) *cluster {
@@ -108,8 +117,8 @@ func (c *cluster) getClient() (EtcdClient, error) {
}
func (c *cluster) getCurrent(key string) []KV {
c.lock.Lock()
defer c.lock.Unlock()
c.lock.RLock()
defer c.lock.RUnlock()
var kvs []KV
for k, v := range c.values[key] {
@@ -125,6 +134,7 @@ func (c *cluster) getCurrent(key string) []KV {
func (c *cluster) handleChanges(key string, kvs []KV) {
var add []KV
var remove []KV
c.lock.Lock()
listeners := append([]UpdateListener(nil), c.listeners[key]...)
vals, ok := c.values[key]
@@ -173,9 +183,9 @@ func (c *cluster) handleChanges(key string, kvs []KV) {
}
func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) {
c.lock.Lock()
c.lock.RLock()
listeners := append([]UpdateListener(nil), c.listeners[key]...)
c.lock.Unlock()
c.lock.RUnlock()
for _, ev := range events {
switch ev.Type {
@@ -216,13 +226,18 @@ func (c *cluster) load(cli EtcdClient, key string) int64 {
for {
var err error
ctx, cancel := context.WithTimeout(c.context(cli), RequestTimeout)
resp, err = cli.Get(ctx, makeKeyPrefix(key), clientv3.WithPrefix())
if c.exactMatch {
resp, err = cli.Get(ctx, key)
} else {
resp, err = cli.Get(ctx, makeKeyPrefix(key), clientv3.WithPrefix())
}
cancel()
if err == nil {
break
}
logx.Error(err)
logx.Errorf("%s, key is %s", err.Error(), key)
time.Sleep(coolDownInterval)
}
@@ -239,9 +254,10 @@ func (c *cluster) load(cli EtcdClient, key string) int64 {
return resp.Header.Revision
}
func (c *cluster) monitor(key string, l UpdateListener) error {
func (c *cluster) monitor(key string, l UpdateListener, exactMatch bool) error {
c.lock.Lock()
c.listeners[key] = append(c.listeners[key], l)
c.exactMatch = exactMatch
c.lock.Unlock()
cli, err := c.getClient()
@@ -307,14 +323,20 @@ func (c *cluster) watch(cli EtcdClient, key string, rev int64) {
}
func (c *cluster) watchStream(cli EtcdClient, key string, rev int64) error {
var rch clientv3.WatchChan
if rev != 0 {
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key),
clientv3.WithPrefix(), clientv3.WithRev(rev+1))
} else {
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key),
clientv3.WithPrefix())
var (
rch clientv3.WatchChan
ops []clientv3.OpOption
watchKey = key
)
if !c.exactMatch {
watchKey = makeKeyPrefix(key)
ops = append(ops, clientv3.WithPrefix())
}
if rev != 0 {
ops = append(ops, clientv3.WithRev(rev+1))
}
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), watchKey, ops...)
for {
select {

View File

@@ -289,7 +289,7 @@ func TestRegistry_Monitor(t *testing.T) {
},
}
GetRegistry().lock.Unlock()
assert.Error(t, GetRegistry().Monitor(endpoints, "foo", new(mockListener)))
assert.Error(t, GetRegistry().Monitor(endpoints, "foo", new(mockListener), false))
}
type mockListener struct {

View File

@@ -15,9 +15,10 @@ type (
// A Subscriber is used to subscribe the given key on an etcd cluster.
Subscriber struct {
endpoints []string
exclusive bool
items *container
endpoints []string
exclusive bool
exactMatch bool
items *container
}
)
@@ -34,7 +35,7 @@ func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscrib
}
sub.items = newContainer(sub.exclusive)
if err := internal.GetRegistry().Monitor(endpoints, key, sub.items); err != nil {
if err := internal.GetRegistry().Monitor(endpoints, key, sub.items, sub.exactMatch); err != nil {
return nil, err
}
@@ -59,6 +60,13 @@ func Exclusive() SubOption {
}
}
// WithExactMatch turn off querying using key prefixes.
func WithExactMatch() SubOption {
return func(sub *Subscriber) {
sub.exactMatch = true
}
}
// WithSubEtcdAccount provides the etcd username/password.
func WithSubEtcdAccount(user, pass string) SubOption {
return func(sub *Subscriber) {

View File

@@ -1,21 +1,17 @@
package errorx
import (
"bytes"
"errors"
"sync"
)
type (
// A BatchError is an error that can hold multiple errors.
BatchError struct {
errs errorArray
lock sync.Mutex
}
// BatchError is an error that can hold multiple errors.
type BatchError struct {
errs []error
lock sync.RWMutex
}
errorArray []error
)
// Add adds errs to be, nil errors are ignored.
// Add adds one or more non-nil errors to the BatchError instance.
func (be *BatchError) Add(errs ...error) {
be.lock.Lock()
defer be.lock.Unlock()
@@ -27,39 +23,20 @@ func (be *BatchError) Add(errs ...error) {
}
}
// Err returns an error that represents all errors.
// Err returns an error that represents all accumulated errors.
// It returns nil if there are no errors.
func (be *BatchError) Err() error {
be.lock.Lock()
defer be.lock.Unlock()
be.lock.RLock()
defer be.lock.RUnlock()
switch len(be.errs) {
case 0:
return nil
case 1:
return be.errs[0]
default:
return be.errs
}
// If there are no non-nil errors, errors.Join(...) returns nil.
return errors.Join(be.errs...)
}
// NotNil checks if any error inside.
// NotNil checks if there is at least one error inside the BatchError.
func (be *BatchError) NotNil() bool {
be.lock.Lock()
defer be.lock.Unlock()
be.lock.RLock()
defer be.lock.RUnlock()
return len(be.errs) > 0
}
// Error returns a string that represents inside errors.
func (ea errorArray) Error() string {
var buf bytes.Buffer
for i := range ea {
if i > 0 {
buf.WriteByte('\n')
}
buf.WriteString(ea[i].Error())
}
return buf.String()
}

View File

@@ -66,3 +66,82 @@ func TestBatchErrorConcurrentAdd(t *testing.T) {
assert.Equal(t, count, len(batch.errs))
assert.True(t, batch.NotNil())
}
// TestBatchError_Unwrap verifies that the error returned by Err() supports
// errors.Is matching against each error that was added to the BatchError.
func TestBatchError_Unwrap(t *testing.T) {
	t.Run("nil", func(t *testing.T) {
		// An empty BatchError yields a nil combined error,
		// and errors.Is(nil, nil) reports true.
		var be BatchError
		assert.Nil(t, be.Err())
		assert.True(t, errors.Is(be.Err(), nil))
	})

	t.Run("one error", func(t *testing.T) {
		// With a single error added, Err() matches that error and no other.
		var errFoo = errors.New("foo")
		var errBar = errors.New("bar")

		var be BatchError
		be.Add(errFoo)
		assert.True(t, errors.Is(be.Err(), errFoo))
		assert.False(t, errors.Is(be.Err(), errBar))
	})

	t.Run("two errors", func(t *testing.T) {
		// With two errors added, Err() matches both of them,
		// but not an error that was never added.
		var errFoo = errors.New("foo")
		var errBar = errors.New("bar")
		var errBaz = errors.New("baz")

		var be BatchError
		be.Add(errFoo)
		be.Add(errBar)
		assert.True(t, errors.Is(be.Err(), errFoo))
		assert.True(t, errors.Is(be.Err(), errBar))
		assert.False(t, errors.Is(be.Err(), errBaz))
	})
}
// TestBatchError_Add verifies that Add ignores nil errors and records
// non-nil ones, including when nil and non-nil errors are mixed in one call.
func TestBatchError_Add(t *testing.T) {
	var be BatchError

	// Test adding nil errors
	be.Add(nil, nil)
	assert.False(t, be.NotNil(), "Expected BatchError to be empty after adding nil errors")

	// Test adding non-nil errors
	err1 := errors.New("error 1")
	err2 := errors.New("error 2")
	be.Add(err1, err2)
	assert.True(t, be.NotNil(), "Expected BatchError to be non-empty after adding errors")

	// Test adding a mix of nil and non-nil errors
	err3 := errors.New("error 3")
	be.Add(nil, err3, nil)
	assert.True(t, be.NotNil(), "Expected BatchError to be non-empty after adding a mix of nil and non-nil errors")
}
// TestBatchError_Err verifies that Err returns nil for an empty BatchError,
// and otherwise returns a combined error that matches every added error
// via errors.Is.
func TestBatchError_Err(t *testing.T) {
	var be BatchError

	// Test Err() on empty BatchError
	assert.Nil(t, be.Err(), "Expected nil error for empty BatchError")

	// Test Err() with multiple errors
	err1 := errors.New("error 1")
	err2 := errors.New("error 2")
	be.Add(err1, err2)
	combinedErr := be.Err()
	// Fixed message: this asserts the combined error is NOT nil.
	assert.NotNil(t, combinedErr, "Expected non-nil error for BatchError with multiple errors")

	// Check if the combined error contains both error messages
	errString := combinedErr.Error()
	assert.Truef(t, errors.Is(combinedErr, err1), "Combined error doesn't contain first error: %s", errString)
	assert.Truef(t, errors.Is(combinedErr, err2), "Combined error doesn't contain second error: %s", errString)
}
// TestBatchError_NotNil verifies NotNil reports false for an empty
// BatchError and true once an error has been added.
// Note: the original asserted via Err() only; a test named NotNil should
// exercise NotNil() itself. Err() is still checked for consistency.
func TestBatchError_NotNil(t *testing.T) {
	var be BatchError

	// NotNil() on an empty BatchError reports false, and Err() is nil.
	assert.False(t, be.NotNil(), "Expected NotNil to be false for empty BatchError")
	assert.Nil(t, be.Err(), "Expected nil error for empty BatchError")

	// After adding an error, NotNil() reports true and Err() is non-nil.
	be.Add(errors.New("test error"))
	assert.True(t, be.NotNil(), "Expected NotNil to be true after adding an error")
	assert.NotNil(t, be.Err(), "Expected non-nil error after adding an error")
}

14
core/errorx/check.go Normal file
View File

@@ -0,0 +1,14 @@
package errorx
import "errors"
// In checks if the given err is one of errs.
func In(err error, errs ...error) bool {
for _, each := range errs {
if errors.Is(err, each) {
return true
}
}
return false
}

70
core/errorx/check_test.go Normal file
View File

@@ -0,0 +1,70 @@
package errorx
import (
"errors"
"testing"
)
// TestIn exercises In across matching, non-matching, empty-list,
// and nil-error combinations using a table-driven test.
func TestIn(t *testing.T) {
	err1 := errors.New("error 1")
	err2 := errors.New("error 2")
	err3 := errors.New("error 3")

	tests := []struct {
		name string
		err  error
		errs []error
		want bool
	}{
		{
			name: "Error matches one of the errors in the list",
			err:  err1,
			errs: []error{err1, err2},
			want: true,
		},
		{
			name: "Error does not match any errors in the list",
			err:  err3,
			errs: []error{err1, err2},
			want: false,
		},
		{
			name: "Empty error list",
			err:  err1,
			errs: []error{},
			want: false,
		},
		{
			name: "Nil error with non-nil list",
			err:  nil,
			errs: []error{err1, err2},
			want: false,
		},
		{
			name: "Non-nil error with nil in list",
			err:  err1,
			errs: []error{nil, err2},
			want: false,
		},
		{
			// errors.Is(nil, nil) reports true, so a nil err
			// matches a nil entry in the list.
			name: "Error matches nil error in the list",
			err:  nil,
			errs: []error{nil, err2},
			want: true,
		},
		{
			name: "Nil error with empty list",
			err:  nil,
			errs: []error{},
			want: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := In(tt.err, tt.errs...); got != tt.want {
				t.Errorf("In() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@@ -35,6 +35,7 @@ func firstLine(file *os.File) (string, error) {
for {
buf := make([]byte, bufSize)
n, err := file.ReadAt(buf, offset)
if err != nil && err != io.EOF {
return "", err
}
@@ -45,6 +46,10 @@ func firstLine(file *os.File) (string, error) {
}
}
if err == io.EOF {
return string(append(first, buf[:n]...)), nil
}
first = append(first, buf[:n]...)
offset += bufSize
}
@@ -57,30 +62,42 @@ func lastLine(filename string, file *os.File) (string, error) {
}
var last []byte
bufLen := int64(bufSize)
offset := info.Size()
for {
offset -= bufSize
if offset < 0 {
for offset > 0 {
if offset < bufLen {
bufLen = offset
offset = 0
} else {
offset -= bufLen
}
buf := make([]byte, bufSize)
buf := make([]byte, bufLen)
n, err := file.ReadAt(buf, offset)
if err != nil && err != io.EOF {
return "", err
}
if n == 0 {
break
}
if buf[n-1] == '\n' {
buf = buf[:n-1]
n--
} else {
buf = buf[:n]
}
for n--; n >= 0; n-- {
if buf[n] == '\n' {
return string(append(buf[n+1:], last...)), nil
for i := n - 1; i >= 0; i-- {
if buf[i] == '\n' {
return string(append(buf[i+1:], last...)), nil
}
}
last = append(buf, last...)
}
return string(last), nil
}

View File

@@ -52,6 +52,7 @@ last line`
second line
last line
`
emptyContent = ``
)
func TestFirstLine(t *testing.T) {
@@ -79,6 +80,26 @@ func TestFirstLineError(t *testing.T) {
assert.Error(t, err)
}
// TestFirstLineEmptyFile verifies FirstLine returns an empty string
// without error for an empty file.
func TestFirstLineEmptyFile(t *testing.T) {
	filename, err := fs.TempFilenameWithText(emptyContent)
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := FirstLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, "", val)
}
// TestFirstLineWithoutNewline verifies FirstLine returns the whole content
// when the file contains a single line with no trailing newline.
func TestFirstLineWithoutNewline(t *testing.T) {
	filename, err := fs.TempFilenameWithText(longLine)
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := FirstLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, longLine, val)
}
func TestLastLine(t *testing.T) {
filename, err := fs.TempFilenameWithText(text)
assert.Nil(t, err)
@@ -99,6 +120,16 @@ func TestLastLineWithLastNewline(t *testing.T) {
assert.Equal(t, longLine, val)
}
// TestLastLineWithoutLastNewline verifies LastLine returns the whole content
// when the file contains a single line with no trailing newline.
func TestLastLineWithoutLastNewline(t *testing.T) {
	filename, err := fs.TempFilenameWithText(longLine)
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := LastLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, longLine, val)
}
func TestLastLineShort(t *testing.T) {
filename, err := fs.TempFilenameWithText(shortText)
assert.Nil(t, err)
@@ -123,3 +154,67 @@ func TestLastLineError(t *testing.T) {
_, err := LastLine("/tmp/does-not-exist")
assert.Error(t, err)
}
// TestLastLineEmptyFile verifies LastLine returns an empty string
// without error for an empty file.
func TestLastLineEmptyFile(t *testing.T) {
	filename, err := fs.TempFilenameWithText(emptyContent)
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := LastLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, "", val)
}
// TestFirstLineExactlyBufSize verifies FirstLine handles a file whose only
// line ends exactly at the read-buffer boundary (the newline is the last
// byte of the first bufSize-byte read).
func TestFirstLineExactlyBufSize(t *testing.T) {
	content := make([]byte, bufSize)
	for i := range content {
		content[i] = 'a'
	}
	content[bufSize-1] = '\n' // Ensure there is a newline at the edge
	filename, err := fs.TempFilenameWithText(string(content))
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := FirstLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, string(content[:bufSize-1]), val)
}
// TestLastLineExactlyBufSize verifies LastLine handles a file whose only
// line ends exactly at the read-buffer boundary (the newline is the last
// byte of a bufSize-byte file).
func TestLastLineExactlyBufSize(t *testing.T) {
	content := make([]byte, bufSize)
	for i := range content {
		content[i] = 'a'
	}
	content[bufSize-1] = '\n' // Ensure there is a newline at the edge
	filename, err := fs.TempFilenameWithText(string(content))
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := LastLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, string(content[:bufSize-1]), val)
}
// TestFirstLineLargeFile verifies FirstLine still returns the first line
// when the file spans multiple buffer-sized reads.
func TestFirstLineLargeFile(t *testing.T) {
	content := text + text + text + "\n" + "extra"
	filename, err := fs.TempFilenameWithText(content)
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := FirstLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, "first line", val)
}
// TestLastLineLargeFile verifies LastLine returns the trailing line
// (no final newline) of a file spanning multiple buffer-sized reads.
func TestLastLineLargeFile(t *testing.T) {
	content := text + text + text + "\n" + "extra"
	filename, err := fs.TempFilenameWithText(content)
	assert.Nil(t, err)
	defer os.Remove(filename)

	val, err := LastLine(filename)
	assert.Nil(t, err)
	assert.Equal(t, "extra", val)
}

View File

@@ -5,7 +5,7 @@ import "gopkg.in/cheggaaa/pb.v1"
type (
// A Scanner is used to read lines.
Scanner interface {
// Scan checks if has remaining to read.
// Scan checks if it has remaining to read.
Scan() bool
// Text returns next line.
Text() string

View File

@@ -1,6 +1,9 @@
package fx
import "github.com/zeromicro/go-zero/core/threading"
import (
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/threading"
)
// Parallel runs fns parallelly and waits for done.
func Parallel(fns ...func()) {
@@ -10,3 +13,20 @@ func Parallel(fns ...func()) {
}
group.Wait()
}
// ParallelErr runs fns in parallel, waits for all of them to finish,
// and returns an error combining every non-nil error the fns returned.
// It returns nil when none of the fns fail.
func ParallelErr(fns ...func() error) error {
	var batch errorx.BatchError
	group := threading.NewRoutineGroup()
	for i := range fns {
		// capture per-iteration value for the goroutine closure
		fn := fns[i]
		group.RunSafe(func() {
			if err := fn(); err != nil {
				batch.Add(err)
			}
		})
	}
	group.Wait()

	return batch.Err()
}

View File

@@ -1,6 +1,7 @@
package fx
import (
"errors"
"sync/atomic"
"testing"
"time"
@@ -22,3 +23,54 @@ func TestParallel(t *testing.T) {
})
assert.Equal(t, int32(6), count)
}
// TestParallelErr verifies that ParallelErr runs every fn (the counter
// accumulates 1+2+3) and that the returned error combines the messages
// of all failing fns.
func TestParallelErr(t *testing.T) {
	var count int32
	err := ParallelErr(
		func() error {
			time.Sleep(time.Millisecond * 100)
			atomic.AddInt32(&count, 1)
			return errors.New("failed to exec #1")
		},
		func() error {
			time.Sleep(time.Millisecond * 100)
			atomic.AddInt32(&count, 2)
			return errors.New("failed to exec #2")
		},
		func() error {
			time.Sleep(time.Millisecond * 100)
			atomic.AddInt32(&count, 3)
			return nil
		},
	)

	assert.Equal(t, int32(6), count)
	assert.Error(t, err)
	// assert.ErrorContains accepts a single "contains" string; any extra
	// strings are msgAndArgs and were never checked. Assert each message
	// separately so both failures are actually verified.
	assert.ErrorContains(t, err, "failed to exec #1")
	assert.ErrorContains(t, err, "failed to exec #2")
}
// TestParallelErrErrorNil verifies that ParallelErr returns nil when no fn
// fails, while still running every fn (the counter accumulates 1+2+3).
func TestParallelErrErrorNil(t *testing.T) {
	var count int32
	err := ParallelErr(
		func() error {
			time.Sleep(time.Millisecond * 100)
			atomic.AddInt32(&count, 1)
			return nil
		},
		func() error {
			time.Sleep(time.Millisecond * 100)
			atomic.AddInt32(&count, 2)
			return nil
		},
		func() error {
			time.Sleep(time.Millisecond * 100)
			atomic.AddInt32(&count, 3)
			return nil
		},
	)

	assert.Equal(t, int32(6), count)
	assert.NoError(t, err)
}

View File

@@ -84,10 +84,10 @@ func Range(source <-chan any) Stream {
}
}
// AllMach returns whether all elements of this stream match the provided predicate.
// AllMatch returns whether all elements of this stream match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then true is returned and the predicate is not evaluated.
func (s Stream) AllMach(predicate func(item any) bool) bool {
func (s Stream) AllMatch(predicate func(item any) bool) bool {
for item := range s.source {
if !predicate(item) {
// make sure the former goroutine not block, and current func returns fast.
@@ -99,10 +99,10 @@ func (s Stream) AllMach(predicate func(item any) bool) bool {
return true
}
// AnyMach returns whether any elements of this stream match the provided predicate.
// AnyMatch returns whether any elements of this stream match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then false is returned and the predicate is not evaluated.
func (s Stream) AnyMach(predicate func(item any) bool) bool {
func (s Stream) AnyMatch(predicate func(item any) bool) bool {
for item := range s.source {
if predicate(item) {
// make sure the former goroutine not block, and current func returns fast.
@@ -352,7 +352,7 @@ func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
}, opts...).Done()
}
// Reduce is an utility method to let the caller deal with the underlying channel.
// Reduce is a utility method to let the caller deal with the underlying channel.
func (s Stream) Reduce(fn ReduceFunc) (any, error) {
return fn(s.source)
}

View File

@@ -398,16 +398,16 @@ func TestWalk(t *testing.T) {
func TestStream_AnyMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
assetEqual(t, false, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 4
}))
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
assetEqual(t, false, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 0
}))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
assetEqual(t, true, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 2
}))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
assetEqual(t, true, Just(1, 2, 3).AnyMatch(func(item any) bool {
return item.(int) == 2
}))
})
@@ -416,17 +416,17 @@ func TestStream_AnyMach(t *testing.T) {
func TestStream_AllMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
assetEqual(
t, true, Just(1, 2, 3).AllMach(func(item any) bool {
t, true, Just(1, 2, 3).AllMatch(func(item any) bool {
return true
}),
)
assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item any) bool {
t, false, Just(1, 2, 3).AllMatch(func(item any) bool {
return false
}),
)
assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item any) bool {
t, false, Just(1, 2, 3).AllMatch(func(item any) bool {
return item.(int) == 1
}),
)

View File

@@ -2,7 +2,7 @@ package iox
import "os"
// RedirectInOut redirects stdin to r, stdout to w, and callers need to call restore afterwards.
// RedirectInOut redirects stdin to r, stdout to w, and callers need to call restore afterward.
func RedirectInOut() (restore func(), err error) {
var r, w *os.File
r, w, err = os.Pipe()

View File

@@ -9,7 +9,7 @@ import (
const bufSize = 32 * 1024
// CountLines returns the number of lines in file.
// CountLines returns the number of lines in the file.
func CountLines(file string) (int, error) {
f, err := os.Open(file)
if err != nil {

View File

@@ -2,11 +2,12 @@ package iox
import (
"bufio"
"errors"
"io"
"strings"
)
// A TextLineScanner is a scanner that can scan lines from given reader.
// A TextLineScanner is a scanner that can scan lines from the given reader.
type TextLineScanner struct {
reader *bufio.Reader
hasNext bool
@@ -14,7 +15,7 @@ type TextLineScanner struct {
err error
}
// NewTextLineScanner returns a TextLineScanner with given reader.
// NewTextLineScanner returns a TextLineScanner with the given reader.
func NewTextLineScanner(reader io.Reader) *TextLineScanner {
return &TextLineScanner{
reader: bufio.NewReader(reader),
@@ -30,7 +31,7 @@ func (scanner *TextLineScanner) Scan() bool {
line, err := scanner.reader.ReadString('\n')
scanner.line = strings.TrimRight(line, "\n")
if err == io.EOF {
if errors.Is(err, io.EOF) {
scanner.hasNext = false
return true
} else if err != nil {

View File

@@ -2,6 +2,7 @@ package limit
import (
"context"
_ "embed"
"errors"
"strconv"
"time"
@@ -28,20 +29,9 @@ var (
// ErrUnknownCode is an error that represents unknown status code.
ErrUnknownCode = errors.New("unknown status code")
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
periodScript = redis.NewScript(`local limit = tonumber(ARGV[1])
local window = tonumber(ARGV[2])
local current = redis.call("INCRBY", KEYS[1], 1)
if current == 1 then
redis.call("expire", KEYS[1], window)
end
if current < limit then
return 1
elseif current == limit then
return 2
else
return 0
end`)
//go:embed periodscript.lua
periodLuaScript string
periodScript = redis.NewScript(periodLuaScript)
)
type (

View File

@@ -0,0 +1,14 @@
-- Fixed-window rate limiting script.
-- KEYS[1]: the counter key for the current window.
-- ARGV[1]: limit, the maximum allowed hits per window.
-- ARGV[2]: window, the window length in seconds.
-- Returns: 1 = allowed, 2 = exactly reached the limit, 0 = over the limit.
-- to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
local limit = tonumber(ARGV[1])
local window = tonumber(ARGV[2])
local current = redis.call("INCRBY", KEYS[1], 1)
if current == 1 then
    -- first hit in this window: start the window expiry timer
    redis.call("expire", KEYS[1], window)
end
if current < limit then
    return 1
elseif current == limit then
    return 2
else
    return 0
end

View File

@@ -2,6 +2,7 @@ package limit
import (
"context"
_ "embed"
"errors"
"fmt"
"strconv"
@@ -9,6 +10,7 @@ import (
"sync/atomic"
"time"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stores/redis"
xrate "golang.org/x/time/rate"
@@ -20,37 +22,11 @@ const (
pingInterval = time.Millisecond * 100
)
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
// KEYS[1] as tokens_key
// KEYS[2] as timestamp_key
var script = redis.NewScript(`local rate = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local requested = tonumber(ARGV[4])
local fill_time = capacity/rate
local ttl = math.floor(fill_time*2)
local last_tokens = tonumber(redis.call("get", KEYS[1]))
if last_tokens == nil then
last_tokens = capacity
end
local last_refreshed = tonumber(redis.call("get", KEYS[2]))
if last_refreshed == nil then
last_refreshed = 0
end
local delta = math.max(0, now-last_refreshed)
local filled_tokens = math.min(capacity, last_tokens+(delta*rate))
local allowed = filled_tokens >= requested
local new_tokens = filled_tokens
if allowed then
new_tokens = filled_tokens - requested
end
redis.call("setex", KEYS[1], ttl, new_tokens)
redis.call("setex", KEYS[2], ttl, now)
return allowed`)
var (
//go:embed tokenscript.lua
tokenLuaScript string
tokenScript = redis.NewScript(tokenLuaScript)
)
// A TokenLimiter controls how frequently events are allowed to happen with in one second.
type TokenLimiter struct {
@@ -112,7 +88,7 @@ func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) boo
}
resp, err := lim.store.ScriptRunCtx(ctx,
script,
tokenScript,
[]string{
lim.tokenKey,
lim.timestampKey,
@@ -125,10 +101,10 @@ func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) boo
})
// redis allowed == false
// Lua boolean false -> r Nil bulk reply
if err == redis.Nil {
if errors.Is(err, redis.Nil) {
return false
}
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
if errorx.In(err, context.DeadlineExceeded, context.Canceled) {
logx.Errorf("fail to use rate limiter: %s", err)
return false
}

View File

@@ -0,0 +1,31 @@
-- Token-bucket rate limiting script.
-- ARGV[1]: rate, tokens refilled per second.
-- ARGV[2]: capacity, maximum tokens the bucket holds.
-- ARGV[3]: now, the current timestamp.
-- ARGV[4]: requested, tokens needed by this call.
-- Returns: true if the request is allowed, false (nil reply) otherwise.
-- to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
-- KEYS[1] as tokens_key
-- KEYS[2] as timestamp_key
local rate = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local requested = tonumber(ARGV[4])
-- expire state after twice the time needed to refill an empty bucket
local fill_time = capacity/rate
local ttl = math.floor(fill_time*2)
local last_tokens = tonumber(redis.call("get", KEYS[1]))
if last_tokens == nil then
    -- missing key: treat the bucket as full
    last_tokens = capacity
end
local last_refreshed = tonumber(redis.call("get", KEYS[2]))
if last_refreshed == nil then
    last_refreshed = 0
end
-- refill tokens accrued since the last call, capped at capacity
local delta = math.max(0, now-last_refreshed)
local filled_tokens = math.min(capacity, last_tokens+(delta*rate))
local allowed = filled_tokens >= requested
local new_tokens = filled_tokens
if allowed then
    new_tokens = filled_tokens - requested
end
redis.call("setex", KEYS[1], ttl, new_tokens)
redis.call("setex", KEYS[2], ttl, now)
return allowed

View File

@@ -76,8 +76,8 @@ type (
avgFlyingLock syncx.SpinLock
overloadTime *syncx.AtomicDuration
droppedRecently *syncx.AtomicBool
passCounter *collection.RollingWindow
rtCounter *collection.RollingWindow
passCounter *collection.RollingWindow[int64, *collection.Bucket[int64]]
rtCounter *collection.RollingWindow[int64, *collection.Bucket[int64]]
}
)
@@ -107,15 +107,16 @@ func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
opt(&options)
}
bucketDuration := options.window / time.Duration(options.buckets)
newBucket := func() *collection.Bucket[int64] {
return new(collection.Bucket[int64])
}
return &adaptiveShedder{
cpuThreshold: options.cpuThreshold,
windowScale: float64(time.Second) / float64(bucketDuration) / millisecondsPerSecond,
overloadTime: syncx.NewAtomicDuration(),
droppedRecently: syncx.NewAtomicBool(),
passCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
collection.IgnoreCurrentBucket()),
rtCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
collection.IgnoreCurrentBucket()),
passCounter: collection.NewRollingWindow[int64, *collection.Bucket[int64]](newBucket, options.buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]()),
rtCounter: collection.NewRollingWindow[int64, *collection.Bucket[int64]](newBucket, options.buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]()),
}
}
@@ -138,10 +139,10 @@ func (as *adaptiveShedder) Allow() (Promise, error) {
func (as *adaptiveShedder) addFlying(delta int64) {
flying := atomic.AddInt64(&as.flying, delta)
// update avgFlying when the request is finished.
// this strategy makes avgFlying have a little bit lag against flying, and smoother.
// this strategy makes avgFlying have a little bit of lag against flying, and smoother.
// when the flying requests increase rapidly, avgFlying increase slower, accept more requests.
// when the flying requests drop rapidly, avgFlying drop slower, accept fewer requests.
// it makes the service to serve as more requests as possible.
// it makes the service to serve as many requests as possible.
if delta < 0 {
as.avgFlyingLock.Lock()
as.avgFlying = as.avgFlying*flyingBeta + float64(flying)*(1-flyingBeta)
@@ -167,15 +168,15 @@ func (as *adaptiveShedder) maxFlight() float64 {
}
func (as *adaptiveShedder) maxPass() int64 {
var result float64 = 1
var result int64 = 1
as.passCounter.Reduce(func(b *collection.Bucket) {
as.passCounter.Reduce(func(b *collection.Bucket[int64]) {
if b.Sum > result {
result = b.Sum
}
})
return int64(result)
return result
}
func (as *adaptiveShedder) minRt() float64 {
@@ -183,12 +184,12 @@ func (as *adaptiveShedder) minRt() float64 {
// its a reasonable large value to avoid dropping requests.
result := defaultMinRt
as.rtCounter.Reduce(func(b *collection.Bucket) {
as.rtCounter.Reduce(func(b *collection.Bucket[int64]) {
if b.Count <= 0 {
return
}
avg := math.Round(b.Sum / float64(b.Count))
avg := math.Round(float64(b.Sum) / float64(b.Count))
if avg < result {
result = avg
}
@@ -200,7 +201,7 @@ func (as *adaptiveShedder) minRt() float64 {
func (as *adaptiveShedder) overloadFactor() float64 {
// as.cpuThreshold must be less than cpuMax
factor := (cpuMax - float64(stat.CpuUsage())) / (cpuMax - float64(as.cpuThreshold))
// at least accept 10% of acceptable requests even cpu is highly overloaded.
// at least accept 10% of acceptable requests, even cpu is highly overloaded.
return mathx.Between(factor, overloadFactorLowerBound, 1)
}
@@ -250,14 +251,14 @@ func (as *adaptiveShedder) systemOverloaded() bool {
return true
}
// WithBuckets customizes the Shedder with given number of buckets.
// WithBuckets customizes the Shedder with the given number of buckets.
func WithBuckets(buckets int) ShedderOption {
return func(opts *shedderOptions) {
opts.buckets = buckets
}
}
// WithCpuThreshold customizes the Shedder with given cpu threshold.
// WithCpuThreshold customizes the Shedder with the given cpu threshold.
func WithCpuThreshold(threshold int64) ShedderOption {
return func(opts *shedderOptions) {
opts.cpuThreshold = threshold
@@ -283,6 +284,6 @@ func (p *promise) Fail() {
func (p *promise) Pass() {
rt := float64(timex.Since(p.start)) / float64(time.Millisecond)
p.shedder.addFlying(-1)
p.shedder.rtCounter.Add(math.Ceil(rt))
p.shedder.rtCounter.Add(int64(math.Ceil(rt)))
p.shedder.passCounter.Add(1)
}

View File

@@ -58,7 +58,7 @@ func TestAdaptiveShedder(t *testing.T) {
func TestAdaptiveShedderMaxPass(t *testing.T) {
passCounter := newRollingWindow()
for i := 1; i <= 10; i++ {
passCounter.Add(float64(i * 100))
passCounter.Add(int64(i * 100))
time.Sleep(bucketDuration)
}
shedder := &adaptiveShedder{
@@ -83,7 +83,7 @@ func TestAdaptiveShedderMinRt(t *testing.T) {
time.Sleep(bucketDuration)
}
for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j))
rtCounter.Add(int64(j))
}
}
shedder := &adaptiveShedder{
@@ -107,9 +107,9 @@ func TestAdaptiveShedderMaxFlight(t *testing.T) {
if i > 0 {
time.Sleep(bucketDuration)
}
passCounter.Add(float64((i + 1) * 100))
passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j))
rtCounter.Add(int64(j))
}
}
shedder := &adaptiveShedder{
@@ -129,9 +129,9 @@ func TestAdaptiveShedderShouldDrop(t *testing.T) {
if i > 0 {
time.Sleep(bucketDuration)
}
passCounter.Add(float64((i + 1) * 100))
passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j))
rtCounter.Add(int64(j))
}
}
shedder := &adaptiveShedder{
@@ -184,9 +184,9 @@ func TestAdaptiveShedderStillHot(t *testing.T) {
if i > 0 {
time.Sleep(bucketDuration)
}
passCounter.Add(float64((i + 1) * 100))
passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j))
rtCounter.Add(int64(j))
}
}
shedder := &adaptiveShedder{
@@ -248,9 +248,9 @@ func BenchmarkMaxFlight(b *testing.B) {
if i > 0 {
time.Sleep(bucketDuration)
}
passCounter.Add(float64((i + 1) * 100))
passCounter.Add(int64((i + 1) * 100))
for j := i*10 + 1; j <= i*10+10; j++ {
rtCounter.Add(float64(j))
rtCounter.Add(int64(j))
}
}
shedder := &adaptiveShedder{
@@ -265,6 +265,8 @@ func BenchmarkMaxFlight(b *testing.B) {
}
}
func newRollingWindow() *collection.RollingWindow {
return collection.NewRollingWindow(buckets, bucketDuration, collection.IgnoreCurrentBucket())
func newRollingWindow() *collection.RollingWindow[int64, *collection.Bucket[int64]] {
return collection.NewRollingWindow[int64, *collection.Bucket[int64]](func() *collection.Bucket[int64] {
return new(collection.Bucket[int64])
}, buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]())
}

View File

@@ -6,7 +6,7 @@ import (
"github.com/zeromicro/go-zero/core/syncx"
)
// A ShedderGroup is a manager to manage key based shedders.
// A ShedderGroup is a manager to manage key-based shedders.
type ShedderGroup struct {
options []ShedderOption
manager *syncx.ResourceManager

View File

@@ -42,7 +42,7 @@ func Debugv(ctx context.Context, v interface{}) {
getLogger(ctx).Debugv(v)
}
// Debugw writes msg along with fields into access log.
// Debugw writes msg along with fields into the access log.
func Debugw(ctx context.Context, msg string, fields ...LogField) {
getLogger(ctx).Debugw(msg, fields...)
}
@@ -63,7 +63,7 @@ func Errorv(ctx context.Context, v any) {
getLogger(ctx).Errorv(v)
}
// Errorw writes msg along with fields into error log.
// Errorw writes msg along with fields into the error log.
func Errorw(ctx context.Context, msg string, fields ...LogField) {
getLogger(ctx).Errorw(msg, fields...)
}
@@ -88,7 +88,7 @@ func Infov(ctx context.Context, v any) {
getLogger(ctx).Infov(v)
}
// Infow writes msg along with fields into access log.
// Infow writes msg along with fields into the access log.
func Infow(ctx context.Context, msg string, fields ...LogField) {
getLogger(ctx).Infow(msg, fields...)
}
@@ -108,10 +108,11 @@ func SetLevel(level uint32) {
logx.SetLevel(level)
}
// SetUp sets up the logx. If already set up, just return nil.
// we allow SetUp to be called multiple times, because for example
// SetUp sets up the logx.
// If already set up, return nil.
// We allow SetUp to be called multiple times, because, for example,
// we need to allow different service frameworks to initialize logx respectively.
// the same logic for SetUp
// The same logic for SetUp
func SetUp(c LogConf) error {
return logx.SetUp(c)
}

View File

@@ -42,4 +42,6 @@ type LogConf struct {
// daily: daily rotation.
// size: size limited rotation.
Rotation string `json:",default=daily,options=[daily,size]"`
// FileTimeFormat represents the time format for file name, default is `2006-01-02T15:04:05.000Z07:00`.
FileTimeFormat string `json:",optional"`
}

View File

@@ -1,6 +1,6 @@
package logx
// A LessLogger is a logger that control to log once during the given duration.
// A LessLogger is a logger that controls to log once during the given duration.
type LessLogger struct {
*limitedExecutor
}

View File

@@ -7,13 +7,13 @@ import (
// A Logger represents a logger.
type Logger interface {
// Debug logs a message at info level.
// Debug logs a message at debug level.
Debug(...any)
// Debugf logs a message at info level.
// Debugf logs a message at debug level.
Debugf(string, ...any)
// Debugv logs a message at info level.
// Debugv logs a message at debug level.
Debugv(any)
// Debugw logs a message at info level.
// Debugw logs a message at debug level.
Debugw(string, ...LogField)
// Error logs a message at error level.
Error(...any)

View File

@@ -6,6 +6,7 @@ import (
"log"
"os"
"path"
"reflect"
"runtime/debug"
"sync"
"sync/atomic"
@@ -51,6 +52,26 @@ type (
}
)
// AddWriter adds w as an additional log writer.
// If no writer is set yet, w becomes the writer; otherwise the existing
// writer and w are chained so that every log entry is written to both.
// For example, to also write logs to the console:
//
//	logx.AddWriter(logx.NewWriter(os.Stdout))
func AddWriter(w Writer) {
	ow := Reset()
	if ow == nil {
		SetWriter(w)
	} else {
		// No need to check whether the existing writer is already a comboWriter:
		// adding more than one writer is uncommon, and even then nesting
		// combo writers behaves the same as a flat chain.
		SetWriter(comboWriter{
			writers: []Writer{ow, w},
		})
	}
}
// Alert alerts v in alert level, and the message is written to error log.
func Alert(v string) {
getWriter().Alert(v)
@@ -86,7 +107,7 @@ func Debugv(v any) {
}
}
// Debugw writes msg along with fields into access log.
// Debugw writes msg along with fields into the access log.
func Debugw(msg string, fields ...LogField) {
if shallLog(DebugLevel) {
writeDebug(msg, fields...)
@@ -142,7 +163,7 @@ func Errorv(v any) {
}
}
// Errorw writes msg along with fields into error log.
// Errorw writes msg along with fields into the error log.
func Errorw(msg string, fields ...LogField) {
if shallLog(ErrorLevel) {
writeError(msg, fields...)
@@ -153,11 +174,11 @@ func Errorw(msg string, fields ...LogField) {
func Field(key string, value any) LogField {
switch val := value.(type) {
case error:
return LogField{Key: key, Value: val.Error()}
return LogField{Key: key, Value: encodeError(val)}
case []error:
var errs []string
for _, err := range val {
errs = append(errs, err.Error())
errs = append(errs, encodeError(err))
}
return LogField{Key: key, Value: errs}
case time.Duration:
@@ -175,11 +196,11 @@ func Field(key string, value any) LogField {
}
return LogField{Key: key, Value: times}
case fmt.Stringer:
return LogField{Key: key, Value: val.String()}
return LogField{Key: key, Value: encodeStringer(val)}
case []fmt.Stringer:
var strs []string
for _, str := range val {
strs = append(strs, str.String())
strs = append(strs, encodeStringer(str))
}
return LogField{Key: key, Value: strs}
default:
@@ -208,7 +229,7 @@ func Infov(v any) {
}
}
// Infow writes msg along with fields into access log.
// Infow writes msg along with fields into the access log.
func Infow(msg string, fields ...LogField) {
if shallLog(InfoLevel) {
writeInfo(msg, fields...)
@@ -254,11 +275,12 @@ func SetWriter(w Writer) {
}
}
// SetUp sets up the logx. If already set up, just return nil.
// we allow SetUp to be called multiple times, because for example
// SetUp sets up the logx.
// If already set up, return nil.
// We allow SetUp to be called multiple times, because, for example,
// we need to allow different service frameworks to initialize logx respectively.
func SetUp(c LogConf) (err error) {
// Just ignore the subsequent SetUp calls.
// Ignore the later SetUp calls.
// Because multiple services in one process might call SetUp respectively.
// Need to wait for the first caller to complete the execution.
setupOnce.Do(func() {
@@ -272,6 +294,10 @@ func SetUp(c LogConf) (err error) {
timeFormat = c.TimeFormat
}
if len(c.FileTimeFormat) > 0 {
fileTimeFormat = c.FileTimeFormat
}
atomic.StoreUint32(&maxContentLength, c.MaxContentLength)
switch c.Encoding {
@@ -413,6 +439,32 @@ func createOutput(path string) (io.WriteCloser, error) {
return NewLogger(path, rule, options.gzipEnabled)
}
// encodeError converts err to its string form, recovering from panics
// raised by Error() (e.g. when called on a typed nil receiver).
func encodeError(err error) (ret string) {
	return encodeWithRecover(err, func() string {
		return err.Error()
	})
}
// encodeStringer converts v to its string form, recovering from panics
// raised by String() (e.g. when called on a typed nil receiver).
func encodeStringer(v fmt.Stringer) (ret string) {
	return encodeWithRecover(v, func() string {
		return v.String()
	})
}
// encodeWithRecover invokes fn and returns its result, converting a panic
// into a printable string instead of propagating it.
// If arg is a nil pointer (the typical cause: a String()/Error() method
// dereferencing a nil receiver), the conventional "<nil>" is returned;
// any other panic is reported as "panic: <value>".
func encodeWithRecover(arg any, fn func() string) (ret string) {
	defer func() {
		if err := recover(); err != nil {
			// distinguish nil-receiver panics from genuine ones
			if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
				ret = nilAngleString
			} else {
				ret = fmt.Sprintf("panic: %v", err)
			}
		}
	}()

	return fn()
}
func getWriter() Writer {
w := writer.Load()
if w == nil {
@@ -480,7 +532,7 @@ func writeDebug(val any, fields ...LogField) {
getWriter().Debug(val, addCaller(fields...)...)
}
// writeError writes v into error log.
// writeError writes v into the error log.
// Not checking shallLog here is for performance consideration.
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
// The caller should check shallLog before calling this function.
@@ -520,7 +572,7 @@ func writeStack(msg string) {
getWriter().Stack(fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
}
// writeStat writes v into stat log.
// writeStat writes v into the stat log.
// Not checking shallLog here is for performance consideration.
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
// The caller should check shallLog before calling this function.

View File

@@ -348,6 +348,27 @@ func TestStructedLogInfow(t *testing.T) {
})
}
func TestStructedLogFieldNil(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
defer writer.Store(old)
assert.NotPanics(t, func() {
var s *string
Infow("test", Field("bb", s))
var d *nilStringer
Infow("test", Field("bb", d))
var e *nilError
Errorw("test", Field("bb", e))
})
assert.NotPanics(t, func() {
var p panicStringer
Infow("test", Field("bb", p))
var ps innerPanicStringer
Infow("test", Field("bb", ps))
})
}
func TestStructedLogInfoConsoleAny(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
@@ -570,7 +591,7 @@ func TestErrorfWithWrappedError(t *testing.T) {
old := writer.Swap(w)
defer writer.Store(old)
Errorf("hello %w", errors.New(message))
Errorf("hello %s", errors.New(message))
assert.True(t, strings.Contains(w.String(), "hello there"))
}
@@ -658,6 +679,10 @@ func TestSetup(t *testing.T) {
func TestDisable(t *testing.T) {
Disable()
defer func() {
SetLevel(InfoLevel)
atomic.StoreUint32(&encoding, jsonEncodingType)
}()
var opt logOptions
WithKeepDays(1)(&opt)
@@ -680,6 +705,17 @@ func TestDisableStat(t *testing.T) {
assert.Equal(t, 0, w.builder.Len())
}
func TestAddWriter(t *testing.T) {
const message = "hello there"
w := new(mockWriter)
AddWriter(w)
w1 := new(mockWriter)
AddWriter(w1)
Error(message)
assert.Contains(t, w.String(), message)
assert.Contains(t, w1.String(), message)
}
func TestSetWriter(t *testing.T) {
atomic.StoreUint32(&logLevel, 0)
Reset()
@@ -814,12 +850,13 @@ func doTestStructedLogConsole(t *testing.T, w *mockWriter, write func(...any)) {
func testSetLevelTwiceWithMode(t *testing.T, mode string, w *mockWriter) {
writer.Store(nil)
SetUp(LogConf{
Mode: mode,
Level: "debug",
Path: "/dev/null",
Encoding: plainEncoding,
Stat: false,
TimeFormat: time.RFC3339,
Mode: mode,
Level: "debug",
Path: "/dev/null",
Encoding: plainEncoding,
Stat: false,
TimeFormat: time.RFC3339,
FileTimeFormat: time.DateTime,
})
SetUp(LogConf{
Mode: mode,
@@ -859,3 +896,36 @@ func validateFields(t *testing.T, content string, fields map[string]any) {
}
}
}
type nilError struct {
Name string
}
func (e *nilError) Error() string {
return e.Name
}
type nilStringer struct {
Name string
}
func (s *nilStringer) String() string {
return s.Name
}
type innerPanicStringer struct {
Inner *struct {
Name string
}
}
func (s innerPanicStringer) String() string {
return s.Inner.Name
}
type panicStringer struct {
}
func (s panicStringer) String() string {
panic("panic")
}

View File

@@ -141,23 +141,43 @@ func (l *richLogger) WithCallerSkip(skip int) Logger {
return l
}
l.callerSkip = skip
return l
return &richLogger{
ctx: l.ctx,
callerSkip: skip,
fields: l.fields,
}
}
func (l *richLogger) WithContext(ctx context.Context) Logger {
l.ctx = ctx
return l
return &richLogger{
ctx: ctx,
callerSkip: l.callerSkip,
fields: l.fields,
}
}
func (l *richLogger) WithDuration(duration time.Duration) Logger {
l.fields = append(l.fields, Field(durationKey, timex.ReprOfDuration(duration)))
return l
fields := append(l.fields, Field(durationKey, timex.ReprOfDuration(duration)))
return &richLogger{
ctx: l.ctx,
callerSkip: l.callerSkip,
fields: fields,
}
}
func (l *richLogger) WithFields(fields ...LogField) Logger {
l.fields = append(l.fields, fields...)
return l
if len(fields) == 0 {
return l
}
f := append(l.fields, fields...)
return &richLogger{
ctx: l.ctx,
callerSkip: l.callerSkip,
fields: f,
}
}
func (l *richLogger) buildFields(fields ...LogField) []LogField {

View File

@@ -287,6 +287,54 @@ func TestLogWithCallerSkip(t *testing.T) {
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
}
func TestLogWithCallerSkipCopy(t *testing.T) {
log1 := WithCallerSkip(2)
log2 := log1.WithCallerSkip(3)
log3 := log2.WithCallerSkip(-1)
assert.Equal(t, 2, log1.(*richLogger).callerSkip)
assert.Equal(t, 3, log2.(*richLogger).callerSkip)
assert.Equal(t, 3, log3.(*richLogger).callerSkip)
}
func TestLogWithContextCopy(t *testing.T) {
c1 := context.Background()
c2 := context.WithValue(context.Background(), "foo", "bar")
log1 := WithContext(c1)
log2 := log1.WithContext(c2)
assert.Equal(t, c1, log1.(*richLogger).ctx)
assert.Equal(t, c2, log2.(*richLogger).ctx)
}
func TestLogWithDurationCopy(t *testing.T) {
log1 := WithContext(context.Background())
log2 := log1.WithDuration(time.Second)
assert.Empty(t, log1.(*richLogger).fields)
assert.Equal(t, 1, len(log2.(*richLogger).fields))
var w mockWriter
old := writer.Swap(&w)
defer writer.Store(old)
log2.Info("hello")
assert.Contains(t, w.String(), `"duration":"1000.0ms"`)
}
func TestLogWithFieldsCopy(t *testing.T) {
log1 := WithContext(context.Background())
log2 := log1.WithFields(Field("foo", "bar"))
log3 := log1.WithFields()
assert.Empty(t, log1.(*richLogger).fields)
assert.Equal(t, 1, len(log2.(*richLogger).fields))
assert.Equal(t, log1, log3)
assert.Empty(t, log3.(*richLogger).fields)
var w mockWriter
old := writer.Swap(&w)
defer writer.Store(old)
log2.Info("hello")
assert.Contains(t, w.String(), `"foo":"bar"`)
}
func TestLoggerWithFields(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)

View File

@@ -19,7 +19,6 @@ import (
const (
dateFormat = "2006-01-02"
fileTimeFormat = time.RFC3339
hoursPerDay = 24
bufferSize = 100
defaultDirMode = 0o755
@@ -28,8 +27,12 @@ const (
megaBytes = 1 << 20
)
// ErrLogFileClosed is an error that indicates the log file is already closed.
var ErrLogFileClosed = errors.New("error: log file closed")
var (
// ErrLogFileClosed is an error that indicates the log file is already closed.
ErrLogFileClosed = errors.New("error: log file closed")
fileTimeFormat = time.RFC3339
)
type (
// A RotateRule interface is used to define the log rotating rules.
@@ -319,7 +322,7 @@ func (l *RotateLogger) maybeCompressFile(file string) {
}()
if _, err := os.Stat(file); err != nil {
// file not exists or other error, ignore compression
// file doesn't exist or another error, ignore compression
return
}

View File

@@ -48,6 +48,7 @@ const (
levelDebug = "debug"
backupFileDelimiter = "-"
nilAngleString = "<nil>"
flags = 0x0
)

View File

@@ -13,6 +13,7 @@ import (
fatihcolor "github.com/fatih/color"
"github.com/zeromicro/go-zero/core/color"
"github.com/zeromicro/go-zero/core/errorx"
)
type (
@@ -33,6 +34,10 @@ type (
lock sync.RWMutex
}
comboWriter struct {
writers []Writer
}
concreteWriter struct {
infoLog io.WriteCloser
errorLog io.WriteCloser
@@ -88,6 +93,62 @@ func (w *atomicWriter) Swap(v Writer) Writer {
return old
}
func (c comboWriter) Alert(v any) {
for _, w := range c.writers {
w.Alert(v)
}
}
// Close closes every chained writer, collecting their errors into one.
// All writers are closed even if an earlier one fails.
func (c comboWriter) Close() error {
	var be errorx.BatchError
	for _, w := range c.writers {
		be.Add(w.Close())
	}
	return be.Err()
}
func (c comboWriter) Debug(v any, fields ...LogField) {
for _, w := range c.writers {
w.Debug(v, fields...)
}
}
func (c comboWriter) Error(v any, fields ...LogField) {
for _, w := range c.writers {
w.Error(v, fields...)
}
}
func (c comboWriter) Info(v any, fields ...LogField) {
for _, w := range c.writers {
w.Info(v, fields...)
}
}
func (c comboWriter) Severe(v any) {
for _, w := range c.writers {
w.Severe(v)
}
}
func (c comboWriter) Slow(v any, fields ...LogField) {
for _, w := range c.writers {
w.Slow(v, fields...)
}
}
func (c comboWriter) Stack(v any) {
for _, w := range c.writers {
w.Stack(v)
}
}
func (c comboWriter) Stat(v any, fields ...LogField) {
for _, w := range c.writers {
w.Stat(v, fields...)
}
}
func newConsoleWriter() Writer {
outLog := newLogWriter(log.New(fatihcolor.Output, "", flags))
errLog := newLogWriter(log.New(fatihcolor.Error, "", flags))
@@ -254,11 +315,10 @@ func (n nopWriter) Stack(_ any) {
func (n nopWriter) Stat(_ any, _ ...LogField) {
}
func buildPlainFields(fields ...LogField) []string {
var items []string
for _, field := range fields {
items = append(items, fmt.Sprintf("%s=%+v", field.Key, field.Value))
func buildPlainFields(fields logEntry) []string {
items := make([]string, 0, len(fields))
for k, v := range fields {
items = append(items, fmt.Sprintf("%s=%+v", k, v))
}
return items
@@ -278,6 +338,20 @@ func combineGlobalFields(fields []LogField) []LogField {
return ret
}
// marshalJson marshals t into JSON without escaping HTML characters,
// unlike json.Marshal which rewrites <, > and & as \uXXXX sequences.
// The returned bytes match the shape of json.Marshal output (no trailing newline).
func marshalJson(t any) ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	encoder.SetEscapeHTML(false)
	err := encoder.Encode(t)
	// Encoder.Encode always appends a newline; strip it to mirror json.Marshal.
	// https://github.com/golang/go/issues/13520
	if l := buf.Len(); l > 0 && buf.Bytes()[l-1] == '\n' {
		buf.Truncate(l - 1)
	}
	return buf.Bytes(), err
}
func output(writer io.Writer, level string, val any, fields ...LogField) {
// only truncate string content, don't know how to truncate the values of other types.
if v, ok := val.(string); ok {
@@ -289,15 +363,17 @@ func output(writer io.Writer, level string, val any, fields ...LogField) {
}
fields = combineGlobalFields(fields)
// +3 for timestamp, level and content
entry := make(logEntry, len(fields)+3)
for _, field := range fields {
entry[field.Key] = field.Value
}
switch atomic.LoadUint32(&encoding) {
case plainEncodingType:
writePlainAny(writer, level, val, buildPlainFields(fields...)...)
plainFields := buildPlainFields(entry)
writePlainAny(writer, level, val, plainFields...)
default:
entry := make(logEntry)
for _, field := range fields {
entry[field.Key] = field.Value
}
entry[timestampKey] = getTimestamp()
entry[levelKey] = level
entry[contentKey] = val
@@ -332,7 +408,7 @@ func wrapLevelWithColor(level string) string {
}
func writeJson(writer io.Writer, info any) {
if content, err := json.Marshal(info); err != nil {
if content, err := marshalJson(info); err != nil {
log.Printf("err: %s\n\n%s", err.Error(), debug.Stack())
} else if writer == nil {
log.Println(string(content))

View File

@@ -9,6 +9,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
func TestNewWriter(t *testing.T) {
@@ -189,6 +190,41 @@ func TestWritePlainAny(t *testing.T) {
assert.Contains(t, buf.String(), "runtime/debug.Stack")
}
func TestWritePlainDuplicate(t *testing.T) {
old := atomic.SwapUint32(&encoding, plainEncodingType)
t.Cleanup(func() {
atomic.StoreUint32(&encoding, old)
})
var buf bytes.Buffer
output(&buf, levelInfo, "foo", LogField{
Key: "first",
Value: "a",
}, LogField{
Key: "first",
Value: "b",
})
assert.Contains(t, buf.String(), "foo")
assert.NotContains(t, buf.String(), "first=a")
assert.Contains(t, buf.String(), "first=b")
buf.Reset()
output(&buf, levelInfo, "foo", LogField{
Key: "first",
Value: "a",
}, LogField{
Key: "first",
Value: "b",
}, LogField{
Key: "second",
Value: "c",
})
assert.Contains(t, buf.String(), "foo")
assert.NotContains(t, buf.String(), "first=a")
assert.Contains(t, buf.String(), "first=b")
assert.Contains(t, buf.String(), "second=c")
}
func TestLogWithLimitContentLength(t *testing.T) {
maxLen := atomic.LoadUint32(&maxContentLength)
atomic.StoreUint32(&maxContentLength, 10)
@@ -219,6 +255,117 @@ func TestLogWithLimitContentLength(t *testing.T) {
})
}
func TestComboWriter(t *testing.T) {
var mockWriters []Writer
for i := 0; i < 3; i++ {
mockWriters = append(mockWriters, new(tracedWriter))
}
cw := comboWriter{
writers: mockWriters,
}
t.Run("Alert", func(t *testing.T) {
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Alert", "test alert").Once()
}
cw.Alert("test alert")
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Alert", "test alert")
}
})
t.Run("Close", func(t *testing.T) {
for i := range cw.writers {
if i == 1 {
cw.writers[i].(*tracedWriter).On("Close").Return(errors.New("error")).Once()
} else {
cw.writers[i].(*tracedWriter).On("Close").Return(nil).Once()
}
}
err := cw.Close()
assert.Error(t, err)
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Close")
}
})
t.Run("Debug", func(t *testing.T) {
fields := []LogField{{Key: "key", Value: "value"}}
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Debug", "test debug", fields).Once()
}
cw.Debug("test debug", fields...)
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Debug", "test debug", fields)
}
})
t.Run("Error", func(t *testing.T) {
fields := []LogField{{Key: "key", Value: "value"}}
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Error", "test error", fields).Once()
}
cw.Error("test error", fields...)
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Error", "test error", fields)
}
})
t.Run("Info", func(t *testing.T) {
fields := []LogField{{Key: "key", Value: "value"}}
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Info", "test info", fields).Once()
}
cw.Info("test info", fields...)
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Info", "test info", fields)
}
})
t.Run("Severe", func(t *testing.T) {
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Severe", "test severe").Once()
}
cw.Severe("test severe")
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Severe", "test severe")
}
})
t.Run("Slow", func(t *testing.T) {
fields := []LogField{{Key: "key", Value: "value"}}
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Slow", "test slow", fields).Once()
}
cw.Slow("test slow", fields...)
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Slow", "test slow", fields)
}
})
t.Run("Stack", func(t *testing.T) {
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Stack", "test stack").Once()
}
cw.Stack("test stack")
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Stack", "test stack")
}
})
t.Run("Stat", func(t *testing.T) {
fields := []LogField{{Key: "key", Value: "value"}}
for _, mw := range cw.writers {
mw.(*tracedWriter).On("Stat", "test stat", fields).Once()
}
cw.Stat("test stat", fields...)
for _, mw := range cw.writers {
mw.(*tracedWriter).AssertCalled(t, "Stat", "test stat", fields)
}
})
}
type mockedEntry struct {
Level string `json:"level"`
Content string `json:"content"`
@@ -250,3 +397,44 @@ type hardToWriteWriter struct{}
func (h hardToWriteWriter) Write(_ []byte) (_ int, _ error) {
return 0, errors.New("write error")
}
type tracedWriter struct {
mock.Mock
}
func (w *tracedWriter) Alert(v any) {
w.Called(v)
}
func (w *tracedWriter) Close() error {
args := w.Called()
return args.Error(0)
}
func (w *tracedWriter) Debug(v any, fields ...LogField) {
w.Called(v, fields)
}
func (w *tracedWriter) Error(v any, fields ...LogField) {
w.Called(v, fields)
}
func (w *tracedWriter) Info(v any, fields ...LogField) {
w.Called(v, fields)
}
func (w *tracedWriter) Severe(v any) {
w.Called(v)
}
func (w *tracedWriter) Slow(v any, fields ...LogField) {
w.Called(v, fields)
}
func (w *tracedWriter) Stack(v any) {
w.Called(v)
}
func (w *tracedWriter) Stat(v any, fields ...LogField) {
w.Called(v, fields)
}

View File

@@ -12,7 +12,7 @@ const (
)
// Marshal marshals the given val and returns the map that contains the fields.
// optional=another is not implemented, and it's hard to implement and not common used.
// optional=another is not implemented, and it's hard to implement and not commonly used.
func Marshal(val any) (map[string]map[string]any, error) {
ret := make(map[string]map[string]any)
tp := reflect.TypeOf(val)

View File

@@ -18,6 +18,7 @@ import (
)
const (
comma = ","
defaultKeyName = "key"
delimiter = '.'
ignoreKey = "-"
@@ -36,10 +37,11 @@ var (
defaultCacheLock sync.Mutex
emptyMap = map[string]any{}
emptyValue = reflect.ValueOf(lang.Placeholder)
stringSliceType = reflect.TypeOf([]string{})
)
type (
// Unmarshaler is used to unmarshal with given tag key.
// Unmarshaler is used to unmarshal with the given tag key.
Unmarshaler struct {
key string
opts unmarshalOptions
@@ -50,6 +52,7 @@ type (
unmarshalOptions struct {
fillDefault bool
fromArray bool
fromString bool
opaqueKeys bool
canonicalKey func(key string) string
@@ -69,7 +72,7 @@ func NewUnmarshaler(key string, opts ...UnmarshalOption) *Unmarshaler {
return &unmarshaler
}
// UnmarshalKey unmarshals m into v with tag key.
// UnmarshalKey unmarshals m into v with the tag key.
func UnmarshalKey(m map[string]any, v any) error {
return keyUnmarshaler.Unmarshal(m, v)
}
@@ -79,41 +82,13 @@ func (u *Unmarshaler) Unmarshal(i, v any) error {
return u.unmarshal(i, v, "")
}
func (u *Unmarshaler) unmarshal(i, v any, fullName string) error {
valueType := reflect.TypeOf(v)
if valueType.Kind() != reflect.Ptr {
return errValueNotSettable
}
elemType := Deref(valueType)
switch iv := i.(type) {
case map[string]any:
if elemType.Kind() != reflect.Struct {
return errTypeMismatch
}
return u.unmarshalValuer(mapValuer(iv), v, fullName)
case []any:
if elemType.Kind() != reflect.Slice {
return errTypeMismatch
}
return u.fillSlice(elemType, reflect.ValueOf(v).Elem(), iv, fullName)
default:
return errUnsupportedType
}
}
// UnmarshalValuer unmarshals m into v.
func (u *Unmarshaler) UnmarshalValuer(m Valuer, v any) error {
return u.unmarshalValuer(simpleValuer{current: m}, v, "")
}
func (u *Unmarshaler) unmarshalValuer(m Valuer, v any, fullName string) error {
return u.unmarshalWithFullName(simpleValuer{current: m}, v, fullName)
}
func (u *Unmarshaler) fillMap(fieldType reflect.Type, value reflect.Value, mapValue any, fullName string) error {
func (u *Unmarshaler) fillMap(fieldType reflect.Type, value reflect.Value,
mapValue any, fullName string) error {
if !value.CanSet() {
return errValueNotSettable
}
@@ -154,7 +129,8 @@ func (u *Unmarshaler) fillMapFromString(value reflect.Value, mapValue any) error
return nil
}
func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, mapValue any, fullName string) error {
func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value,
mapValue any, fullName string) error {
if !value.CanSet() {
return errValueNotSettable
}
@@ -170,13 +146,18 @@ func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, map
baseType := fieldType.Elem()
dereffedBaseType := Deref(baseType)
dereffedBaseKind := dereffedBaseType.Kind()
conv := reflect.MakeSlice(reflect.SliceOf(baseType), refValue.Len(), refValue.Cap())
if refValue.Len() == 0 {
value.Set(conv)
value.Set(reflect.MakeSlice(reflect.SliceOf(baseType), 0, 0))
return nil
}
if u.opts.fromArray {
refValue = makeStringSlice(refValue)
}
var valid bool
conv := reflect.MakeSlice(reflect.SliceOf(baseType), refValue.Len(), refValue.Cap())
for i := 0; i < refValue.Len(); i++ {
ithValue := refValue.Index(i).Interface()
if ithValue == nil {
@@ -188,17 +169,9 @@ func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, map
switch dereffedBaseKind {
case reflect.Struct:
target := reflect.New(dereffedBaseType)
val, ok := ithValue.(map[string]any)
if !ok {
return errTypeMismatch
}
if err := u.unmarshal(val, target.Interface(), sliceFullName); err != nil {
if err := u.fillStructElement(baseType, conv.Index(i), ithValue, sliceFullName); err != nil {
return err
}
SetValue(fieldType.Elem(), conv.Index(i), target.Elem())
case reflect.Slice:
if err := u.fillSlice(dereffedBaseType, conv.Index(i), ithValue, sliceFullName); err != nil {
return err
@@ -233,7 +206,7 @@ func (u *Unmarshaler) fillSliceFromString(fieldType reflect.Type, value reflect.
return errUnsupportedType
}
baseFieldType := Deref(fieldType.Elem())
baseFieldType := fieldType.Elem()
baseFieldKind := baseFieldType.Kind()
conv := reflect.MakeSlice(reflect.SliceOf(baseFieldType), len(slice), cap(slice))
@@ -254,29 +227,39 @@ func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int,
}
ithVal := slice.Index(index)
ithValType := ithVal.Type()
switch v := value.(type) {
case fmt.Stringer:
return setValueFromString(baseKind, ithVal, v.String())
case string:
return setValueFromString(baseKind, ithVal, v)
case map[string]any:
return u.fillMap(ithVal.Type(), ithVal, value, fullName)
// deref to handle both pointer and non-pointer types.
switch Deref(ithValType).Kind() {
case reflect.Struct:
return u.fillStructElement(ithValType, ithVal, v, fullName)
case reflect.Map:
return u.fillMap(ithValType, ithVal, value, fullName)
default:
return errTypeMismatch
}
default:
// don't need to consider the difference between int, int8, int16, int32, int64,
// uint, uint8, uint16, uint32, uint64, because they're handled as json.Number.
if ithVal.Kind() == reflect.Ptr {
baseType := Deref(ithVal.Type())
baseType := Deref(ithValType)
if !reflect.TypeOf(value).AssignableTo(baseType) {
return errTypeMismatch
}
target := reflect.New(baseType).Elem()
target.Set(reflect.ValueOf(value))
SetValue(ithVal.Type(), ithVal, target)
SetValue(ithValType, ithVal, target)
return nil
}
if !reflect.TypeOf(value).AssignableTo(ithVal.Type()) {
if !reflect.TypeOf(value).AssignableTo(ithValType) {
return errTypeMismatch
}
@@ -307,7 +290,51 @@ func (u *Unmarshaler) fillSliceWithDefault(derefedType reflect.Type, value refle
return u.fillSlice(derefedType, value, slice, fullName)
}
func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue any, fullName string) (reflect.Value, error) {
// fillStructElement unmarshals value, which must be a map[string]any,
// into target, a slice element whose type baseType is a struct or a
// pointer to struct. Any other value shape yields errTypeMismatch.
func (u *Unmarshaler) fillStructElement(baseType reflect.Type, target reflect.Value,
	value any, fullName string) error {
	val, ok := value.(map[string]any)
	if !ok {
		return errTypeMismatch
	}

	// use Deref(baseType) to get the base type in case the type is a pointer type.
	ptr := reflect.New(Deref(baseType))
	if err := u.unmarshal(val, ptr.Interface(), fullName); err != nil {
		return err
	}

	SetValue(baseType, target, ptr.Elem())
	return nil
}
// fillUnmarshalerStruct fills value, whose (dereferenced) type implements
// json.Unmarshaler, by invoking UnmarshalJSON with targetValue.
// Only the json tag key is supported; any other tag key returns
// errUnsupportedType.
func (u *Unmarshaler) fillUnmarshalerStruct(fieldType reflect.Type,
	value reflect.Value, targetValue string) error {
	if !value.CanSet() {
		return errValueNotSettable
	}

	baseType := Deref(fieldType)
	target := reflect.New(baseType)
	switch u.key {
	case jsonTagKey:
		unmarshaler, ok := target.Interface().(json.Unmarshaler)
		if !ok {
			return errUnsupportedType
		}

		if err := unmarshaler.UnmarshalJSON([]byte(targetValue)); err != nil {
			return err
		}
	default:
		return errUnsupportedType
	}

	// NOTE(review): target is a *T, so this assumes fieldType is a pointer
	// type (or assignable from *T) — confirm at the call site.
	value.Set(target)
	return nil
}
func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue any,
fullName string) (reflect.Value, error) {
mapType := reflect.MapOf(keyType, elemType)
valueType := reflect.TypeOf(mapValue)
if mapType == valueType {
@@ -399,6 +426,15 @@ func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue any,
return targetValue, nil
}
// implementsUnmarshaler reports whether t implements json.Unmarshaler.
// The check is only honored when unmarshaling with the json tag key;
// for any other tag key it always reports false.
func (u *Unmarshaler) implementsUnmarshaler(t reflect.Type) bool {
	if u.key != jsonTagKey {
		return false
	}

	return t.Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem())
}
func (u *Unmarshaler) parseOptionsWithContext(field reflect.StructField, m Valuer, fullName string) (
string, *fieldOptionsWithContext, error) {
key, options, err := parseKeyAndOptions(u.key, field)
@@ -576,6 +612,8 @@ func (u *Unmarshaler) processFieldNotFromString(fieldType reflect.Type, value re
return u.fillSliceFromString(fieldType, value, mapValue, fullName)
case valueKind == reflect.String && derefedFieldType == durationType:
return fillDurationValue(fieldType, value, mapValue.(string))
case valueKind == reflect.String && typeKind == reflect.Struct && u.implementsUnmarshaler(fieldType):
return u.fillUnmarshalerStruct(fieldType, value, mapValue.(string))
default:
return u.processFieldPrimitive(fieldType, value, mapValue, opts, fullName)
}
@@ -629,7 +667,7 @@ func (u *Unmarshaler) processFieldPrimitiveWithJSONNumber(fieldType reflect.Type
return err
}
// if value is a pointer, we need to check overflow with the pointer's value.
// if the value is a pointer, we need to check overflow with the pointer's value.
derefedValue := value
for derefedValue.Type().Kind() == reflect.Ptr {
derefedValue = derefedValue.Elem()
@@ -771,6 +809,19 @@ func (u *Unmarshaler) processNamedField(field reflect.StructField, value reflect
return u.processNamedFieldWithoutValue(field.Type, value, opts, fullName)
}
if u.opts.fromArray {
fieldKind := field.Type.Kind()
if fieldKind != reflect.Slice && fieldKind != reflect.Array {
valueKind := reflect.TypeOf(mapValue).Kind()
if valueKind == reflect.Slice || valueKind == reflect.Array {
val := reflect.ValueOf(mapValue)
if val.Len() > 0 {
mapValue = val.Index(0).Interface()
}
}
}
}
return u.processNamedFieldWithValue(field.Type, value, valueWithParent{
value: mapValue,
parent: valuer,
@@ -898,6 +949,35 @@ func (u *Unmarshaler) processNamedFieldWithoutValue(fieldType reflect.Type, valu
return nil
}
// unmarshal dispatches i into v based on the value shape:
// a map[string]any fills a struct, a []any fills a slice;
// anything else yields errUnsupportedType.
// v must be a pointer, and fullName is propagated to nested fills
// as the field path context.
func (u *Unmarshaler) unmarshal(i, v any, fullName string) error {
	valueType := reflect.TypeOf(v)
	if valueType.Kind() != reflect.Ptr {
		return errValueNotSettable
	}

	elemType := Deref(valueType)
	switch iv := i.(type) {
	case map[string]any:
		if elemType.Kind() != reflect.Struct {
			return errTypeMismatch
		}

		return u.unmarshalValuer(mapValuer(iv), v, fullName)
	case []any:
		if elemType.Kind() != reflect.Slice {
			return errTypeMismatch
		}

		return u.fillSlice(elemType, reflect.ValueOf(v).Elem(), iv, fullName)
	default:
		return errUnsupportedType
	}
}
// unmarshalValuer wraps m in a simpleValuer and unmarshals it into v.
func (u *Unmarshaler) unmarshalValuer(m Valuer, v any, fullName string) error {
	return u.unmarshalWithFullName(simpleValuer{current: m}, v, fullName)
}
func (u *Unmarshaler) unmarshalWithFullName(m valuerWithParent, v any, fullName string) error {
rv := reflect.ValueOf(v)
if err := ValidatePtr(rv); err != nil {
@@ -950,6 +1030,16 @@ func WithDefault() UnmarshalOption {
}
}
// WithFromArray customizes an Unmarshaler to convert single-element array
// values into scalar (non-array) target fields.
// For example, the value [hello] can be unmarshaled into a field of type
// `string` as well as `[]string`.
// Typically, this option is used for unmarshaling form values.
func WithFromArray() UnmarshalOption {
	return func(opt *unmarshalOptions) {
		opt.fromArray = true
	}
}
// WithOpaqueKeys customizes an Unmarshaler with opaque keys.
// Opaque keys are keys that are not processed by the unmarshaler.
func WithOpaqueKeys() UnmarshalOption {
@@ -1082,6 +1172,35 @@ func join(elem ...string) string {
return builder.String()
}
// makeStringSlice expands a single-element string slice whose only element
// contains commas into a slice of the comma-separated parts.
// For example, ["a,b,c"] becomes ["a", "b", "c"]; empty parts are kept.
// Any other input is returned unchanged.
func makeStringSlice(refValue reflect.Value) reflect.Value {
	if refValue.Len() != 1 {
		return refValue
	}

	first := refValue.Index(0)
	if first.Kind() != reflect.String {
		return refValue
	}

	// only plain strings are expanded; other string-kinded values pass through
	str, ok := first.Interface().(string)
	if !ok {
		return refValue
	}

	parts := strings.Split(str, comma)
	if len(parts) <= 1 {
		return refValue
	}

	expanded := reflect.MakeSlice(stringSliceType, len(parts), len(parts))
	for i := range parts {
		// empty strings are allowed as elements
		expanded.Index(i).SetString(parts[i])
	}

	return expanded
}
// newInitError returns the error reported when a required field has no value.
func newInitError(name string) error {
	const format = "field %q is not set"
	return fmt.Errorf(format, name)
}

View File

@@ -2,6 +2,7 @@ package mapping
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strconv"
@@ -260,6 +261,7 @@ func TestUnmarshalInt(t *testing.T) {
Int64FromStr int64 `key:"int64str,string"`
DefaultInt int64 `key:"defaultint,default=11"`
Optional int `key:"optional,optional"`
IntOptDef int `key:"intopt,optional,default=6"`
}
m := map[string]any{
"int": 1,
@@ -288,6 +290,7 @@ func TestUnmarshalInt(t *testing.T) {
ast.Equal(int64(9), in.Int64)
ast.Equal(int64(10), in.Int64FromStr)
ast.Equal(int64(11), in.DefaultInt)
ast.Equal(6, in.IntOptDef)
}
}
@@ -348,7 +351,7 @@ func TestUnmarshalIntSliceOfPtr(t *testing.T) {
assert.Error(t, UnmarshalKey(m, &in))
})
t.Run("int slice with nil", func(t *testing.T) {
t.Run("int slice with nil element", func(t *testing.T) {
type inner struct {
Ints []int `key:"ints"`
}
@@ -362,6 +365,21 @@ func TestUnmarshalIntSliceOfPtr(t *testing.T) {
assert.Empty(t, in.Ints)
}
})
t.Run("int slice with nil", func(t *testing.T) {
type inner struct {
Ints []int `key:"ints"`
}
m := map[string]any{
"ints": []any(nil),
}
var in inner
if assert.NoError(t, UnmarshalKey(m, &in)) {
assert.Empty(t, in.Ints)
}
})
}
func TestUnmarshalIntWithDefault(t *testing.T) {
@@ -1371,20 +1389,82 @@ func TestUnmarshalWithFloatPtr(t *testing.T) {
}
func TestUnmarshalIntSlice(t *testing.T) {
var v struct {
Ages []int `key:"ages"`
Slice []int `key:"slice"`
}
m := map[string]any{
"ages": []int{1, 2},
"slice": []any{},
}
t.Run("int slice from int", func(t *testing.T) {
var v struct {
Ages []int `key:"ages"`
Slice []int `key:"slice"`
}
m := map[string]any{
"ages": []int{1, 2},
"slice": []any{},
}
ast := assert.New(t)
if ast.NoError(UnmarshalKey(m, &v)) {
ast.ElementsMatch([]int{1, 2}, v.Ages)
ast.Equal([]int{}, v.Slice)
}
ast := assert.New(t)
if ast.NoError(UnmarshalKey(m, &v)) {
ast.ElementsMatch([]int{1, 2}, v.Ages)
ast.Equal([]int{}, v.Slice)
}
})
t.Run("int slice from one int", func(t *testing.T) {
var v struct {
Ages []int `key:"ages"`
}
m := map[string]any{
"ages": []int{2},
}
ast := assert.New(t)
unmarshaler := NewUnmarshaler(defaultKeyName, WithFromArray())
if ast.NoError(unmarshaler.Unmarshal(m, &v)) {
ast.ElementsMatch([]int{2}, v.Ages)
}
})
t.Run("int slice from one int string", func(t *testing.T) {
var v struct {
Ages []int `key:"ages"`
}
m := map[string]any{
"ages": []string{"2"},
}
ast := assert.New(t)
unmarshaler := NewUnmarshaler(defaultKeyName, WithFromArray())
if ast.NoError(unmarshaler.Unmarshal(m, &v)) {
ast.ElementsMatch([]int{2}, v.Ages)
}
})
t.Run("int slice from one json.Number", func(t *testing.T) {
var v struct {
Ages []int `key:"ages"`
}
m := map[string]any{
"ages": []json.Number{"2"},
}
ast := assert.New(t)
unmarshaler := NewUnmarshaler(defaultKeyName, WithFromArray())
if ast.NoError(unmarshaler.Unmarshal(m, &v)) {
ast.ElementsMatch([]int{2}, v.Ages)
}
})
t.Run("int slice from one int strings", func(t *testing.T) {
var v struct {
Ages []int `key:"ages"`
}
m := map[string]any{
"ages": []string{"1,2"},
}
ast := assert.New(t)
unmarshaler := NewUnmarshaler(defaultKeyName, WithFromArray())
if ast.NoError(unmarshaler.Unmarshal(m, &v)) {
ast.ElementsMatch([]int{1, 2}, v.Ages)
}
})
}
func TestUnmarshalString(t *testing.T) {
@@ -1439,6 +1519,36 @@ func TestUnmarshalStringSliceFromString(t *testing.T) {
}
})
t.Run("slice from empty string", func(t *testing.T) {
var v struct {
Names []string `key:"names"`
}
m := map[string]any{
"names": []string{""},
}
ast := assert.New(t)
unmarshaler := NewUnmarshaler(defaultKeyName, WithFromArray())
if ast.NoError(unmarshaler.Unmarshal(m, &v)) {
ast.ElementsMatch([]string{""}, v.Names)
}
})
t.Run("slice from empty and valid string", func(t *testing.T) {
var v struct {
Names []string `key:"names"`
}
m := map[string]any{
"names": []string{","},
}
ast := assert.New(t)
unmarshaler := NewUnmarshaler(defaultKeyName, WithFromArray())
if ast.NoError(unmarshaler.Unmarshal(m, &v)) {
ast.ElementsMatch([]string{"", ""}, v.Names)
}
})
t.Run("slice from string with slice error", func(t *testing.T) {
var v struct {
Names []int `key:"names"`
@@ -5636,6 +5746,62 @@ func TestUnmarshalFromStringSliceForTypeMismatch(t *testing.T) {
}, &v))
}
// TestUnmarshalWithFromArray covers the WithFromArray option for both slice
// and scalar targets, including empty, missing and optional values.
func TestUnmarshalWithFromArray(t *testing.T) {
	t.Run("array", func(t *testing.T) {
		var v struct {
			Value []string `key:"value"`
		}
		u := NewUnmarshaler("key", WithFromArray())
		input := map[string]any{
			"value": []string{"foo", "bar"},
		}
		if assert.NoError(t, u.Unmarshal(input, &v)) {
			assert.ElementsMatch(t, []string{"foo", "bar"}, v.Value)
		}
	})

	t.Run("not array", func(t *testing.T) {
		var v struct {
			Value string `key:"value"`
		}
		u := NewUnmarshaler("key", WithFromArray())
		input := map[string]any{
			"value": []string{"foo"},
		}
		if assert.NoError(t, u.Unmarshal(input, &v)) {
			assert.Equal(t, "foo", v.Value)
		}
	})

	t.Run("not array and empty", func(t *testing.T) {
		var v struct {
			Value string `key:"value"`
		}
		u := NewUnmarshaler("key", WithFromArray())
		input := map[string]any{
			"value": []string{""},
		}
		if assert.NoError(t, u.Unmarshal(input, &v)) {
			assert.Empty(t, v.Value)
		}
	})

	t.Run("not array and no value", func(t *testing.T) {
		var v struct {
			Value string `key:"value"`
		}
		u := NewUnmarshaler("key", WithFromArray())
		assert.Error(t, u.Unmarshal(map[string]any{}, &v))
	})

	t.Run("not array and no value and optional", func(t *testing.T) {
		var v struct {
			Value string `key:"value,optional"`
		}
		u := NewUnmarshaler("key", WithFromArray())
		if assert.NoError(t, u.Unmarshal(map[string]any{}, &v)) {
			assert.Empty(t, v.Value)
		}
	})
}
func TestUnmarshalWithOpaqueKeys(t *testing.T) {
var v struct {
Opaque string `key:"opaque.key"`
@@ -5760,6 +5926,81 @@ func TestUnmarshalWithIgnoreFields(t *testing.T) {
}
}
// TestUnmarshal_Unmarshaler exercises fields implementing json.Unmarshaler:
// success, failure, a non-unmarshaler pointer target, and a key mismatch.
func TestUnmarshal_Unmarshaler(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		var v struct {
			Foo *mockUnmarshaler `json:"name"`
		}
		payload := []byte(`{"name": "hello"}`)
		assert.NoError(t, UnmarshalJsonBytes(payload, &v))
		assert.Equal(t, "hello", v.Foo.Name)
	})

	t.Run("failure", func(t *testing.T) {
		var v struct {
			Foo *mockUnmarshalerWithError `json:"name"`
		}
		payload := []byte(`{"name": "hello"}`)
		assert.Error(t, UnmarshalJsonBytes(payload, &v))
	})

	t.Run("not json unmarshaler", func(t *testing.T) {
		var v struct {
			Foo *struct {
				Name string
			} `key:"name"`
		}
		u := NewUnmarshaler(defaultKeyName)
		assert.Error(t, u.Unmarshal(map[string]any{
			"name": "hello",
		}, &v))
	})

	t.Run("not with json key", func(t *testing.T) {
		var v struct {
			Foo *mockUnmarshaler `json:"name"`
		}
		u := NewUnmarshaler(defaultKeyName)
		// the unmarshaler looks up "key" tags, so the "name" entry is ignored
		assert.NoError(t, u.Unmarshal(map[string]any{
			"name": "hello",
		}, &v))
		assert.Nil(t, v.Foo)
	})
}
// TestParseJsonStringValue verifies that a JSON-encoded string value can be
// parsed into a slice-of-struct field, and that a type mismatch produces an
// error instead of a panic.
func TestParseJsonStringValue(t *testing.T) {
	t.Run("string", func(t *testing.T) {
		type GoodsInfo struct {
			Sku int64 `json:"sku,optional"`
		}

		type GetReq struct {
			GoodsList []*GoodsInfo `json:"goods_list"`
		}

		input := map[string]any{"goods_list": "[{\"sku\":11},{\"sku\":22}]"}
		var v GetReq
		assert.NotPanics(t, func() {
			assert.NoError(t, UnmarshalJsonMap(input, &v))
			assert.Equal(t, 2, len(v.GoodsList))
			skus := []int64{v.GoodsList[0].Sku, v.GoodsList[1].Sku}
			assert.ElementsMatch(t, []int64{11, 22}, skus)
		})
	})

	t.Run("string with invalid type", func(t *testing.T) {
		type GetReq struct {
			GoodsList []*int `json:"goods_list"`
		}

		input := map[string]any{"goods_list": "[{\"sku\":11},{\"sku\":22}]"}
		var v GetReq
		assert.NotPanics(t, func() {
			assert.Error(t, UnmarshalJsonMap(input, &v))
		})
	})
}
func BenchmarkDefaultValue(b *testing.B) {
for i := 0; i < b.N; i++ {
var a struct {
@@ -5866,10 +6107,27 @@ type mockValuerWithParent struct {
ok bool
}
func (m mockValuerWithParent) Value(key string) (any, bool) {
func (m mockValuerWithParent) Value(_ string) (any, bool) {
return m.value, m.ok
}
func (m mockValuerWithParent) Parent() valuerWithParent {
return m.parent
}
// mockUnmarshaler implements json.Unmarshaler by recording the raw bytes
// it receives as a string, for testing custom-unmarshaler handling.
type mockUnmarshaler struct {
	Name string
}

// UnmarshalJSON stores the raw JSON payload verbatim and never fails.
func (m *mockUnmarshaler) UnmarshalJSON(b []byte) error {
	m.Name = string(b)
	return nil
}
// mockUnmarshalerWithError is a json.Unmarshaler stub that always fails,
// used to exercise error paths of custom-unmarshaler handling.
type mockUnmarshalerWithError struct {
	Name string
}

// UnmarshalJSON always returns an error; the input is ignored.
// The parameter is blank-named for consistency with the other unused-arg
// mocks in this package (e.g. mockValuerWithParent.Value).
func (m *mockUnmarshalerWithError) UnmarshalJSON(_ []byte) error {
	return errors.New("foo")
}

View File

@@ -416,7 +416,7 @@ func parseOption(fieldOpts *fieldOptions, fieldName, option string) error {
}
// parseOptions parses the given options in tag.
// for example: `json:"name,options=foo|bar"` or `json:"name,options=[foo,bar]"`
// for example, `json:"name,options=foo|bar"` or `json:"name,options=[foo,bar]"`
func parseOptions(val string) []string {
if len(val) == 0 {
return nil

View File

@@ -26,9 +26,9 @@ type (
parent valuerWithParent
}
// mapValuer is a type for map to meet the Valuer interface.
// mapValuer is a type for the map to meet the Valuer interface.
mapValuer map[string]any
// simpleValuer is a type to get value from current node.
// simpleValuer is a type to get value from the current node.
simpleValuer node
// recursiveValuer is a type to get the value recursively from current and parent nodes.
recursiveValuer node

View File

@@ -1,13 +1,13 @@
package mathx
type numerical interface {
type Numerical interface {
~int | ~int8 | ~int16 | ~int32 | ~int64 |
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 |
~float32 | ~float64
}
// AtLeast returns the greater of x or lower.
func AtLeast[T numerical](x, lower T) T {
func AtLeast[T Numerical](x, lower T) T {
if x < lower {
return lower
}
@@ -15,7 +15,7 @@ func AtLeast[T numerical](x, lower T) T {
}
// AtMost returns the smaller of x or upper.
func AtMost[T numerical](x, upper T) T {
func AtMost[T Numerical](x, upper T) T {
if x > upper {
return upper
}
@@ -23,7 +23,7 @@ func AtMost[T numerical](x, upper T) T {
}
// Between returns the value of x clamped to the range [lower, upper].
func Between[T numerical](x, lower, upper T) T {
func Between[T Numerical](x, lower, upper T) T {
if x < lower {
return lower
}

View File

@@ -363,9 +363,7 @@ func newGuardedWriter[T any](ctx context.Context, channel chan<- T, done <-chan
func (gw guardedWriter[T]) Write(v T) {
select {
case <-gw.ctx.Done():
return
case <-gw.done:
return
default:
gw.channel <- v
}

View File

@@ -36,6 +36,6 @@ type fakeCreator struct {
err error
}
func (fc fakeCreator) Create(name string) (file *os.File, err error) {
func (fc fakeCreator) Create(_ string) (file *os.File, err error) {
return fc.file, fc.err
}

View File

@@ -14,17 +14,29 @@ import (
)
const (
wrapUpTime = time.Second
// why we use 5500 milliseconds is because most of our queue are blocking mode with 5 seconds
waitTime = 5500 * time.Millisecond
// defaultWrapUpTime is the default time to wait before calling wrap up listeners.
defaultWrapUpTime = time.Second
// defaultWaitTime is the default time to wait before force quitting.
// why we use 5500 milliseconds is because most of our queues are blocking mode with 5 seconds
defaultWaitTime = 5500 * time.Millisecond
)
var (
wrapUpListeners = new(listenerManager)
shutdownListeners = new(listenerManager)
delayTimeBeforeForceQuit = waitTime
wrapUpListeners = new(listenerManager)
shutdownListeners = new(listenerManager)
wrapUpTime = defaultWrapUpTime
waitTime = defaultWaitTime
shutdownLock sync.Mutex
)
// ShutdownConf defines the shutdown configuration for the process.
// Zero values keep the package defaults (see Setup).
type ShutdownConf struct {
	// WrapUpTime is the time to wait before calling shutdown listeners.
	WrapUpTime time.Duration `json:",default=1s"`
	// WaitTime is the time to wait before force quitting.
	WaitTime time.Duration `json:",default=5.5s"`
}
// AddShutdownListener adds fn as a shutdown listener.
// The returned func can be used to wait for fn getting called.
func AddShutdownListener(fn func()) (waitForCalled func()) {
@@ -39,7 +51,21 @@ func AddWrapUpListener(fn func()) (waitForCalled func()) {
// SetTimeToForceQuit sets the waiting time before force quitting.
func SetTimeToForceQuit(duration time.Duration) {
delayTimeBeforeForceQuit = duration
shutdownLock.Lock()
defer shutdownLock.Unlock()
waitTime = duration
}
// Setup updates the shutdown timing settings from the given configuration.
// Zero or negative durations are ignored, so the current values (the package
// defaults unless previously overridden) are kept. Safe for concurrent use.
func Setup(conf ShutdownConf) {
	shutdownLock.Lock()
	defer shutdownLock.Unlock()

	if conf.WrapUpTime > 0 {
		wrapUpTime = conf.WrapUpTime
	}

	if conf.WaitTime > 0 {
		waitTime = conf.WaitTime
	}
}
// Shutdown calls the registered shutdown listeners, only for test purpose.
@@ -61,8 +87,12 @@ func gracefulStop(signals chan os.Signal, sig syscall.Signal) {
time.Sleep(wrapUpTime)
go shutdownListeners.notifyListeners()
time.Sleep(delayTimeBeforeForceQuit - wrapUpTime)
logx.Infof("Still alive after %v, going to force kill the process...", delayTimeBeforeForceQuit)
shutdownLock.Lock()
remainingTime := waitTime - wrapUpTime
shutdownLock.Unlock()
time.Sleep(remainingTime)
logx.Infof("Still alive after %v, going to force kill the process...", waitTime)
_ = syscall.Kill(syscall.Getpid(), sig)
}
@@ -82,6 +112,9 @@ func (lm *listenerManager) addListener(fn func()) (waitForCalled func()) {
})
lm.lock.Unlock()
// we can return lm.waitGroup.Wait directly,
// but we want to make the returned func more readable.
// creating an extra closure would be negligible in practice.
return func() {
lm.waitGroup.Wait()
}

View File

@@ -3,6 +3,7 @@
package proc
import (
"sync/atomic"
"testing"
"time"
@@ -10,8 +11,12 @@ import (
)
func TestShutdown(t *testing.T) {
t.Cleanup(restoreSettings)
SetTimeToForceQuit(time.Hour)
assert.Equal(t, time.Hour, delayTimeBeforeForceQuit)
shutdownLock.Lock()
assert.Equal(t, time.Hour, waitTime)
shutdownLock.Unlock()
var val int
called := AddWrapUpListener(func() {
@@ -29,7 +34,53 @@ func TestShutdown(t *testing.T) {
assert.Equal(t, 3, val)
}
// TestShutdownWithMultipleServices verifies that every registered shutdown
// listener runs on Shutdown and that waiting via the returned funcs observes
// all of their effects.
func TestShutdownWithMultipleServices(t *testing.T) {
	t.Cleanup(restoreSettings)

	SetTimeToForceQuit(time.Hour)
	// read waitTime under the lock that guards it
	shutdownLock.Lock()
	assert.Equal(t, time.Hour, waitTime)
	shutdownLock.Unlock()

	var val int32
	called1 := AddShutdownListener(func() {
		atomic.AddInt32(&val, 1)
	})
	called2 := AddShutdownListener(func() {
		atomic.AddInt32(&val, 2)
	})
	Shutdown()
	// block until each listener has been called
	called1()
	called2()

	assert.Equal(t, int32(3), atomic.LoadInt32(&val))
}
// TestWrapUpWithMultipleServices verifies that every registered wrap-up
// listener runs on WrapUp and that waiting via the returned funcs observes
// all of their effects.
func TestWrapUpWithMultipleServices(t *testing.T) {
	t.Cleanup(restoreSettings)

	SetTimeToForceQuit(time.Hour)
	// read waitTime under the lock that guards it
	shutdownLock.Lock()
	assert.Equal(t, time.Hour, waitTime)
	shutdownLock.Unlock()

	var val int32
	called1 := AddWrapUpListener(func() {
		atomic.AddInt32(&val, 1)
	})
	called2 := AddWrapUpListener(func() {
		atomic.AddInt32(&val, 2)
	})
	WrapUp()
	// block until each listener has been called
	called1()
	called2()

	assert.Equal(t, int32(3), atomic.LoadInt32(&val))
}
func TestNotifyMoreThanOnce(t *testing.T) {
t.Cleanup(restoreSettings)
ch := make(chan struct{}, 1)
go func() {
@@ -58,3 +109,38 @@ func TestNotifyMoreThanOnce(t *testing.T) {
t.Fatal("timeout, check error logs")
}
}
// TestSetup verifies Setup for both an explicit configuration and a
// zero-valued one (which must leave the defaults in place).
func TestSetup(t *testing.T) {
	t.Run("valid time", func(t *testing.T) {
		defer restoreSettings()

		Setup(ShutdownConf{
			WrapUpTime: time.Second * 2,
			WaitTime:   time.Second * 30,
		})

		shutdownLock.Lock()
		assert.Equal(t, time.Second*2, wrapUpTime)
		assert.Equal(t, time.Second*30, waitTime)
		shutdownLock.Unlock()
	})

	// fixed: this subtest was also named "valid time", which hid it from
	// `go test -run` selection and made failure output ambiguous.
	t.Run("zero time", func(t *testing.T) {
		defer restoreSettings()

		Setup(ShutdownConf{})

		shutdownLock.Lock()
		assert.Equal(t, defaultWrapUpTime, wrapUpTime)
		assert.Equal(t, defaultWaitTime, waitTime)
		shutdownLock.Unlock()
	})
}
// restoreSettings resets the shutdown timing globals to their package
// defaults; registered as cleanup by the tests that mutate them.
func restoreSettings() {
	shutdownLock.Lock()
	defer shutdownLock.Unlock()

	waitTime = defaultWaitTime
	wrapUpTime = defaultWrapUpTime
}

View File

@@ -76,7 +76,7 @@ func (q *Queue) AddListener(listener Listener) {
q.listeners = append(q.listeners, listener)
}
// Broadcast broadcasts message to all event channels.
// Broadcast broadcasts the message to all event channels.
func (q *Queue) Broadcast(message any) {
go func() {
q.eventLock.Lock()
@@ -202,7 +202,7 @@ func (q *Queue) produce() {
}
func (q *Queue) produceOne(producer Producer) (string, bool) {
// avoid panic quit the producer, just log it and continue
// avoid panic quit the producer, log it and continue
defer rescue.Recover()
return producer.Produce()

View File

@@ -67,7 +67,7 @@ func (p *mockedPusher) Name() string {
return p.name
}
func (p *mockedPusher) Push(s string) error {
func (p *mockedPusher) Push(_ string) error {
if proba.TrueOnProba(failProba) {
return errors.New("dummy")
}

View File

@@ -37,6 +37,7 @@ type (
Prometheus prometheus.Config `json:",optional"`
Telemetry trace.Config `json:",optional"`
DevServer DevServerConfig `json:",optional"`
Shutdown proc.ShutdownConf `json:",optional"`
}
)
@@ -61,6 +62,7 @@ func (sc ServiceConf) SetUp() error {
sc.Telemetry.Name = sc.Name
}
trace.StartAgent(sc.Telemetry)
proc.Setup(sc.Shutdown)
proc.AddShutdownListener(func() {
trace.StopAgent()
})

View File

@@ -76,9 +76,14 @@ func (sg *ServiceGroup) doStart() {
}
func (sg *ServiceGroup) doStop() {
group := threading.NewRoutineGroup()
for _, service := range sg.services {
service.Stop()
// new variable to avoid closure problems, can be removed after go 1.22
// see https://golang.org/doc/faq#closures_and_goroutines
service := service
group.Run(service.Stop)
}
group.Wait()
}
// WithStart wraps a start func as a Service.

View File

@@ -71,6 +71,6 @@ func (m *mockedWriter) Write(report *StatReport) error {
type badWriter struct{}
func (b *badWriter) Write(report *StatReport) error {
func (b *badWriter) Write(_ *StatReport) error {
return errors.New("bad")
}

View File

@@ -1,6 +1,7 @@
package stat
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
@@ -28,3 +29,14 @@ func TestRemoteWriterFail(t *testing.T) {
})
assert.NotNil(t, err)
}
// TestRemoteWriterError verifies that a transport-level error from the
// remote endpoint is surfaced by Write.
func TestRemoteWriterError(t *testing.T) {
	defer gock.Off()
	// make the mocked endpoint fail at the connection level
	gock.New("http://foo.com").ReplyError(errors.New("foo"))
	writer := NewRemoteWriter("http://foo.com")
	err := writer.Write(&StatReport{
		Name: "bar",
	})
	assert.NotNil(t, err)
}

View File

@@ -2,7 +2,7 @@ package stat
import "time"
// A Task is a task that is reported to Metrics.
// A Task is a task reported to Metrics.
type Task struct {
Drop bool
Duration time.Duration

View File

@@ -41,7 +41,7 @@ func RawFieldNames(in any, postgreSql ...bool) []string {
out = append(out, fmt.Sprintf("`%s`", fi.Name))
}
default:
// get tag name with the tag opton, e.g.:
// get tag name with the tag option, e.g.:
// `db:"id"`
// `db:"id,type=char,length=16"`
// `db:",type=char,length=16"`

View File

@@ -8,7 +8,7 @@ const (
)
type (
// An Options is used to store the cache options.
// Options is used to store the cache options.
Options struct {
Expiry time.Duration
NotFoundExpiry time.Duration

View File

@@ -7,6 +7,10 @@ import (
"go.mongodb.org/mongo-driver/mongo/integration/mtest"
)
func init() {
_ = mtest.Setup()
}
func TestClientManger_getClient(t *testing.T) {
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
mt.Run("test", func(mt *mtest.T) {

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/zeromicro/go-zero/core/breaker"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/timex"
"go.mongodb.org/mongo-driver/mongo"
mopt "go.mongodb.org/mongo-driver/mongo/options"
@@ -15,7 +16,8 @@ import (
const (
defaultSlowThreshold = time.Millisecond * 500
// spanName is the span name of the mongo calls.
spanName = "mongo"
spanName = "mongo"
duplicateKeyCode = 11000
// mongodb method names
aggregate = "Aggregate"
@@ -141,7 +143,7 @@ func (c *decoratedCollection) Aggregate(ctx context.Context, pipeline any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, aggregate, starTime, err)
@@ -161,7 +163,7 @@ func (c *decoratedCollection) BulkWrite(ctx context.Context, models []mongo.Writ
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, bulkWrite, startTime, err)
@@ -181,7 +183,7 @@ func (c *decoratedCollection) CountDocuments(ctx context.Context, filter any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, countDocuments, startTime, err)
@@ -201,7 +203,7 @@ func (c *decoratedCollection) DeleteMany(ctx context.Context, filter any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, deleteMany, startTime, err)
@@ -221,7 +223,7 @@ func (c *decoratedCollection) DeleteOne(ctx context.Context, filter any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, deleteOne, startTime, err, filter)
@@ -241,7 +243,7 @@ func (c *decoratedCollection) Distinct(ctx context.Context, fieldName string, fi
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, distinct, startTime, err)
@@ -261,7 +263,7 @@ func (c *decoratedCollection) EstimatedDocumentCount(ctx context.Context,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, estimatedDocumentCount, startTime, err)
@@ -281,7 +283,7 @@ func (c *decoratedCollection) Find(ctx context.Context, filter any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, find, startTime, err, filter)
@@ -301,7 +303,7 @@ func (c *decoratedCollection) FindOne(ctx context.Context, filter any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, findOne, startTime, err, filter)
@@ -322,7 +324,7 @@ func (c *decoratedCollection) FindOneAndDelete(ctx context.Context, filter any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, findOneAndDelete, startTime, err, filter)
@@ -344,7 +346,7 @@ func (c *decoratedCollection) FindOneAndReplace(ctx context.Context, filter any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, findOneAndReplace, startTime, err, filter, replacement)
@@ -365,7 +367,7 @@ func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter, upda
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, findOneAndUpdate, startTime, err, filter, update)
@@ -386,7 +388,7 @@ func (c *decoratedCollection) InsertMany(ctx context.Context, documents []any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, insertMany, startTime, err)
@@ -406,7 +408,7 @@ func (c *decoratedCollection) InsertOne(ctx context.Context, document any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, insertOne, startTime, err, document)
@@ -426,7 +428,7 @@ func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter, replacemen
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, replaceOne, startTime, err, filter, replacement)
@@ -446,7 +448,7 @@ func (c *decoratedCollection) UpdateByID(ctx context.Context, id, update any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, updateByID, startTime, err, id, update)
@@ -466,7 +468,7 @@ func (c *decoratedCollection) UpdateMany(ctx context.Context, filter, update any
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDurationSimple(ctx, updateMany, startTime, err)
@@ -486,7 +488,7 @@ func (c *decoratedCollection) UpdateOne(ctx context.Context, filter, update any,
endSpan(span, err)
}()
err = c.brk.DoWithAcceptable(func() error {
err = c.brk.DoWithAcceptableCtx(ctx, func() error {
startTime := timex.Now()
defer func() {
c.logDuration(ctx, updateOne, startTime, err, filter, update)
@@ -527,19 +529,20 @@ func (p keepablePromise) keep(err error) error {
}
func acceptable(err error) bool {
return err == nil ||
errors.Is(err, mongo.ErrNoDocuments) ||
errors.Is(err, mongo.ErrNilValue) ||
errors.Is(err, mongo.ErrNilDocument) ||
errors.Is(err, mongo.ErrNilCursor) ||
errors.Is(err, mongo.ErrEmptySlice) ||
// session errors
errors.Is(err, session.ErrSessionEnded) ||
errors.Is(err, session.ErrNoTransactStarted) ||
errors.Is(err, session.ErrTransactInProgress) ||
errors.Is(err, session.ErrAbortAfterCommit) ||
errors.Is(err, session.ErrAbortTwice) ||
errors.Is(err, session.ErrCommitAfterAbort) ||
errors.Is(err, session.ErrUnackWCUnsupported) ||
errors.Is(err, session.ErrSnapshotTransaction)
return err == nil || isDupKeyError(err) ||
errorx.In(err, mongo.ErrNoDocuments, mongo.ErrNilValue,
mongo.ErrNilDocument, mongo.ErrNilCursor, mongo.ErrEmptySlice,
// session errors
session.ErrSessionEnded, session.ErrNoTransactStarted, session.ErrTransactInProgress,
session.ErrAbortAfterCommit, session.ErrAbortTwice, session.ErrCommitAfterAbort,
session.ErrUnackWCUnsupported, session.ErrSnapshotTransaction)
}
// isDupKeyError reports whether err is a mongo write exception carrying a
// duplicate-key write error (code 11000).
func isDupKeyError(err error) bool {
	var writeErr mongo.WriteException
	if !errors.As(err, &writeErr) {
		return false
	}

	return writeErr.HasErrorCode(duplicateKeyCode)
}

View File

@@ -15,6 +15,7 @@ import (
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/integration/mtest"
mopt "go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
)
var errDummy = errors.New("dummy")
@@ -572,6 +573,56 @@ func TestDecoratedCollection_LogDuration(t *testing.T) {
assert.Contains(t, buf.String(), "slowcall")
}
// TestAcceptable enumerates the errors the mongo breaker treats as
// acceptable (nil, the benign driver/session sentinels, duplicate-key
// write errors) and one that it must reject.
func TestAcceptable(t *testing.T) {
	tests := []struct {
		name string
		err  error
		want bool
	}{
		{"NilError", nil, true},
		{"NoDocuments", mongo.ErrNoDocuments, true},
		{"NilValue", mongo.ErrNilValue, true},
		{"NilDocument", mongo.ErrNilDocument, true},
		{"NilCursor", mongo.ErrNilCursor, true},
		{"EmptySlice", mongo.ErrEmptySlice, true},
		{"SessionEnded", session.ErrSessionEnded, true},
		{"NoTransactStarted", session.ErrNoTransactStarted, true},
		{"TransactInProgress", session.ErrTransactInProgress, true},
		{"AbortAfterCommit", session.ErrAbortAfterCommit, true},
		{"AbortTwice", session.ErrAbortTwice, true},
		{"CommitAfterAbort", session.ErrCommitAfterAbort, true},
		{"UnackWCUnsupported", session.ErrUnackWCUnsupported, true},
		{"SnapshotTransaction", session.ErrSnapshotTransaction, true},
		{"DuplicateKeyError", mongo.WriteException{WriteErrors: []mongo.WriteError{{Code: duplicateKeyCode}}}, true},
		{"OtherError", errors.New("other error"), false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, acceptable(tt.err))
		})
	}
}
// TestIsDupKeyError covers nil, unrelated, duplicate-key and other mongo
// write errors.
func TestIsDupKeyError(t *testing.T) {
	cases := []struct {
		name string
		err  error
		want bool
	}{
		{name: "NilError", err: nil, want: false},
		{name: "NonDupKeyError", err: errors.New("some other error"), want: false},
		{name: "DupKeyError", err: mongo.WriteException{WriteErrors: []mongo.WriteError{{Code: duplicateKeyCode}}}, want: true},
		{name: "OtherMongoError", err: mongo.WriteException{WriteErrors: []mongo.WriteError{{Code: 12345}}}, want: false},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, isDupKeyError(tc.err))
		})
	}
}
type mockPromise struct {
accepted bool
reason string
@@ -595,19 +646,40 @@ func (d *dropBreaker) Allow() (breaker.Promise, error) {
return nil, errDummy
}
// AllowCtx always rejects with errDummy, simulating a tripped breaker.
func (d *dropBreaker) AllowCtx(_ context.Context) (breaker.Promise, error) {
	return nil, errDummy
}

// Do reports success without running the request.
func (d *dropBreaker) Do(_ func() error) error {
	return nil
}

// DoCtx reports success without running the request.
func (d *dropBreaker) DoCtx(_ context.Context, _ func() error) error {
	return nil
}

// DoWithAcceptable always fails with errDummy, used to drive error paths.
func (d *dropBreaker) DoWithAcceptable(_ func() error, _ breaker.Acceptable) error {
	return errDummy
}

// DoWithAcceptableCtx always fails with errDummy, used to drive error paths.
func (d *dropBreaker) DoWithAcceptableCtx(_ context.Context, _ func() error, _ breaker.Acceptable) error {
	return errDummy
}

// DoWithFallback reports success without running the request.
func (d *dropBreaker) DoWithFallback(_ func() error, _ breaker.Fallback) error {
	return nil
}

// DoWithFallbackCtx reports success without running the request.
func (d *dropBreaker) DoWithFallbackCtx(_ context.Context, _ func() error, _ breaker.Fallback) error {
	return nil
}

// DoWithFallbackAcceptable reports success without running the request.
func (d *dropBreaker) DoWithFallbackAcceptable(_ func() error, _ breaker.Fallback,
	_ breaker.Acceptable) error {
	return nil
}

// DoWithFallbackAcceptableCtx reports success without running the request.
func (d *dropBreaker) DoWithFallbackAcceptableCtx(_ context.Context, _ func() error,
	_ breaker.Fallback, _ breaker.Acceptable) error {
	return nil
}

View File

@@ -69,27 +69,21 @@ func newModel(name string, cli *mongo.Client, coll Collection, brk breaker.Break
// StartSession starts a new session.
func (m *Model) StartSession(opts ...*mopt.SessionOptions) (sess mongo.Session, err error) {
err = m.brk.DoWithAcceptable(func() error {
starTime := timex.Now()
defer func() {
logDuration(context.Background(), m.name, startSession, starTime, err)
}()
starTime := timex.Now()
defer func() {
logDuration(context.Background(), m.name, startSession, starTime, err)
}()
session, sessionErr := m.cli.StartSession(opts...)
if sessionErr != nil {
return sessionErr
}
session, sessionErr := m.cli.StartSession(opts...)
if sessionErr != nil {
return nil, sessionErr
}
sess = &wrappedSession{
Session: session,
name: m.name,
brk: m.brk,
}
return nil
}, acceptable)
return
return &wrappedSession{
Session: session,
name: m.name,
brk: m.brk,
}, nil
}
// Aggregate executes an aggregation pipeline.
@@ -184,7 +178,7 @@ func (w *wrappedSession) AbortTransaction(ctx context.Context) (err error) {
endSpan(span, err)
}()
return w.brk.DoWithAcceptable(func() error {
return w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now()
defer func() {
logDuration(ctx, w.name, abortTransaction, starTime, err)
@@ -201,7 +195,7 @@ func (w *wrappedSession) CommitTransaction(ctx context.Context) (err error) {
endSpan(span, err)
}()
return w.brk.DoWithAcceptable(func() error {
return w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now()
defer func() {
logDuration(ctx, w.name, commitTransaction, starTime, err)
@@ -222,7 +216,7 @@ func (w *wrappedSession) WithTransaction(
endSpan(span, err)
}()
err = w.brk.DoWithAcceptable(func() error {
err = w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now()
defer func() {
logDuration(ctx, w.name, withTransaction, starTime, err)
@@ -243,7 +237,7 @@ func (w *wrappedSession) EndSession(ctx context.Context) {
endSpan(span, err)
}()
err = w.brk.DoWithAcceptable(func() error {
err = w.brk.DoWithAcceptableCtx(ctx, func() error {
starTime := timex.Now()
defer func() {
logDuration(ctx, w.name, endSession, starTime, err)

View File

@@ -2,8 +2,8 @@ package mon
import (
"context"
"errors"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/trace"
"go.mongodb.org/mongo-driver/mongo"
"go.opentelemetry.io/otel/attribute"
@@ -24,8 +24,7 @@ func startSpan(ctx context.Context, cmd string) (context.Context, oteltrace.Span
func endSpan(span oteltrace.Span, err error) {
defer span.End()
if err == nil || errors.Is(err, mongo.ErrNoDocuments) ||
errors.Is(err, mongo.ErrNilValue) || errors.Is(err, mongo.ErrNilDocument) {
if err == nil || errorx.In(err, mongo.ErrNoDocuments, mongo.ErrNilValue, mongo.ErrNilDocument) {
span.SetStatus(codes.Ok, "")
return
}

View File

@@ -0,0 +1,41 @@
package redis
import (
"context"
red "github.com/redis/go-redis/v9"
"github.com/zeromicro/go-zero/core/breaker"
"github.com/zeromicro/go-zero/core/lang"
)
var ignoreCmds = map[string]lang.PlaceholderType{
"blpop": {},
}
type breakerHook struct {
brk breaker.Breaker
}
func (h breakerHook) DialHook(next red.DialHook) red.DialHook {
return next
}
func (h breakerHook) ProcessHook(next red.ProcessHook) red.ProcessHook {
return func(ctx context.Context, cmd red.Cmder) error {
if _, ok := ignoreCmds[cmd.Name()]; ok {
return next(ctx, cmd)
}
return h.brk.DoWithAcceptableCtx(ctx, func() error {
return next(ctx, cmd)
}, acceptable)
}
}
func (h breakerHook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipelineHook {
return func(ctx context.Context, cmds []red.Cmder) error {
return h.brk.DoWithAcceptableCtx(ctx, func() error {
return next(ctx, cmds)
}, acceptable)
}
}

View File

@@ -0,0 +1,135 @@
package redis
import (
"context"
"errors"
"testing"
"time"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/breaker"
)
func TestBreakerHook_ProcessHook(t *testing.T) {
t.Run("breakerHookOpen", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
someError := errors.New("ERR some error")
s.SetError(someError.Error())
var err error
for i := 0; i < 1000; i++ {
_, err = rds.Get("key")
if err != nil && err.Error() != someError.Error() {
break
}
}
assert.Equal(t, breaker.ErrServiceUnavailable, err)
})
t.Run("breakerHookClose", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
var err error
for i := 0; i < 1000; i++ {
_, err = rds.Get("key")
if err != nil {
break
}
}
assert.NotEqual(t, breaker.ErrServiceUnavailable, err)
})
t.Run("breakerHook_ignoreCmd", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
someError := errors.New("ERR some error")
s.SetError(someError.Error())
var err error
node, err := getRedis(rds)
assert.NoError(t, err)
for i := 0; i < 1000; i++ {
_, err = rds.Blpop(node, "key")
if err != nil && err.Error() != someError.Error() {
break
}
}
assert.Equal(t, someError.Error(), err.Error())
})
}
func TestBreakerHook_ProcessPipelineHook(t *testing.T) {
t.Run("breakerPipelineHookOpen", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
someError := errors.New("ERR some error")
s.SetError(someError.Error())
var err error
for i := 0; i < 1000; i++ {
err = rds.Pipelined(
func(pipe Pipeliner) error {
pipe.Incr(context.Background(), "pipelined_counter")
pipe.Expire(context.Background(), "pipelined_counter", time.Hour)
pipe.ZAdd(context.Background(), "zadd", Z{Score: 12, Member: "zadd"})
return nil
},
)
if err != nil && err.Error() != someError.Error() {
break
}
}
assert.Equal(t, breaker.ErrServiceUnavailable, err)
})
t.Run("breakerPipelineHookClose", func(t *testing.T) {
s := miniredis.RunT(t)
rds := MustNewRedis(RedisConf{
Host: s.Addr(),
Type: NodeType,
})
var err error
for i := 0; i < 1000; i++ {
err = rds.Pipelined(
func(pipe Pipeliner) error {
pipe.Incr(context.Background(), "pipelined_counter")
pipe.Expire(context.Background(), "pipelined_counter", time.Hour)
pipe.ZAdd(context.Background(), "zadd", Z{Score: 12, Member: "zadd"})
return nil
},
)
if err != nil {
break
}
}
assert.NotEqual(t, breaker.ErrServiceUnavailable, err)
})
}

View File

@@ -47,7 +47,7 @@ func (rc RedisConf) NewRedis() *Redis {
opts = append(opts, WithTLS())
}
return New(rc.Host, opts...)
return newRedis(rc.Host, opts...)
}
// Validate validates the RedisConf.

View File

@@ -0,0 +1,5 @@
if redis.call("GET", KEYS[1]) == ARGV[1] then
return redis.call("DEL", KEYS[1])
else
return 0
end

View File

@@ -23,17 +23,18 @@ import (
const spanName = "redis"
var (
durationHook = hook{}
defaultDurationHook = durationHook{}
redisCmdsAttributeKey = attribute.Key("redis.cmds")
)
type hook struct{}
type durationHook struct {
}
func (h hook) DialHook(next red.DialHook) red.DialHook {
func (h durationHook) DialHook(next red.DialHook) red.DialHook {
return next
}
func (h hook) ProcessHook(next red.ProcessHook) red.ProcessHook {
func (h durationHook) ProcessHook(next red.ProcessHook) red.ProcessHook {
return func(ctx context.Context, cmd red.Cmder) error {
start := timex.Now()
ctx, endSpan := h.startSpan(ctx, cmd)
@@ -57,7 +58,7 @@ func (h hook) ProcessHook(next red.ProcessHook) red.ProcessHook {
}
}
func (h hook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipelineHook {
func (h durationHook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipelineHook {
return func(ctx context.Context, cmds []red.Cmder) error {
if len(cmds) == 0 {
return next(ctx, cmds)
@@ -83,6 +84,33 @@ func (h hook) ProcessPipelineHook(next red.ProcessPipelineHook) red.ProcessPipel
}
}
func (h durationHook) startSpan(ctx context.Context, cmds ...red.Cmder) (context.Context, func(err error)) {
tracer := trace.TracerFromContext(ctx)
ctx, span := tracer.Start(ctx,
spanName,
oteltrace.WithSpanKind(oteltrace.SpanKindClient),
)
cmdStrs := make([]string, 0, len(cmds))
for _, cmd := range cmds {
cmdStrs = append(cmdStrs, cmd.Name())
}
span.SetAttributes(redisCmdsAttributeKey.StringSlice(cmdStrs))
return ctx, func(err error) {
defer span.End()
if err == nil || errors.Is(err, red.Nil) {
span.SetStatus(codes.Ok, "")
return
}
span.SetStatus(codes.Error, err.Error())
span.RecordError(err)
}
}
func formatError(err error) string {
if err == nil || errors.Is(err, red.Nil) {
return ""
@@ -95,7 +123,7 @@ func formatError(err error) string {
}
switch {
case err == io.EOF:
case errors.Is(err, io.EOF):
return "eof"
case errors.Is(err, context.DeadlineExceeded):
return "context deadline"
@@ -123,30 +151,3 @@ func logDuration(ctx context.Context, cmds []red.Cmder, duration time.Duration)
}
logx.WithContext(ctx).WithDuration(duration).Slowf("[REDIS] slowcall on executing: %s", buf.String())
}
func (h hook) startSpan(ctx context.Context, cmds ...red.Cmder) (context.Context, func(err error)) {
tracer := trace.TracerFromContext(ctx)
ctx, span := tracer.Start(ctx,
spanName,
oteltrace.WithSpanKind(oteltrace.SpanKindClient),
)
cmdStrs := make([]string, 0, len(cmds))
for _, cmd := range cmds {
cmdStrs = append(cmdStrs, cmd.Name())
}
span.SetAttributes(redisCmdsAttributeKey.StringSlice(cmdStrs))
return ctx, func(err error) {
defer span.End()
if err == nil || errors.Is(err, red.Nil) {
span.SetStatus(codes.Ok, "")
return
}
span.SetStatus(codes.Error, err.Error())
span.RecordError(err)
}
}

View File

@@ -21,7 +21,7 @@ func TestHookProcessCase1(t *testing.T) {
tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t)
err := durationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error {
err := defaultDurationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
return nil
})(context.Background(), red.NewCmd(context.Background()))
@@ -36,7 +36,7 @@ func TestHookProcessCase2(t *testing.T) {
tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t)
err := durationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error {
err := defaultDurationHook.ProcessHook(func(ctx context.Context, cmd red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
time.Sleep(slowThreshold.Load() + time.Millisecond)
return nil
@@ -54,12 +54,12 @@ func TestHookProcessPipelineCase1(t *testing.T) {
tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t)
err := durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
err := defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
return nil
})(context.Background(), nil)
assert.NoError(t, err)
err = durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
err = defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
return nil
})(context.Background(), []red.Cmder{
@@ -74,7 +74,7 @@ func TestHookProcessPipelineCase2(t *testing.T) {
tracetest.NewInMemoryExporter(t)
w := logtest.NewCollector(t)
err := durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
err := defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
time.Sleep(slowThreshold.Load() + time.Millisecond)
return nil
@@ -91,7 +91,7 @@ func TestHookProcessPipelineCase2(t *testing.T) {
func TestHookProcessPipelineCase3(t *testing.T) {
te := tracetest.NewInMemoryExporter(t)
err := durationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
err := defaultDurationHook.ProcessPipelineHook(func(ctx context.Context, cmds []red.Cmder) error {
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
return assert.AnError
})(context.Background(), []red.Cmder{

View File

@@ -0,0 +1,6 @@
if redis.call("GET", KEYS[1]) == ARGV[1] then
redis.call("SET", KEYS[1], ARGV[1], "PX", ARGV[2])
return "OK"
else
return redis.call("SET", KEYS[1], ARGV[1], "NX", "PX", ARGV[2])
end

View File

@@ -19,7 +19,7 @@ func TestRedisMetric(t *testing.T) {
cfg := devserver.Config{}
_ = conf.FillDefault(&cfg)
server := devserver.NewServer(cfg)
server.StartAsync()
server.StartAsync(cfg)
time.Sleep(time.Second)
metricReqDur.Observe(8, "test-cmd")

File diff suppressed because it is too large Load Diff

View File

@@ -36,7 +36,7 @@ func (m myHook) ProcessHook(next red.ProcessHook) red.ProcessHook {
if cmd.Name() == "ping" && !m.includePing {
return next(ctx, cmd)
}
return errors.New("hook error")
return errors.New("durationHook error")
}
}
@@ -155,12 +155,12 @@ func TestRedis_NonBlock(t *testing.T) {
t.Run("nonBlock true", func(t *testing.T) {
s := miniredis.RunT(t)
// use hook to simulate redis ping error
// use durationHook to simulate redis ping error
_, err := NewRedis(RedisConf{
Host: s.Addr(),
NonBlock: true,
Type: NodeType,
}, withHook(myHook{includePing: true}))
}, WithHook(myHook{includePing: true}))
assert.NoError(t, err)
})
@@ -170,7 +170,7 @@ func TestRedis_NonBlock(t *testing.T) {
Host: s.Addr(),
NonBlock: false,
Type: NodeType,
}, withHook(myHook{includePing: true}))
}, WithHook(myHook{includePing: true}))
assert.ErrorContains(t, err, "redis connect error")
})
}
@@ -2080,3 +2080,70 @@ func (n mockedNode) BLPop(_ context.Context, _ time.Duration, _ ...string) *red.
return cmd
}
func TestRedisPublish(t *testing.T) {
runOnRedis(t, func(client *Redis) {
_, err := newRedis(client.Addr, badType()).Publish("Test", "message")
assert.NotNil(t, err)
_, err = client.Publish("Test", "message")
assert.Nil(t, err)
})
}
func TestRedisRPopLPush(t *testing.T) {
runOnRedis(t, func(client *Redis) {
_, err := newRedis(client.Addr, badType()).RPopLPush("Source", "Destination")
assert.NotNil(t, err)
_, err = client.Rpush("Source", "Destination")
assert.Nil(t, err)
_, err = client.RPopLPush("Source", "Destination")
assert.Nil(t, err)
})
}
func TestRedisUnlink(t *testing.T) {
runOnRedis(t, func(client *Redis) {
_, err := newRedis(client.Addr, badType()).Unlink("Key1", "Key2")
assert.NotNil(t, err)
err = client.Set("Key1", "Key2")
assert.Nil(t, err)
get, err := client.Get("Key1")
assert.Nil(t, err)
assert.Equal(t, "Key2", get)
res, err := client.Unlink("Key1")
assert.Nil(t, err)
assert.Equal(t, int64(1), res)
})
}
func TestRedisTxPipeline(t *testing.T) {
runOnRedis(t, func(client *Redis) {
ctx := context.Background()
pipe, err := newRedis(client.Addr, badType()).TxPipeline()
assert.NotNil(t, err)
pipe, err = client.TxPipeline()
assert.Nil(t, err)
key := "key"
hashKey := "field"
hashValue := "value"
// setting value
pipe.HSet(ctx, key, hashKey, hashValue)
existsCmd := pipe.Exists(ctx, key)
getCmd := pipe.HGet(ctx, key, hashKey)
// execution
_, err = pipe.Exec(ctx)
assert.Nil(t, err)
// verification results
exists, err := existsCmd.Result()
assert.Nil(t, err)
assert.Equal(t, int64(1), exists)
value, err := getCmd.Result()
assert.Nil(t, err)
assert.Equal(t, hashValue, value)
})
}

View File

@@ -37,8 +37,11 @@ func getClient(r *Redis) (*red.Client, error) {
MinIdleConns: idleConns,
TLSConfig: tlsConfig,
})
store.AddHook(durationHook)
for _, hook := range r.hooks {
hooks := append([]red.Hook{defaultDurationHook, breakerHook{
brk: r.brk,
}}, r.hooks...)
for _, hook := range hooks {
store.AddHook(hook)
}

Some files were not shown because too many files have changed in this diff Show More