Compare commits

...

232 Commits

Author SHA1 Message Date
kevin
2ea0a843f8 chore: remove any keywords 2023-03-04 20:54:26 +08:00
Kevin Wan
9e0e01b2bc chore: add tests (#2960) 2023-03-04 20:38:50 +08:00
yangjinheng
af50a80d01 timeout writer add hijack 2023-03-04 20:38:45 +08:00
yangjinheng
703fb8d970 Update timeouthandler.go 2023-03-04 20:38:40 +08:00
MarkJoyMa
e964e530e1 x 2023-03-04 20:32:21 +08:00
MarkJoyMa
52265087d1 x 2023-03-04 20:32:16 +08:00
MarkJoyMa
b4c2677eb9 add ut 2023-03-04 20:32:10 +08:00
MarkJoyMa
30296fb1ca feat: conf add FillDefault func 2023-03-04 20:31:44 +08:00
zhoumingji
356c80defd Fix bug in dartgen: The property 'isEmpty' can't be unconditionally accessed because the receiver can be 'null' 2023-03-04 20:31:38 +08:00
zhoumingji
8c31525378 Fix bug in dartgen: Increase the processing logic when route.RequestType is empty 2023-03-04 20:31:30 +08:00
cui fliter
2cf09f3c36 fix function name
Signed-off-by: cui fliter <imcusg@gmail.com>
2023-03-04 20:31:20 +08:00
Kevin Wan
d41e542c92 feat: support grpc client keepalive config (#2950) 2023-03-04 20:30:31 +08:00
tanglihao
265a24ac6d fix code format style use const config.DefaultFormat 2023-03-04 20:30:21 +08:00
tanglihao
7d88fc39dc fix log name conflict 2023-03-04 20:30:16 +08:00
anqiansong
6957b6a344 format code 2023-03-04 20:30:10 +08:00
anqiansong
bca6a230c8 remove unused code 2023-03-04 20:30:04 +08:00
anqiansong
cc8413d683 remove unused code 2023-03-04 20:29:56 +08:00
anqiansong
3842283fa8 Fix #2879 2023-03-04 20:29:41 +08:00
qiying.wang
fe13a533f5 chore: remove redundant prefix of "error: " in error creation 2023-03-04 20:26:40 +08:00
qiying.wang
7a327ccda4 chore: add tests for logc debug 2023-03-04 20:25:52 +08:00
qiying.wang
06e4507406 feat: add debug log for logc 2023-03-04 20:25:27 +08:00
kevin
8794d5b753 chore: add comments 2023-03-04 20:25:21 +08:00
kevin
9bfa63d995 chore: add more tests 2023-03-04 20:25:15 +08:00
kevin
a432b121fb chore: add more tests 2023-03-04 20:25:07 +08:00
kevin
b61c94bb66 feat: check key overwritten 2023-03-04 20:24:33 +08:00
Kevin Wan
93fcf899dc fix: config map cannot handle case-insensitive keys. (#2932)
* fix: #2922

* chore: rename const

* feat: support anonymous map field

* feat: support anonymous map field
2023-03-04 20:23:53 +08:00
Kevin Wan
9f4b3bae92 fix: #2899, using autoscaling/v2beta2 instead of v2beta1 (#2900)
* fix: #2899, using autoscaling/v2 instead of v2beta1

* chore: change hpa definition

---------

Co-authored-by: kevin.wan <kevin.wan@yijinin.com>
2023-03-04 20:22:27 +08:00
Kevin Wan
805cb87d98 chore: refine rest validator (#2928)
* chore: refine rest validator

* chore: add more tests

* chore: reformat code

* chore: add comments
2023-03-04 20:22:10 +08:00
Qiying Wang
366131640e feat: add configurable validator for httpx.Parse (#2923)
Co-authored-by: qiying.wang <qiying.wang@highlight.mobi>
2023-03-04 20:22:05 +08:00
Kevin Wan
956884a3ff fix: timeout not working if greater than global rest timeout (#2926) 2023-03-04 20:21:59 +08:00
raymonder jin
f571cb8af2 del unnecessary blank 2023-03-04 20:21:54 +08:00
Kevin Wan
cc5acf3b90 chore: reformat code (#2925) 2023-03-04 20:21:49 +08:00
chenquan
e1aa665443 fix: fixed the bug that old trace instances may be fetched 2023-03-04 20:21:43 +08:00
xiandong
cd357d9484 rm parseErr when kindJaeger 2023-03-04 20:21:28 +08:00
xiandong
6d4d7cbd6b rm kindJaegerUdp 2023-03-04 20:21:18 +08:00
xiandong
c593b5b531 add parseEndpoint 2023-03-04 20:20:29 +08:00
xiandong
fd5b38b07c add parseEndpoint 2023-03-04 20:20:17 +08:00
xiandong
41efb48f55 add test for Endpoint of kindJaegerUdp 2023-03-04 20:19:40 +08:00
xiandong
0ef3626839 add test for Endpoint of kindJaegerUdp 2023-03-04 20:19:34 +08:00
xiandong
77a72b16e9 add kindJaegerUdp 2023-03-04 20:19:25 +08:00
Kevin Wan
21566f1b7a chore: reformat code (#2903) 2023-03-04 20:17:35 +08:00
anqiansong
b2646e228b feat: Add request.ts (#2901)
* Add request.ts

* Update comments

* Refactor request filename
2023-03-04 20:17:21 +08:00
cong
588b883710 refactor: simplify sqlx fail fast ping and simplify miniredis setup in test (#2897)
* chore(redistest): simplify miniredis setup in test

* refactor(sqlx): simplify sqlx fail fast ping

* chore: close connection if not available
2023-03-04 20:17:16 +08:00
Kevin Wan
033910bbd8 Update readme-cn.md 2023-03-04 20:17:11 +08:00
fondoger
530dd79e3f Fix bug in dart api gen: path parameter is not replaced 2023-03-04 20:17:05 +08:00
Kevin Wan
cd5263ac75 Update readme-cn.md 2023-03-04 20:16:58 +08:00
Kevin Wan
ea3302a468 fix: test failures (#2892)
Co-authored-by: kevin.wan <kevin.wan@yijinin.com>
2023-03-04 20:16:50 +08:00
fondoger
abf15b373c Fix Dart API generation bugs; Add ability to generate API for path parameters (#2887)
* Fix bug in dartgen: Import path should match the generated api filename

* Use Route.HandlerName as generated dart API function name

Reasons:
- There is bug when using url path name as function name, because it may have invalid characters such as ":"
- Switching to HandlerName aligns with other languages such as typescript generation

* [DartGen] Add ability to generate api for url path parameters such as /path/:param
2023-03-04 20:16:44 +08:00
Kevin Wan
a865e9ee29 refactor: simplify stringx.Replacer, and avoid potential infinite loops (#2877)
* simplify replace

* backup

* refactor: simplify stringx.Replacer

* chore: add comments and const

* chore: add more tests

* chore: rename variable
2023-03-04 20:16:37 +08:00
Kevin Wan
f8292198cf Update readme-cn.md 2023-03-04 20:15:38 +08:00
Kevin Wan
016d965f56 chore: refactor (#2875) 2023-03-04 20:15:30 +08:00
dahaihu
95d7c73409 fix Replacer suffix match, and add test case (#2867)
* fix: replace should replace the longest match

* feat: revert bytes.Buffer to strings.Builder

* fix: loop reset nextStart

* feat: add node longest match test

* feat: add replacer suffix match test case

* feat: multiple match

* fix: partial match ends

* fix: replace look back upon error

* feat: rm unnecessary branch

---------

Co-authored-by: hudahai <hscxrzs@gmail.com>
Co-authored-by: hushichang <hushichang@sensetime.com>
2023-03-04 20:15:25 +08:00
Kevin Wan
939ef2a181 chore: add more tests (#2873) 2023-03-04 20:15:18 +08:00
Kevin Wan
f0b8dd45fe fix: test failure (#2874) 2023-03-04 20:15:08 +08:00
Mikael
0ba9335b04 only unmarshal public variables (#2872)
* only unmarshal public variables

* only unmarshal public variables

* only unmarshal public variables

* only unmarshal public variables
2023-03-04 20:15:01 +08:00
Kevin Wan
04f181f0b4 chore: add more tests (#2866)
* chore: add more tests

* chore: add more tests

* chore: fix test failure
2023-03-04 20:14:54 +08:00
hudahai
89f841c126 fix: loop reset nextStart 2023-03-04 20:14:48 +08:00
hudahai
d785c8c377 feat: revert bytes.Buffer to strings.Builder 2023-03-04 20:14:41 +08:00
hudahai
687a1d15da fix: replace should replace the longest match 2023-03-04 20:14:35 +08:00
Kevin Wan
aaa974e1ad Update readme-cn.md 2023-03-04 20:14:22 +08:00
Kevin Wan
2779568ccf fix: conf anonymous overlay problem (#2847) 2023-03-04 20:14:10 +08:00
Kevin Wan
f7d50ae626 Update readme-cn.md 2023-03-04 20:14:01 +08:00
Kevin Wan
33594ea350 Chore/rewire (#2836)
* fix: problem on name overlapping in config (#2820)

* chore: fix missing funcs on windows (#2825)

* chore: add more tests (#2812)

* chore: add more tests

* chore: add more tests

* chore: add more tests (#2814)

* chore: add more tests (#2815)

* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests

* feat: upgrade go to v1.18 (#2817)

* feat: upgrade go to v1.18

* feat: upgrade go to v1.18

* chore: change interface{} to any (#2818)

* chore: change interface{} to any

* chore: update goctl version to 1.5.0

* chore: update goctl deps

* chore: update goctl interface{} to any (#2819)

* chore: update goctl interface{} to any

* chore: update goctl interface{} to any

* chore(deps): bump google.golang.org/grpc from 1.52.0 to 1.52.3 (#2823)

* support custom maxBytes in API file (#2822)

* feat: mapreduce generic version (#2827)

* feat: mapreduce generic version

* fix: gateway mr type issue

---------

Co-authored-by: kevin.wan <kevin.wan@yijinin.com>

* feat: add MustNewRedis (#2824)

* feat: add MustNewRedis

* feat: add MustNewRedis

* feat: add MustNewRedis

* x

* x

* fix ut

* x

* x

* x

* x

* x

* chore: improve codecov (#2828)

* feat: converge grpc interceptor processing (#2830)

* feat: converge grpc interceptor processing

* x

* x

* chore(deps): bump go.opentelemetry.io/otel/exporters/zipkin (#2831)

* chore(deps): bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp (#2833)

Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) from 1.11.2 to 1.12.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...v1.12.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* chore(deps): bump go.opentelemetry.io/otel/exporters/jaeger (#2832)

Bumps [go.opentelemetry.io/otel/exporters/jaeger](https://github.com/open-telemetry/opentelemetry-go) from 1.11.2 to 1.12.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...v1.12.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/exporters/jaeger
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Xiaoju Jiang <44432198+jiang4869@users.noreply.github.com>
Co-authored-by: kevin.wan <kevin.wan@yijinin.com>
Co-authored-by: MarkJoyMa <64180138+MarkJoyMa@users.noreply.github.com>
2023-03-04 20:13:37 +08:00
MarkJoyMa
ee2ec974c4 feat: converge grpc interceptor processing (#2830)
* feat: converge grpc interceptor processing

* x

* x
2023-03-04 20:12:30 +08:00
Kevin Wan
fd2f2f0f54 chore: improve codecov (#2828) 2023-03-04 20:12:16 +08:00
MarkJoyMa
86a2429d7d feat: add MustNewRedis (#2824)
* feat: add MustNewRedis

* feat: add MustNewRedis

* feat: add MustNewRedis

* x

* x

* fix ut

* x

* x

* x

* x

* x
2023-03-04 20:12:05 +08:00
Xiaoju Jiang
e5fe5dcc50 support custom maxBytes in API file (#2822) 2023-03-04 20:11:55 +08:00
Kevin Wan
b510e7c242 chore: fix missing funcs on windows (#2825) 2023-03-04 20:11:46 +08:00
Kevin Wan
dfe92e709f fix: problem on name overlapping in config (#2820) 2023-03-04 20:11:18 +08:00
Kevin Wan
cb649cf627 chore: add more tests (#2815)
* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests
2023-03-04 20:11:03 +08:00
Kevin Wan
ce19a5ade6 chore: add more tests (#2814) 2023-03-04 20:10:57 +08:00
Kevin Wan
6dc56de714 chore: add more tests (#2812)
* chore: add more tests

* chore: add more tests
2023-03-04 20:09:03 +08:00
Kevin Wan
f3369f8e81 chore: update goctl version to 1.4.4 (#2811) 2023-01-21 21:45:25 +08:00
Kevin Wan
c9b05ae07e fix: mapping optional dep not canonicaled (#2807) 2023-01-20 23:57:49 +08:00
Kevin Wan
32a59dbc27 chore: refactor func name (#2804)
* chore: refactor func name

* chore: make plain log clearer
2023-01-18 17:20:45 +08:00
Kevin Wan
ba0dff2d61 chore: add more tests (#2803)
* chore: add more tests

* chore: add more tests
2023-01-18 13:15:41 +08:00
Kevin Wan
10da5e0424 chore: add more tests (#2801) 2023-01-17 21:55:36 +08:00
Kevin Wan
4bed34090f chore: add more tests (#2800) 2023-01-17 09:59:42 +08:00
Kevin Wan
2bfecf9354 chore: remove mgo related packages (#2799) 2023-01-16 23:13:59 +08:00
Kevin Wan
6d129e0264 chore: add more tests (#2797)
* chore: add more tests

* chore: add more tests

* chore: add more tests
2023-01-16 22:33:39 +08:00
foliet
a2df1bb164 fix: modify the generated update function and add return values for update and delete functions (#2793) 2023-01-15 22:11:08 +08:00
Kevin Wan
5f02e623f5 chore: add more tests (#2795)
* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests
2023-01-15 21:32:41 +08:00
Kevin Wan
963b52fb1b chore: add more tests (#2794) 2023-01-15 15:28:27 +08:00
Kevin Wan
02265d0bfe chore: add more tests (#2792)
* chore: add more tests

* chore: add more tests

* chore: add more tests

* chore: add more tests
2023-01-15 00:16:12 +08:00
Kevin Wan
2e57e91826 Update readme-cn.md 2023-01-13 23:03:18 +08:00
Ofey Chan
82c642d3f4 feat: expose NewTimingWheelWithClock (#2787) 2023-01-13 17:46:40 +08:00
Kevin Wan
b2571883ca chore: refactor (#2785)
* chore: refactor

* chore: refactor
2023-01-13 14:04:37 +08:00
Alonexy
00ff50c2cc add zset withscore float (#2689)
* add zset withscore float

* update

* add IncrbyFloat,HincrbyFloat

Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2023-01-12 22:37:14 +08:00
Kevin Wan
4d7fa08b0b feat: support **struct in mapping (#2784)
* feat: support **struct in mapping

* chore: fix test failure
2023-01-12 20:45:32 +08:00
Kevin Wan
367afb544c feat: support ptr of ptr of ... in mapping (#2779)
* feat: support ptr of ptr of ... in mapping

* feat: support ptr of ptr of time.Duration in mapping

* feat: support ptr of ptr of json.Number in mapping

* chore: improve setting in mapping

* feat: support ptr of ptr encoding.TextUnmarshaler in mapping

* chore: add more tests

* fix: string ptr

* chore: update tests
2023-01-12 15:56:51 +08:00
cong
43b8c7f641 chore(trace): improve rest tracinghandler (#2783) 2023-01-12 12:50:57 +08:00
dependabot[bot]
a2dcb0079a chore(deps): bump github.com/jhump/protoreflect from 1.14.0 to 1.14.1 (#2782) 2023-01-12 09:41:03 +08:00
cong
f9619328f2 refactor(rest): use static config for trace ignore paths. (#2773) 2023-01-12 09:40:18 +08:00
Kevin Wan
bae061a67e chore: add tests (#2778) 2023-01-11 15:21:39 +08:00
Kevin Wan
0b176e17ac fix: #2576 (#2776) 2023-01-11 00:45:11 +08:00
Kevin Wan
6340e24c17 chore: add tests (#2774) 2023-01-09 23:48:31 +08:00
Kevin Wan
74e0676617 feat: add config to truncate long log content (#2767) 2023-01-09 09:39:30 +08:00
MarkJoyMa
0defb7522f feat: replace NewBetchInserter function name (#2769) 2023-01-09 09:38:57 +08:00
Kevin Wan
0c786ca849 chore: remove simple methods, inlined (#2768) 2023-01-09 00:55:13 +08:00
Kevin Wan
26c541b9cb feat: add middlewares config for zrpc (#2766)
* feat: add middlewares config for zrpc

* chore: add tests

* chore: improve codecov

* chore: improve codecov
2023-01-08 19:34:05 +08:00
Kevin Wan
ade6f9ee46 feat: add middlewares config for rest (#2765)
* feat: add middlewares config for rest

* chore: disable logs in tests

* chore: enable verbose in tests
2023-01-08 16:41:53 +08:00
Kevin Wan
f4502171ea Update readme-cn.md 2023-01-08 12:42:27 +08:00
chensy
8157e2118d fix: replace goctl ExactValidArgs to MatchAll (#2759)
Co-authored-by: chenjieping <chenjieping@kezaihui.com>
2023-01-07 17:07:40 +08:00
Kevin Wan
e52dace416 chore: refactor (#2764) 2023-01-07 14:13:44 +08:00
chen quan
dc260f196a refactor: simplify the code (#2763)
* refactor: simplify the code

* fix: fix data race

* refactor: simplify the code

* refactor: simplify the code
2023-01-07 13:32:56 +08:00
dependabot[bot]
559726112c chore(deps): bump github.com/alicebob/miniredis/v2 from 2.23.1 to 2.30.0 (#2762)
Bumps [github.com/alicebob/miniredis/v2](https://github.com/alicebob/miniredis) from 2.23.1 to 2.30.0.
- [Release notes](https://github.com/alicebob/miniredis/releases)
- [Changelog](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md)
- [Commits](https://github.com/alicebob/miniredis/compare/v2.23.1...v2.30.0)

---
updated-dependencies:
- dependency-name: github.com/alicebob/miniredis/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-01-07 12:15:22 +08:00
MarkJoyMa
a5fcf24c04 feat: add batch inserter (#2755) 2023-01-06 23:30:50 +08:00
chen quan
fc9b3ffdc1 refactor: use opentelemetry's standard api to track http status code (#2760) 2023-01-06 23:27:54 +08:00
MarkJoyMa
e71c505e94 feat: add mongo options (#2753)
* feat: add mongo options

* feat: add mongo options

* feat: add mongo options

* feat: add mongo options

* feat: add mongo options

* feat: add mongo options
2023-01-05 22:14:50 +08:00
chen quan
21c49009c0 chore: remove unnecessary code (#2754) 2023-01-05 22:12:07 +08:00
#Suyghur
69d355eb4b feat(redis): add zscan command implementation (#2729) (#2751) 2023-01-04 13:44:17 +08:00
Kevin Wan
83f88d177f chore: improve codecov (#2752) 2023-01-04 13:42:20 +08:00
xiang
641ebf1667 feat: trace http.status_code (#2708)
* feat: trace http.status_code

* feat: implements http.Flusher & http.Hijacker for traceResponseWriter

* test: delete notTracingSpans after test

* feat: trace http.status_code

* feat: implements http.Flusher & http.Hijacker for traceResponseWriter

* test: delete notTracingSpans after test

* refactor: update trace handler span message

* fix: code conflict
2023-01-04 10:21:57 +08:00
Kevin Wan
cf435bfcc1 chore: remove roadmap file, not updating (#2749) 2023-01-03 23:38:14 +08:00
Kevin Wan
28f1b15b8e Update readme.md (#2748) 2023-01-03 23:14:39 +08:00
cong
42413dc294 feat(trace): support otlp http exporter (#2746)
* feat(trace): support otlp http exporter

* chore: use otlptracehttp v1.10.0 not upgrade grpc version prevent other modules break

* refactor(trace): rename exporter kind grpc to otlpgrpc.

BREAKING CHANGE: trace Config.Batcher should use otlpgrpc instead of grpc now.
2023-01-03 22:49:30 +08:00
Kevin Wan
ec7ac43948 chore: reorg imports (#2745)
* chore: reorg imports

* chore: format code
2023-01-03 22:26:45 +08:00
cong
deefc1a8eb fix(trace): grpc exporter should use nonblock option (#2744)
* fix(trace): grpc exporter should use nonblock option

* chore: sort imports
2023-01-03 18:15:09 +08:00
Kevin Wan
036328f1ea chore: update tests (#2741)
* chore: update tests

* chore: codecov on comments

* chore: codecov on comments
2023-01-03 18:02:35 +08:00
wojiukankan
85057a623d 🐛 debug grpc export (#2379) (#2719)
* 🐛 debug grpc export (#2379) 

#2379 Fixed the issue that the GRPC exporter did not establish an RPC link
原文使用的 otlptracegrpc.NewUnstarted创建的是一个未建立rpc连接的导出器,无法正常使用;改为otlptracegrpc.New才妥

* Update agent_test.go

修复单元测试失败
2023-01-03 17:04:35 +08:00
Xargin
1c544a26be use stat instead of disableStat (#2740) 2023-01-03 11:29:24 +08:00
chainlife
20a61ce43e logx conf add DisableStat (#2434)
Co-authored-by: sunsoft <sunsoft@qq.com>
2023-01-02 23:22:13 +08:00
Kevin Wan
dd294e8cd6 fix: #2700, timeout not enough for writing responses (#2738)
* fix: #2700, timeout not enough for writing responses

* fix: test fail

* chore: add comments
2023-01-02 13:51:15 +08:00
JackSon_tm.m
3e9d0161bc add ServeHTTP to Server/Engine for doing Httptest (#2704) 2023-01-02 00:24:58 +08:00
Kevin Wan
cf6c349118 fix: #2735 (#2736)
* fix: #2735

* chore: make error consistent
2023-01-01 12:21:53 +08:00
Kevin Wan
c7a0ec428c fix: key like TLSConfig not working (#2730)
* fix: key like TLSConfig not working

* fix: remove unnecessary code

* chore: rename variable
2022-12-29 14:50:53 +08:00
chowyu12
ce1c02f4f9 Feat: ignorecolums add sort (#2648)
* add go-grpc_opt and go_opt for grpc new command

* feat: remove log when disable log

* feat: add sort

Co-authored-by: zhouyy <zhouyy@ickey.cn>
2022-12-28 14:53:22 +08:00
Kevin Wan
c3756a8f1c fix: etcd publisher reconnecting problem (#2710)
* fix: etcd publisher reconnecting problem

* chore: fix wrong call
2022-12-27 20:03:03 +08:00
Archer
f4fd735aee Use read-write lock instead of mutex (#2727) 2022-12-26 15:00:47 +08:00
Archer
683d793719 RawFieldNames should ignore the field whose name is start with a dash (#2725) 2022-12-24 21:27:32 +08:00
Kevin Wan
affbcb5698 fix: camel cased key of map item in config (#2715)
* fix: camel cased key of map item in config

* fix: mapping anonymous problem

* fix: mapping anonymous problem

* chore: refactor

* chore: add more tests

* chore: refactor
2022-12-24 21:26:33 +08:00
Kevin Wan
f0d1722bbd chore: pass by value for config in dev server (#2712) 2022-12-24 11:41:23 +08:00
chowyu12
c4f8eca459 Feat update rootpkg (#2718)
* add go-grpc_opt and go_opt for grpc new command

* feat: remove log when disable log

* feat: remove repeat code

Co-authored-by: zhouyy <zhouyy@ickey.cn>
2022-12-23 23:57:56 +08:00
Kevin Wan
251c071418 Update readme.md 2022-12-22 23:21:41 +08:00
Kevin Wan
6652c4e445 Update readme-cn.md 2022-12-16 00:08:12 +08:00
Kevin Wan
f73613dff0 Update readme.md 2022-12-16 00:07:50 +08:00
Kevin Wan
7a75dce465 refactor: remove duplicated code (#2705) 2022-12-14 23:36:56 +08:00
anqiansong
801f1adf71 Remove useless file (#2699) 2022-12-14 23:22:01 +08:00
Kevin Wan
f76b976262 fix: #2684 (#2693) 2022-12-13 00:16:31 +08:00
anqiansong
a49f9060c2 Add more test (#2692) 2022-12-12 22:45:18 +08:00
fyyang
ebe28882eb fix: Fix string.title (#2687)
* fix: unsignedTypeMap type error

* fix: string.Title

* style: string.Title test
2022-12-11 23:44:19 +08:00
Kevin Wan
fdc57d07d7 fix: #2672 (#2681)
* fix: #2672

* chore: fix more cases

* chore: update deps

* chore: update deps

* chore: refactor

* chore: refactor

* chore: refactor
2022-12-11 00:41:50 +08:00
re-dylan
ef22042f4d feat: add dev server and health (#2665)
* feat: add dev server and health

* fix: fix ci

* fix: fix comment.

* feat: add enabled

* remove no need test

* feat: mv devServer to internal

* feat: default enable pprof

Co-authored-by: dylan.wang <dylan.wang@yijinin.com>
2022-12-10 20:40:23 +08:00
Tim Xiao
944193ce25 fix: Remove duplicate code (#2686) 2022-12-10 20:13:37 +08:00
Kevin Wan
dcfc9b79f1 feat: accept camelcase for config keys (#2651)
* feat: accept camelcase for config keys

* chore: refactor

* chore: refactor

* chore: add more tests

* chore: refactor

* fix: map elements of array
2022-12-08 22:01:36 +08:00
Kevin Wan
b7052854bb chore: tidy go.sum (#2675) 2022-12-08 06:50:42 +08:00
Kevin Wan
4729a16142 chore: update deps (#2674) 2022-12-07 23:33:30 +08:00
benqi
3604659027 fix: fix client side in zeromicro#2109 (zeromicro#2116) (#2659)
* fix: fix client side in zeromicro#2109 (zeromicro#2116)

* fix: fix client side in zeromicro#2109 (zeromicro#2116)

* fix: fix client side in zeromicro#2109 (zeromicro#2116)
2022-12-04 00:25:52 +08:00
Kevin Wan
9f7f94b673 chore: upgrade dependencies (#2658) 2022-12-03 22:06:45 +08:00
bensonfx
0b3629b636 Fixes #2603 bump goctl cobra version to macos completion help bug (#2656)
Co-authored-by: Benson Yan <yanyong@axera-tech.com>
2022-12-03 19:03:38 +08:00
heyehang
a644ec7edd feature: responses with context (#2637) 2022-12-03 18:48:02 +08:00
Kevin Wan
9941055eaa feat: add trace.SpanIDFromContext and trace.TraceIDFromContext (#2654) 2022-12-02 11:00:44 +08:00
EinfachePhy
10fd9131a1 replace strings.Title to cases.Title (#2650) 2022-12-02 00:15:51 +08:00
bigrocs
90828a0d4a The default port is used when there is no port number for k8s (#2598)
* k8s 没有端口号时使用默认端口

* Modify the not port test
2022-11-27 09:00:11 +08:00
edieruby
b1c3c21c81 fix: log currentSize should not be 0 when file exists and size is not 0 (#2639) 2022-11-25 23:48:32 +08:00
chen quan
97a8b3ade5 fix(rest): fix issues#2628 (#2629) 2022-11-23 22:50:08 +08:00
Kevin Wan
95a5f64493 feat: add stringx.ToCamelCase (#2622) 2022-11-20 17:41:39 +08:00
chensy
20e659749a fix: fix conflict with the import package name (#2610)
Co-authored-by: chenjieping <chenjieping@kezaihui.com>
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2022-11-20 15:23:25 +08:00
Kevin Wan
94708cc78f chore: update deps (#2621) 2022-11-19 22:28:40 +08:00
Kevin Wan
06fafd2153 feat: validate value in options for mapping (#2616) 2022-11-18 19:46:23 +08:00
Kevin Wan
79de932646 chore: update dependencies (#2594)
* chore: update deps

* chore: update deps

* chore: update deps

* chore: update deps
2022-11-12 18:04:39 +08:00
Kevin Wan
b562e940e7 feat: support bool for env tag (#2593) 2022-11-12 13:58:35 +08:00
Kevin Wan
69068cdaf0 feat: support env tag in config (#2577)
* feat: support env tag in config

* chore: add more tests

* chore: add more tests, add stringx.Join

* fix: test fail

* chore: remove print code

* chore: rename variable
2022-11-11 23:17:09 +08:00
dependabot[bot]
f25788ebea chore(deps): bump go.mongodb.org/mongo-driver from 1.10.3 to 1.11.0 (#2588) 2022-11-11 07:01:54 +08:00
dependabot[bot]
1293c4321b chore(deps): bump github.com/alicebob/miniredis/v2 from 2.23.0 to 2.23.1 (#2587) 2022-11-11 06:59:13 +08:00
Kevin Wan
e3e08a7396 fix: inherit issue when parent after inherits (#2586)
* fix: inherit issue when parent after inherits

* chore: add more tests
2022-11-10 22:13:05 +08:00
dependabot[bot]
4b071f4c33 chore(deps): bump github.com/jhump/protoreflect from 1.13.0 to 1.14.0 (#2579)
Bumps [github.com/jhump/protoreflect](https://github.com/jhump/protoreflect) from 1.13.0 to 1.14.0.
- [Release notes](https://github.com/jhump/protoreflect/releases)
- [Commits](https://github.com/jhump/protoreflect/compare/v1.13.0...v1.14.0)

---
updated-dependencies:
- dependency-name: github.com/jhump/protoreflect
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-11-10 14:42:53 +08:00
骑着毛驴背单词
81831b60a9 fix(change model template file type): All model template variables ar… (#2573)
* fix(change model template file type): All model template variables are stored in tpl format files with the same name as the template generated using the template init command

* fix(change model template file type): All model template variables are stored in tpl format files with the same name as the template generated using the template init command

Co-authored-by: qilvge <qilvge@.qilvge.com>
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2022-11-10 13:38:35 +08:00
Kevin Wan
1677a4dceb feat: conf inherit (#2568)
* feat: add ValuerWithParent

* feat: make etcd config inherit from parents

* chore: add more tests

* chore: add more tests

* chore: add more comments

* chore: refactor

* chore: add more comments

* fix: fix duplicated code and refactor

* fix: remove unnecessary code

* fix: fix test case for removing print

* feat: support partial inherit
2022-11-08 15:27:48 +08:00
王哈哈
dac3600b53 Modify comment syntax error (#2572) 2022-11-04 21:55:17 +08:00
anqiansong
3db64c7d47 fix(goctl): Fix #2561 (#2562)
* Fix #2561

* format code
2022-10-29 22:40:56 +08:00
Kevin Wan
7eb6aae949 fix: potential slice append issue (#2560) 2022-10-28 08:14:03 +08:00
foliet
07128213d6 chore: update "DO NOT EDIT" format (#2559)
* chore: update "DO NOT EDIT" format

* Update readme.md

* Update head.go
2022-10-27 19:41:24 +08:00
anqiansong
9504d30049 fix(goctl): fix redundant import (#2551) 2022-10-25 06:58:51 +08:00
chen quan
ce73b9a85c chore(action): enable cache dependency (#2549) 2022-10-24 22:50:24 +08:00
re-dylan
4d2a146733 typo(mapping): fix typo for key (#2548) 2022-10-24 21:43:53 +08:00
Kevin Wan
46e236fef7 chore: add more tests (#2547) 2022-10-23 10:54:41 +08:00
Kevin Wan
06e4914e41 feat: add logger.WithFields (#2546) 2022-10-22 23:28:34 +08:00
Kevin Wan
9cadab2684 chore: refactor (#2545)
* chore: refactor

* chore: refactor
2022-10-22 22:52:40 +08:00
chen quan
7fe2492009 feat(trace): support for disabling tracing of specified spanName (#2363) 2022-10-22 22:14:12 +08:00
chen quan
22bdf0bbd5 chore: adjust rpc comment format (#2501) 2022-10-22 22:07:55 +08:00
chowyu12
c92a2d1b77 feat: remove info log when disable log (#2525)
* add go-grpc_opt and go_opt for grpc new command

* feat: remove log when disable log

Co-authored-by: zhouyy <zhouyy@ickey.cn>
2022-10-22 22:07:17 +08:00
swliao425
b21162d638 fix: redis's pipeline logs are not printed completely (#2538)
* fix: redis's pipeline logs are not printed completely

* add unit test

Signed-off-by: liaoshiwei <liaoshiwei@uniontech.com>

Signed-off-by: liaoshiwei <liaoshiwei@uniontech.com>
2022-10-22 21:57:40 +08:00
anqiansong
7c9ef3ca67 fix(goctl): Fix issues (#2543)
* fix #2541

* fix #2432

* Fix review comment

* format code

* format code
2022-10-22 21:01:15 +08:00
chen quan
bbadbe0175 chore(action): upgrade action (#2521)
- codecov/codecov-action
- actions/setup-go
- usthe/issues-translate-action(origin:omsun28/issues-translate-action)
2022-10-22 19:06:53 +08:00
Kevin Wan
f9beab1095 feat: support uuid.UUID in mapping (#2537) 2022-10-20 20:11:19 +08:00
Kevin Wan
de5c59aad3 chore: add more tests (#2536) 2022-10-19 20:39:46 +08:00
Gang Wu
36d3765c5c Fix typo (#2531) 2022-10-18 17:27:06 +08:00
dependabot[bot]
d326e6f813 chore(deps): bump google.golang.org/grpc from 1.50.0 to 1.50.1 (#2527) 2022-10-18 17:02:11 +08:00
wuleiming2009
ea52fe2e0d Fix the wrong key about FindOne in mongo of goctl. (#2523) 2022-10-17 19:58:57 +08:00
Kevin Wan
05a5de7c6d chore: fix lint errors (#2520) 2022-10-17 06:30:58 +08:00
Kevin Wan
d4c9fd2aff chore: add golangci-lint config file (#2519)
* chore: add golangci-lint config file

* chore: member alignment
2022-10-14 22:45:48 +08:00
dependabot[bot]
776673d57d chore(deps): bump go.opentelemetry.io/otel/exporters/jaeger (#2514)
Bumps [go.opentelemetry.io/otel/exporters/jaeger](https://github.com/open-telemetry/opentelemetry-go) from 1.10.0 to 1.11.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...v1.11.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/exporters/jaeger
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2022-10-14 12:42:15 +08:00
anqiansong
1b87f5e30d Fix mongo insert tpl (#2512) 2022-10-14 12:27:04 +08:00
dependabot[bot]
bc47959384 chore(deps): bump go.opentelemetry.io/otel/exporters/zipkin (#2511)
Bumps [go.opentelemetry.io/otel/exporters/zipkin](https://github.com/open-telemetry/opentelemetry-go) from 1.10.0 to 1.11.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...v1.11.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/exporters/zipkin
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2022-10-14 11:47:38 +08:00
dependabot[bot]
9f6d926455 chore(deps): bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc (#2510)
Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) from 1.10.0 to 1.11.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...v1.11.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-13 23:13:04 +08:00
foliet
f7a4e3a19e chore: fix naming problem (#2500)
When I was looking for how to mock mongo client, I found some naming problems and wanted to fix them.
2022-10-13 22:53:27 +08:00
swliao425
a515a3c735 chore: sqlx's metric name is different from redis (#2505) 2022-10-13 22:52:36 +08:00
dependabot[bot]
6f6f1ae21f chore(deps): bump go.opentelemetry.io/otel/sdk from 1.10.0 to 1.11.0 (#2504)
Bumps [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) from 1.10.0 to 1.11.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...v1.11.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/sdk
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-13 22:51:40 +08:00
Kevin Wan
10f94ffcc2 chore: remove unnecessary code (#2499) 2022-10-11 22:56:12 +08:00
sado
f068062b13 token limit support context (#2335)
* token limit support context

* add token limit with ctx

add token limit with ctx

Co-authored-by: sado <liaoyonglin@bilibili.com>
2022-10-11 22:40:00 +08:00
foliet
799c118d95 feat(goctl): better generate the api code of typescript (#2483) 2022-10-11 22:19:22 +08:00
#Suyghur
74cc6b55e8 fix: replace Infof() with Errorf() in DurationInterceptor (#2495) (#2497) 2022-10-11 21:45:31 +08:00
cui fliter
fc59aec2e7 fix a few function names on comments (#2496)
Signed-off-by: cui fliter <imcusg@gmail.com>

Signed-off-by: cui fliter <imcusg@gmail.com>
2022-10-10 22:12:11 +08:00
dependabot[bot]
7868667b4f chore(deps): bump google.golang.org/grpc from 1.49.0 to 1.50.0 (#2487)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.49.0 to 1.50.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.49.0...v1.50.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-07 22:42:24 +08:00
Kevin Wan
773b59106b chore: remove init if possible (#2485) 2022-10-06 23:57:56 +08:00
dependabot[bot]
97f8667b71 chore(deps): bump go.mongodb.org/mongo-driver from 1.10.2 to 1.10.3 (#2484)
Bumps [go.mongodb.org/mongo-driver](https://github.com/mongodb/mongo-go-driver) from 1.10.2 to 1.10.3.
- [Release notes](https://github.com/mongodb/mongo-go-driver/releases)
- [Commits](https://github.com/mongodb/mongo-go-driver/compare/v1.10.2...v1.10.3)

---
updated-dependencies:
- dependency-name: go.mongodb.org/mongo-driver
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-06 11:04:11 +08:00
foliet
b51339b69b fix(mongo): fix file name generation errors (#2479)
Before this fix, regardless of the style used, lowercase file names without underscores were generated.
2022-10-04 18:09:03 +08:00
Kevin Wan
38a73d7fbe fix: etcd reconnecting problem (#2478) 2022-10-02 22:03:56 +08:00
re-dylan
e50689beed fix #2343 (#2349)
Co-authored-by: dylan.wang <dylan.wang@yijinin.com>
2022-10-02 21:46:33 +08:00
Kevin Wan
1bc138bd34 chore: refactor to reduce duplicated code (#2477) 2022-10-01 21:45:53 +08:00
Kevin Wan
4b9066eda6 chore: better shedding algorithm, make sure recover from shedding (#2476)
* backup

* chore: better shedding algorithm, make sure recover from shedding
2022-10-01 20:55:25 +08:00
#Suyghur
0c66e041b5 feat(redis):add timeout method to extend blpop (#2472) 2022-10-01 20:53:54 +08:00
Halo
aa2be0163a fix: add more tests (#2473)
* chore: add string to map in httpx parse method

* feat: add httpx parse stringToMap method test

* fix: add more test
2022-09-30 22:01:39 +08:00
Kevin Wan
ada2941e87 chore: sort methods (#2470) 2022-09-30 14:57:40 +08:00
Kevin Wan
59c0013cd1 feat: add logc package, support AddGlobalFields for both logc and logx. (#2463)
* feat: add logc package

* feat: add logc, add AddGlobalFields for both logc and logx

* chore: add benchmarks

* chore: add more tests

* chore: simplify globalFields in logx

* chore: remove outdated comments
2022-09-29 22:49:41 +08:00
Halo
05737f6519 feat: add string to map in httpx parse method (#2459)
* chore: add string to map in httpx parse method

* feat: add httpx parse stringToMap method test
2022-09-29 22:34:58 +08:00
chen quan
4f6a900fd4 fix(goctl): fix the unit test bug of goctl (#2458) 2022-09-27 23:52:05 +08:00
aV
63cfe60f1a Readme Tweak (#2436)
* Update readme.md

* Update readme.md

* Update readme.md

Co-authored-by: Kevin Wan <wanjunfeng@gmail.com>
2022-09-25 22:59:55 +08:00
bensonfx
e7acadb15d fix #2435 (#2442)
* feat: add color to debug (#2433)

* fix header and path type ts gen

Co-authored-by: chen quan <chenquan.dev@gmail.com>
2022-09-24 22:28:25 +08:00
chen quan
111e626a73 refactor: adjust http request slow log format (#2440) 2022-09-23 21:20:38 +08:00
Kevin Wan
1a6d7b3ef6 chore: gofumpt (#2439) 2022-09-22 22:40:01 +08:00
chen quan
2e1e4f3574 feat: add color to debug (#2433) 2022-09-21 22:30:06 +08:00
Kevin Wan
22d0a2120a chore: replace fmt.Fprint (#2425) 2022-09-20 23:51:58 +08:00
chen quan
68e15360c2 fix: fix log output (#2424) 2022-09-20 22:45:52 +08:00
jesse.tang
1b344a8851 cleanup: deprecated field and func (#2416)
* cleanup: deprecated field and func

* fmt import order
2022-09-20 22:13:34 +08:00
dawn_zhou
d640544a40 refactor: redis error for prometheus metric label (#2412)
Co-authored-by: dawn.zhou <dawn.zhou@yijinin.com>
2022-09-20 21:13:33 +08:00
MarkJoyMa
e6aa6fc361 feat: add log debug level (#2411) 2022-09-20 07:50:11 +08:00
MarkJoyMa
4c927624b0 fix goctl help message (#2414) 2022-09-19 14:05:46 +08:00
Kevin Wan
0ea92b7280 chore: add more tests (#2410) 2022-09-19 13:52:14 +08:00
anqiansong
2cde970c9e feat(goctl):Add ignore-columns flag (#2407)
* fix #2074,#2100

* format code

* fix #2397

* format code

* Support comma spliter

* format code
2022-09-19 11:49:39 +08:00
Kevin Wan
5061158bd6 chore: add more tests (#2409) 2022-09-18 23:17:21 +08:00
339 changed files with 12324 additions and 6772 deletions

View File

@@ -1,3 +1,6 @@
comment: false
comment:
layout: "flags, files"
behavior: once
require_changes: true
ignore:
- "tools"

View File

@@ -11,15 +11,17 @@ jobs:
name: Linux
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.16
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go 1.x
uses: actions/setup-go@v3
with:
go-version: ^1.16
check-latest: true
cache: true
id: go
- name: Get dependencies
run: |
go get -v -t -d ./...
@@ -34,20 +36,23 @@ jobs:
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
- name: Codecov
uses: codecov/codecov-action@v2
uses: codecov/codecov-action@v3
test-win:
name: Windows
runs-on: windows-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.16
- name: Checkout codebase
uses: actions/checkout@v3
- name: Set up Go 1.x
uses: actions/setup-go@v3
with:
# use 1.16 to guarantee Go 1.16 compatibility
go-version: 1.16
check-latest: true
cache: true
- name: Test
run: |
go mod verify

View File

@@ -9,7 +9,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: tomsun28/issues-translate-action@v2.6
- uses: usthe/issues-translate-action@v2.7
with:
IS_MODIFY_TITLE: true
# not required, default false. Decide whether to modify the issue title

View File

@@ -7,7 +7,7 @@ jobs:
close-issues:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v3
- uses: actions/stale@v6
with:
days-before-issue-stale: 365
days-before-issue-close: 90

1
.gitignore vendored
View File

@@ -22,6 +22,7 @@ go.work.sum
# gitlab ci
.cache
.golangci.yml
# vim auto backup file
*~

View File

@@ -1,28 +0,0 @@
# go-zero Roadmap
This document defines a high level roadmap for go-zero development and upcoming releases.
Community and contributor involvement is vital for successfully implementing all desired items for each release.
We hope that the items listed below will inspire further engagement from the community to keep go-zero progressing and shipping exciting and valuable features.
## 2021 Q2
- [x] Support service discovery through K8S client api
- [x] Log full sql statements for easier sql problem solving
## 2021 Q3
- [x] Support `goctl model pg` to support PostgreSQL code generation
- [x] Adapt builtin tracing mechanism to opentracing solutions
## 2021 Q4
- [x] Support `username/password` authentication in ETCD
- [x] Support `SSL/TLS` in ETCD
- [x] Support `SSL/TLS` in `zRPC`
- [x] Support `TLS` in redis connections
- [x] Support `goctl bug` to report bugs conveniently
## 2022
- [x] Support `context` in redis related methods for timeout and tracing
- [x] Support `context` in sql related methods for timeout and tracing
- [x] Support `context` in mongodb related methods for timeout and tracing
- [x] Add `httpc.Do` with HTTP call governance, like circuit breaker etc.
- [ ] Support `goctl doctor` command to report potential issues for given service
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file

View File

@@ -20,16 +20,16 @@ func (b noOpBreaker) Do(req func() error) error {
return req()
}
func (b noOpBreaker) DoWithAcceptable(req func() error, acceptable Acceptable) error {
func (b noOpBreaker) DoWithAcceptable(req func() error, _ Acceptable) error {
return req()
}
func (b noOpBreaker) DoWithFallback(req func() error, fallback func(err error) error) error {
func (b noOpBreaker) DoWithFallback(req func() error, _ func(err error) error) error {
return req()
}
func (b noOpBreaker) DoWithFallbackAcceptable(req func() error, fallback func(err error) error,
acceptable Acceptable) error {
func (b noOpBreaker) DoWithFallbackAcceptable(req func() error, _ func(err error) error,
_ Acceptable) error {
return req()
}
@@ -38,5 +38,5 @@ type nopPromise struct{}
func (p nopPromise) Accept() {
}
func (p nopPromise) Reject(reason string) {
func (p nopPromise) Reject(_ string) {
}

View File

@@ -32,9 +32,11 @@ func NewECBEncrypter(b cipher.Block) cipher.BlockMode {
return (*ecbEncrypter)(newECB(b))
}
// BlockSize returns the mode's block size.
func (x *ecbEncrypter) BlockSize() int { return x.blockSize }
// We don't return an error here because cipher.BlockMode doesn't allow it.
// CryptBlocks encrypts a number of blocks. The length of src must be a multiple of
// the block size. Dst and src must overlap entirely or not at all.
func (x *ecbEncrypter) CryptBlocks(dst, src []byte) {
if len(src)%x.blockSize != 0 {
logx.Error("crypto/cipher: input not full blocks")
@@ -59,11 +61,13 @@ func NewECBDecrypter(b cipher.Block) cipher.BlockMode {
return (*ecbDecrypter)(newECB(b))
}
// BlockSize returns the mode's block size.
func (x *ecbDecrypter) BlockSize() int {
return x.blockSize
}
// We don't return an error here because cipher.BlockMode doesn't allow it.
// CryptBlocks decrypts a number of blocks. The length of src must be a multiple of
// the block size. Dst and src must overlap entirely or not at all.
func (x *ecbDecrypter) CryptBlocks(dst, src []byte) {
if len(src)%x.blockSize != 0 {
logx.Error("crypto/cipher: input not full blocks")

View File

@@ -1,6 +1,7 @@
package codec
import (
"crypto/aes"
"encoding/base64"
"testing"
@@ -10,7 +11,8 @@ import (
func TestAesEcb(t *testing.T) {
var (
key = []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
val = []byte("hello")
val = []byte("helloworld")
valLong = []byte("helloworldlong..")
badKey1 = []byte("aaaaaaaaa")
// more than 32 chars
badKey2 = []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
@@ -31,6 +33,39 @@ func TestAesEcb(t *testing.T) {
src, err := EcbDecrypt(key, dst)
assert.Nil(t, err)
assert.Equal(t, val, src)
block, err := aes.NewCipher(key)
assert.NoError(t, err)
encrypter := NewECBEncrypter(block)
assert.Equal(t, 16, encrypter.BlockSize())
decrypter := NewECBDecrypter(block)
assert.Equal(t, 16, decrypter.BlockSize())
dst = make([]byte, 8)
encrypter.CryptBlocks(dst, val)
for _, b := range dst {
assert.Equal(t, byte(0), b)
}
dst = make([]byte, 8)
encrypter.CryptBlocks(dst, valLong)
for _, b := range dst {
assert.Equal(t, byte(0), b)
}
dst = make([]byte, 8)
decrypter.CryptBlocks(dst, val)
for _, b := range dst {
assert.Equal(t, byte(0), b)
}
dst = make([]byte, 8)
decrypter.CryptBlocks(dst, valLong)
for _, b := range dst {
assert.Equal(t, byte(0), b)
}
_, err = EcbEncryptBase64("cTR0N3dDKkYtSmFOZFJnVWpYbjJyNXU4eC9BP0QK", "aGVsbG93b3JsZGxvbmcuLgo=")
assert.Error(t, err)
}
func TestAesEcbBase64(t *testing.T) {

View File

@@ -80,3 +80,17 @@ func TestKeyBytes(t *testing.T) {
assert.Nil(t, err)
assert.True(t, len(key.Bytes()) > 0)
}
func TestDHOnErrors(t *testing.T) {
key, err := GenerateKey()
assert.Nil(t, err)
assert.NotEmpty(t, key.Bytes())
_, err = ComputeKey(key.PubKey, key.PriKey)
assert.NoError(t, err)
_, err = ComputeKey(nil, key.PriKey)
assert.Error(t, err)
_, err = ComputeKey(key.PubKey, nil)
assert.Error(t, err)
assert.NotNil(t, NewPublicKey([]byte("")))
}

View File

@@ -6,7 +6,7 @@ import "sync"
type Ring struct {
elements []interface{}
index int
lock sync.Mutex
lock sync.RWMutex
}
// NewRing returns a Ring object with the given size n.
@@ -31,8 +31,8 @@ func (r *Ring) Add(v interface{}) {
// Take takes all items from r.
func (r *Ring) Take() []interface{} {
r.lock.Lock()
defer r.lock.Unlock()
r.lock.RLock()
defer r.lock.RUnlock()
var size int
var start int

View File

@@ -29,7 +29,7 @@ func NewSet() *Set {
}
}
// NewUnmanagedSet returns a unmanaged Set, which can put values with different types.
// NewUnmanagedSet returns an unmanaged Set, which can put values with different types.
func NewUnmanagedSet() *Set {
return &Set{
data: make(map[interface{}]lang.PlaceholderType),
@@ -213,23 +213,23 @@ func (s *Set) validate(i interface{}) {
switch i.(type) {
case int:
if s.tp != intType {
logx.Errorf("Error: element is int, but set contains elements with type %d", s.tp)
logx.Errorf("element is int, but set contains elements with type %d", s.tp)
}
case int64:
if s.tp != int64Type {
logx.Errorf("Error: element is int64, but set contains elements with type %d", s.tp)
logx.Errorf("element is int64, but set contains elements with type %d", s.tp)
}
case uint:
if s.tp != uintType {
logx.Errorf("Error: element is uint, but set contains elements with type %d", s.tp)
logx.Errorf("element is uint, but set contains elements with type %d", s.tp)
}
case uint64:
if s.tp != uint64Type {
logx.Errorf("Error: element is uint64, but set contains elements with type %d", s.tp)
logx.Errorf("element is uint64, but set contains elements with type %d", s.tp)
}
case string:
if s.tp != stringType {
logx.Errorf("Error: element is string, but set contains elements with type %d", s.tp)
logx.Errorf("element is string, but set contains elements with type %d", s.tp)
}
}
}

View File

@@ -69,10 +69,11 @@ func NewTimingWheel(interval time.Duration, numSlots int, execute Execute) (*Tim
interval, numSlots, execute)
}
return newTimingWheelWithClock(interval, numSlots, execute, timex.NewTicker(interval))
return NewTimingWheelWithTicker(interval, numSlots, execute, timex.NewTicker(interval))
}
func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execute,
// NewTimingWheelWithTicker returns a TimingWheel with the given ticker.
func NewTimingWheelWithTicker(interval time.Duration, numSlots int, execute Execute,
ticker timex.Ticker) (*TimingWheel, error) {
tw := &TimingWheel{
interval: interval,

View File

@@ -26,7 +26,7 @@ func TestNewTimingWheel(t *testing.T) {
func TestTimingWheel_Drain(t *testing.T) {
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
}, ticker)
tw.SetTimer("first", 3, testStep*4)
tw.SetTimer("second", 5, testStep*7)
@@ -62,7 +62,7 @@ func TestTimingWheel_Drain(t *testing.T) {
func TestTimingWheel_SetTimerSoon(t *testing.T) {
run := syncx.NewAtomicBool()
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
assert.True(t, run.CompareAndSwap(false, true))
assert.Equal(t, "any", k)
assert.Equal(t, 3, v.(int))
@@ -78,7 +78,7 @@ func TestTimingWheel_SetTimerSoon(t *testing.T) {
func TestTimingWheel_SetTimerTwice(t *testing.T) {
run := syncx.NewAtomicBool()
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
assert.True(t, run.CompareAndSwap(false, true))
assert.Equal(t, "any", k)
assert.Equal(t, 5, v.(int))
@@ -96,7 +96,7 @@ func TestTimingWheel_SetTimerTwice(t *testing.T) {
func TestTimingWheel_SetTimerWrongDelay(t *testing.T) {
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {}, ticker)
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {}, ticker)
defer tw.Stop()
assert.NotPanics(t, func() {
tw.SetTimer("any", 3, -testStep)
@@ -105,7 +105,7 @@ func TestTimingWheel_SetTimerWrongDelay(t *testing.T) {
func TestTimingWheel_SetTimerAfterClose(t *testing.T) {
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {}, ticker)
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {}, ticker)
tw.Stop()
assert.Equal(t, ErrClosed, tw.SetTimer("any", 3, testStep))
}
@@ -113,7 +113,7 @@ func TestTimingWheel_SetTimerAfterClose(t *testing.T) {
func TestTimingWheel_MoveTimer(t *testing.T) {
run := syncx.NewAtomicBool()
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 3, func(k, v interface{}) {
tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v interface{}) {
assert.True(t, run.CompareAndSwap(false, true))
assert.Equal(t, "any", k)
assert.Equal(t, 3, v.(int))
@@ -139,7 +139,7 @@ func TestTimingWheel_MoveTimer(t *testing.T) {
func TestTimingWheel_MoveTimerSoon(t *testing.T) {
run := syncx.NewAtomicBool()
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 3, func(k, v interface{}) {
tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v interface{}) {
assert.True(t, run.CompareAndSwap(false, true))
assert.Equal(t, "any", k)
assert.Equal(t, 3, v.(int))
@@ -155,7 +155,7 @@ func TestTimingWheel_MoveTimerSoon(t *testing.T) {
func TestTimingWheel_MoveTimerEarlier(t *testing.T) {
run := syncx.NewAtomicBool()
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
assert.True(t, run.CompareAndSwap(false, true))
assert.Equal(t, "any", k)
assert.Equal(t, 3, v.(int))
@@ -173,7 +173,7 @@ func TestTimingWheel_MoveTimerEarlier(t *testing.T) {
func TestTimingWheel_RemoveTimer(t *testing.T) {
ticker := timex.NewFakeTicker()
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {}, ticker)
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {}, ticker)
tw.SetTimer("any", 3, testStep)
assert.NotPanics(t, func() {
tw.RemoveTimer("any")
@@ -236,7 +236,7 @@ func TestTimingWheel_SetTimer(t *testing.T) {
}
var actual int32
done := make(chan lang.PlaceholderType)
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
assert.Equal(t, 1, key.(int))
assert.Equal(t, 2, value.(int))
actual = atomic.LoadInt32(&count)
@@ -317,7 +317,7 @@ func TestTimingWheel_SetAndMoveThenStart(t *testing.T) {
}
var actual int32
done := make(chan lang.PlaceholderType)
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
actual = atomic.LoadInt32(&count)
close(done)
}, ticker)
@@ -405,7 +405,7 @@ func TestTimingWheel_SetAndMoveTwice(t *testing.T) {
}
var actual int32
done := make(chan lang.PlaceholderType)
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
actual = atomic.LoadInt32(&count)
close(done)
}, ticker)
@@ -486,7 +486,7 @@ func TestTimingWheel_ElapsedAndSet(t *testing.T) {
}
var actual int32
done := make(chan lang.PlaceholderType)
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
actual = atomic.LoadInt32(&count)
close(done)
}, ticker)
@@ -577,7 +577,7 @@ func TestTimingWheel_ElapsedAndSetThenMove(t *testing.T) {
}
var actual int32
done := make(chan lang.PlaceholderType)
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
actual = atomic.LoadInt32(&count)
close(done)
}, ticker)
@@ -612,7 +612,7 @@ func TestMoveAndRemoveTask(t *testing.T) {
}
}
var keys []int
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
assert.Equal(t, "any", k)
assert.Equal(t, 3, v.(int))
keys = append(keys, v.(int))

View File

@@ -5,16 +5,37 @@ import (
"log"
"os"
"path"
"reflect"
"strings"
"github.com/zeromicro/go-zero/core/jsonx"
"github.com/zeromicro/go-zero/core/mapping"
"github.com/zeromicro/go-zero/internal/encoding"
)
var loaders = map[string]func([]byte, interface{}) error{
".json": LoadFromJsonBytes,
".toml": LoadFromTomlBytes,
".yaml": LoadFromYamlBytes,
".yml": LoadFromYamlBytes,
const jsonTagKey = "json"
var (
fillDefaultUnmarshaler = mapping.NewUnmarshaler(jsonTagKey, mapping.WithDefault())
loaders = map[string]func([]byte, interface{}) error{
".json": LoadFromJsonBytes,
".toml": LoadFromTomlBytes,
".yaml": LoadFromYamlBytes,
".yml": LoadFromYamlBytes,
}
)
// children and mapField should not be both filled.
// named fields and map cannot be bound to the same field name.
type fieldInfo struct {
children map[string]*fieldInfo
mapField *fieldInfo
}
// FillDefault fills the default values for the given v,
// and the premise is that the value of v must be guaranteed to be empty.
func FillDefault(v interface{}) error {
return fillDefaultUnmarshaler.Unmarshal(map[string]interface{}{}, v)
}
// Load loads config into v from file, .json, .yaml and .yml are acceptable.
@@ -49,7 +70,19 @@ func LoadConfig(file string, v interface{}, opts ...Option) error {
// LoadFromJsonBytes loads config into v from content json bytes.
func LoadFromJsonBytes(content []byte, v interface{}) error {
return mapping.UnmarshalJsonBytes(content, v)
info, err := buildFieldsInfo(reflect.TypeOf(v))
if err != nil {
return err
}
var m map[string]interface{}
if err := jsonx.Unmarshal(content, &m); err != nil {
return err
}
lowerCaseKeyMap := toLowerCaseKeyMap(m, info)
return mapping.UnmarshalJsonMap(lowerCaseKeyMap, v, mapping.WithCanonicalKeyFunc(toLowerCase))
}
// LoadConfigFromJsonBytes loads config into v from content json bytes.
@@ -60,12 +93,22 @@ func LoadConfigFromJsonBytes(content []byte, v interface{}) error {
// LoadFromTomlBytes loads config into v from content toml bytes.
func LoadFromTomlBytes(content []byte, v interface{}) error {
return mapping.UnmarshalTomlBytes(content, v)
b, err := encoding.TomlToJson(content)
if err != nil {
return err
}
return LoadFromJsonBytes(b, v)
}
// LoadFromYamlBytes loads config into v from content yaml bytes.
func LoadFromYamlBytes(content []byte, v interface{}) error {
return mapping.UnmarshalYamlBytes(content, v)
b, err := encoding.YamlToJson(content)
if err != nil {
return err
}
return LoadFromJsonBytes(b, v)
}
// LoadConfigFromYamlBytes loads config into v from content yaml bytes.
@@ -80,3 +123,205 @@ func MustLoad(path string, v interface{}, opts ...Option) {
log.Fatalf("error: config file %s, %s", path, err.Error())
}
}
func addOrMergeFields(info *fieldInfo, key string, child *fieldInfo) error {
if prev, ok := info.children[key]; ok {
if child.mapField != nil {
return newDupKeyError(key)
}
if err := mergeFields(prev, key, child.children); err != nil {
return err
}
} else {
info.children[key] = child
}
return nil
}
func buildAnonymousFieldInfo(info *fieldInfo, lowerCaseName string, ft reflect.Type) error {
switch ft.Kind() {
case reflect.Struct:
fields, err := buildFieldsInfo(ft)
if err != nil {
return err
}
for k, v := range fields.children {
if err = addOrMergeFields(info, k, v); err != nil {
return err
}
}
case reflect.Map:
elemField, err := buildFieldsInfo(mapping.Deref(ft.Elem()))
if err != nil {
return err
}
if _, ok := info.children[lowerCaseName]; ok {
return newDupKeyError(lowerCaseName)
}
info.children[lowerCaseName] = &fieldInfo{
children: make(map[string]*fieldInfo),
mapField: elemField,
}
default:
if _, ok := info.children[lowerCaseName]; ok {
return newDupKeyError(lowerCaseName)
}
info.children[lowerCaseName] = &fieldInfo{
children: make(map[string]*fieldInfo),
}
}
return nil
}
func buildFieldsInfo(tp reflect.Type) (*fieldInfo, error) {
tp = mapping.Deref(tp)
switch tp.Kind() {
case reflect.Struct:
return buildStructFieldsInfo(tp)
case reflect.Array, reflect.Slice:
return buildFieldsInfo(mapping.Deref(tp.Elem()))
case reflect.Chan, reflect.Func:
return nil, fmt.Errorf("unsupported type: %s", tp.Kind())
default:
return &fieldInfo{
children: make(map[string]*fieldInfo),
}, nil
}
}
func buildNamedFieldInfo(info *fieldInfo, lowerCaseName string, ft reflect.Type) error {
var finfo *fieldInfo
var err error
switch ft.Kind() {
case reflect.Struct:
finfo, err = buildFieldsInfo(ft)
if err != nil {
return err
}
case reflect.Array, reflect.Slice:
finfo, err = buildFieldsInfo(ft.Elem())
if err != nil {
return err
}
case reflect.Map:
elemInfo, err := buildFieldsInfo(mapping.Deref(ft.Elem()))
if err != nil {
return err
}
finfo = &fieldInfo{
children: make(map[string]*fieldInfo),
mapField: elemInfo,
}
default:
finfo, err = buildFieldsInfo(ft)
if err != nil {
return err
}
}
return addOrMergeFields(info, lowerCaseName, finfo)
}
func buildStructFieldsInfo(tp reflect.Type) (*fieldInfo, error) {
info := &fieldInfo{
children: make(map[string]*fieldInfo),
}
for i := 0; i < tp.NumField(); i++ {
field := tp.Field(i)
name := field.Name
lowerCaseName := toLowerCase(name)
ft := mapping.Deref(field.Type)
// flatten anonymous fields
if field.Anonymous {
if err := buildAnonymousFieldInfo(info, lowerCaseName, ft); err != nil {
return nil, err
}
} else if err := buildNamedFieldInfo(info, lowerCaseName, ft); err != nil {
return nil, err
}
}
return info, nil
}
func mergeFields(prev *fieldInfo, key string, children map[string]*fieldInfo) error {
if len(prev.children) == 0 || len(children) == 0 {
return newDupKeyError(key)
}
// merge fields
for k, v := range children {
if _, ok := prev.children[k]; ok {
return newDupKeyError(k)
}
prev.children[k] = v
}
return nil
}
func toLowerCase(s string) string {
return strings.ToLower(s)
}
func toLowerCaseInterface(v interface{}, info *fieldInfo) interface{} {
switch vv := v.(type) {
case map[string]interface{}:
return toLowerCaseKeyMap(vv, info)
case []interface{}:
var arr []interface{}
for _, vvv := range vv {
arr = append(arr, toLowerCaseInterface(vvv, info))
}
return arr
default:
return v
}
}
func toLowerCaseKeyMap(m map[string]interface{}, info *fieldInfo) map[string]interface{} {
res := make(map[string]interface{})
for k, v := range m {
ti, ok := info.children[k]
if ok {
res[k] = toLowerCaseInterface(v, ti)
continue
}
lk := toLowerCase(k)
if ti, ok = info.children[lk]; ok {
res[lk] = toLowerCaseInterface(v, ti)
} else if info.mapField != nil {
res[k] = toLowerCaseInterface(v, info.mapField)
} else {
res[k] = v
}
}
return res
}
type dupKeyError struct {
key string
}
func newDupKeyError(key string) dupKeyError {
return dupKeyError{key: key}
}
func (e dupKeyError) Error() string {
return fmt.Sprintf("duplicated key %s", e.key)
}

View File

@@ -9,6 +9,8 @@ import (
"github.com/zeromicro/go-zero/core/hash"
)
var dupErr dupKeyError
func TestLoadConfig_notExists(t *testing.T) {
assert.NotNil(t, Load("not_a_file", nil))
}
@@ -17,7 +19,7 @@ func TestLoadConfig_notRecogFile(t *testing.T) {
filename, err := fs.TempFilenameWithText("hello")
assert.Nil(t, err)
defer os.Remove(filename)
assert.NotNil(t, Load(filename, nil))
assert.NotNil(t, LoadConfig(filename, nil))
}
func TestConfigJson(t *testing.T) {
@@ -56,6 +58,22 @@ func TestConfigJson(t *testing.T) {
}
}
func TestLoadFromJsonBytesArray(t *testing.T) {
input := []byte(`{"users": [{"name": "foo"}, {"Name": "bar"}]}`)
var val struct {
Users []struct {
Name string
}
}
assert.NoError(t, LoadConfigFromJsonBytes(input, &val))
var expect []string
for _, user := range val.Users {
expect = append(expect, user.Name)
}
assert.EqualValues(t, []string{"foo", "bar"}, expect)
}
func TestConfigToml(t *testing.T) {
text := `a = "foo"
b = 1
@@ -81,6 +99,89 @@ d = "abcd!@#$112"
assert.Equal(t, "abcd!@#$112", val.D)
}
func TestConfigOptional(t *testing.T) {
text := `a = "foo"
b = 1
c = "FOO"
d = "abcd"
`
tmpfile, err := createTempFile(".toml", text)
assert.Nil(t, err)
defer os.Remove(tmpfile)
var val struct {
A string `json:"a"`
B int `json:"b,optional"`
C string `json:"c,optional=B"`
D string `json:"d,optional=b"`
}
if assert.NoError(t, Load(tmpfile, &val)) {
assert.Equal(t, "foo", val.A)
assert.Equal(t, 1, val.B)
assert.Equal(t, "FOO", val.C)
assert.Equal(t, "abcd", val.D)
}
}
func TestConfigJsonCanonical(t *testing.T) {
text := []byte(`{"a": "foo", "B": "bar"}`)
var val1 struct {
A string `json:"a"`
B string `json:"b"`
}
var val2 struct {
A string
B string
}
assert.NoError(t, LoadFromJsonBytes(text, &val1))
assert.Equal(t, "foo", val1.A)
assert.Equal(t, "bar", val1.B)
assert.NoError(t, LoadFromJsonBytes(text, &val2))
assert.Equal(t, "foo", val2.A)
assert.Equal(t, "bar", val2.B)
}
func TestConfigTomlCanonical(t *testing.T) {
text := []byte(`a = "foo"
B = "bar"`)
var val1 struct {
A string `json:"a"`
B string `json:"b"`
}
var val2 struct {
A string
B string
}
assert.NoError(t, LoadFromTomlBytes(text, &val1))
assert.Equal(t, "foo", val1.A)
assert.Equal(t, "bar", val1.B)
assert.NoError(t, LoadFromTomlBytes(text, &val2))
assert.Equal(t, "foo", val2.A)
assert.Equal(t, "bar", val2.B)
}
func TestConfigYamlCanonical(t *testing.T) {
text := []byte(`a: foo
B: bar`)
var val1 struct {
A string `json:"a"`
B string `json:"b"`
}
var val2 struct {
A string
B string
}
assert.NoError(t, LoadConfigFromYamlBytes(text, &val1))
assert.Equal(t, "foo", val1.A)
assert.Equal(t, "bar", val1.B)
assert.NoError(t, LoadFromYamlBytes(text, &val2))
assert.Equal(t, "foo", val2.A)
assert.Equal(t, "bar", val2.B)
}
func TestConfigTomlEnv(t *testing.T) {
text := `a = "foo"
b = 1
@@ -105,7 +206,6 @@ d = "abcd!@#112"
assert.Equal(t, 1, val.B)
assert.Equal(t, "2", val.C)
assert.Equal(t, "abcd!@#112", val.D)
}
func TestConfigJsonEnv(t *testing.T) {
@@ -144,6 +244,784 @@ func TestConfigJsonEnv(t *testing.T) {
}
}
func TestToCamelCase(t *testing.T) {
tests := []struct {
input string
expect string
}{
{
input: "",
expect: "",
},
{
input: "A",
expect: "a",
},
{
input: "a",
expect: "a",
},
{
input: "hello_world",
expect: "hello_world",
},
{
input: "Hello_world",
expect: "hello_world",
},
{
input: "hello_World",
expect: "hello_world",
},
{
input: "helloWorld",
expect: "helloworld",
},
{
input: "HelloWorld",
expect: "helloworld",
},
{
input: "hello World",
expect: "hello world",
},
{
input: "Hello World",
expect: "hello world",
},
{
input: "Hello World",
expect: "hello world",
},
{
input: "Hello World foo_bar",
expect: "hello world foo_bar",
},
{
input: "Hello World foo_Bar",
expect: "hello world foo_bar",
},
{
input: "Hello World Foo_bar",
expect: "hello world foo_bar",
},
{
input: "Hello World Foo_Bar",
expect: "hello world foo_bar",
},
{
input: "Hello.World Foo_Bar",
expect: "hello.world foo_bar",
},
{
input: "你好 World Foo_Bar",
expect: "你好 world foo_bar",
},
}
for _, test := range tests {
test := test
t.Run(test.input, func(t *testing.T) {
assert.Equal(t, test.expect, toLowerCase(test.input))
})
}
}
func TestLoadFromJsonBytesError(t *testing.T) {
var val struct{}
assert.Error(t, LoadFromJsonBytes([]byte(`hello`), &val))
}
func TestLoadFromTomlBytesError(t *testing.T) {
var val struct{}
assert.Error(t, LoadFromTomlBytes([]byte(`hello`), &val))
}
func TestLoadFromYamlBytesError(t *testing.T) {
var val struct{}
assert.Error(t, LoadFromYamlBytes([]byte(`':hello`), &val))
}
func TestLoadFromYamlBytes(t *testing.T) {
input := []byte(`layer1:
layer2:
layer3: foo`)
var val struct {
Layer1 struct {
Layer2 struct {
Layer3 string
}
}
}
assert.NoError(t, LoadFromYamlBytes(input, &val))
assert.Equal(t, "foo", val.Layer1.Layer2.Layer3)
}
// TestLoadFromYamlBytesTerm verifies that a snake_case yaml key binds to a
// field through its json tag ("tls_conf") rather than the Go field name.
func TestLoadFromYamlBytesTerm(t *testing.T) {
	input := []byte(`layer1:
  layer2:
    tls_conf: foo`)
	var val struct {
		Layer1 struct {
			Layer2 struct {
				Layer3 string `json:"tls_conf"`
			}
		}
	}

	assert.NoError(t, LoadFromYamlBytes(input, &val))
	assert.Equal(t, "foo", val.Layer1.Layer2.Layer3)
}
// TestLoadFromYamlBytesLayers verifies the dotted-path json tag form
// ("Layer1.Layer2.Layer3"), which binds a deeply nested document value
// to a single flat field.
func TestLoadFromYamlBytesLayers(t *testing.T) {
	input := []byte(`layer1:
  layer2:
    layer3: foo`)
	var val struct {
		Value string `json:"Layer1.Layer2.Layer3"`
	}

	assert.NoError(t, LoadFromYamlBytes(input, &val))
	assert.Equal(t, "foo", val.Value)
}
// TestLoadFromYamlItemOverlay ensures loading fails with a duplicated-key
// error when the embedded Server (carrying Redis RedisKey) and the
// top-level Redis field both claim the "Redis" key.
func TestLoadFromYamlItemOverlay(t *testing.T) {
	type (
		Redis struct {
			Host string
			Port int
		}
		RedisKey struct {
			Redis
			Key string
		}
		Server struct {
			Redis RedisKey
		}
		TestConfig struct {
			Server
			Redis Redis
		}
	)

	input := []byte(`Redis:
  Host: localhost
  Port: 6379
  Key: test
`)

	var c TestConfig
	// dupErr is the package-level duplicated-key error target
	assert.ErrorAs(t, LoadFromYamlBytes(input, &c), &dupErr)
}
// TestLoadFromYamlItemOverlayReverse mirrors TestLoadFromYamlItemOverlay
// with the field order flipped (named Redis field first, embedded Server
// second); the duplicated "Redis" key must still be rejected.
func TestLoadFromYamlItemOverlayReverse(t *testing.T) {
	type (
		Redis struct {
			Host string
			Port int
		}
		RedisKey struct {
			Redis
			Key string
		}
		Server struct {
			Redis Redis
		}
		TestConfig struct {
			Redis RedisKey
			Server
		}
	)

	input := []byte(`Redis:
  Host: localhost
  Port: 6379
  Key: test
`)

	var c TestConfig
	assert.ErrorAs(t, LoadFromYamlBytes(input, &c), &dupErr)
}
// TestLoadFromYamlItemOverlayWithMap checks that the duplicated-key error
// is also raised when the conflicting top-level field is a map rather
// than a struct.
func TestLoadFromYamlItemOverlayWithMap(t *testing.T) {
	type (
		Redis struct {
			Host string
			Port int
		}
		RedisKey struct {
			Redis
			Key string
		}
		Server struct {
			Redis RedisKey
		}
		TestConfig struct {
			Server
			Redis map[string]interface{}
		}
	)

	input := []byte(`Redis:
  Host: localhost
  Port: 6379
  Key: test
`)

	var c TestConfig
	assert.ErrorAs(t, LoadFromYamlBytes(input, &c), &dupErr)
}
// TestUnmarshalJsonBytesMap verifies that string-map fields accept
// arbitrary keys, including ones containing '/' and '.'.
func TestUnmarshalJsonBytesMap(t *testing.T) {
	payload := []byte(`{"foo":{"/mtproto.RPCTos": "bff.bff","bar":"baz"}}`)

	var conf struct {
		Foo map[string]string
	}
	assert.NoError(t, LoadFromJsonBytes(payload, &conf))
	assert.Equal(t, "bff.bff", conf.Foo["/mtproto.RPCTos"])
	assert.Equal(t, "baz", conf.Foo["bar"])
}
// TestUnmarshalJsonBytesMapWithSliceElements verifies maps whose values
// are string slices, again with '/' and '.' preserved in the keys.
func TestUnmarshalJsonBytesMapWithSliceElements(t *testing.T) {
	payload := []byte(`{"foo":{"/mtproto.RPCTos": ["bff.bff", "any"],"bar":["baz", "qux"]}}`)

	var conf struct {
		Foo map[string][]string
	}
	assert.NoError(t, LoadFromJsonBytes(payload, &conf))
	assert.EqualValues(t, []string{"bff.bff", "any"}, conf.Foo["/mtproto.RPCTos"])
	assert.EqualValues(t, []string{"baz", "qux"}, conf.Foo["bar"])
}
// TestUnmarshalJsonBytesMapWithSliceOfStructs verifies maps whose values
// are slices of structs: element counts and per-element fields must all
// round-trip.
func TestUnmarshalJsonBytesMapWithSliceOfStructs(t *testing.T) {
	input := []byte(`{"foo":{
"/mtproto.RPCTos": [{"bar": "any"}],
"bar":[{"bar": "qux"}, {"bar": "ever"}]}}`)
	var val struct {
		Foo map[string][]struct {
			Bar string
		}
	}

	assert.NoError(t, LoadFromJsonBytes(input, &val))
	assert.Equal(t, 1, len(val.Foo["/mtproto.RPCTos"]))
	assert.Equal(t, "any", val.Foo["/mtproto.RPCTos"][0].Bar)
	assert.Equal(t, 2, len(val.Foo["bar"]))
	assert.Equal(t, "qux", val.Foo["bar"][0].Bar)
	assert.Equal(t, "ever", val.Foo["bar"][1].Bar)
}
// TestUnmarshalJsonBytesWithAnonymousField verifies that anonymous
// (embedded) fields are populated: the embedded struct's field via "Name"
// and the embedded named basic type via the lower-cased "int" key.
func TestUnmarshalJsonBytesWithAnonymousField(t *testing.T) {
	type (
		Int int

		InnerConf struct {
			Name string
		}

		Conf struct {
			Int
			InnerConf
		}
	)

	var (
		input = []byte(`{"Name": "hello", "int": 3}`)
		c     Conf
	)

	assert.NoError(t, LoadFromJsonBytes(input, &c))
	assert.Equal(t, "hello", c.Name)
	assert.Equal(t, Int(3), c.Int)
}
// TestUnmarshalJsonBytesWithMapValueOfStruct verifies map values of struct
// type across differently-cased inputs.
// NOTE(review): adjacent table entries are byte-identical — they look
// intended to vary key casing pairwise; confirm against the original
// inputs and deduplicate or fix as appropriate.
func TestUnmarshalJsonBytesWithMapValueOfStruct(t *testing.T) {
	type (
		Value struct {
			Name string
		}

		Config struct {
			Items map[string]Value
		}
	)

	var inputs = [][]byte{
		[]byte(`{"Items": {"Key":{"Name": "foo"}}}`),
		[]byte(`{"Items": {"Key":{"Name": "foo"}}}`),
		[]byte(`{"items": {"key":{"name": "foo"}}}`),
		[]byte(`{"items": {"key":{"name": "foo"}}}`),
	}
	for _, input := range inputs {
		var c Config
		if assert.NoError(t, LoadFromJsonBytes(input, &c)) {
			assert.Equal(t, 1, len(c.Items))
			for _, v := range c.Items {
				assert.Equal(t, "foo", v.Name)
			}
		}
	}
}
// TestUnmarshalJsonBytesWithMapTypeValueOfStruct is the embedded variant
// of TestUnmarshalJsonBytesWithMapValueOfStruct: the map is a named type
// embedded into the config struct.
// NOTE(review): as in the sibling test, adjacent table entries are
// byte-identical — confirm whether mixed-case variants were intended.
func TestUnmarshalJsonBytesWithMapTypeValueOfStruct(t *testing.T) {
	type (
		Value struct {
			Name string
		}

		Map map[string]Value

		Config struct {
			Map
		}
	)

	var inputs = [][]byte{
		[]byte(`{"Map": {"Key":{"Name": "foo"}}}`),
		[]byte(`{"Map": {"Key":{"Name": "foo"}}}`),
		[]byte(`{"map": {"key":{"name": "foo"}}}`),
		[]byte(`{"map": {"key":{"name": "foo"}}}`),
	}
	for _, input := range inputs {
		var c Config
		if assert.NoError(t, LoadFromJsonBytes(input, &c)) {
			assert.Equal(t, 1, len(c.Map))
			for _, v := range c.Map {
				assert.Equal(t, "foo", v.Name)
			}
		}
	}
}
// Test_FieldOverwrite verifies duplicated-key detection for embedding:
// an embedded struct plus a same-named outer field (any type, pointer or
// value, at any embedding depth) must be rejected with a dup-key error,
// while distinct field names load fine.
// NOTE(review): the underscore in Test_FieldOverwrite is non-idiomatic Go
// naming — consider TestFieldOverwrite for consistency with the sibling
// TestFieldOverwriteComplicated.
func Test_FieldOverwrite(t *testing.T) {
	// distinct names (Name vs Name2) — no conflict, loads successfully
	t.Run("normal", func(t *testing.T) {
		type Base struct {
			Name string
		}

		type St1 struct {
			Base
			Name2 string
		}

		type St2 struct {
			Base
			Name2 string
		}

		type St3 struct {
			*Base
			Name2 string
		}

		type St4 struct {
			*Base
			Name2 *string
		}

		validate := func(val interface{}) {
			input := []byte(`{"Name": "hello", "Name2": "world"}`)
			assert.NoError(t, LoadFromJsonBytes(input, val))
		}

		validate(&St1{})
		validate(&St2{})
		validate(&St3{})
		validate(&St4{})
	})

	// same name in embedded and outer struct — always a dup-key error,
	// regardless of the outer field's type or pointer-ness
	t.Run("Inherit Override", func(t *testing.T) {
		type Base struct {
			Name string
		}

		type St1 struct {
			Base
			Name string
		}

		type St2 struct {
			Base
			Name int
		}

		type St3 struct {
			*Base
			Name int
		}

		type St4 struct {
			*Base
			Name *string
		}

		validate := func(val interface{}) {
			input := []byte(`{"Name": "hello"}`)
			err := LoadFromJsonBytes(input, val)
			assert.ErrorAs(t, err, &dupErr)
			assert.Equal(t, newDupKeyError("name").Error(), err.Error())
		}

		validate(&St1{})
		validate(&St2{})
		validate(&St3{})
		validate(&St4{})
	})

	// the conflict is detected through two levels of embedding as well
	t.Run("Inherit more", func(t *testing.T) {
		type Base1 struct {
			Name string
		}

		type St0 struct {
			Base1
			Name string
		}

		type St1 struct {
			St0
			Name string
		}

		type St2 struct {
			St0
			Name int
		}

		type St3 struct {
			*St0
			Name int
		}

		type St4 struct {
			*St0
			Name *int
		}

		validate := func(val interface{}) {
			input := []byte(`{"Name": "hello"}`)
			err := LoadFromJsonBytes(input, val)
			assert.ErrorAs(t, err, &dupErr)
			assert.Equal(t, newDupKeyError("name").Error(), err.Error())
		}

		validate(&St0{})
		validate(&St1{})
		validate(&St2{})
		validate(&St3{})
		validate(&St4{})
	})
}
// TestFieldOverwriteComplicated covers dup-key detection in trickier
// shapes: two embedded structs exposing the same map key, conflicts nested
// inside named/anonymous containers, and conflicts between a map and a
// primitive/slice field. The only passing case is "merge children", where
// the two embedded structs contribute disjoint keys under the same inner
// name.
func TestFieldOverwriteComplicated(t *testing.T) {
	// both embedded structs declare "Values" — conflict
	t.Run("double maps", func(t *testing.T) {
		type (
			Base1 struct {
				Values map[string]string
			}
			Base2 struct {
				Values map[string]string
			}
			Config struct {
				Base1
				Base2
			}
		)

		var c Config
		input := []byte(`{"Values": {"Key": "Value"}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	// same inner field name but disjoint leaf keys — merged, no error
	t.Run("merge children", func(t *testing.T) {
		type (
			Inner1 struct {
				Name string
			}
			Inner2 struct {
				Age int
			}
			Base1 struct {
				Inner Inner1
			}
			Base2 struct {
				Inner Inner2
			}
			Config struct {
				Base1
				Base2
			}
		)

		var c Config
		input := []byte(`{"Inner": {"Name": "foo", "Age": 10}}`)
		if assert.NoError(t, LoadFromJsonBytes(input, &c)) {
			assert.Equal(t, "foo", c.Base1.Inner.Name)
			assert.Equal(t, 10, c.Base2.Inner.Age)
		}
	})

	t.Run("overwritten maps", func(t *testing.T) {
		type (
			Inner struct {
				Map map[string]string
			}
			Config struct {
				Map map[string]string
				Inner
			}
		)

		var c Config
		input := []byte(`{"Inner": {"Map": {"Key": "Value"}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	t.Run("overwritten nested maps", func(t *testing.T) {
		type (
			Inner struct {
				Map map[string]string
			}
			Middle1 struct {
				Map map[string]string
				Inner
			}
			Middle2 struct {
				Map map[string]string
				Inner
			}
			Config struct {
				Middle1
				Middle2
			}
		)

		var c Config
		input := []byte(`{"Middle1": {"Inner": {"Map": {"Key": "Value"}}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	t.Run("overwritten outer/inner maps", func(t *testing.T) {
		type (
			Inner struct {
				Map map[string]string
			}
			Middle struct {
				Inner
				Map map[string]string
			}
			Config struct {
				Middle
			}
		)

		var c Config
		input := []byte(`{"Middle": {"Inner": {"Map": {"Key": "Value"}}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	// the conflicting Middle sits inside a named map type
	t.Run("overwritten anonymous maps", func(t *testing.T) {
		type (
			Inner struct {
				Map map[string]string
			}
			Middle struct {
				Inner
				Map map[string]string
			}
			Elem map[string]Middle
			Config struct {
				Elem
			}
		)

		var c Config
		input := []byte(`{"Elem": {"Key": {"Inner": {"Map": {"Key": "Value"}}}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	// "Elem" is both a string field (via Named) and an embedded map type
	t.Run("overwritten primitive and map", func(t *testing.T) {
		type (
			Inner struct {
				Value string
			}
			Elem map[string]Inner
			Named struct {
				Elem string
			}
			Config struct {
				Named
				Elem
			}
		)

		var c Config
		input := []byte(`{"Elem": {"Key": {"Value": "Value"}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	t.Run("overwritten map and slice", func(t *testing.T) {
		type (
			Inner struct {
				Value string
			}
			Elem []Inner
			Named struct {
				Elem string
			}
			Config struct {
				Named
				Elem
			}
		)

		var c Config
		input := []byte(`{"Elem": {"Key": {"Value": "Value"}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	t.Run("overwritten map and string", func(t *testing.T) {
		type (
			Elem string
			Named struct {
				Elem string
			}
			Config struct {
				Named
				Elem
			}
		)

		var c Config
		input := []byte(`{"Elem": {"Key": {"Value": "Value"}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})
}
// TestLoadNamedFieldOverwritten verifies that the dup-key error still
// surfaces when the conflicting struct is reached through a named field,
// a slice, a map value, or a pointer — and that a plainly unmarshalable
// field type (chan) yields a regular error.
// NOTE(review): the subtest name "overwritten named struct" is used three
// times (the last one actually covers a chan field) — consider renaming
// for distinct test output.
func TestLoadNamedFieldOverwritten(t *testing.T) {
	t.Run("overwritten named struct", func(t *testing.T) {
		type (
			Elem string
			Named struct {
				Elem string
			}
			Base struct {
				Named
				Elem
			}
			Config struct {
				Val Base
			}
		)

		var c Config
		input := []byte(`{"Val": {"Elem": {"Key": {"Value": "Value"}}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	t.Run("overwritten named []struct", func(t *testing.T) {
		type (
			Elem string
			Named struct {
				Elem string
			}
			Base struct {
				Named
				Elem
			}
			Config struct {
				Vals []Base
			}
		)

		var c Config
		input := []byte(`{"Vals": [{"Elem": {"Key": {"Value": "Value"}}}]}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	t.Run("overwritten named map[string]struct", func(t *testing.T) {
		type (
			Elem string
			Named struct {
				Elem string
			}
			Base struct {
				Named
				Elem
			}
			Config struct {
				Vals map[string]Base
			}
		)

		var c Config
		input := []byte(`{"Vals": {"Key": {"Elem": {"Key": {"Value": "Value"}}}}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	t.Run("overwritten named *struct", func(t *testing.T) {
		type (
			Elem string
			Named struct {
				Elem string
			}
			Base struct {
				Named
				Elem
			}
			Config struct {
				Vals *Base
			}
		)

		var c Config
		input := []byte(`{"Vals": [{"Elem": {"Key": {"Value": "Value"}}}]}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	// "Elem" is both an embedded struct's key and a named struct field
	t.Run("overwritten named struct", func(t *testing.T) {
		type (
			Named struct {
				Elem string
			}
			Base struct {
				Named
				Elem Named
			}
			Config struct {
				Val Base
			}
		)

		var c Config
		input := []byte(`{"Val": {"Elem": "Value"}}`)
		assert.ErrorAs(t, LoadFromJsonBytes(input, &c), &dupErr)
	})

	// chan fields cannot be unmarshaled at all — plain error, not dup-key
	t.Run("overwritten named struct", func(t *testing.T) {
		type Config struct {
			Val chan int
		}

		var c Config
		input := []byte(`{"Val": 1}`)
		assert.Error(t, LoadFromJsonBytes(input, &c))
	})
}
func createTempFile(ext, text string) (string, error) {
tmpfile, err := os.CreateTemp(os.TempDir(), hash.Md5Hex([]byte(text))+"*"+ext)
if err != nil {
@@ -161,3 +1039,55 @@ func createTempFile(ext, text string) (string, error) {
return filename, nil
}
// TestFillDefaultUnmarshal covers conf.FillDefault: the target must be a
// non-nil pointer, `default` tags fill zero-valued fields, `env` tags read
// from the environment, and a target that already carries a non-zero value
// is rejected.
func TestFillDefaultUnmarshal(t *testing.T) {
	// passing a value instead of a pointer must fail: nothing to fill
	t.Run("nil", func(t *testing.T) {
		type St struct{}
		err := FillDefault(St{})
		assert.Error(t, err)
	})

	t.Run("not nil", func(t *testing.T) {
		type St struct{}
		err := FillDefault(&St{})
		assert.NoError(t, err)
	})

	t.Run("default", func(t *testing.T) {
		type St struct {
			A string `json:",default=a"`
			B string
		}
		var st St
		err := FillDefault(&st)
		assert.NoError(t, err)
		assert.Equal(t, st.A, "a")
	})

	t.Run("env", func(t *testing.T) {
		type St struct {
			A string `json:",default=a"`
			B string
			C string `json:",env=TEST_C"`
		}
		// t.Setenv restores the variable automatically after the subtest
		t.Setenv("TEST_C", "c")
		var st St
		err := FillDefault(&st)
		assert.NoError(t, err)
		assert.Equal(t, st.A, "a")
		assert.Equal(t, st.C, "c")
	})

	// fixed subtest name typo: "has vaue" -> "has value"
	t.Run("has value", func(t *testing.T) {
		type St struct {
			A string `json:",default=a"`
			B string
		}
		var st = St{
			A: "b",
		}
		// pre-populated fields make FillDefault refuse to run
		err := FillDefault(&st)
		assert.Error(t, err)
	})
}

View File

@@ -12,7 +12,6 @@ import (
// PropertyError represents a configuration error message.
type PropertyError struct {
error
message string
}

View File

@@ -4,6 +4,7 @@
```go
type RestfulConf struct {
ServiceName string `json:",env=SERVICE_NAME"` // read from env automatically
Host string `json:",default=0.0.0.0"`
Port int
LogMode string `json:",options=[file,console]"`
@@ -21,20 +22,20 @@ type RestfulConf struct {
```yaml
# most fields are optional or have default values
Port: 8080
LogMode: console
port: 8080
logMode: console
# you can use env settings
MaxBytes: ${MAX_BYTES}
maxBytes: ${MAX_BYTES}
```
- toml example
```toml
# most fields are optional or have default values
Port = 8_080
LogMode = "console"
port = 8_080
logMode = "console"
# you can use env settings
MaxBytes = "${MAX_BYTES}"
maxBytes = "${MAX_BYTES}"
```
3. Load the config from a file:

View File

@@ -297,7 +297,8 @@ func (c *cluster) watch(cli EtcdClient, key string, rev int64) {
func (c *cluster) watchStream(cli EtcdClient, key string, rev int64) bool {
var rch clientv3.WatchChan
if rev != 0 {
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix(), clientv3.WithRev(rev+1))
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix(),
clientv3.WithRev(rev+1))
} else {
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix())
}
@@ -342,6 +343,7 @@ func DialClient(endpoints []string) (EtcdClient, error) {
DialKeepAliveTime: dialKeepAliveTime,
DialKeepAliveTimeout: DialTimeout,
RejectOldCluster: true,
PermitWithoutStream: true,
}
if account, ok := GetAccount(endpoints); ok {
cfg.Username = account.User

View File

@@ -2,7 +2,6 @@ package internal
import (
"context"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"sync"
"testing"
@@ -12,6 +11,7 @@ import (
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stringx"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
clientv3 "go.etcd.io/etcd/client/v3"
)

View File

@@ -1,6 +1,8 @@
package discov
import (
"time"
"github.com/zeromicro/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
@@ -51,12 +53,7 @@ func NewPublisher(endpoints []string, key, value string, opts ...PubOption) *Pub
// KeepAlive keeps key:value alive.
func (p *Publisher) KeepAlive() error {
cli, err := internal.GetRegistry().GetConn(p.endpoints)
if err != nil {
return err
}
p.lease, err = p.register(cli)
cli, err := p.doRegister()
if err != nil {
return err
}
@@ -83,6 +80,43 @@ func (p *Publisher) Stop() {
p.quit.Close()
}
func (p *Publisher) doKeepAlive() error {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
select {
case <-p.quit.Done():
return nil
default:
cli, err := p.doRegister()
if err != nil {
logx.Errorf("etcd publisher doRegister: %s", err.Error())
break
}
if err := p.keepAliveAsync(cli); err != nil {
logx.Errorf("etcd publisher keepAliveAsync: %s", err.Error())
break
}
return nil
}
}
return nil
}
func (p *Publisher) doRegister() (internal.EtcdClient, error) {
cli, err := internal.GetRegistry().GetConn(p.endpoints)
if err != nil {
return nil, err
}
p.lease, err = p.register(cli)
return cli, err
}
func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
ch, err := cli.KeepAlive(cli.Ctx(), p.lease)
if err != nil {
@@ -95,8 +129,8 @@ func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
case _, ok := <-ch:
if !ok {
p.revoke(cli)
if err := p.KeepAlive(); err != nil {
logx.Errorf("KeepAlive: %s", err.Error())
if err := p.doKeepAlive(); err != nil {
logx.Errorf("etcd publisher KeepAlive: %s", err.Error())
}
return
}
@@ -105,8 +139,8 @@ func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
p.revoke(cli)
select {
case <-p.resumeChan:
if err := p.KeepAlive(); err != nil {
logx.Errorf("KeepAlive: %s", err.Error())
if err := p.doKeepAlive(); err != nil {
logx.Errorf("etcd publisher KeepAlive: %s", err.Error())
}
return
case <-p.quit.Done():
@@ -141,7 +175,7 @@ func (p *Publisher) register(client internal.EtcdClient) (clientv3.LeaseID, erro
func (p *Publisher) revoke(cli internal.EtcdClient) {
if _, err := cli.Revoke(cli.Ctx(), p.lease); err != nil {
logx.Error(err)
logx.Errorf("etcd publisher revoke: %s", err.Error())
}
}

View File

@@ -13,7 +13,7 @@ type (
// SubOption defines the method to customize a Subscriber.
SubOption func(sub *Subscriber)
// A Subscriber is used to subscribe the given key on a etcd cluster.
// A Subscriber is used to subscribe the given key on an etcd cluster.
Subscriber struct {
endpoints []string
exclusive bool

View File

@@ -53,10 +53,11 @@ func TestChunkExecutorFlushInterval(t *testing.T) {
}
func TestChunkExecutorEmpty(t *testing.T) {
NewChunkExecutor(func(items []interface{}) {
executor := NewChunkExecutor(func(items []interface{}) {
assert.Fail(t, "should not called")
}, WithChunkBytes(10), WithFlushInterval(time.Millisecond))
time.Sleep(time.Millisecond * 100)
executor.Wait()
}
func TestChunkExecutorFlush(t *testing.T) {

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/timex"
)
@@ -67,6 +68,7 @@ func TestPeriodicalExecutor_QuitGoroutine(t *testing.T) {
ticker.Tick()
ticker.Wait(time.Millisecond * idleRound)
assert.Equal(t, routines, runtime.NumGoroutine())
proc.Shutdown()
}
func TestPeriodicalExecutor_Bulk(t *testing.T) {

15
core/fs/files_test.go Normal file
View File

@@ -0,0 +1,15 @@
package fs
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestCloseOnExec only asserts that CloseOnExec tolerates a real *os.File
// without panicking; the close-on-exec flag itself is not inspected here.
func TestCloseOnExec(t *testing.T) {
	// fd 0 with os.DevNull as the display name — just a valid file handle
	file := os.NewFile(0, os.DevNull)
	assert.NotPanics(t, func() {
		CloseOnExec(file)
	})
}

View File

@@ -328,7 +328,7 @@ func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
}, opts...).Done()
}
// Reduce is a utility method to let the caller deal with the underlying channel.
// Reduce is an utility method to let the caller deal with the underlying channel.
func (s Stream) Reduce(fn ReduceFunc) (interface{}, error) {
return fn(s.source)
}

View File

@@ -7,7 +7,6 @@ import (
"sync"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/mapping"
)
const (
@@ -183,5 +182,5 @@ func innerRepr(node interface{}) string {
}
func repr(node interface{}) string {
return mapping.Repr(node)
return lang.Repr(node)
}

View File

@@ -10,7 +10,7 @@ func (nopCloser) Close() error {
return nil
}
// NopCloser returns a io.WriteCloser that does nothing on calling Close.
// NopCloser returns an io.WriteCloser that does nothing on calling Close.
func NopCloser(w io.Writer) io.WriteCloser {
return nopCloser{w}
}

View File

@@ -1,44 +0,0 @@
package jsontype
import (
"encoding/json"
"time"
"github.com/globalsign/mgo/bson"
)
// MilliTime represents time.Time that works better with mongodb.
type MilliTime struct {
time.Time
}
// MarshalJSON marshals mt to json bytes.
func (mt MilliTime) MarshalJSON() ([]byte, error) {
return json.Marshal(mt.Milli())
}
// UnmarshalJSON unmarshals data into mt.
func (mt *MilliTime) UnmarshalJSON(data []byte) error {
var milli int64
if err := json.Unmarshal(data, &milli); err != nil {
return err
}
mt.Time = time.Unix(0, milli*int64(time.Millisecond))
return nil
}
// GetBSON returns BSON base on mt.
func (mt MilliTime) GetBSON() (interface{}, error) {
return mt.Time, nil
}
// SetBSON sets raw into mt.
func (mt *MilliTime) SetBSON(raw bson.Raw) error {
return raw.Unmarshal(&mt.Time)
}
// Milli returns milliseconds for mt.
func (mt MilliTime) Milli() int64 {
return mt.UnixNano() / int64(time.Millisecond)
}

View File

@@ -1,126 +0,0 @@
package jsontype
import (
"strconv"
"testing"
"time"
"github.com/globalsign/mgo/bson"
"github.com/stretchr/testify/assert"
)
func TestMilliTime_GetBSON(t *testing.T) {
tests := []struct {
name string
tm time.Time
}{
{
name: "now",
tm: time.Now(),
},
{
name: "future",
tm: time.Now().Add(time.Hour),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
got, err := MilliTime{test.tm}.GetBSON()
assert.Nil(t, err)
assert.Equal(t, test.tm, got)
})
}
}
func TestMilliTime_MarshalJSON(t *testing.T) {
tests := []struct {
name string
tm time.Time
}{
{
name: "now",
tm: time.Now(),
},
{
name: "future",
tm: time.Now().Add(time.Hour),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
b, err := MilliTime{test.tm}.MarshalJSON()
assert.Nil(t, err)
assert.Equal(t, strconv.FormatInt(test.tm.UnixNano()/1e6, 10), string(b))
})
}
}
func TestMilliTime_Milli(t *testing.T) {
tests := []struct {
name string
tm time.Time
}{
{
name: "now",
tm: time.Now(),
},
{
name: "future",
tm: time.Now().Add(time.Hour),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
n := MilliTime{test.tm}.Milli()
assert.Equal(t, test.tm.UnixNano()/1e6, n)
})
}
}
func TestMilliTime_UnmarshalJSON(t *testing.T) {
tests := []struct {
name string
tm time.Time
}{
{
name: "now",
tm: time.Now(),
},
{
name: "future",
tm: time.Now().Add(time.Hour),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var mt MilliTime
s := strconv.FormatInt(test.tm.UnixNano()/1e6, 10)
err := mt.UnmarshalJSON([]byte(s))
assert.Nil(t, err)
s1, err := mt.MarshalJSON()
assert.Nil(t, err)
assert.Equal(t, s, string(s1))
})
}
}
func TestUnmarshalWithError(t *testing.T) {
var mt MilliTime
assert.NotNil(t, mt.UnmarshalJSON([]byte("hello")))
}
func TestSetBSON(t *testing.T) {
data, err := bson.Marshal(time.Now())
assert.Nil(t, err)
var raw bson.Raw
assert.Nil(t, bson.Unmarshal(data, &raw))
var mt MilliTime
assert.Nil(t, mt.SetBSON(raw))
assert.NotNil(t, mt.SetBSON(bson.Raw{}))
}

View File

@@ -1,5 +1,11 @@
package lang
import (
"fmt"
"reflect"
"strconv"
)
// Placeholder is a placeholder object that can be used globally.
var Placeholder PlaceholderType
@@ -9,3 +15,64 @@ type (
// PlaceholderType represents a placeholder type.
PlaceholderType = struct{}
)
// Repr returns the string representation of v.
func Repr(v interface{}) string {
if v == nil {
return ""
}
// if func (v *Type) String() string, we can't use Elem()
switch vt := v.(type) {
case fmt.Stringer:
return vt.String()
}
val := reflect.ValueOf(v)
for val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
return reprOfValue(val)
}
func reprOfValue(val reflect.Value) string {
switch vt := val.Interface().(type) {
case bool:
return strconv.FormatBool(vt)
case error:
return vt.Error()
case float32:
return strconv.FormatFloat(float64(vt), 'f', -1, 32)
case float64:
return strconv.FormatFloat(vt, 'f', -1, 64)
case fmt.Stringer:
return vt.String()
case int:
return strconv.Itoa(vt)
case int8:
return strconv.Itoa(int(vt))
case int16:
return strconv.Itoa(int(vt))
case int32:
return strconv.Itoa(int(vt))
case int64:
return strconv.FormatInt(vt, 10)
case string:
return vt
case uint:
return strconv.FormatUint(uint64(vt), 10)
case uint8:
return strconv.FormatUint(uint64(vt), 10)
case uint16:
return strconv.FormatUint(uint64(vt), 10)
case uint32:
return strconv.FormatUint(uint64(vt), 10)
case uint64:
return strconv.FormatUint(vt, 10)
case []byte:
return string(vt)
default:
return fmt.Sprint(val.Interface())
}
}

156
core/lang/lang_test.go Normal file
View File

@@ -0,0 +1,156 @@
package lang
import (
"encoding/json"
"errors"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
// TestRepr covers Repr across nil, Stringer implementations (value and
// pointer receivers), a struct without a String method, booleans, every
// sized numeric type, and byte slices.
func TestRepr(t *testing.T) {
	var (
		f32 float32 = 1.1
		f64         = 2.2
		i8  int8    = 1
		i16 int16   = 2
		i32 int32   = 3
		i64 int64   = 4
		u8  uint8   = 5
		u16 uint16  = 6
		u32 uint32  = 7
		u64 uint64  = 8
	)
	tests := []struct {
		v      interface{}
		expect string
	}{
		{
			nil,
			"",
		},
		{
			// value-receiver Stringer
			mockStringable{},
			"mocked",
		},
		{
			// pointer to a value-receiver Stringer still finds String
			new(mockStringable),
			"mocked",
		},
		{
			// pointer-receiver Stringer
			newMockPtr(),
			"mockptr",
		},
		{
			// no String method: falls back to fmt.Sprint formatting
			&mockOpacity{
				val: 1,
			},
			"{1}",
		},
		{
			true,
			"true",
		},
		{
			false,
			"false",
		},
		{
			f32,
			"1.1",
		},
		{
			f64,
			"2.2",
		},
		{
			i8,
			"1",
		},
		{
			i16,
			"2",
		},
		{
			i32,
			"3",
		},
		{
			i64,
			"4",
		},
		{
			u8,
			"5",
		},
		{
			u16,
			"6",
		},
		{
			u32,
			"7",
		},
		{
			u64,
			"8",
		},
		{
			[]byte(`abcd`),
			"abcd",
		},
		{
			mockOpacity{val: 1},
			"{1}",
		},
	}

	for _, test := range tests {
		t.Run(test.expect, func(t *testing.T) {
			assert.Equal(t, test.expect, Repr(test.v))
		})
	}
}
// TestReprOfValue checks reprOfValue directly against values wrapped in
// reflect.Value: error and fmt.Stringer implementations, plus plain
// int/string/uint representations.
func TestReprOfValue(t *testing.T) {
	t.Run("error", func(t *testing.T) {
		assert.Equal(t, "error", reprOfValue(reflect.ValueOf(errors.New("error"))))
	})

	t.Run("stringer", func(t *testing.T) {
		// json.Number implements fmt.Stringer
		assert.Equal(t, "1.23", reprOfValue(reflect.ValueOf(json.Number("1.23"))))
	})

	t.Run("int", func(t *testing.T) {
		assert.Equal(t, "1", reprOfValue(reflect.ValueOf(1)))
	})

	// renamed from a duplicated "int": the value under test is a string
	t.Run("string", func(t *testing.T) {
		assert.Equal(t, "1", reprOfValue(reflect.ValueOf("1")))
	})

	// renamed from a duplicated "int": the value under test is a uint
	t.Run("uint", func(t *testing.T) {
		assert.Equal(t, "1", reprOfValue(reflect.ValueOf(uint(1))))
	})
}
// mockStringable implements fmt.Stringer with a value receiver, so both
// mockStringable and *mockStringable satisfy the interface.
type mockStringable struct{}

func (m mockStringable) String() string {
	return "mocked"
}

// mockPtr implements fmt.Stringer with a pointer receiver, so only
// *mockPtr satisfies the interface.
type mockPtr struct{}

func newMockPtr() *mockPtr {
	return new(mockPtr)
}

func (m *mockPtr) String() string {
	return "mockptr"
}

// mockOpacity has no String method, exercising the fmt.Sprint fallback.
type mockOpacity struct {
	val int
}

View File

@@ -1,6 +1,8 @@
package limit
import (
"context"
"errors"
"fmt"
"strconv"
"sync"
@@ -58,8 +60,8 @@ type TokenLimiter struct {
timestampKey string
rescueLock sync.Mutex
redisAlive uint32
rescueLimiter *xrate.Limiter
monitorStarted bool
rescueLimiter *xrate.Limiter
}
// NewTokenLimiter returns a new TokenLimiter that allows events up to rate and permits
@@ -84,19 +86,31 @@ func (lim *TokenLimiter) Allow() bool {
return lim.AllowN(time.Now(), 1)
}
// AllowCtx is shorthand for AllowNCtx(ctx,time.Now(), 1) with incoming context.
func (lim *TokenLimiter) AllowCtx(ctx context.Context) bool {
return lim.AllowNCtx(ctx, time.Now(), 1)
}
// AllowN reports whether n events may happen at time now.
// Use this method if you intend to drop / skip events that exceed the rate.
// Otherwise, use Reserve or Wait.
func (lim *TokenLimiter) AllowN(now time.Time, n int) bool {
return lim.reserveN(now, n)
return lim.reserveN(context.Background(), now, n)
}
func (lim *TokenLimiter) reserveN(now time.Time, n int) bool {
// AllowNCtx reports whether n events may happen at time now with incoming context.
// Use this method if you intend to drop / skip events that exceed the rate.
// Otherwise, use Reserve or Wait.
func (lim *TokenLimiter) AllowNCtx(ctx context.Context, now time.Time, n int) bool {
return lim.reserveN(ctx, now, n)
}
func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) bool {
if atomic.LoadUint32(&lim.redisAlive) == 0 {
return lim.rescueLimiter.AllowN(now, n)
}
resp, err := lim.store.Eval(
resp, err := lim.store.EvalCtx(ctx,
script,
[]string{
lim.tokenKey,
@@ -113,6 +127,10 @@ func (lim *TokenLimiter) reserveN(now time.Time, n int) bool {
if err == redis.Nil {
return false
}
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
logx.Errorf("fail to use rate limiter: %s", err)
return false
}
if err != nil {
logx.Errorf("fail to use rate limiter: %s, use in-process limiter for rescue", err)
lim.startMonitor()

View File

@@ -1,6 +1,7 @@
package limit
import (
"context"
"testing"
"time"
@@ -15,6 +16,30 @@ func init() {
logx.Disable()
}
// TestTokenLimit_WithCtx checks the context-aware limiter path: with a live
// context the request is allowed; after cancel every AllowCtx call is
// denied, and the cancellation must not flip the limiter into the
// in-process rescue mode (monitorStarted stays false).
func TestTokenLimit_WithCtx(t *testing.T) {
	s, err := miniredis.Run()
	assert.Nil(t, err)

	const (
		total = 100
		rate  = 5
		burst = 10
	)
	l := NewTokenLimiter(rate, burst, redis.New(s.Addr()), "tokenlimit")
	defer s.Close()

	ctx, cancel := context.WithCancel(context.Background())
	ok := l.AllowCtx(ctx)
	assert.True(t, ok)

	cancel()
	for i := 0; i < total; i++ {
		ok := l.AllowCtx(ctx)
		assert.False(t, ok)
		// a canceled context is not a redis outage — no rescue monitor
		assert.False(t, l.monitorStarted)
	}
}
func TestTokenLimit_Rescue(t *testing.T) {
s, err := miniredis.Run()
assert.Nil(t, err)

View File

@@ -17,7 +17,7 @@ import (
const (
defaultBuckets = 50
defaultWindow = time.Second * 5
// using 1000m notation, 900m is like 80%, keep it as var for unit test
// using 1000m notation, 900m is like 90%, keep it as var for unit test
defaultCpuThreshold = 900
defaultMinRt = float64(time.Second / time.Millisecond)
// moving average hyperparameter beta for calculating requests on the fly
@@ -70,7 +70,7 @@ type (
flying int64
avgFlying float64
avgFlyingLock syncx.SpinLock
dropTime *syncx.AtomicDuration
overloadTime *syncx.AtomicDuration
droppedRecently *syncx.AtomicBool
passCounter *collection.RollingWindow
rtCounter *collection.RollingWindow
@@ -106,7 +106,7 @@ func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
return &adaptiveShedder{
cpuThreshold: options.cpuThreshold,
windows: int64(time.Second / bucketDuration),
dropTime: syncx.NewAtomicDuration(),
overloadTime: syncx.NewAtomicDuration(),
droppedRecently: syncx.NewAtomicBool(),
passCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
collection.IgnoreCurrentBucket()),
@@ -118,7 +118,6 @@ func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
// Allow implements Shedder.Allow.
func (as *adaptiveShedder) Allow() (Promise, error) {
if as.shouldDrop() {
as.dropTime.Set(timex.Now())
as.droppedRecently.Set(true)
return nil, ErrServiceOverloaded
@@ -215,21 +214,26 @@ func (as *adaptiveShedder) stillHot() bool {
return false
}
dropTime := as.dropTime.Load()
if dropTime == 0 {
overloadTime := as.overloadTime.Load()
if overloadTime == 0 {
return false
}
hot := timex.Since(dropTime) < coolOffDuration
if !hot {
as.droppedRecently.Set(false)
if timex.Since(overloadTime) < coolOffDuration {
return true
}
return hot
as.droppedRecently.Set(false)
return false
}
func (as *adaptiveShedder) systemOverloaded() bool {
return systemOverloadChecker(as.cpuThreshold)
if !systemOverloadChecker(as.cpuThreshold) {
return false
}
as.overloadTime.Set(timex.Now())
return true
}
// WithBuckets customizes the Shedder with given number of buckets.

View File

@@ -13,6 +13,7 @@ import (
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
)
const (
@@ -136,7 +137,7 @@ func TestAdaptiveShedderShouldDrop(t *testing.T) {
passCounter: passCounter,
rtCounter: rtCounter,
windows: buckets,
dropTime: syncx.NewAtomicDuration(),
overloadTime: syncx.NewAtomicDuration(),
droppedRecently: syncx.NewAtomicBool(),
}
// cpu >= 800, inflight < maxPass
@@ -190,12 +191,15 @@ func TestAdaptiveShedderStillHot(t *testing.T) {
passCounter: passCounter,
rtCounter: rtCounter,
windows: buckets,
dropTime: syncx.NewAtomicDuration(),
overloadTime: syncx.NewAtomicDuration(),
droppedRecently: syncx.ForAtomicBool(true),
}
assert.False(t, shedder.stillHot())
shedder.dropTime.Set(-coolOffDuration * 2)
shedder.overloadTime.Set(-coolOffDuration * 2)
assert.False(t, shedder.stillHot())
shedder.droppedRecently.Set(true)
shedder.overloadTime.Set(timex.Now())
assert.True(t, shedder.stillHot())
}
func BenchmarkAdaptiveShedder_Allow(b *testing.B) {

142
core/logc/logs.go Normal file
View File

@@ -0,0 +1,142 @@
package logc
import (
"context"
"fmt"
"github.com/zeromicro/go-zero/core/logx"
)
type (
	// LogConf and LogField are re-exported logx types so callers of logc
	// do not need to import logx directly.
	LogConf  = logx.LogConf
	LogField = logx.LogField
)

// AddGlobalFields adds global fields attached to every log entry.
func AddGlobalFields(fields ...LogField) {
	logx.AddGlobalFields(fields...)
}

// Alert alerts v in alert level, and the message is written to error log.
// The context parameter is ignored here; logx.Alert takes none.
func Alert(_ context.Context, v string) {
	logx.Alert(v)
}

// Close closes the logging.
func Close() error {
	return logx.Close()
}

// Debug writes v into access log.
func Debug(ctx context.Context, v ...interface{}) {
	getLogger(ctx).Debug(v...)
}

// Debugf writes v with format into access log.
func Debugf(ctx context.Context, format string, v ...interface{}) {
	getLogger(ctx).Debugf(format, v...)
}

// Debugv writes v into access log with json content.
func Debugv(ctx context.Context, v interface{}) {
	getLogger(ctx).Debugv(v)
}

// Debugw writes msg along with fields into access log.
func Debugw(ctx context.Context, msg string, fields ...LogField) {
	getLogger(ctx).Debugw(msg, fields...)
}

// Error writes v into error log.
func Error(ctx context.Context, v ...interface{}) {
	getLogger(ctx).Error(v...)
}
// Errorf writes the message formatted from format and v into error log.
func Errorf(ctx context.Context, format string, v ...interface{}) {
	// Pass format and args straight through. The previous
	// fmt.Errorf(format, v...).Error() round-trip fed an already-formatted
	// string back in as a format string, which corrupts messages that
	// contain '%' (e.g. producing "%!s(MISSING)").
	getLogger(ctx).Errorf(format, v...)
}
// Errorv writes v into error log with json content.
// No call stack attached, because not elegant to pack the messages.
func Errorv(ctx context.Context, v interface{}) {
	getLogger(ctx).Errorv(v)
}

// Errorw writes msg along with fields into error log.
func Errorw(ctx context.Context, msg string, fields ...LogField) {
	getLogger(ctx).Errorw(msg, fields...)
}

// Field returns a LogField for the given key and value.
func Field(key string, value interface{}) LogField {
	return logx.Field(key, value)
}

// Info writes v into access log.
func Info(ctx context.Context, v ...interface{}) {
	getLogger(ctx).Info(v...)
}

// Infof writes v with format into access log.
func Infof(ctx context.Context, format string, v ...interface{}) {
	getLogger(ctx).Infof(format, v...)
}

// Infov writes v into access log with json content.
func Infov(ctx context.Context, v interface{}) {
	getLogger(ctx).Infov(v)
}

// Infow writes msg along with fields into access log.
func Infow(ctx context.Context, msg string, fields ...LogField) {
	getLogger(ctx).Infow(msg, fields...)
}

// Must checks if err is nil, otherwise logs the error and exits.
func Must(err error) {
	logx.Must(err)
}

// MustSetup sets up logging with given config c. It exits on error.
func MustSetup(c logx.LogConf) {
	logx.MustSetup(c)
}

// SetLevel sets the logging level. It can be used to suppress some logs.
func SetLevel(level uint32) {
	logx.SetLevel(level)
}

// SetUp sets up the logx. If already set up, just return nil.
// We allow SetUp to be called multiple times, because for example
// we need to allow different service frameworks to initialize logx
// respectively.
func SetUp(c LogConf) error {
	return logx.SetUp(c)
}

// Slow writes v into slow log.
func Slow(ctx context.Context, v ...interface{}) {
	getLogger(ctx).Slow(v...)
}

// Slowf writes v with format into slow log.
func Slowf(ctx context.Context, format string, v ...interface{}) {
	getLogger(ctx).Slowf(format, v...)
}

// Slowv writes v into slow log with json content.
func Slowv(ctx context.Context, v interface{}) {
	getLogger(ctx).Slowv(v)
}

// Sloww writes msg along with fields into slow log.
func Sloww(ctx context.Context, msg string, fields ...LogField) {
	getLogger(ctx).Sloww(msg, fields...)
}
// getLogger builds a context-aware logger with one extra caller frame
// skipped, so the reported call site is the logc caller rather than
// this package's wrapper function.
func getLogger(ctx context.Context) logx.Logger {
	logger := logx.WithContext(ctx)
	return logger.WithCallerSkip(1)
}

266
core/logc/logs_test.go Normal file
View File

@@ -0,0 +1,266 @@
package logc
import (
"bytes"
"context"
"encoding/json"
"fmt"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/logx"
)
func TestAddGlobalFields(t *testing.T) {
var buf bytes.Buffer
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
Info(context.Background(), "hello")
buf.Reset()
AddGlobalFields(Field("a", "1"), Field("b", "2"))
AddGlobalFields(Field("c", "3"))
Info(context.Background(), "world")
var m map[string]interface{}
assert.NoError(t, json.Unmarshal(buf.Bytes(), &m))
assert.Equal(t, "1", m["a"])
assert.Equal(t, "2", m["b"])
assert.Equal(t, "3", m["c"])
}
func TestAlert(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
Alert(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), "foo"), buf.String())
}
func TestError(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Error(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestErrorf(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Errorf(context.Background(), "foo %s", "bar")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestErrorv(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Errorv(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestErrorw(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Errorw(context.Background(), "foo", Field("a", "b"))
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestInfo(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Info(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestInfof(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Infof(context.Background(), "foo %s", "bar")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestInfov(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Infov(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestInfow(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Infow(context.Background(), "foo", Field("a", "b"))
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestDebug(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Debug(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestDebugf(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Debugf(context.Background(), "foo %s", "bar")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestDebugv(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Debugv(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestDebugw(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Debugw(context.Background(), "foo", Field("a", "b"))
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
}
func TestMust(t *testing.T) {
assert.NotPanics(t, func() {
Must(nil)
})
assert.NotPanics(t, func() {
MustSetup(LogConf{})
})
}
func TestMisc(t *testing.T) {
SetLevel(logx.DebugLevel)
assert.NoError(t, SetUp(LogConf{}))
assert.NoError(t, Close())
}
func TestSlow(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Slow(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
}
func TestSlowf(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Slowf(context.Background(), "foo %s", "bar")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
}
func TestSlowv(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Slowv(context.Background(), "foo")
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
}
func TestSloww(t *testing.T) {
var buf strings.Builder
writer := logx.NewWriter(&buf)
old := logx.Reset()
logx.SetWriter(writer)
defer logx.SetWriter(old)
file, line := getFileLine()
Sloww(context.Background(), "foo", Field("a", "b"))
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
}
// getFileLine reports the base file name and line number of its caller.
func getFileLine() (string, int) {
	_, file, line, _ := runtime.Caller(1)

	// Strip everything up to and including the last path separator;
	// a separator at index 0 (or none at all) leaves the name untouched,
	// matching the original scan that stopped before index 0.
	if idx := strings.LastIndexByte(file, '/'); idx > 0 {
		file = file[idx+1:]
	}

	return file, line
}

View File

@@ -2,15 +2,34 @@ package logx
// A LogConf is a logging config.
type LogConf struct {
ServiceName string `json:",optional"`
Mode string `json:",default=console,options=[console,file,volume]"`
Encoding string `json:",default=json,options=[json,plain]"`
TimeFormat string `json:",optional"`
Path string `json:",default=logs"`
Level string `json:",default=info,options=[info,error,severe]"`
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
// ServiceName represents the service name.
ServiceName string `json:",optional"`
// Mode represents the logging mode, default is `console`.
// console: log to console.
// file: log to file.
// volume: used in k8s, prepend the hostname to the log file name.
Mode string `json:",default=console,options=[console,file,volume]"`
// Encoding represents the encoding type, default is `json`.
// json: json encoding.
// plain: plain text encoding, typically used in development.
Encoding string `json:",default=json,options=[json,plain]"`
// TimeFormat represents the time format, default is `2006-01-02T15:04:05.000Z07:00`.
TimeFormat string `json:",optional"`
// Path represents the log file path, default is `logs`.
Path string `json:",default=logs"`
// Level represents the log level, default is `info`.
Level string `json:",default=info,options=[debug,info,error,severe]"`
// MaxContentLength represents the max content bytes, default is no limit.
MaxContentLength uint32 `json:",optional"`
// Compress represents whether to compress the log file, default is `false`.
Compress bool `json:",optional"`
// Stat represents whether to log statistics, default is `true`.
Stat bool `json:",default=true"`
// KeepDays represents how many days the log files will be kept. Default to keep all files.
// Only take effect when Mode is `file` or `volume`, both work when Rotation is `daily` or `size`.
KeepDays int `json:",optional"`
// StackCooldownMillis represents the cooldown time for stack logging, default is 100ms.
StackCooldownMillis int `json:",default=100"`
// MaxBackups represents how many backup log files will be kept. 0 means all files will be kept forever.
// Only take effect when RotationRuleType is `size`.
// Even though `MaxBackups` is set to 0, log files will still be removed

View File

@@ -1,16 +1,40 @@
package logx
import "context"
import (
"context"
"sync"
"sync/atomic"
)
var fieldsContextKey contextKey
var (
fieldsContextKey contextKey
globalFields atomic.Value
globalFieldsLock sync.Mutex
)
type contextKey struct{}
// AddGlobalFields adds global fields.
func AddGlobalFields(fields ...LogField) {
	globalFieldsLock.Lock()
	defer globalFieldsLock.Unlock()

	// Build a fresh slice combining previously registered fields (if any)
	// with the new ones, then publish it atomically for lock-free readers.
	var merged []LogField
	if old := globalFields.Load(); old != nil {
		prev := old.([]LogField)
		merged = make([]LogField, 0, len(prev)+len(fields))
		merged = append(merged, prev...)
	}
	merged = append(merged, fields...)
	globalFields.Store(merged)
}
// ContextWithFields returns a new context with the given fields.
func ContextWithFields(ctx context.Context, fields ...LogField) context.Context {
if val := ctx.Value(fieldsContextKey); val != nil {
if arr, ok := val.([]LogField); ok {
return context.WithValue(ctx, fieldsContextKey, append(arr, fields...))
allFields := make([]LogField, 0, len(arr)+len(fields))
allFields = append(allFields, arr...)
allFields = append(allFields, fields...)
return context.WithValue(ctx, fieldsContextKey, allFields)
}
}

View File

@@ -1,12 +1,37 @@
package logx
import (
"bytes"
"context"
"encoding/json"
"strconv"
"sync"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddGlobalFields(t *testing.T) {
var buf bytes.Buffer
writer := NewWriter(&buf)
old := Reset()
SetWriter(writer)
defer SetWriter(old)
Info("hello")
buf.Reset()
AddGlobalFields(Field("a", "1"), Field("b", "2"))
AddGlobalFields(Field("c", "3"))
Info("world")
var m map[string]interface{}
assert.NoError(t, json.Unmarshal(buf.Bytes(), &m))
assert.Equal(t, "1", m["a"])
assert.Equal(t, "2", m["b"])
assert.Equal(t, "3", m["c"])
}
func TestContextWithFields(t *testing.T) {
ctx := ContextWithFields(context.Background(), Field("a", 1), Field("b", 2))
vals := ctx.Value(fieldsContextKey)
@@ -16,6 +41,15 @@ func TestContextWithFields(t *testing.T) {
assert.EqualValues(t, []LogField{Field("a", 1), Field("b", 2)}, fields)
}
func TestWithFields(t *testing.T) {
ctx := WithFields(context.Background(), Field("a", 1), Field("b", 2))
vals := ctx.Value(fieldsContextKey)
assert.NotNil(t, vals)
fields, ok := vals.([]LogField)
assert.True(t, ok)
assert.EqualValues(t, []LogField{Field("a", 1), Field("b", 2)}, fields)
}
func TestWithFieldsAppend(t *testing.T) {
var dummyKey struct{}
ctx := context.WithValue(context.Background(), dummyKey, "dummy")
@@ -33,3 +67,55 @@ func TestWithFieldsAppend(t *testing.T) {
Field("d", 4),
}, fields)
}
func TestWithFieldsAppendCopy(t *testing.T) {
const count = 10
ctx := context.Background()
for i := 0; i < count; i++ {
ctx = ContextWithFields(ctx, Field(strconv.Itoa(i), 1))
}
af := Field("foo", 1)
bf := Field("bar", 2)
ctxa := ContextWithFields(ctx, af)
ctxb := ContextWithFields(ctx, bf)
assert.EqualValues(t, af, ctxa.Value(fieldsContextKey).([]LogField)[count])
assert.EqualValues(t, bf, ctxb.Value(fieldsContextKey).([]LogField)[count])
}
func BenchmarkAtomicValue(b *testing.B) {
b.ReportAllocs()
var container atomic.Value
vals := []LogField{
Field("a", "b"),
Field("c", "d"),
Field("e", "f"),
}
container.Store(&vals)
for i := 0; i < b.N; i++ {
val := container.Load()
if val != nil {
_ = *val.(*[]LogField)
}
}
}
func BenchmarkRWMutex(b *testing.B) {
b.ReportAllocs()
var lock sync.RWMutex
vals := []LogField{
Field("a", "b"),
Field("c", "d"),
Field("e", "f"),
}
for i := 0; i < b.N; i++ {
lock.RLock()
_ = vals
lock.RUnlock()
}
}

View File

@@ -7,6 +7,14 @@ import (
// A Logger represents a logger.
type Logger interface {
// Debug logs a message at debug level.
Debug(...interface{})
// Debugf logs a message at debug level.
Debugf(string, ...interface{})
// Debugv logs a message at debug level.
Debugv(interface{})
// Debugw logs a message at debug level.
Debugw(string, ...LogField)
// Error logs a message at error level.
Error(...interface{})
// Errorf logs a message at error level.
@@ -37,4 +45,6 @@ type Logger interface {
WithContext(ctx context.Context) Logger
// WithDuration returns a new logger with the given duration.
WithDuration(d time.Duration) Logger
// WithFields returns a new logger with the given fields.
WithFields(fields ...LogField) Logger
}

View File

@@ -20,6 +20,8 @@ var (
timeFormat = "2006-01-02T15:04:05.000Z07:00"
logLevel uint32
encoding uint32 = jsonEncodingType
// maxContentLength is used to truncate the log content, 0 for not truncating.
maxContentLength uint32
// use uint32 for atomic operations
disableLog uint32
disableStat uint32
@@ -64,6 +66,26 @@ func Close() error {
return nil
}
// Debug writes v into access log.
func Debug(v ...interface{}) {
writeDebug(fmt.Sprint(v...))
}
// Debugf writes v with format into access log.
func Debugf(format string, v ...interface{}) {
writeDebug(fmt.Sprintf(format, v...))
}
// Debugv writes v into access log with json content.
func Debugv(v interface{}) {
writeDebug(v)
}
// Debugw writes msg along with fields into access log.
func Debugw(msg string, fields ...LogField) {
writeDebug(msg, fields...)
}
// Disable disables the logging.
func Disable() {
atomic.StoreUint32(&disableLog, 1)
@@ -210,10 +232,16 @@ func SetUp(c LogConf) (err error) {
setupOnce.Do(func() {
setupLogLevel(c)
if !c.Stat {
DisableStat()
}
if len(c.TimeFormat) > 0 {
timeFormat = c.TimeFormat
}
atomic.StoreUint32(&maxContentLength, c.MaxContentLength)
switch c.Encoding {
case plainEncoding:
atomic.StoreUint32(&encoding, plainEncodingType)
@@ -352,6 +380,8 @@ func handleOptions(opts []LogOption) {
func setupLogLevel(c LogConf) {
switch c.Level {
case levelDebug:
SetLevel(DebugLevel)
case levelInfo:
SetLevel(InfoLevel)
case levelError:
@@ -392,6 +422,12 @@ func shallLogStat() bool {
return atomic.LoadUint32(&disableStat) == 0
}
// writeDebug emits val (plus caller info) at debug level,
// provided the configured log level permits debug output.
func writeDebug(val interface{}, fields ...LogField) {
	if !shallLog(DebugLevel) {
		return
	}

	getWriter().Debug(val, addCaller(fields...)...)
}
func writeError(val interface{}, fields ...LogField) {
if shallLog(ErrorLevel) {
getWriter().Error(val, addCaller(fields...)...)

View File

@@ -35,6 +35,12 @@ func (mw *mockWriter) Alert(v interface{}) {
output(&mw.builder, levelAlert, v)
}
func (mw *mockWriter) Debug(v interface{}, fields ...LogField) {
mw.lock.Lock()
defer mw.lock.Unlock()
output(&mw.builder, levelDebug, v, fields...)
}
func (mw *mockWriter) Error(v interface{}, fields ...LogField) {
mw.lock.Lock()
defer mw.lock.Unlock()
@@ -212,6 +218,46 @@ func TestStructedLogAlert(t *testing.T) {
})
}
func TestStructedLogDebug(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) {
Debug(v...)
})
}
func TestStructedLogDebugf(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) {
Debugf(fmt.Sprint(v...))
})
}
func TestStructedLogDebugv(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) {
Debugv(fmt.Sprint(v...))
})
}
func TestStructedLogDebugw(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) {
Debugw(fmt.Sprint(v...), Field("foo", time.Second))
})
}
func TestStructedLogError(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
@@ -483,9 +529,9 @@ func TestSetLevel(t *testing.T) {
func TestSetLevelTwiceWithMode(t *testing.T) {
testModes := []string{
"mode",
"console",
"volumn",
"mode",
}
w := new(mockWriter)
old := writer.Swap(w)
@@ -569,6 +615,8 @@ func TestSetup(t *testing.T) {
Path: os.TempDir(),
Compress: true,
KeepDays: 1,
MaxBackups: 3,
MaxSize: 1024 * 1024,
}))
setupLogLevel(LogConf{
Level: levelInfo,
@@ -743,9 +791,12 @@ func doTestStructedLogConsole(t *testing.T, w *mockWriter, write func(...interfa
func testSetLevelTwiceWithMode(t *testing.T, mode string, w *mockWriter) {
writer.Store(nil)
SetUp(LogConf{
Mode: mode,
Level: "error",
Path: "/dev/null",
Mode: mode,
Level: "debug",
Path: "/dev/null",
Encoding: plainEncoding,
Stat: false,
TimeFormat: time.RFC3339,
})
SetUp(LogConf{
Mode: mode,

View File

@@ -6,7 +6,7 @@ import (
"time"
"github.com/zeromicro/go-zero/core/timex"
"go.opentelemetry.io/otel/trace"
"github.com/zeromicro/go-zero/internal/trace"
)
// WithCallerSkip returns a Logger with given caller skip.
@@ -40,6 +40,22 @@ type richLogger struct {
fields []LogField
}
func (l *richLogger) Debug(v ...interface{}) {
l.debug(fmt.Sprint(v...))
}
func (l *richLogger) Debugf(format string, v ...interface{}) {
l.debug(fmt.Sprintf(format, v...))
}
func (l *richLogger) Debugv(v interface{}) {
l.debug(v)
}
func (l *richLogger) Debugw(msg string, fields ...LogField) {
l.debug(msg, fields...)
}
func (l *richLogger) Error(v ...interface{}) {
l.err(fmt.Sprint(v...))
}
@@ -107,6 +123,11 @@ func (l *richLogger) WithDuration(duration time.Duration) Logger {
return l
}
func (l *richLogger) WithFields(fields ...LogField) Logger {
l.fields = append(l.fields, fields...)
return l
}
func (l *richLogger) buildFields(fields ...LogField) []LogField {
fields = append(l.fields, fields...)
fields = append(fields, Field(callerKey, getCaller(callerDepth+l.callerSkip)))
@@ -115,12 +136,12 @@ func (l *richLogger) buildFields(fields ...LogField) []LogField {
return fields
}
traceID := traceIdFromContext(l.ctx)
traceID := trace.TraceIDFromContext(l.ctx)
if len(traceID) > 0 {
fields = append(fields, Field(traceKey, traceID))
}
spanID := spanIdFromContext(l.ctx)
spanID := trace.SpanIDFromContext(l.ctx)
if len(spanID) > 0 {
fields = append(fields, Field(spanKey, spanID))
}
@@ -135,6 +156,12 @@ func (l *richLogger) buildFields(fields ...LogField) []LogField {
return fields
}
// debug writes v at debug level with the logger's accumulated fields,
// doing nothing when debug output is suppressed by the log level.
func (l *richLogger) debug(v interface{}, fields ...LogField) {
	if !shallLog(DebugLevel) {
		return
	}

	getWriter().Debug(v, l.buildFields(fields...)...)
}
func (l *richLogger) err(v interface{}, fields ...LogField) {
if shallLog(ErrorLevel) {
getWriter().Error(v, l.buildFields(fields...)...)
@@ -152,21 +179,3 @@ func (l *richLogger) slow(v interface{}, fields ...LogField) {
getWriter().Slow(v, l.buildFields(fields...)...)
}
}
func spanIdFromContext(ctx context.Context) string {
spanCtx := trace.SpanContextFromContext(ctx)
if spanCtx.HasSpanID() {
return spanCtx.SpanID().String()
}
return ""
}
func traceIdFromContext(ctx context.Context) string {
spanCtx := trace.SpanContextFromContext(ctx)
if spanCtx.HasTraceID() {
return spanCtx.TraceID().String()
}
return ""
}

View File

@@ -37,6 +37,41 @@ func TestTraceLog(t *testing.T) {
validate(t, w.String(), true, true)
}
func TestTraceDebug(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
writer.lock.RLock()
defer func() {
writer.lock.RUnlock()
writer.Store(old)
}()
otp := otel.GetTracerProvider()
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
otel.SetTracerProvider(tp)
defer otel.SetTracerProvider(otp)
ctx, span := tp.Tracer("foo").Start(context.Background(), "bar")
defer span.End()
l := WithContext(ctx)
SetLevel(DebugLevel)
l.WithDuration(time.Second).Debug(testlog)
assert.True(t, strings.Contains(w.String(), traceKey))
assert.True(t, strings.Contains(w.String(), spanKey))
w.Reset()
l.WithDuration(time.Second).Debugf(testlog)
validate(t, w.String(), true, true)
w.Reset()
l.WithDuration(time.Second).Debugv(testlog)
validate(t, w.String(), true, true)
w.Reset()
l.WithDuration(time.Second).Debugw(testlog, Field("foo", "bar"))
validate(t, w.String(), true, true)
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
}
func TestTraceError(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
@@ -237,6 +272,23 @@ func TestLogWithCallerSkip(t *testing.T) {
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
}
func TestLoggerWithFields(t *testing.T) {
w := new(mockWriter)
old := writer.Swap(w)
writer.lock.RLock()
defer func() {
writer.lock.RUnlock()
writer.Store(old)
}()
l := WithContext(context.Background()).WithFields(Field("foo", "bar"))
l.Info(testlog)
var val mockValue
assert.Nil(t, json.Unmarshal([]byte(w.String()), &val))
assert.Equal(t, "bar", val.Foo)
}
func validate(t *testing.T, body string, expectedTrace, expectedSpan bool) {
var val mockValue
dec := json.NewDecoder(strings.NewReader(body))

View File

@@ -115,7 +115,9 @@ func (r *DailyRotateRule) OutdatedFiles() []string {
var buf strings.Builder
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(dateFormat)
fmt.Fprintf(&buf, "%s%s%s", r.filename, r.delimiter, boundary)
buf.WriteString(r.filename)
buf.WriteString(r.delimiter)
buf.WriteString(boundary)
if r.gzip {
buf.WriteString(gzipExt)
}
@@ -282,7 +284,7 @@ func (l *RotateLogger) getBackupFilename() string {
func (l *RotateLogger) init() error {
l.backup = l.rule.BackupFileName()
if _, err := os.Stat(l.filename); err != nil {
if fileInfo, err := os.Stat(l.filename); err != nil {
basePath := path.Dir(l.filename)
if _, err = os.Stat(basePath); err != nil {
if err = os.MkdirAll(basePath, defaultDirMode); err != nil {
@@ -293,8 +295,11 @@ func (l *RotateLogger) init() error {
if l.fp, err = os.Create(l.filename); err != nil {
return err
}
} else if l.fp, err = os.OpenFile(l.filename, os.O_APPEND|os.O_WRONLY, defaultFileMode); err != nil {
return err
} else {
if l.fp, err = os.OpenFile(l.filename, os.O_APPEND|os.O_WRONLY, defaultFileMode); err != nil {
return err
}
l.currentSize = fileInfo.Size()
}
fs.CloseOnExec(l.fp)

View File

@@ -3,8 +3,10 @@ package logx
import "errors"
const (
// InfoLevel logs everything
InfoLevel uint32 = iota
// DebugLevel logs everything
DebugLevel uint32 = iota
// InfoLevel does not include debugs
InfoLevel
// ErrorLevel includes errors, slows, stacks
ErrorLevel
// SevereLevel only log severe messages
@@ -14,13 +16,13 @@ const (
const (
jsonEncodingType = iota
plainEncodingType
plainEncoding = "plain"
plainEncodingSep = '\t'
sizeRotationRule = "size"
)
const (
plainEncoding = "plain"
plainEncodingSep = '\t'
sizeRotationRule = "size"
accessFilename = "access.log"
errorFilename = "error.log"
severeFilename = "severe.log"
@@ -37,6 +39,7 @@ const (
levelFatal = "fatal"
levelSlow = "slow"
levelStat = "stat"
levelDebug = "debug"
backupFileDelimiter = "-"
flags = 0x0
@@ -50,6 +53,7 @@ const (
spanKey = "span"
timestampKey = "@timestamp"
traceKey = "trace"
truncatedKey = "truncated"
)
var (
@@ -57,4 +61,6 @@ var (
ErrLogPathNotSet = errors.New("log path must be set")
// ErrLogServiceNameNotSet is an error that indicates that the service name is not set.
ErrLogServiceNameNotSet = errors.New("log service name must be set")
truncatedField = Field(truncatedKey, true)
)

View File

@@ -1,12 +1,12 @@
package logx
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"path"
"strings"
"sync"
"sync/atomic"
@@ -18,6 +18,7 @@ type (
Writer interface {
Alert(v interface{})
Close() error
Debug(v interface{}, fields ...LogField)
Error(v interface{}, fields ...LogField)
Info(v interface{}, fields ...LogField)
Severe(v interface{})
@@ -194,6 +195,10 @@ func (w *concreteWriter) Close() error {
return w.statLog.Close()
}
// Debug writes v with the debug level tag; debug entries share the
// info (access) log destination rather than having a dedicated file.
func (w *concreteWriter) Debug(v interface{}, fields ...LogField) {
	output(w.infoLog, levelDebug, v, fields...)
}
func (w *concreteWriter) Error(v interface{}, fields ...LogField) {
output(w.errorLog, levelError, v, fields...)
}
@@ -227,6 +232,9 @@ func (n nopWriter) Close() error {
return nil
}
func (n nopWriter) Debug(_ interface{}, _ ...LogField) {
}
func (n nopWriter) Error(_ interface{}, _ ...LogField) {
}
@@ -245,20 +253,45 @@ func (n nopWriter) Stack(_ interface{}) {
func (n nopWriter) Stat(_ interface{}, _ ...LogField) {
}
func buildFields(fields ...LogField) []string {
func buildPlainFields(fields ...LogField) []string {
var items []string
for _, field := range fields {
items = append(items, fmt.Sprintf("%s=%v", field.Key, field.Value))
items = append(items, fmt.Sprintf("%s=%+v", field.Key, field.Value))
}
return items
}
// combineGlobalFields prepends the registered global fields to fields.
// When no global fields are registered, fields is returned unchanged.
func combineGlobalFields(fields []LogField) []LogField {
	val := globalFields.Load()
	if val == nil {
		return fields
	}

	globals := val.([]LogField)
	// Copy into a fresh slice so neither input backing array is aliased.
	merged := make([]LogField, len(globals), len(globals)+len(fields))
	copy(merged, globals)
	return append(merged, fields...)
}
func output(writer io.Writer, level string, val interface{}, fields ...LogField) {
// only truncate string content, don't know how to truncate the values of other types.
if v, ok := val.(string); ok {
maxLen := atomic.LoadUint32(&maxContentLength)
if maxLen > 0 && len(v) > int(maxLen) {
val = v[:maxLen]
fields = append(fields, truncatedField)
}
}
fields = combineGlobalFields(fields)
switch atomic.LoadUint32(&encoding) {
case plainEncodingType:
writePlainAny(writer, level, val, buildFields(fields...)...)
writePlainAny(writer, level, val, buildPlainFields(fields...)...)
default:
entry := make(logEntry)
for _, field := range fields {
@@ -284,6 +317,8 @@ func wrapLevelWithColor(level string) string {
colour = color.FgBlue
case levelSlow:
colour = color.FgYellow
case levelDebug:
colour = color.FgYellow
case levelStat:
colour = color.FgGreen
}
@@ -321,7 +356,7 @@ func writePlainAny(writer io.Writer, level string, val interface{}, fields ...st
}
func writePlainText(writer io.Writer, level, msg string, fields ...string) {
var buf strings.Builder
var buf bytes.Buffer
buf.WriteString(getTimestamp())
buf.WriteByte(plainEncodingSep)
buf.WriteString(level)
@@ -337,13 +372,13 @@ func writePlainText(writer io.Writer, level, msg string, fields ...string) {
return
}
if _, err := fmt.Fprint(writer, buf.String()); err != nil {
if _, err := writer.Write(buf.Bytes()); err != nil {
log.Println(err.Error())
}
}
func writePlainValue(writer io.Writer, level string, val interface{}, fields ...string) {
var buf strings.Builder
var buf bytes.Buffer
buf.WriteString(getTimestamp())
buf.WriteByte(plainEncodingSep)
buf.WriteString(level)
@@ -363,7 +398,7 @@ func writePlainValue(writer io.Writer, level string, val interface{}, fields ...
return
}
if _, err := fmt.Fprint(writer, buf.String()); err != nil {
if _, err := writer.Write(buf.Bytes()); err != nil {
log.Println(err.Error())
}
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"log"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
@@ -16,6 +17,9 @@ func TestNewWriter(t *testing.T) {
w := NewWriter(&buf)
w.Info(literal)
assert.Contains(t, buf.String(), literal)
buf.Reset()
w.Debug(literal)
assert.Contains(t, buf.String(), literal)
}
func TestConsoleWriter(t *testing.T) {
@@ -97,13 +101,14 @@ func TestNopWriter(t *testing.T) {
assert.NotPanics(t, func() {
var w nopWriter
w.Alert("foo")
w.Debug("foo")
w.Error("foo")
w.Info("foo")
w.Severe("foo")
w.Stack("foo")
w.Stat("foo")
w.Slow("foo")
w.Close()
_ = w.Close()
})
}
@@ -123,6 +128,12 @@ func TestWritePlainAny(t *testing.T) {
writePlainAny(nil, levelInfo, "foo")
assert.Contains(t, buf.String(), "foo")
buf.Reset()
writePlainAny(nil, levelDebug, make(chan int))
assert.Contains(t, buf.String(), "unsupported type")
writePlainAny(nil, levelDebug, 100)
assert.Contains(t, buf.String(), "100")
buf.Reset()
writePlainAny(nil, levelError, make(chan int))
assert.Contains(t, buf.String(), "unsupported type")
@@ -147,9 +158,40 @@ func TestWritePlainAny(t *testing.T) {
}
func TestLogWithLimitContentLength(t *testing.T) {
maxLen := atomic.LoadUint32(&maxContentLength)
atomic.StoreUint32(&maxContentLength, 10)
t.Cleanup(func() {
atomic.StoreUint32(&maxContentLength, maxLen)
})
t.Run("alert", func(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
w.Info("1234567890")
var v1 mockedEntry
if err := json.Unmarshal(buf.Bytes(), &v1); err != nil {
t.Fatal(err)
}
assert.Equal(t, "1234567890", v1.Content)
assert.False(t, v1.Truncated)
buf.Reset()
var v2 mockedEntry
w.Info("12345678901")
if err := json.Unmarshal(buf.Bytes(), &v2); err != nil {
t.Fatal(err)
}
assert.Equal(t, "1234567890", v2.Content)
assert.True(t, v2.Truncated)
})
}
type mockedEntry struct {
Level string `json:"level"`
Content string `json:"content"`
Level string `json:"level"`
Content string `json:"content"`
Truncated bool `json:"truncated"`
}
type easyToCloseWriter struct{}

View File

@@ -8,10 +8,12 @@ type (
// use context and OptionalDep option to determine the value of Optional
// nothing to do with context.Context
fieldOptionsWithContext struct {
Inherit bool
FromString bool
Optional bool
Options []string
Default string
EnvVar string
Range *numberRange
}
@@ -40,6 +42,10 @@ func (o *fieldOptionsWithContext) getDefault() (string, bool) {
return o.Default, len(o.Default) > 0
}
func (o *fieldOptionsWithContext) inherit() bool {
return o != nil && o.Inherit
}
func (o *fieldOptionsWithContext) optional() bool {
return o != nil && o.Optional
}
@@ -101,5 +107,6 @@ func (o *fieldOptions) toOptionsWithContext(key string, m Valuer, fullName strin
Optional: optional,
Options: o.Options,
Default: o.Default,
EnvVar: o.EnvVar,
}, nil
}

View File

@@ -11,22 +11,30 @@ const jsonTagKey = "json"
var jsonUnmarshaler = NewUnmarshaler(jsonTagKey)
// UnmarshalJsonBytes unmarshals content into v.
func UnmarshalJsonBytes(content []byte, v interface{}) error {
return unmarshalJsonBytes(content, v, jsonUnmarshaler)
func UnmarshalJsonBytes(content []byte, v interface{}, opts ...UnmarshalOption) error {
return unmarshalJsonBytes(content, v, getJsonUnmarshaler(opts...))
}
// UnmarshalJsonMap unmarshals content from m into v.
func UnmarshalJsonMap(m map[string]interface{}, v interface{}) error {
return jsonUnmarshaler.Unmarshal(m, v)
func UnmarshalJsonMap(m map[string]interface{}, v interface{}, opts ...UnmarshalOption) error {
return getJsonUnmarshaler(opts...).Unmarshal(m, v)
}
// UnmarshalJsonReader unmarshals content from reader into v.
func UnmarshalJsonReader(reader io.Reader, v interface{}) error {
return unmarshalJsonReader(reader, v, jsonUnmarshaler)
func UnmarshalJsonReader(reader io.Reader, v interface{}, opts ...UnmarshalOption) error {
return unmarshalJsonReader(reader, v, getJsonUnmarshaler(opts...))
}
func getJsonUnmarshaler(opts ...UnmarshalOption) *Unmarshaler {
if len(opts) > 0 {
return NewUnmarshaler(jsonTagKey, opts...)
}
return jsonUnmarshaler
}
func unmarshalJsonBytes(content []byte, v interface{}, unmarshaler *Unmarshaler) error {
var m map[string]interface{}
var m interface{}
if err := jsonx.Unmarshal(content, &m); err != nil {
return err
}
@@ -35,7 +43,7 @@ func unmarshalJsonBytes(content []byte, v interface{}, unmarshaler *Unmarshaler)
}
func unmarshalJsonReader(reader io.Reader, v interface{}, unmarshaler *Unmarshaler) error {
var m map[string]interface{}
var m interface{}
if err := jsonx.UnmarshalFromReader(reader, &m); err != nil {
return err
}

View File

@@ -856,8 +856,7 @@ func TestUnmarshalBytesError(t *testing.T) {
}
err := UnmarshalJsonBytes([]byte(payload), &v)
assert.NotNil(t, err)
assert.True(t, strings.Contains(err.Error(), payload))
assert.Equal(t, errTypeMismatch, err)
}
func TestUnmarshalReaderError(t *testing.T) {
@@ -867,9 +866,7 @@ func TestUnmarshalReaderError(t *testing.T) {
Any string
}
err := UnmarshalJsonReader(reader, &v)
assert.NotNil(t, err)
assert.True(t, strings.Contains(err.Error(), payload))
assert.Equal(t, errTypeMismatch, UnmarshalJsonReader(reader, &v))
}
func TestUnmarshalMap(t *testing.T) {
@@ -900,7 +897,9 @@ func TestUnmarshalMap(t *testing.T) {
Any string `json:",optional"`
}
err := UnmarshalJsonMap(m, &v)
err := UnmarshalJsonMap(m, &v, WithCanonicalKeyFunc(func(s string) string {
return s
}))
assert.Nil(t, err)
assert.True(t, len(v.Any) == 0)
})
@@ -918,3 +917,26 @@ func TestUnmarshalMap(t *testing.T) {
assert.Equal(t, "foo", v.Any)
})
}
func TestUnmarshalJsonArray(t *testing.T) {
var v []struct {
Name string `json:"name"`
Age int `json:"age"`
}
body := `[{"name":"kevin", "age": 18}]`
assert.NoError(t, UnmarshalJsonBytes([]byte(body), &v))
assert.Equal(t, 1, len(v))
assert.Equal(t, "kevin", v[0].Name)
assert.Equal(t, 18, v[0].Age)
}
func TestUnmarshalJsonBytesError(t *testing.T) {
var v []struct {
Name string `json:"name"`
Age int `json:"age"`
}
assert.Error(t, UnmarshalJsonBytes([]byte((``)), &v))
assert.Error(t, UnmarshalJsonReader(strings.NewReader(``), &v))
}

View File

@@ -1,29 +1,27 @@
package mapping
import (
"bytes"
"encoding/json"
"io"
"github.com/pelletier/go-toml/v2"
"github.com/zeromicro/go-zero/internal/encoding"
)
// UnmarshalTomlBytes unmarshals TOML bytes into the given v.
func UnmarshalTomlBytes(content []byte, v interface{}) error {
return UnmarshalTomlReader(bytes.NewReader(content), v)
func UnmarshalTomlBytes(content []byte, v interface{}, opts ...UnmarshalOption) error {
b, err := encoding.TomlToJson(content)
if err != nil {
return err
}
return UnmarshalJsonBytes(b, v, opts...)
}
// UnmarshalTomlReader unmarshals TOML from the given io.Reader into the given v.
func UnmarshalTomlReader(r io.Reader, v interface{}) error {
var val interface{}
if err := toml.NewDecoder(r).Decode(&val); err != nil {
func UnmarshalTomlReader(r io.Reader, v interface{}, opts ...UnmarshalOption) error {
b, err := io.ReadAll(r)
if err != nil {
return err
}
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(val); err != nil {
return err
}
return UnmarshalJsonReader(&buf, v)
return UnmarshalTomlBytes(b, v, opts...)
}

View File

@@ -1,6 +1,7 @@
package mapping
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -18,7 +19,7 @@ d = "abcd!@#$112"
C string `json:"c"`
D string `json:"d"`
}
assert.Nil(t, UnmarshalTomlBytes([]byte(input), &val))
assert.NoError(t, UnmarshalTomlReader(strings.NewReader(input), &val))
assert.Equal(t, "foo", val.A)
assert.Equal(t, 1, val.B)
assert.Equal(t, "${FOO}", val.C)
@@ -37,5 +38,12 @@ d = "abcd!@#$112"
C string `json:"c"`
D string `json:"d"`
}
assert.NotNil(t, UnmarshalTomlBytes([]byte(input), &val))
assert.Error(t, UnmarshalTomlReader(strings.NewReader(input), &val))
}
func TestUnmarshalTomlBadReader(t *testing.T) {
var val struct {
A string `json:"a"`
}
assert.Error(t, UnmarshalTomlReader(new(badReader), &val))
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -10,11 +10,14 @@ import (
"strings"
"sync"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/stringx"
)
const (
defaultOption = "default"
envOption = "env"
inheritOption = "inherit"
stringOption = "string"
optionalOption = "optional"
optionsOption = "options"
@@ -53,7 +56,7 @@ type (
// Deref dereferences a type, if pointer type, returns its element type.
func Deref(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
for t.Kind() == reflect.Ptr {
t = t.Elem()
}
@@ -62,22 +65,17 @@ func Deref(t reflect.Type) reflect.Type {
// Repr returns the string representation of v.
func Repr(v interface{}) string {
if v == nil {
return ""
}
return lang.Repr(v)
}
// if func (v *Type) String() string, we can't use Elem()
switch vt := v.(type) {
case fmt.Stringer:
return vt.String()
}
// SetValue sets target to value, pointers are processed automatically.
func SetValue(tp reflect.Type, value, target reflect.Value) {
value.Set(convertTypeOfPtr(tp, target))
}
val := reflect.ValueOf(v)
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
return reprOfValue(val)
// SetMapIndexValue sets target to value at key position, pointers are processed automatically.
func SetMapIndexValue(tp reflect.Type, value, key, target reflect.Value) {
value.SetMapIndex(key, convertTypeOfPtr(tp, target))
}
// ValidatePtr validates v if it's a valid pointer.
@@ -91,10 +89,17 @@ func ValidatePtr(v *reflect.Value) error {
return nil
}
func convertType(kind reflect.Kind, str string) (interface{}, error) {
func convertTypeFromString(kind reflect.Kind, str string) (interface{}, error) {
switch kind {
case reflect.Bool:
return str == "1" || strings.ToLower(str) == "true", nil
switch strings.ToLower(str) {
case "1", "true":
return true, nil
case "0", "false":
return false, nil
default:
return false, errTypeMismatch
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
intValue, err := strconv.ParseInt(str, 10, 64)
if err != nil {
@@ -123,6 +128,23 @@ func convertType(kind reflect.Kind, str string) (interface{}, error) {
}
}
// convertTypeOfPtr wraps target in as many pointer layers as tp requires,
// so a plain value can be assigned to a destination of type *T, **T, etc.
func convertTypeOfPtr(tp reflect.Type, target reflect.Value) reflect.Value {
	// If the destination is a pointer and target is addressable, take its
	// address directly so the result still refers to the original value.
	if tp.Kind() == reflect.Ptr && target.CanAddr() {
		tp = tp.Elem()
		target = target.Addr()
	}
	// For each remaining pointer level in tp, allocate a fresh pointer
	// and point it at the current value.
	for tp.Kind() == reflect.Ptr {
		p := reflect.New(target.Type())
		p.Elem().Set(target)
		target = p
		tp = tp.Elem()
	}
	return target
}
func doParseKeyAndOptions(field reflect.StructField, value string) (string, *fieldOptions, error) {
segments := parseSegments(value)
key := strings.TrimSpace(segments[0])
@@ -215,8 +237,8 @@ func isRightInclude(b byte) (bool, error) {
}
}
func maybeNewValue(field reflect.StructField, value reflect.Value) {
if field.Type.Kind() == reflect.Ptr && value.IsNil() {
func maybeNewValue(fieldType reflect.Type, value reflect.Value) {
if fieldType.Kind() == reflect.Ptr && value.IsNil() {
value.Set(reflect.New(value.Type().Elem()))
}
}
@@ -335,6 +357,8 @@ func parseNumberRange(str string) (*numberRange, error) {
func parseOption(fieldOpts *fieldOptions, fieldName, option string) error {
switch {
case option == inheritOption:
fieldOpts.Inherit = true
case option == stringOption:
fieldOpts.FromString = true
case strings.HasPrefix(option, optionalOption):
@@ -351,26 +375,33 @@ func parseOption(fieldOpts *fieldOptions, fieldName, option string) error {
case option == optionalOption:
fieldOpts.Optional = true
case strings.HasPrefix(option, optionsOption):
segs := strings.Split(option, equalToken)
if len(segs) != 2 {
return fmt.Errorf("field %s has wrong options", fieldName)
val, err := parseProperty(fieldName, optionsOption, option)
if err != nil {
return err
}
fieldOpts.Options = parseOptions(segs[1])
fieldOpts.Options = parseOptions(val)
case strings.HasPrefix(option, defaultOption):
segs := strings.Split(option, equalToken)
if len(segs) != 2 {
return fmt.Errorf("field %s has wrong default option", fieldName)
val, err := parseProperty(fieldName, defaultOption, option)
if err != nil {
return err
}
fieldOpts.Default = strings.TrimSpace(segs[1])
fieldOpts.Default = val
case strings.HasPrefix(option, envOption):
val, err := parseProperty(fieldName, envOption, option)
if err != nil {
return err
}
fieldOpts.EnvVar = val
case strings.HasPrefix(option, rangeOption):
segs := strings.Split(option, equalToken)
if len(segs) != 2 {
return fmt.Errorf("field %s has wrong range", fieldName)
val, err := parseProperty(fieldName, rangeOption, option)
if err != nil {
return err
}
nr, err := parseNumberRange(segs[1])
nr, err := parseNumberRange(val)
if err != nil {
return err
}
@@ -395,6 +426,15 @@ func parseOptions(val string) []string {
return strings.Split(val, optionSeparator)
}
// parseProperty extracts the value part of a "name=value" tag option.
// It returns an error unless val contains exactly one key/value pair.
func parseProperty(field, tag, val string) (string, error) {
	parts := strings.Split(val, equalToken)
	if len(parts) == 2 {
		return strings.TrimSpace(parts[1]), nil
	}

	return "", fmt.Errorf("field %s has wrong %s", field, tag)
}
func parseSegments(val string) []string {
var segments []string
var escaped, grouped bool
@@ -444,47 +484,6 @@ func parseSegments(val string) []string {
return segments
}
func reprOfValue(val reflect.Value) string {
switch vt := val.Interface().(type) {
case bool:
return strconv.FormatBool(vt)
case error:
return vt.Error()
case float32:
return strconv.FormatFloat(float64(vt), 'f', -1, 32)
case float64:
return strconv.FormatFloat(vt, 'f', -1, 64)
case fmt.Stringer:
return vt.String()
case int:
return strconv.Itoa(vt)
case int8:
return strconv.Itoa(int(vt))
case int16:
return strconv.Itoa(int(vt))
case int32:
return strconv.Itoa(int(vt))
case int64:
return strconv.FormatInt(vt, 10)
case string:
return vt
case uint:
return strconv.FormatUint(uint64(vt), 10)
case uint8:
return strconv.FormatUint(uint64(vt), 10)
case uint16:
return strconv.FormatUint(uint64(vt), 10)
case uint32:
return strconv.FormatUint(uint64(vt), 10)
case uint64:
return strconv.FormatUint(vt, 10)
case []byte:
return string(vt)
default:
return fmt.Sprint(val.Interface())
}
}
func setMatchedPrimitiveValue(kind reflect.Kind, value reflect.Value, v interface{}) error {
switch kind {
case reflect.Bool:
@@ -504,13 +503,13 @@ func setMatchedPrimitiveValue(kind reflect.Kind, value reflect.Value, v interfac
return nil
}
func setValue(kind reflect.Kind, value reflect.Value, str string) error {
func setValueFromString(kind reflect.Kind, value reflect.Value, str string) error {
if !value.CanSet() {
return errValueNotSettable
}
value = ensureValue(value)
v, err := convertType(kind, str)
v, err := convertTypeFromString(kind, str)
if err != nil {
return err
}
@@ -583,7 +582,7 @@ func validateAndSetValue(kind reflect.Kind, value reflect.Value, str string, opt
return errValueNotSettable
}
v, err := convertType(kind, str)
v, err := convertTypeFromString(kind, str)
if err != nil {
return err
}

View File

@@ -237,7 +237,7 @@ func TestValidatePtrWithZeroValue(t *testing.T) {
func TestSetValueNotSettable(t *testing.T) {
var i int
assert.NotNil(t, setValue(reflect.Int, reflect.ValueOf(i), "1"))
assert.NotNil(t, setValueFromString(reflect.Int, reflect.ValueOf(i), "1"))
}
func TestParseKeyAndOptionsErrors(t *testing.T) {
@@ -290,133 +290,9 @@ func TestSetValueFormatErrors(t *testing.T) {
for _, test := range tests {
t.Run(test.kind.String(), func(t *testing.T) {
err := setValue(test.kind, test.target, test.value)
err := setValueFromString(test.kind, test.target, test.value)
assert.NotEqual(t, errValueNotSettable, err)
assert.NotNil(t, err)
})
}
}
func TestRepr(t *testing.T) {
var (
f32 float32 = 1.1
f64 = 2.2
i8 int8 = 1
i16 int16 = 2
i32 int32 = 3
i64 int64 = 4
u8 uint8 = 5
u16 uint16 = 6
u32 uint32 = 7
u64 uint64 = 8
)
tests := []struct {
v interface{}
expect string
}{
{
nil,
"",
},
{
mockStringable{},
"mocked",
},
{
new(mockStringable),
"mocked",
},
{
newMockPtr(),
"mockptr",
},
{
&mockOpacity{
val: 1,
},
"{1}",
},
{
true,
"true",
},
{
false,
"false",
},
{
f32,
"1.1",
},
{
f64,
"2.2",
},
{
i8,
"1",
},
{
i16,
"2",
},
{
i32,
"3",
},
{
i64,
"4",
},
{
u8,
"5",
},
{
u16,
"6",
},
{
u32,
"7",
},
{
u64,
"8",
},
{
[]byte(`abcd`),
"abcd",
},
{
mockOpacity{val: 1},
"{1}",
},
}
for _, test := range tests {
t.Run(test.expect, func(t *testing.T) {
assert.Equal(t, test.expect, Repr(test.v))
})
}
}
type mockStringable struct{}
func (m mockStringable) String() string {
return "mocked"
}
type mockPtr struct{}
func newMockPtr() *mockPtr {
return new(mockPtr)
}
func (m *mockPtr) String() string {
return "mockptr"
}
type mockOpacity struct {
val int
}

View File

@@ -7,12 +7,106 @@ type (
Value(key string) (interface{}, bool)
}
// A MapValuer is a map that can use Value method to get values with given keys.
MapValuer map[string]interface{}
// A valuerWithParent defines a node that has a parent node.
valuerWithParent interface {
Valuer
// Parent get the parent valuer for current node.
Parent() valuerWithParent
}
// A node is a map that can use Value method to get values with given keys.
node struct {
current Valuer
parent valuerWithParent
}
// A valueWithParent is used to wrap the value with its parent.
valueWithParent struct {
value interface{}
parent valuerWithParent
}
// mapValuer is a type for map to meet the Valuer interface.
mapValuer map[string]interface{}
// simpleValuer is a type to get value from current node.
simpleValuer node
// recursiveValuer is a type to get the value recursively from current and parent nodes.
recursiveValuer node
)
// Value gets the value associated with the given key from mv.
func (mv MapValuer) Value(key string) (interface{}, bool) {
// Value gets the value associated with the given key from mv.
// The second return reports whether the key exists in the map.
func (mv mapValuer) Value(key string) (interface{}, bool) {
	v, ok := mv[key]
	return v, ok
}
// Value looks up key in the current node only; parent nodes are not consulted.
func (sv simpleValuer) Value(key string) (interface{}, bool) {
	return sv.current.Value(key)
}
// Parent returns sv's parent wrapped as a recursiveValuer so that lookups
// through it inherit from further ancestors; it returns nil at the root.
func (sv simpleValuer) Parent() valuerWithParent {
	parent := sv.parent
	if parent == nil {
		return nil
	}

	return recursiveValuer{
		current: parent,
		parent:  parent.Parent(),
	}
}
// Value gets the value associated with the given key from rv,
// and it will inherit the value from parent nodes.
// When both the current node and a parent hold a map under key, the maps
// are merged with current-node entries taking precedence.
func (rv recursiveValuer) Value(key string) (interface{}, bool) {
	val, ok := rv.current.Value(key)
	if !ok {
		// Not found locally; fall back to the parent chain.
		if parent := rv.Parent(); parent != nil {
			return parent.Value(key)
		}
		return nil, false
	}

	// Non-map values are returned as-is; inheritance only merges maps.
	vm, ok := val.(map[string]interface{})
	if !ok {
		return val, true
	}

	parent := rv.Parent()
	if parent == nil {
		return val, true
	}

	pv, ok := parent.Value(key)
	if !ok {
		return val, true
	}

	pm, ok := pv.(map[string]interface{})
	if !ok {
		return val, true
	}

	// Copy parent entries that the current map doesn't already define.
	// NOTE(review): this mutates vm in place — presumably fine because the
	// maps come from freshly-unmarshaled config; confirm no caller relies
	// on the original map being untouched.
	for k, v := range pm {
		if _, ok := vm[k]; !ok {
			vm[k] = v
		}
	}

	return vm, true
}
// Parent returns the next recursiveValuer up the ancestor chain,
// or nil when rv is the root node.
func (rv recursiveValuer) Parent() valuerWithParent {
	parent := rv.parent
	if parent == nil {
		return nil
	}

	return recursiveValuer{
		current: parent,
		parent:  parent.Parent(),
	}
}

View File

@@ -0,0 +1,57 @@
package mapping
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestMapValuerWithInherit_Value verifies that a child node inherits a map
// value ("discovery") that is defined only on its parent.
func TestMapValuerWithInherit_Value(t *testing.T) {
	data := map[string]interface{}{
		"discovery": map[string]interface{}{
			"host": "localhost",
			"port": 8080,
		},
		"component": map[string]interface{}{
			"name": "test",
		},
	}
	v := recursiveValuer{
		current: mapValuer(data["component"].(map[string]interface{})),
		parent: simpleValuer{
			current: mapValuer(data),
		},
	}

	value, ok := v.Value("discovery")
	assert.True(t, ok)
	disc, ok := value.(map[string]interface{})
	assert.True(t, ok)
	assert.Equal(t, "localhost", disc["host"])
	assert.Equal(t, 8080, disc["port"])
}
// TestRecursiveValuer_Value verifies that a map value in the current node
// wins over a scalar of the same key ("foo") in the parent.
func TestRecursiveValuer_Value(t *testing.T) {
	data := map[string]interface{}{
		"component": map[string]interface{}{
			"name": "test",
			"foo": map[string]interface{}{
				"bar": "baz",
			},
		},
		"foo": "value",
	}
	v := recursiveValuer{
		current: mapValuer(data["component"].(map[string]interface{})),
		parent: simpleValuer{
			current: mapValuer(data),
		},
	}

	value, ok := v.Value("foo")
	assert.True(t, ok)
	assert.EqualValues(t, map[string]interface{}{
		"bar": "baz",
	}, value)
}

View File

@@ -1,101 +1,27 @@
package mapping
import (
"encoding/json"
"errors"
"io"
"gopkg.in/yaml.v2"
)
// To make .json & .yaml consistent, we just use json as the tag key.
const yamlTagKey = "json"
var (
// ErrUnsupportedType is an error that indicates the config format is not supported.
ErrUnsupportedType = errors.New("only map-like configs are supported")
yamlUnmarshaler = NewUnmarshaler(yamlTagKey)
"github.com/zeromicro/go-zero/internal/encoding"
)
// UnmarshalYamlBytes unmarshals content into v.
func UnmarshalYamlBytes(content []byte, v interface{}) error {
return unmarshalYamlBytes(content, v, yamlUnmarshaler)
func UnmarshalYamlBytes(content []byte, v interface{}, opts ...UnmarshalOption) error {
b, err := encoding.YamlToJson(content)
if err != nil {
return err
}
return UnmarshalJsonBytes(b, v, opts...)
}
// UnmarshalYamlReader unmarshals content from reader into v.
func UnmarshalYamlReader(reader io.Reader, v interface{}) error {
return unmarshalYamlReader(reader, v, yamlUnmarshaler)
}
func cleanupInterfaceMap(in map[interface{}]interface{}) map[string]interface{} {
res := make(map[string]interface{})
for k, v := range in {
res[Repr(k)] = cleanupMapValue(v)
}
return res
}
func cleanupInterfaceNumber(in interface{}) json.Number {
return json.Number(Repr(in))
}
func cleanupInterfaceSlice(in []interface{}) []interface{} {
res := make([]interface{}, len(in))
for i, v := range in {
res[i] = cleanupMapValue(v)
}
return res
}
func cleanupMapValue(v interface{}) interface{} {
switch v := v.(type) {
case []interface{}:
return cleanupInterfaceSlice(v)
case map[interface{}]interface{}:
return cleanupInterfaceMap(v)
case bool, string:
return v
case int, uint, int8, uint8, int16, uint16, int32, uint32, int64, uint64, float32, float64:
return cleanupInterfaceNumber(v)
default:
return Repr(v)
}
}
func unmarshal(unmarshaler *Unmarshaler, o interface{}, v interface{}) error {
if m, ok := o.(map[string]interface{}); ok {
return unmarshaler.Unmarshal(m, v)
}
return ErrUnsupportedType
}
func unmarshalYamlBytes(content []byte, v interface{}, unmarshaler *Unmarshaler) error {
var o interface{}
if err := yamlUnmarshal(content, &o); err != nil {
func UnmarshalYamlReader(reader io.Reader, v interface{}, opts ...UnmarshalOption) error {
b, err := io.ReadAll(reader)
if err != nil {
return err
}
return unmarshal(unmarshaler, o, v)
}
func unmarshalYamlReader(reader io.Reader, v interface{}, unmarshaler *Unmarshaler) error {
var res interface{}
if err := yaml.NewDecoder(reader).Decode(&res); err != nil {
return err
}
return unmarshal(unmarshaler, cleanupMapValue(res), v)
}
// yamlUnmarshal YAML to map[string]interface{} instead of map[interface{}]interface{}.
func yamlUnmarshal(in []byte, out interface{}) error {
var res interface{}
if err := yaml.Unmarshal(in, &res); err != nil {
return err
}
*out.(*interface{}) = cleanupMapValue(res)
return nil
return UnmarshalYamlBytes(b, v, opts...)
}

View File

@@ -934,9 +934,8 @@ func TestUnmarshalYamlReaderError(t *testing.T) {
err := UnmarshalYamlReader(reader, &v)
assert.NotNil(t, err)
reader = strings.NewReader("chenquan")
err = UnmarshalYamlReader(reader, &v)
assert.ErrorIs(t, err, ErrUnsupportedType)
reader = strings.NewReader("foo")
assert.Error(t, UnmarshalYamlReader(reader, &v))
}
func TestUnmarshalYamlBadReader(t *testing.T) {
@@ -1012,6 +1011,13 @@ func TestUnmarshalYamlMapRune(t *testing.T) {
assert.Equal(t, rune(3), v.Machine["node3"])
}
func TestUnmarshalYamlBadInput(t *testing.T) {
var v struct {
Any string
}
assert.Error(t, UnmarshalYamlBytes([]byte("':foo"), &v))
}
type badReader struct{}
func (b *badReader) Read(_ []byte) (n int, err error) {

View File

@@ -6,14 +6,14 @@ import (
"time"
)
// A Unstable is used to generate random value around the mean value base on given deviation.
// An Unstable is used to generate random values around the mean value based on the given deviation.
type Unstable struct {
deviation float64
r *rand.Rand
lock *sync.Mutex
}
// NewUnstable returns a Unstable.
// NewUnstable returns an Unstable.
func NewUnstable(deviation float64) Unstable {
if deviation < 0 {
deviation = 0

View File

@@ -5,6 +5,7 @@ import (
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/prometheus"
)
@@ -17,6 +18,9 @@ func TestNewCounterVec(t *testing.T) {
})
defer counterVec.close()
counterVecNil := NewCounterVec(nil)
counterVec.Inc("path", "code")
counterVec.Add(1, "path", "code")
proc.Shutdown()
assert.NotNil(t, counterVec)
assert.Nil(t, counterVecNil)
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
)
func TestNewGaugeVec(t *testing.T) {
@@ -18,6 +19,8 @@ func TestNewGaugeVec(t *testing.T) {
gaugeVecNil := NewGaugeVec(nil)
assert.NotNil(t, gaugeVec)
assert.Nil(t, gaugeVecNil)
proc.Shutdown()
}
func TestGaugeInc(t *testing.T) {

View File

@@ -6,6 +6,7 @@ import (
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
)
func TestNewHistogramVec(t *testing.T) {
@@ -47,4 +48,6 @@ func TestHistogramObserve(t *testing.T) {
err := testutil.CollectAndCompare(hv.histogram, strings.NewReader(metadata+val))
assert.Nil(t, err)
proc.Shutdown()
}

View File

@@ -145,7 +145,7 @@ func MapReduceChan(source <-chan interface{}, mapper MapperFunc, reducer Reducer
return mapReduceWithPanicChan(source, panicChan, mapper, reducer, opts...)
}
// MapReduceChan maps all elements from source, and reduce the output elements with given reducer.
// mapReduceWithPanicChan maps all elements from source, and reduce the output elements with given reducer.
func mapReduceWithPanicChan(source <-chan interface{}, panicChan *onceChan, mapper MapperFunc,
reducer ReducerFunc, opts ...Option) (interface{}, error) {
options := buildOptions(opts...)

View File

@@ -19,7 +19,7 @@ func FuzzMapReduce(f *testing.F) {
rand.Seed(time.Now().UnixNano())
f.Add(uint(10), uint(runtime.NumCPU()))
f.Fuzz(func(t *testing.T, num uint, workers uint) {
f.Fuzz(func(t *testing.T, num, workers uint) {
n := int64(num)%5000 + 5000
genPanic := rand.Intn(100) == 0
mapperPanic := rand.Intn(100) == 0

View File

@@ -15,5 +15,14 @@ func AddWrapUpListener(fn func()) func() {
return fn
}
// SetTimeToForceQuit does nothing on windows.
func SetTimeToForceQuit(duration time.Duration) {
}
// Shutdown does nothing on windows.
func Shutdown() {
}
// WrapUp does nothing on windows.
func WrapUp() {
}

View File

@@ -43,6 +43,16 @@ func SetTimeToForceQuit(duration time.Duration) {
delayTimeBeforeForceQuit = duration
}
// Shutdown calls the registered shutdown listeners, only for test purpose.
func Shutdown() {
shutdownListeners.notifyListeners()
}
// WrapUp wraps up the process, only for test purpose.
func WrapUp() {
wrapUpListeners.notifyListeners()
}
func gracefulStop(signals chan os.Signal) {
signal.Stop(signals)

View File

@@ -18,14 +18,14 @@ func TestShutdown(t *testing.T) {
called := AddWrapUpListener(func() {
val++
})
wrapUpListeners.notifyListeners()
WrapUp()
called()
assert.Equal(t, 1, val)
called = AddShutdownListener(func() {
val += 2
})
shutdownListeners.notifyListeners()
Shutdown()
called()
assert.Equal(t, 3, val)
}

View File

@@ -4,7 +4,8 @@ import "github.com/zeromicro/go-zero/core/logx"
// Recover is used with defer to do cleanup on panics.
// Use it like:
// defer Recover(func() {})
//
// defer Recover(func() {})
func Recover(cleanups ...func()) {
for _, cleanup := range cleanups {
cleanup()

View File

@@ -9,6 +9,7 @@ import (
"github.com/zeromicro/go-zero/core/prometheus"
"github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/trace"
"github.com/zeromicro/go-zero/internal/devserver"
)
const (
@@ -28,10 +29,12 @@ const (
type ServiceConf struct {
Name string
Log logx.LogConf
Mode string `json:",default=pro,options=dev|test|rt|pre|pro"`
MetricsUrl string `json:",optional"`
Mode string `json:",default=pro,options=dev|test|rt|pre|pro"`
MetricsUrl string `json:",optional"`
// Deprecated: please use DevServer
Prometheus prometheus.Config `json:",optional"`
Telemetry trace.Config `json:",optional"`
DevServer devserver.Config `json:",optional"`
}
// MustSetUp sets up the service, exits on error.
@@ -64,6 +67,7 @@ func (sc ServiceConf) SetUp() error {
if len(sc.MetricsUrl) > 0 {
stat.SetReportWriter(stat.NewRemoteWriter(sc.MetricsUrl))
}
devserver.StartAgent(sc.DevServer)
return nil
}

View File

@@ -3,6 +3,7 @@ package service
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/logx"
)
@@ -16,3 +17,15 @@ func TestServiceConf(t *testing.T) {
}
c.MustSetUp()
}
func TestServiceConfWithMetricsUrl(t *testing.T) {
c := ServiceConf{
Name: "foo",
Log: logx.LogConf{
Mode: "volume",
},
Mode: "dev",
MetricsUrl: "http://localhost:8080",
}
assert.NoError(t, c.SetUp())
}

View File

@@ -5,6 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
)
var (
@@ -55,6 +56,7 @@ func TestServiceGroup(t *testing.T) {
}
group.Stop()
proc.Shutdown()
mutex.Lock()
defer mutex.Unlock()

View File

@@ -46,14 +46,14 @@ func Report(msg string) {
if fn != nil {
reported := lessExecutor.DoOrDiscard(func() {
var builder strings.Builder
fmt.Fprintf(&builder, "%s\n", time.Now().Format(timeFormat))
builder.WriteString(fmt.Sprintln(time.Now().Format(timeFormat)))
if len(clusterName) > 0 {
fmt.Fprintf(&builder, "cluster: %s\n", clusterName)
builder.WriteString(fmt.Sprintf("cluster: %s\n", clusterName))
}
fmt.Fprintf(&builder, "host: %s\n", sysx.Hostname())
builder.WriteString(fmt.Sprintf("host: %s\n", sysx.Hostname()))
dp := atomic.SwapInt32(&dropped, 0)
if dp > 0 {
fmt.Fprintf(&builder, "dropped: %d\n", dp)
builder.WriteString(fmt.Sprintf("dropped: %d\n", dp))
}
builder.WriteString(strings.TrimSpace(msg))
fn(builder.String())

View File

@@ -4,6 +4,7 @@
package stat
import (
"os"
"strconv"
"sync/atomic"
"testing"
@@ -12,6 +13,9 @@ import (
)
func TestReport(t *testing.T) {
os.Setenv(clusterNameKey, "test-cluster")
defer os.Unsetenv(clusterNameKey)
var count int32
SetReporter(func(s string) {
atomic.AddInt32(&count, 1)

View File

@@ -258,7 +258,7 @@ func parseUints(val string) ([]uint64, error) {
return sets, nil
}
// runningInUserNS detects whether we are currently running in a user namespace.
// runningInUserNS detects whether we are currently running in a user namespace.
func runningInUserNS() bool {
nsOnce.Do(func() {
file, err := os.Open("/proc/self/uid_map")

View File

@@ -33,13 +33,7 @@ func initialize() {
}
cores = uint64(len(cpus))
sets, err := cpuSets()
if err != nil {
logx.Error(err)
return
}
quota = float64(len(sets))
quota = float64(len(cpus))
cq, err := cpuQuota()
if err == nil {
if cq != -1 {

View File

@@ -45,9 +45,13 @@ func RawFieldNames(in interface{}, postgresSql ...bool) []string {
// `db:"id"`
// `db:"id,type=char,length=16"`
// `db:",type=char,length=16"`
// `db:"-,type=char,length=16"`
if strings.Contains(tagv, ",") {
tagv = strings.TrimSpace(strings.Split(tagv, ",")[0])
}
if tagv == "-" {
continue
}
if len(tagv) == 0 {
tagv = fi.Name
}

View File

@@ -39,3 +39,33 @@ func TestFieldNamesWithTagOptions(t *testing.T) {
assert.Equal(t, expected, out)
})
}
type mockedUserWithDashTag struct {
ID string `db:"id" json:"id,omitempty"`
UserName string `db:"user_name" json:"userName,omitempty"`
Mobile string `db:"-" json:"mobile,omitempty"`
}
func TestFieldNamesWithDashTag(t *testing.T) {
t.Run("new", func(t *testing.T) {
var u mockedUserWithDashTag
out := RawFieldNames(&u)
expected := []string{"`id`", "`user_name`"}
assert.Equal(t, expected, out)
})
}
type mockedUserWithDashTagAndOptions struct {
ID string `db:"id" json:"id,omitempty"`
UserName string `db:"user_name,type=varchar,length=255" json:"userName,omitempty"`
Mobile string `db:"-,type=varchar,length=255" json:"mobile,omitempty"`
}
func TestFieldNamesWithDashTagAndOptions(t *testing.T) {
t.Run("new", func(t *testing.T) {
var u mockedUserWithDashTagAndOptions
out := RawFieldNames(&u)
expected := []string{"`id`", "`user_name`"}
assert.Equal(t, expected, out)
})
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/hash"
"github.com/zeromicro/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/core/syncx"
)
@@ -62,12 +63,12 @@ func New(c ClusterConf, barrier syncx.SingleFlight, st *Stat, errNotFound error,
}
if len(c) == 1 {
return NewNode(c[0].NewRedis(), barrier, st, errNotFound, opts...)
return NewNode(redis.MustNewRedis(c[0].RedisConf), barrier, st, errNotFound, opts...)
}
dispatcher := hash.NewConsistentHash()
for _, node := range c {
cn := NewNode(node.NewRedis(), barrier, st, errNotFound, opts...)
cn := NewNode(redis.MustNewRedis(node.RedisConf), barrier, st, errNotFound, opts...)
dispatcher.AddWithWeight(cn, node.Weight)
}

View File

@@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/hash"
@@ -109,51 +110,85 @@ func (mc *mockedNode) TakeWithExpireCtx(ctx context.Context, val interface{}, ke
}
func TestCache_SetDel(t *testing.T) {
const total = 1000
r1, clean1, err := redistest.CreateRedis()
assert.Nil(t, err)
defer clean1()
r2, clean2, err := redistest.CreateRedis()
assert.Nil(t, err)
defer clean2()
conf := ClusterConf{
{
RedisConf: redis.RedisConf{
Host: r1.Addr,
Type: redis.NodeType,
t.Run("test set del", func(t *testing.T) {
const total = 1000
r1, clean1, err := redistest.CreateRedis()
assert.Nil(t, err)
defer clean1()
r2, clean2, err := redistest.CreateRedis()
assert.Nil(t, err)
defer clean2()
conf := ClusterConf{
{
RedisConf: redis.RedisConf{
Host: r1.Addr,
Type: redis.NodeType,
},
Weight: 100,
},
Weight: 100,
},
{
RedisConf: redis.RedisConf{
Host: r2.Addr,
Type: redis.NodeType,
{
RedisConf: redis.RedisConf{
Host: r2.Addr,
Type: redis.NodeType,
},
Weight: 100,
},
Weight: 100,
},
}
c := New(conf, syncx.NewSingleFlight(), NewStat("mock"), errPlaceholder)
for i := 0; i < total; i++ {
if i%2 == 0 {
assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))
} else {
assert.Nil(t, c.SetWithExpire(fmt.Sprintf("key/%d", i), i, 0))
}
}
for i := 0; i < total; i++ {
var val int
assert.Nil(t, c.Get(fmt.Sprintf("key/%d", i), &val))
assert.Equal(t, i, val)
}
assert.Nil(t, c.Del())
for i := 0; i < total; i++ {
assert.Nil(t, c.Del(fmt.Sprintf("key/%d", i)))
}
for i := 0; i < total; i++ {
var val int
assert.True(t, c.IsNotFound(c.Get(fmt.Sprintf("key/%d", i), &val)))
assert.Equal(t, 0, val)
}
c := New(conf, syncx.NewSingleFlight(), NewStat("mock"), errPlaceholder)
for i := 0; i < total; i++ {
if i%2 == 0 {
assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))
} else {
assert.Nil(t, c.SetWithExpire(fmt.Sprintf("key/%d", i), i, 0))
}
}
for i := 0; i < total; i++ {
var val int
assert.Nil(t, c.Get(fmt.Sprintf("key/%d", i), &val))
assert.Equal(t, i, val)
}
assert.Nil(t, c.Del())
for i := 0; i < total; i++ {
assert.Nil(t, c.Del(fmt.Sprintf("key/%d", i)))
}
assert.Nil(t, c.Del("a", "b", "c"))
for i := 0; i < total; i++ {
var val int
assert.True(t, c.IsNotFound(c.Get(fmt.Sprintf("key/%d", i), &val)))
assert.Equal(t, 0, val)
}
})
t.Run("test set del error", func(t *testing.T) {
r1, err := miniredis.Run()
assert.NoError(t, err)
defer r1.Close()
r2, err := miniredis.Run()
assert.NoError(t, err)
defer r2.Close()
conf := ClusterConf{
{
RedisConf: redis.RedisConf{
Host: r1.Addr(),
Type: redis.NodeType,
},
Weight: 100,
},
{
RedisConf: redis.RedisConf{
Host: r2.Addr(),
Type: redis.NodeType,
},
Weight: 100,
},
}
c := New(conf, syncx.NewSingleFlight(), NewStat("mock"), errPlaceholder)
r1.SetError("mock error")
r2.SetError("mock error")
assert.NoError(t, c.Del("a", "b", "c"))
})
}
func TestCache_OneNode(t *testing.T) {

View File

@@ -277,5 +277,6 @@ func (c cacheNode) processCache(ctx context.Context, key, data string, v interfa
func (c cacheNode) setCacheWithNotFound(ctx context.Context, key string) error {
seconds := int(math.Ceil(c.aroundDuration(c.notFoundExpiry).Seconds()))
return c.rds.SetexCtx(ctx, key, notFoundPlaceholder, seconds)
_, err := c.rds.SetnxExCtx(ctx, key, notFoundPlaceholder, seconds)
return err
}

View File

@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math/rand"
"runtime"
"strconv"
"sync"
"testing"
@@ -11,12 +12,14 @@ import (
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/core/stores/redis/redistest"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
)
var errTestNotFound = errors.New("not found")
@@ -27,27 +30,54 @@ func init() {
}
func TestCacheNode_DelCache(t *testing.T) {
store, clean, err := redistest.CreateRedis()
assert.Nil(t, err)
store.Type = redis.ClusterType
defer clean()
t.Run("del cache", func(t *testing.T) {
store, clean, err := redistest.CreateRedis()
assert.Nil(t, err)
store.Type = redis.ClusterType
defer clean()
cn := cacheNode{
rds: store,
r: rand.New(rand.NewSource(time.Now().UnixNano())),
lock: new(sync.Mutex),
unstableExpiry: mathx.NewUnstable(expiryDeviation),
stat: NewStat("any"),
errNotFound: errTestNotFound,
}
assert.Nil(t, cn.Del())
assert.Nil(t, cn.Del([]string{}...))
assert.Nil(t, cn.Del(make([]string, 0)...))
cn.Set("first", "one")
assert.Nil(t, cn.Del("first"))
cn.Set("first", "one")
cn.Set("second", "two")
assert.Nil(t, cn.Del("first", "second"))
cn := cacheNode{
rds: store,
r: rand.New(rand.NewSource(time.Now().UnixNano())),
lock: new(sync.Mutex),
unstableExpiry: mathx.NewUnstable(expiryDeviation),
stat: NewStat("any"),
errNotFound: errTestNotFound,
}
assert.Nil(t, cn.Del())
assert.Nil(t, cn.Del([]string{}...))
assert.Nil(t, cn.Del(make([]string, 0)...))
cn.Set("first", "one")
assert.Nil(t, cn.Del("first"))
cn.Set("first", "one")
cn.Set("second", "two")
assert.Nil(t, cn.Del("first", "second"))
})
t.Run("del cache with errors", func(t *testing.T) {
old := timingWheel
ticker := timex.NewFakeTicker()
var err error
timingWheel, err = collection.NewTimingWheelWithTicker(
time.Millisecond, timingWheelSlots, func(key, value interface{}) {
clean(key, value)
}, ticker)
assert.NoError(t, err)
t.Cleanup(func() {
timingWheel = old
})
r, err := miniredis.Run()
assert.NoError(t, err)
defer r.Close()
r.SetError("mock error")
node := NewNode(redis.New(r.Addr(), redis.Cluster()), syncx.NewSingleFlight(),
NewStat("any"), errTestNotFound)
assert.NoError(t, node.Del("foo", "bar"))
ticker.Tick()
runtime.Gosched()
})
}
func TestCacheNode_DelCacheWithErrors(t *testing.T) {
@@ -125,6 +155,21 @@ func TestCacheNode_Take(t *testing.T) {
assert.Equal(t, `"value"`, val)
}
func TestCacheNode_TakeBadRedis(t *testing.T) {
r, err := miniredis.Run()
assert.NoError(t, err)
defer r.Close()
r.SetError("mock error")
cn := NewNode(redis.New(r.Addr()), syncx.NewSingleFlight(), NewStat("any"),
errTestNotFound, WithExpiry(time.Second), WithNotFoundExpiry(time.Second))
var str string
assert.Error(t, cn.Take(&str, "any", func(v interface{}) error {
*v.(*string) = "value"
return nil
}))
}
func TestCacheNode_TakeNotFound(t *testing.T) {
store, clean, err := redistest.CreateRedis()
assert.Nil(t, err)
@@ -164,6 +209,35 @@ func TestCacheNode_TakeNotFound(t *testing.T) {
assert.Equal(t, errDummy, err)
}
func TestCacheNode_TakeNotFoundButChangedByOthers(t *testing.T) {
store, clean, err := redistest.CreateRedis()
assert.NoError(t, err)
defer clean()
cn := cacheNode{
rds: store,
r: rand.New(rand.NewSource(time.Now().UnixNano())),
barrier: syncx.NewSingleFlight(),
lock: new(sync.Mutex),
unstableExpiry: mathx.NewUnstable(expiryDeviation),
stat: NewStat("any"),
errNotFound: errTestNotFound,
}
var str string
err = cn.Take(&str, "any", func(v interface{}) error {
store.Set("any", "foo")
return errTestNotFound
})
assert.True(t, cn.IsNotFound(err))
val, err := store.Get("any")
if assert.NoError(t, err) {
assert.Equal(t, "foo", val)
}
assert.True(t, cn.IsNotFound(cn.Get("any", &str)))
}
func TestCacheNode_TakeWithExpire(t *testing.T) {
store, clean, err := redistest.CreateRedis()
assert.Nil(t, err)

View File

@@ -34,14 +34,14 @@ func newOptions(opts ...Option) Options {
return o
}
// WithExpiry returns a func to customize a Options with given expiry.
// WithExpiry returns a func to customize an Options with given expiry.
func WithExpiry(expiry time.Duration) Option {
return func(o *Options) {
o.Expiry = expiry
}
}
// WithNotFoundExpiry returns a func to customize a Options with given not found expiry.
// WithNotFoundExpiry returns a func to customize an Options with given not found expiry.
func WithNotFoundExpiry(expiry time.Duration) Option {
return func(o *Options) {
o.NotFoundExpiry = expiry

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/timex"
)
const statInterval = time.Minute
@@ -25,7 +26,13 @@ func NewStat(name string) *Stat {
ret := &Stat{
name: name,
}
go ret.statLoop()
go func() {
ticker := timex.NewTicker(statInterval)
defer ticker.Stop()
ret.statLoop(ticker)
}()
return ret
}
@@ -50,11 +57,8 @@ func (s *Stat) IncrementDbFails() {
atomic.AddUint64(&s.DbFails, 1)
}
func (s *Stat) statLoop() {
ticker := time.NewTicker(statInterval)
defer ticker.Stop()
for range ticker.C {
func (s *Stat) statLoop(ticker timex.Ticker) {
for range ticker.Chan() {
total := atomic.SwapUint64(&s.Total, 0)
if total == 0 {
continue

28
core/stores/cache/cachestat_test.go vendored Normal file
View File

@@ -0,0 +1,28 @@
package cache
import (
"testing"
"github.com/zeromicro/go-zero/core/timex"
)
func TestCacheStat_statLoop(t *testing.T) {
t.Run("stat loop total 0", func(t *testing.T) {
var stat Stat
ticker := timex.NewFakeTicker()
go stat.statLoop(ticker)
ticker.Tick()
ticker.Tick()
ticker.Stop()
})
t.Run("stat loop total not 0", func(t *testing.T) {
var stat Stat
stat.IncrementTotal()
ticker := timex.NewFakeTicker()
go stat.statLoop(ticker)
ticker.Tick()
ticker.Tick()
ticker.Stop()
})
}

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
)
func TestNextDelay(t *testing.T) {
@@ -51,6 +52,7 @@ func TestNextDelay(t *testing.T) {
next, ok := nextDelay(test.input)
assert.Equal(t, test.ok, ok)
assert.Equal(t, test.output, next)
proc.Shutdown()
})
}
}

View File

@@ -164,7 +164,7 @@ func NewStore(c KvConf) Store {
// because Store and redis.Redis has different methods.
dispatcher := hash.NewConsistentHash()
for _, node := range c {
cn := node.NewRedis()
cn := redis.MustNewRedis(node.RedisConf)
dispatcher.AddWithWeight(cn, node.Weight)
}

View File

@@ -26,9 +26,14 @@ type (
)
// NewBulkInserter returns a BulkInserter.
func NewBulkInserter(coll *mongo.Collection, interval ...time.Duration) *BulkInserter {
func NewBulkInserter(coll Collection, interval ...time.Duration) (*BulkInserter, error) {
cloneColl, err := coll.Clone()
if err != nil {
return nil, err
}
inserter := &dbInserter{
collection: coll,
collection: cloneColl,
}
duration := flushInterval
@@ -39,7 +44,7 @@ func NewBulkInserter(coll *mongo.Collection, interval ...time.Duration) *BulkIns
return &BulkInserter{
executor: executors.NewPeriodicalExecutor(duration, inserter),
inserter: inserter,
}
}, nil
}
// Flush flushes the inserter, writes all pending records.

View File

@@ -15,7 +15,8 @@ func TestBulkInserter(t *testing.T) {
mt.Run("test", func(mt *mtest.T) {
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "ok", Value: 1}}...))
bulk := NewBulkInserter(mt.Coll)
bulk, err := NewBulkInserter(createModel(mt).Collection)
assert.Equal(t, err, nil)
bulk.SetResultHandler(func(result *mongo.InsertManyResult, err error) {
assert.Nil(t, err)
assert.Equal(t, 2, len(result.InsertedIDs))

View File

@@ -3,15 +3,12 @@ package mon
import (
"context"
"io"
"time"
"github.com/zeromicro/go-zero/core/syncx"
"go.mongodb.org/mongo-driver/mongo"
mopt "go.mongodb.org/mongo-driver/mongo/options"
)
const defaultTimeout = time.Second
var clientManager = syncx.NewResourceManager()
// ClosableClient wraps *mongo.Client and provides a Close method.
@@ -30,9 +27,20 @@ func Inject(key string, client *mongo.Client) {
clientManager.Inject(key, &ClosableClient{client})
}
func getClient(url string) (*mongo.Client, error) {
func getClient(url string, opts ...Option) (*mongo.Client, error) {
val, err := clientManager.GetResource(url, func() (io.Closer, error) {
cli, err := mongo.Connect(context.Background(), mopt.Client().ApplyURI(url))
o := mopt.Client().ApplyURI(url)
opts = append([]Option{defaultTimeoutOption()}, opts...)
for _, opt := range opts {
opt(o)
}
cli, err := mongo.Connect(context.Background(), o)
if err != nil {
return nil, err
}
err = cli.Ping(context.Background(), nil)
if err != nil {
return nil, err
}

View File

@@ -83,12 +83,12 @@ type (
// FindOneAndReplace returns at most one document that matches the filter. If the filter
// matches multiple documents, FindOneAndReplace returns the first document in the
// collection that matches the filter.
FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{},
FindOneAndReplace(ctx context.Context, filter, replacement interface{},
opts ...*mopt.FindOneAndReplaceOptions) (*mongo.SingleResult, error)
// FindOneAndUpdate returns at most one document that matches the filter. If the filter
// matches multiple documents, FindOneAndUpdate returns the first document in the
// collection that matches the filter.
FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{},
FindOneAndUpdate(ctx context.Context, filter, update interface{},
opts ...*mopt.FindOneAndUpdateOptions) (*mongo.SingleResult, error)
// Indexes returns the index view for this collection.
Indexes() mongo.IndexView
@@ -99,16 +99,16 @@ type (
InsertOne(ctx context.Context, document interface{}, opts ...*mopt.InsertOneOptions) (
*mongo.InsertOneResult, error)
// ReplaceOne replaces at most one document that matches the filter.
ReplaceOne(ctx context.Context, filter interface{}, replacement interface{},
ReplaceOne(ctx context.Context, filter, replacement interface{},
opts ...*mopt.ReplaceOptions) (*mongo.UpdateResult, error)
// UpdateByID updates a single document matching the provided filter.
UpdateByID(ctx context.Context, id interface{}, update interface{},
UpdateByID(ctx context.Context, id, update interface{},
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
// UpdateMany updates the provided documents.
UpdateMany(ctx context.Context, filter interface{}, update interface{},
UpdateMany(ctx context.Context, filter, update interface{},
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
// UpdateOne updates a single document matching the provided filter.
UpdateOne(ctx context.Context, filter interface{}, update interface{},
UpdateOne(ctx context.Context, filter, update interface{},
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
// Watch returns a change stream cursor used to receive notifications of changes to the collection.
Watch(ctx context.Context, pipeline interface{}, opts ...*mopt.ChangeStreamOptions) (
@@ -359,7 +359,7 @@ func (c *decoratedCollection) FindOneAndReplace(ctx context.Context, filter inte
return
}
func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{},
func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter, update interface{},
opts ...*mopt.FindOneAndUpdateOptions) (res *mongo.SingleResult, err error) {
ctx, span := startSpan(ctx, findOneAndUpdate)
defer func() {
@@ -420,7 +420,7 @@ func (c *decoratedCollection) InsertOne(ctx context.Context, document interface{
return
}
func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter interface{}, replacement interface{},
func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter, replacement interface{},
opts ...*mopt.ReplaceOptions) (res *mongo.UpdateResult, err error) {
ctx, span := startSpan(ctx, replaceOne)
defer func() {
@@ -440,7 +440,7 @@ func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter interface{}
return
}
func (c *decoratedCollection) UpdateByID(ctx context.Context, id interface{}, update interface{},
func (c *decoratedCollection) UpdateByID(ctx context.Context, id, update interface{},
opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
ctx, span := startSpan(ctx, updateByID)
defer func() {
@@ -460,7 +460,7 @@ func (c *decoratedCollection) UpdateByID(ctx context.Context, id interface{}, up
return
}
func (c *decoratedCollection) UpdateMany(ctx context.Context, filter interface{}, update interface{},
func (c *decoratedCollection) UpdateMany(ctx context.Context, filter, update interface{},
opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
ctx, span := startSpan(ctx, updateMany)
defer func() {
@@ -480,7 +480,7 @@ func (c *decoratedCollection) UpdateMany(ctx context.Context, filter interface{}
return
}
func (c *decoratedCollection) UpdateOne(ctx context.Context, filter interface{}, update interface{},
func (c *decoratedCollection) UpdateOne(ctx context.Context, filter, update interface{},
opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
ctx, span := startSpan(ctx, updateOne)
defer func() {

View File

@@ -106,14 +106,14 @@ func TestCollection_BulkWrite(t *testing.T) {
}
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "ok", Value: 1}}...))
res, err := c.BulkWrite(context.Background(), []mongo.WriteModel{
mongo.NewInsertOneModel().SetDocument(bson.D{{Key: "foo", Value: 1}})},
)
mongo.NewInsertOneModel().SetDocument(bson.D{{Key: "foo", Value: 1}}),
})
assert.Nil(t, err)
assert.NotNil(t, res)
c.brk = new(dropBreaker)
_, err = c.BulkWrite(context.Background(), []mongo.WriteModel{
mongo.NewInsertOneModel().SetDocument(bson.D{{Key: "foo", Value: 1}})},
)
mongo.NewInsertOneModel().SetDocument(bson.D{{Key: "foo", Value: 1}}),
})
assert.Equal(t, errDummy, err)
})
}
@@ -204,7 +204,7 @@ func TestCollection_EstimatedDocumentCount(t *testing.T) {
})
}
func TestCollectionFind(t *testing.T) {
func TestCollection_Find(t *testing.T) {
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
defer mt.Close()
@@ -252,7 +252,7 @@ func TestCollectionFind(t *testing.T) {
})
}
func TestCollectionFindOne(t *testing.T) {
func TestCollection_FindOne(t *testing.T) {
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
defer mt.Close()
@@ -436,7 +436,7 @@ func TestCollection_InsertMany(t *testing.T) {
})
}
func TestCollection_Remove(t *testing.T) {
func TestCollection_DeleteOne(t *testing.T) {
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
defer mt.Close()
@@ -456,7 +456,7 @@ func TestCollection_Remove(t *testing.T) {
})
}
func TestCollectionRemoveAll(t *testing.T) {
func TestCollection_DeleteMany(t *testing.T) {
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
defer mt.Close()
@@ -565,7 +565,7 @@ func TestCollection_UpdateMany(t *testing.T) {
})
}
func Test_DecoratedCollectionLogDuration(t *testing.T) {
func TestDecoratedCollection_LogDuration(t *testing.T) {
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
defer mt.Close()
c := decoratedCollection{

View File

@@ -48,7 +48,7 @@ func MustNewModel(uri, db, collection string, opts ...Option) *Model {
// NewModel returns a Model.
func NewModel(uri, db, collection string, opts ...Option) (*Model, error) {
cli, err := getClient(uri)
cli, err := getClient(uri, opts...)
if err != nil {
return nil, err
}
@@ -159,7 +159,7 @@ func (m *Model) FindOneAndDelete(ctx context.Context, v, filter interface{},
}
// FindOneAndReplace finds a single document and replaces it.
func (m *Model) FindOneAndReplace(ctx context.Context, v, filter interface{}, replacement interface{},
func (m *Model) FindOneAndReplace(ctx context.Context, v, filter, replacement interface{},
opts ...*mopt.FindOneAndReplaceOptions) error {
res, err := m.Collection.FindOneAndReplace(ctx, filter, replacement, opts...)
if err != nil {
@@ -170,7 +170,7 @@ func (m *Model) FindOneAndReplace(ctx context.Context, v, filter interface{}, re
}
// FindOneAndUpdate finds a single document and updates it.
func (m *Model) FindOneAndUpdate(ctx context.Context, v, filter interface{}, update interface{},
func (m *Model) FindOneAndUpdate(ctx context.Context, v, filter, update interface{},
opts ...*mopt.FindOneAndUpdateOptions) error {
res, err := m.Collection.FindOneAndUpdate(ctx, filter, update, opts...)
if err != nil {

View File

@@ -4,14 +4,15 @@ import (
"time"
"github.com/zeromicro/go-zero/core/syncx"
mopt "go.mongodb.org/mongo-driver/mongo/options"
)
const defaultTimeout = time.Second * 3
var slowThreshold = syncx.ForAtomicDuration(defaultSlowThreshold)
type (
options struct {
timeout time.Duration
}
options = mopt.ClientOptions
// Option defines the method to customize a mongo model.
Option func(opts *options)
@@ -22,8 +23,15 @@ func SetSlowThreshold(threshold time.Duration) {
slowThreshold.Set(threshold)
}
func defaultOptions() *options {
return &options{
timeout: defaultTimeout,
func defaultTimeoutOption() Option {
return func(opts *options) {
opts.SetTimeout(defaultTimeout)
}
}
// WithTimeout set the mon client operation timeout.
func WithTimeout(timeout time.Duration) Option {
return func(opts *options) {
opts.SetTimeout(timeout)
}
}

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
mopt "go.mongodb.org/mongo-driver/mongo/options"
)
func TestSetSlowThreshold(t *testing.T) {
@@ -13,6 +14,14 @@ func TestSetSlowThreshold(t *testing.T) {
assert.Equal(t, time.Second, slowThreshold.Load())
}
func TestDefaultOptions(t *testing.T) {
assert.Equal(t, defaultTimeout, defaultOptions().timeout)
func Test_defaultTimeoutOption(t *testing.T) {
opts := mopt.Client()
defaultTimeoutOption()(opts)
assert.Equal(t, defaultTimeout, *opts.Timeout)
}
func TestWithTimeout(t *testing.T) {
opts := mopt.Client()
WithTimeout(time.Second)(opts)
assert.Equal(t, time.Second, *opts.Timeout)
}

View File

@@ -5,7 +5,6 @@ import (
"github.com/zeromicro/go-zero/core/trace"
"go.mongodb.org/mongo-driver/mongo"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
oteltrace "go.opentelemetry.io/otel/trace"
@@ -14,12 +13,10 @@ import (
var mongoCmdAttributeKey = attribute.Key("mongo.cmd")
func startSpan(ctx context.Context, cmd string) (context.Context, oteltrace.Span) {
tracer := otel.GetTracerProvider().Tracer(trace.TraceName)
ctx, span := tracer.Start(ctx,
spanName,
oteltrace.WithSpanKind(oteltrace.SpanKindClient),
)
tracer := trace.TracerFromContext(ctx)
ctx, span := tracer.Start(ctx, spanName, oteltrace.WithSpanKind(oteltrace.SpanKindClient))
span.SetAttributes(mongoCmdAttributeKey.String(cmd))
return ctx, span
}

Some files were not shown because too many files have changed in this diff Show More