mirror of
https://github.com/zeromicro/go-zero.git
synced 2026-05-12 01:10:00 +08:00
Compare commits
220 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
67db40ed4f | ||
|
|
11c485a5ed | ||
|
|
b0573af9a9 | ||
|
|
09eb53f308 | ||
|
|
11f85d1b80 | ||
|
|
0cb86c6990 | ||
|
|
57d2f22c24 | ||
|
|
fa0c364982 | ||
|
|
a6c8113419 | ||
|
|
4f5c30e083 | ||
|
|
9d0b51fa26 | ||
|
|
ba5f8045a2 | ||
|
|
3a510a9138 | ||
|
|
d3bfa16813 | ||
|
|
28409791fa | ||
|
|
c1abe87953 | ||
|
|
f8367856e8 | ||
|
|
a72b0a689b | ||
|
|
69a4d213a3 | ||
|
|
c28e01fed3 | ||
|
|
e8efcef108 | ||
|
|
d011316997 | ||
|
|
4d22b0c497 | ||
|
|
539215d7df | ||
|
|
3ede597a15 | ||
|
|
01786c5e63 | ||
|
|
6aba5f74fc | ||
|
|
3c894a3fb7 | ||
|
|
1ece3a498f | ||
|
|
b76c7ae55d | ||
|
|
91b10bd3b9 | ||
|
|
7e3fe77e7b | ||
|
|
ba43214dae | ||
|
|
ebc90720ea | ||
|
|
785d100be9 | ||
|
|
f13e6f1149 | ||
|
|
8be0f77d96 | ||
|
|
429f85a9de | ||
|
|
b4d1c6da2c | ||
|
|
3c1cfd4c1e | ||
|
|
a71a210704 | ||
|
|
769d06c8ab | ||
|
|
cd1f8da13f | ||
|
|
8230474667 | ||
|
|
27f553bf84 | ||
|
|
d48bff8c8b | ||
|
|
59b9687f31 | ||
|
|
c1a8ccda11 | ||
|
|
9df6786b09 | ||
|
|
bef5bd4e4f | ||
|
|
68acfb1891 | ||
|
|
9fd3f752d1 | ||
|
|
9c48e9ceab | ||
|
|
bd26783b33 | ||
|
|
eda8230521 | ||
|
|
462ddbb145 | ||
|
|
496a2f341e | ||
|
|
7109d6d635 | ||
|
|
ca72241fa3 | ||
|
|
a6bdffd225 | ||
|
|
5636bf4955 | ||
|
|
a944a7fd7e | ||
|
|
a40fa405e4 | ||
|
|
eab77e21dd | ||
|
|
d41163f5c1 | ||
|
|
265b1f2459 | ||
|
|
c92ea59228 | ||
|
|
afddfea093 | ||
|
|
fa4dc151ca | ||
|
|
44202acb18 | ||
|
|
cf00786209 | ||
|
|
6a8638fc85 | ||
|
|
837a9ffa03 | ||
|
|
d28cfe5f20 | ||
|
|
022c100dc9 | ||
|
|
426b09c356 | ||
|
|
40dc21e4cf | ||
|
|
9b114e3251 | ||
|
|
4c6234f108 | ||
|
|
3cdfcb05f1 | ||
|
|
9f5bfa0088 | ||
|
|
2d42c8fa00 | ||
|
|
10e7922597 | ||
|
|
6e34b55ba7 | ||
|
|
ed15ca04f4 | ||
|
|
295ec27e1b | ||
|
|
d1e702e8a3 | ||
|
|
d1bfb5ef61 | ||
|
|
e43357164c | ||
|
|
cd21c9fa74 | ||
|
|
cdd2fcbbc9 | ||
|
|
8d2db09d45 | ||
|
|
65905b914d | ||
|
|
80e3407be1 | ||
|
|
657d27213a | ||
|
|
8ac18a9422 | ||
|
|
d3ae9cfd49 | ||
|
|
d7f42161fd | ||
|
|
e03229cabe | ||
|
|
8403ed16ae | ||
|
|
d87d203c3b | ||
|
|
3ae6a882a7 | ||
|
|
41c980f00c | ||
|
|
f34d81ca2c | ||
|
|
004ee488a6 | ||
|
|
2e12cd2c99 | ||
|
|
2695c30886 | ||
|
|
c74fb988e0 | ||
|
|
e8a340c1c0 | ||
|
|
06e114e5a3 | ||
|
|
74ad681a66 | ||
|
|
e7bbc09093 | ||
|
|
1eb1450c43 | ||
|
|
9a724fe907 | ||
|
|
30e49f2939 | ||
|
|
a5407479a6 | ||
|
|
7fb5bab26b | ||
|
|
27249e021f | ||
|
|
d809795fec | ||
|
|
c9db9588b7 | ||
|
|
872c50b71a | ||
|
|
7c83155e4f | ||
|
|
358d86b8ae | ||
|
|
f4bb9f5635 | ||
|
|
5c6a3132eb | ||
|
|
2bd95aa007 | ||
|
|
e8376936d5 | ||
|
|
71c0288023 | ||
|
|
9e2f07a842 | ||
|
|
24fd34413f | ||
|
|
3f47251892 | ||
|
|
0b6bc69afa | ||
|
|
5b9bdc8d02 | ||
|
|
ded22e296e | ||
|
|
f0ed2370a3 | ||
|
|
6bf6cfdd01 | ||
|
|
5cc9eb0de4 | ||
|
|
f070d447ef | ||
|
|
f6d9e19ecb | ||
|
|
56807aabf6 | ||
|
|
861dcf2f36 | ||
|
|
c837dc21bb | ||
|
|
96a35ecf1a | ||
|
|
bdec5f2349 | ||
|
|
bc92b57bdb | ||
|
|
d8905b9e9e | ||
|
|
dec6309c55 | ||
|
|
10805577f5 | ||
|
|
a4d8286e36 | ||
|
|
84d2b64e7c | ||
|
|
6476da4a18 | ||
|
|
79eab0ea2f | ||
|
|
3b683fd498 | ||
|
|
d179b342b2 | ||
|
|
58874779e7 | ||
|
|
8829c31c0d | ||
|
|
b42f3fa047 | ||
|
|
9bdadf2381 | ||
|
|
20f665ede8 | ||
|
|
0325d8e92d | ||
|
|
2125977281 | ||
|
|
c26c187e11 | ||
|
|
4ef1859f0b | ||
|
|
407a6cbf9c | ||
|
|
76fc1ef460 | ||
|
|
423955c55f | ||
|
|
db95b3f0e3 | ||
|
|
4bee60eb7f | ||
|
|
7618139dad | ||
|
|
6fd08027ff | ||
|
|
b9e268aae8 | ||
|
|
4c1bb1148b | ||
|
|
50a6bbe6b9 | ||
|
|
dfb3cb510a | ||
|
|
519db812b4 | ||
|
|
3203f8e06b | ||
|
|
b71ac2042a | ||
|
|
d0f9e57022 | ||
|
|
aa68210cde | ||
|
|
280e837c9e | ||
|
|
f669e1226c | ||
|
|
cd15c19250 | ||
|
|
5b35fa17de | ||
|
|
9672298fa8 | ||
|
|
bf3ce16823 | ||
|
|
189721da16 | ||
|
|
a523ab1f93 | ||
|
|
7ea8b636d9 | ||
|
|
b2fea65faa | ||
|
|
a1fe8bf6cd | ||
|
|
67ee9e4391 | ||
|
|
9c1ee50497 | ||
|
|
7c842f22d0 | ||
|
|
14ec29991c | ||
|
|
c7f5aad83a | ||
|
|
e77747cff8 | ||
|
|
f2612db4b1 | ||
|
|
a21ff71373 | ||
|
|
fc04ad7854 | ||
|
|
fbf2eebc42 | ||
|
|
dc43430812 | ||
|
|
c6642bc2e6 | ||
|
|
bdca24dd3b | ||
|
|
00c5734021 | ||
|
|
33f87cf1f0 | ||
|
|
69935c1ba3 | ||
|
|
1fb356f328 | ||
|
|
0b0406f41a | ||
|
|
cc264dcf55 | ||
|
|
e024aebb66 | ||
|
|
f204729482 | ||
|
|
d20cf56a69 | ||
|
|
54d57c7d4b | ||
|
|
28a7c9d38f | ||
|
|
872e75e10d | ||
|
|
af1730079e | ||
|
|
04521e2d24 | ||
|
|
02adcccbf4 | ||
|
|
a74aaf1823 | ||
|
|
1eb2089c69 |
@@ -1,4 +1,3 @@
|
|||||||
|
comment: false
|
||||||
ignore:
|
ignore:
|
||||||
- "doc"
|
- "tools"
|
||||||
- "example"
|
|
||||||
- "tools"
|
|
||||||
12
.github/FUNDING.yml
vendored
Normal file
12
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# These are supported funding model platforms
|
||||||
|
|
||||||
|
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
||||||
|
patreon: # Replace with a single Patreon username
|
||||||
|
open_collective: # Replace with a single Open Collective username
|
||||||
|
ko_fi: # Replace with a single Ko-fi username
|
||||||
|
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
||||||
|
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
||||||
|
liberapay: # Replace with a single Liberapay username
|
||||||
|
issuehunt: # Replace with a single IssueHunt username
|
||||||
|
otechie: # Replace with a single Otechie username
|
||||||
|
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
|
||||||
40
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
40
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
---
|
||||||
|
name: Bug report
|
||||||
|
about: Create a report to help us improve
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Describe the bug**
|
||||||
|
A clear and concise description of what the bug is.
|
||||||
|
|
||||||
|
**To Reproduce**
|
||||||
|
Steps to reproduce the behavior, if applicable:
|
||||||
|
|
||||||
|
1. The code is
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
2. The error is
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
**Expected behavior**
|
||||||
|
A clear and concise description of what you expected to happen.
|
||||||
|
|
||||||
|
**Screenshots**
|
||||||
|
If applicable, add screenshots to help explain your problem.
|
||||||
|
|
||||||
|
**Environments (please complete the following information):**
|
||||||
|
- OS: [e.g. Linux]
|
||||||
|
- go-zero version [e.g. 1.2.1]
|
||||||
|
- goctl version [e.g. 1.2.1, optional]
|
||||||
|
|
||||||
|
**More description**
|
||||||
|
Add any other context about the problem here.
|
||||||
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: Feature request
|
||||||
|
about: Suggest an idea for this project
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Is your feature request related to a problem? Please describe.**
|
||||||
|
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||||
|
|
||||||
|
**Describe the solution you'd like**
|
||||||
|
A clear and concise description of what you want to happen.
|
||||||
|
|
||||||
|
**Describe alternatives you've considered**
|
||||||
|
A clear and concise description of any alternative solutions or features you've considered.
|
||||||
|
|
||||||
|
**Additional context**
|
||||||
|
Add any other context or screenshots about the feature request here.
|
||||||
10
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
10
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
---
|
||||||
|
name: Question
|
||||||
|
about: Ask a question on using go-zero or goctl
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
12
.github/workflows/go.yml
vendored
12
.github/workflows/go.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
|||||||
- name: Set up Go 1.x
|
- name: Set up Go 1.x
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: ^1.13
|
go-version: ^1.14
|
||||||
id: go
|
id: go
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
@@ -25,10 +25,14 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
go get -v -t -d ./...
|
go get -v -t -d ./...
|
||||||
|
|
||||||
|
- name: Lint
|
||||||
|
run: |
|
||||||
|
go vet -stdmethods=false $(go list ./...)
|
||||||
|
go install mvdan.cc/gofumpt@latest
|
||||||
|
test -z "$(gofumpt -s -l -extra .)" || echo "Please run 'gofumpt -l -w -extra .'"
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||||
|
|
||||||
- name: Codecov
|
- name: Codecov
|
||||||
uses: codecov/codecov-action@v1.0.6
|
uses: codecov/codecov-action@v2
|
||||||
with:
|
|
||||||
token: ${{secrets.CODECOV_TOKEN}}
|
|
||||||
|
|||||||
19
.github/workflows/issues.yml
vendored
Normal file
19
.github/workflows/issues.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
name: Close inactive issues
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "30 1 * * *"
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
close-issues:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/stale@v3
|
||||||
|
with:
|
||||||
|
days-before-issue-stale: 30
|
||||||
|
days-before-issue-close: 14
|
||||||
|
stale-issue-label: "stale"
|
||||||
|
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
|
||||||
|
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
||||||
|
days-before-pr-stale: -1
|
||||||
|
days-before-pr-close: -1
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
19
.github/workflows/reviewdog.yml
vendored
Normal file
19
.github/workflows/reviewdog.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
name: reviewdog
|
||||||
|
on: [pull_request]
|
||||||
|
jobs:
|
||||||
|
staticcheck:
|
||||||
|
name: runner / staticcheck
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- uses: reviewdog/action-staticcheck@v1
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.github_token }}
|
||||||
|
# Change reviewdog reporter if you need [github-pr-check,github-check,github-pr-review].
|
||||||
|
reporter: github-pr-review
|
||||||
|
# Report all results.
|
||||||
|
filter_mode: nofilter
|
||||||
|
# Exit with 1 when it find at least one finding.
|
||||||
|
fail_on_error: true
|
||||||
|
# Set staticcheck flags
|
||||||
|
staticcheck_flags: -checks=inherit,-SA1019,-SA1029,-SA5008
|
||||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -10,10 +10,14 @@
|
|||||||
!*/
|
!*/
|
||||||
!api
|
!api
|
||||||
|
|
||||||
|
# ignore
|
||||||
.idea
|
.idea
|
||||||
**/.DS_Store
|
**/.DS_Store
|
||||||
**/logs
|
**/logs
|
||||||
|
|
||||||
|
# for test purpose
|
||||||
|
adhoc
|
||||||
|
|
||||||
# gitlab ci
|
# gitlab ci
|
||||||
.cache
|
.cache
|
||||||
|
|
||||||
|
|||||||
@@ -90,7 +90,7 @@ After that, run these local verifications before submitting pull request to pred
|
|||||||
fail of continuous integration.
|
fail of continuous integration.
|
||||||
|
|
||||||
* Format the code with `gofmt`
|
* Format the code with `gofmt`
|
||||||
* Run the test with data race enabled `go test -race ./…`
|
* Run the test with data race enabled `go test -race ./...`
|
||||||
|
|
||||||
## Code Review
|
## Code Review
|
||||||
|
|
||||||
|
|||||||
26
ROADMAP.md
26
ROADMAP.md
@@ -5,17 +5,23 @@ Community and contributor involvement is vital for successfully implementing all
|
|||||||
We hope that the items listed below will inspire further engagement from the community to keep go-zero progressing and shipping exciting and valuable features.
|
We hope that the items listed below will inspire further engagement from the community to keep go-zero progressing and shipping exciting and valuable features.
|
||||||
|
|
||||||
## 2021 Q2
|
## 2021 Q2
|
||||||
- Support TLS in redis connections
|
- [x] Support service discovery through K8S client api
|
||||||
- Support service discovery through K8S watch api
|
- [x] Log full sql statements for easier sql problem solving
|
||||||
- Log full sql statements for easier sql problem solving
|
|
||||||
|
|
||||||
## 2021 Q3
|
## 2021 Q3
|
||||||
- Support `goctl mock` command to start a mocking server with given `.api` file
|
- [x] Support `goctl model pg` to support PostgreSQL code generation
|
||||||
- Adapt builtin tracing mechanism to opentracing solutions
|
- [x] Adapt builtin tracing mechanism to opentracing solutions
|
||||||
- Support `goctl model pg` to support PostgreSQL code generation
|
|
||||||
|
|
||||||
## 2021 Q4
|
## 2021 Q4
|
||||||
- Support `goctl doctor` command to report potential issues for given service
|
- [x] Support `username/password` authentication in ETCD
|
||||||
- Support `context` in redis related methods for timeout and tracing
|
- [x] Support `SSL/TLS` in `zRPC`
|
||||||
- Support `context` in sql related methods for timeout and tracing
|
- [x] Support `TLS` in redis connections
|
||||||
- Support `context` in mongodb related methods for timeout and tracing
|
- [ ] Support `retry strategies` in `zRPC`
|
||||||
|
|
||||||
|
## 2022
|
||||||
|
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file
|
||||||
|
- [ ] Add `httpx.Client` with governance, like circuit breaker etc.
|
||||||
|
- [ ] Support `goctl doctor` command to report potential issues for given service
|
||||||
|
- [ ] Support `context` in redis related methods for timeout and tracing
|
||||||
|
- [ ] Support `context` in sql related methods for timeout and tracing
|
||||||
|
- [ ] Support `context` in mongodb related methods for timeout and tracing
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ type (
|
|||||||
expire time.Duration
|
expire time.Duration
|
||||||
timingWheel *TimingWheel
|
timingWheel *TimingWheel
|
||||||
lruCache lru
|
lruCache lru
|
||||||
barrier syncx.SharedCalls
|
barrier syncx.SingleFlight
|
||||||
unstableExpiry mathx.Unstable
|
unstableExpiry mathx.Unstable
|
||||||
stats *cacheStat
|
stats *cacheStat
|
||||||
}
|
}
|
||||||
@@ -46,7 +46,7 @@ func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
|
|||||||
data: make(map[string]interface{}),
|
data: make(map[string]interface{}),
|
||||||
expire: expire,
|
expire: expire,
|
||||||
lruCache: emptyLruCache,
|
lruCache: emptyLruCache,
|
||||||
barrier: syncx.NewSharedCalls(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -47,9 +47,11 @@ func TestUnmarshalContextWithMissing(t *testing.T) {
|
|||||||
Name string `ctx:"name"`
|
Name string `ctx:"name"`
|
||||||
Age int `ctx:"age"`
|
Age int `ctx:"age"`
|
||||||
}
|
}
|
||||||
|
type name string
|
||||||
|
const PersonNameKey name = "name"
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
ctx = context.WithValue(ctx, "name", "kevin")
|
ctx = context.WithValue(ctx, PersonNameKey, "kevin")
|
||||||
|
|
||||||
var person Person
|
var person Person
|
||||||
err := For(ctx, &person)
|
err := For(ctx, &person)
|
||||||
|
|||||||
@@ -9,7 +9,9 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestContextCancel(t *testing.T) {
|
func TestContextCancel(t *testing.T) {
|
||||||
c := context.WithValue(context.Background(), "key", "value")
|
type key string
|
||||||
|
var nameKey key = "name"
|
||||||
|
c := context.WithValue(context.Background(), nameKey, "value")
|
||||||
c1, cancel := context.WithCancel(c)
|
c1, cancel := context.WithCancel(c)
|
||||||
o := ValueOnlyFrom(c1)
|
o := ValueOnlyFrom(c1)
|
||||||
c2, cancel2 := context.WithCancel(o)
|
c2, cancel2 := context.WithCancel(o)
|
||||||
|
|||||||
7
core/discov/accountregistry.go
Normal file
7
core/discov/accountregistry.go
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
package discov
|
||||||
|
|
||||||
|
import "github.com/tal-tech/go-zero/core/discov/internal"
|
||||||
|
|
||||||
|
func RegisterAccount(endpoints []string, user, pass string) {
|
||||||
|
internal.AddAccount(endpoints, user, pass)
|
||||||
|
}
|
||||||
21
core/discov/accountregistry_test.go
Normal file
21
core/discov/accountregistry_test.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
package discov
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/discov/internal"
|
||||||
|
"github.com/tal-tech/go-zero/core/stringx"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRegisterAccount(t *testing.T) {
|
||||||
|
endpoints := []string{
|
||||||
|
"localhost:2379",
|
||||||
|
}
|
||||||
|
user := "foo" + stringx.Rand()
|
||||||
|
RegisterAccount(endpoints, user, "bar")
|
||||||
|
account, ok := internal.GetAccount(endpoints)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, user, account.User)
|
||||||
|
assert.Equal(t, "bar", account.Pass)
|
||||||
|
}
|
||||||
@@ -6,6 +6,13 @@ import "errors"
|
|||||||
type EtcdConf struct {
|
type EtcdConf struct {
|
||||||
Hosts []string
|
Hosts []string
|
||||||
Key string
|
Key string
|
||||||
|
User string `json:",optional"`
|
||||||
|
Pass string `json:",optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasAccount returns if account provided.
|
||||||
|
func (c EtcdConf) HasAccount() bool {
|
||||||
|
return len(c.User) > 0 && len(c.Pass) > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate validates c.
|
// Validate validates c.
|
||||||
|
|||||||
@@ -44,3 +44,39 @@ func TestConfig(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestEtcdConf_HasAccount(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
EtcdConf
|
||||||
|
hasAccount bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
EtcdConf: EtcdConf{
|
||||||
|
Hosts: []string{"any"},
|
||||||
|
Key: "key",
|
||||||
|
},
|
||||||
|
hasAccount: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
EtcdConf: EtcdConf{
|
||||||
|
Hosts: []string{"any"},
|
||||||
|
Key: "key",
|
||||||
|
User: "foo",
|
||||||
|
},
|
||||||
|
hasAccount: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
EtcdConf: EtcdConf{
|
||||||
|
Hosts: []string{"any"},
|
||||||
|
Key: "key",
|
||||||
|
User: "foo",
|
||||||
|
Pass: "bar",
|
||||||
|
},
|
||||||
|
hasAccount: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
assert.Equal(t, test.hasAccount, test.EtcdConf.HasAccount())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
31
core/discov/internal/accountmanager.go
Normal file
31
core/discov/internal/accountmanager.go
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
type Account struct {
|
||||||
|
User string
|
||||||
|
Pass string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
accounts = make(map[string]Account)
|
||||||
|
lock sync.RWMutex
|
||||||
|
)
|
||||||
|
|
||||||
|
func AddAccount(endpoints []string, user, pass string) {
|
||||||
|
lock.Lock()
|
||||||
|
defer lock.Unlock()
|
||||||
|
|
||||||
|
accounts[getClusterKey(endpoints)] = Account{
|
||||||
|
User: user,
|
||||||
|
Pass: pass,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetAccount(endpoints []string) (Account, bool) {
|
||||||
|
lock.RLock()
|
||||||
|
defer lock.RUnlock()
|
||||||
|
|
||||||
|
account, ok := accounts[getClusterKey(endpoints)]
|
||||||
|
return account, ok
|
||||||
|
}
|
||||||
34
core/discov/internal/accountmanager_test.go
Normal file
34
core/discov/internal/accountmanager_test.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/stringx"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccount(t *testing.T) {
|
||||||
|
endpoints := []string{
|
||||||
|
"192.168.0.2:2379",
|
||||||
|
"192.168.0.3:2379",
|
||||||
|
"192.168.0.4:2379",
|
||||||
|
}
|
||||||
|
username := "foo" + stringx.Rand()
|
||||||
|
password := "bar"
|
||||||
|
anotherPassword := "any"
|
||||||
|
|
||||||
|
_, ok := GetAccount(endpoints)
|
||||||
|
assert.False(t, ok)
|
||||||
|
|
||||||
|
AddAccount(endpoints, username, password)
|
||||||
|
account, ok := GetAccount(endpoints)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, username, account.User)
|
||||||
|
assert.Equal(t, password, account.Pass)
|
||||||
|
|
||||||
|
AddAccount(endpoints, username, anotherPassword)
|
||||||
|
account, ok = GetAccount(endpoints)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, username, account.User)
|
||||||
|
assert.Equal(t, anotherPassword, account.Pass)
|
||||||
|
}
|
||||||
@@ -302,14 +302,20 @@ func (c *cluster) watchConnState(cli EtcdClient) {
|
|||||||
|
|
||||||
// DialClient dials an etcd cluster with given endpoints.
|
// DialClient dials an etcd cluster with given endpoints.
|
||||||
func DialClient(endpoints []string) (EtcdClient, error) {
|
func DialClient(endpoints []string) (EtcdClient, error) {
|
||||||
return clientv3.New(clientv3.Config{
|
cfg := clientv3.Config{
|
||||||
Endpoints: endpoints,
|
Endpoints: endpoints,
|
||||||
AutoSyncInterval: autoSyncInterval,
|
AutoSyncInterval: autoSyncInterval,
|
||||||
DialTimeout: DialTimeout,
|
DialTimeout: DialTimeout,
|
||||||
DialKeepAliveTime: dialKeepAliveTime,
|
DialKeepAliveTime: dialKeepAliveTime,
|
||||||
DialKeepAliveTimeout: DialTimeout,
|
DialKeepAliveTimeout: DialTimeout,
|
||||||
RejectOldCluster: true,
|
RejectOldCluster: true,
|
||||||
})
|
}
|
||||||
|
if account, ok := GetAccount(endpoints); ok {
|
||||||
|
cfg.Username = account.User
|
||||||
|
cfg.Password = account.Pass
|
||||||
|
}
|
||||||
|
|
||||||
|
return clientv3.New(cfg)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getClusterKey(endpoints []string) string {
|
func getClusterKey(endpoints []string) string {
|
||||||
|
|||||||
@@ -33,6 +33,7 @@ func setMockClient(cli EtcdClient) func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGetCluster(t *testing.T) {
|
func TestGetCluster(t *testing.T) {
|
||||||
|
AddAccount([]string{"first"}, "foo", "bar")
|
||||||
c1 := GetRegistry().getCluster([]string{"first"})
|
c1 := GetRegistry().getCluster([]string{"first"})
|
||||||
c2 := GetRegistry().getCluster([]string{"second"})
|
c2 := GetRegistry().getCluster([]string{"second"})
|
||||||
c3 := GetRegistry().getCluster([]string{"first"})
|
c3 := GetRegistry().getCluster([]string{"first"})
|
||||||
|
|||||||
@@ -6,9 +6,10 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
context "context"
|
context "context"
|
||||||
|
reflect "reflect"
|
||||||
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "github.com/golang/mock/gomock"
|
||||||
connectivity "google.golang.org/grpc/connectivity"
|
connectivity "google.golang.org/grpc/connectivity"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MocketcdConn is a mock of etcdConn interface
|
// MocketcdConn is a mock of etcdConn interface
|
||||||
|
|||||||
@@ -5,8 +5,9 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockUpdateListener is a mock of UpdateListener interface
|
// MockUpdateListener is a mock of UpdateListener interface
|
||||||
|
|||||||
@@ -11,8 +11,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// PublisherOption defines the method to customize a Publisher.
|
// PubOption defines the method to customize a Publisher.
|
||||||
PublisherOption func(client *Publisher)
|
PubOption func(client *Publisher)
|
||||||
|
|
||||||
// A Publisher can be used to publish the value to an etcd cluster on the given key.
|
// A Publisher can be used to publish the value to an etcd cluster on the given key.
|
||||||
Publisher struct {
|
Publisher struct {
|
||||||
@@ -32,7 +32,7 @@ type (
|
|||||||
// endpoints is the hosts of the etcd cluster.
|
// endpoints is the hosts of the etcd cluster.
|
||||||
// key:value are a pair to be published.
|
// key:value are a pair to be published.
|
||||||
// opts are used to customize the Publisher.
|
// opts are used to customize the Publisher.
|
||||||
func NewPublisher(endpoints []string, key, value string, opts ...PublisherOption) *Publisher {
|
func NewPublisher(endpoints []string, key, value string, opts ...PubOption) *Publisher {
|
||||||
publisher := &Publisher{
|
publisher := &Publisher{
|
||||||
endpoints: endpoints,
|
endpoints: endpoints,
|
||||||
key: key,
|
key: key,
|
||||||
@@ -145,8 +145,15 @@ func (p *Publisher) revoke(cli internal.EtcdClient) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithPubEtcdAccount provides the etcd username/password.
|
||||||
|
func WithPubEtcdAccount(user, pass string) PubOption {
|
||||||
|
return func(pub *Publisher) {
|
||||||
|
internal.AddAccount(pub.endpoints, user, pass)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// WithId customizes a Publisher with the id.
|
// WithId customizes a Publisher with the id.
|
||||||
func WithId(id int64) PublisherOption {
|
func WithId(id int64) PubOption {
|
||||||
return func(publisher *Publisher) {
|
return func(publisher *Publisher) {
|
||||||
publisher.id = id
|
publisher.id = id
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/discov/internal"
|
"github.com/tal-tech/go-zero/core/discov/internal"
|
||||||
"github.com/tal-tech/go-zero/core/lang"
|
"github.com/tal-tech/go-zero/core/lang"
|
||||||
"github.com/tal-tech/go-zero/core/logx"
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
|
"github.com/tal-tech/go-zero/core/stringx"
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,7 +31,8 @@ func TestPublisher_register(t *testing.T) {
|
|||||||
ID: id,
|
ID: id,
|
||||||
}, nil)
|
}, nil)
|
||||||
cli.EXPECT().Put(gomock.Any(), makeEtcdKey("thekey", id), "thevalue", gomock.Any())
|
cli.EXPECT().Put(gomock.Any(), makeEtcdKey("thekey", id), "thevalue", gomock.Any())
|
||||||
pub := NewPublisher(nil, "thekey", "thevalue")
|
pub := NewPublisher(nil, "thekey", "thevalue",
|
||||||
|
WithPubEtcdAccount(stringx.Rand(), "bar"))
|
||||||
_, err := pub.register(cli)
|
_, err := pub.register(cli)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,16 +9,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
subOptions struct {
|
|
||||||
exclusive bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubOption defines the method to customize a Subscriber.
|
// SubOption defines the method to customize a Subscriber.
|
||||||
SubOption func(opts *subOptions)
|
SubOption func(sub *Subscriber)
|
||||||
|
|
||||||
// A Subscriber is used to subscribe the given key on a etcd cluster.
|
// A Subscriber is used to subscribe the given key on a etcd cluster.
|
||||||
Subscriber struct {
|
Subscriber struct {
|
||||||
items *container
|
endpoints []string
|
||||||
|
exclusive bool
|
||||||
|
items *container
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -27,14 +25,14 @@ type (
|
|||||||
// key is the key to subscribe.
|
// key is the key to subscribe.
|
||||||
// opts are used to customize the Subscriber.
|
// opts are used to customize the Subscriber.
|
||||||
func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscriber, error) {
|
func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscriber, error) {
|
||||||
var subOpts subOptions
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(&subOpts)
|
|
||||||
}
|
|
||||||
|
|
||||||
sub := &Subscriber{
|
sub := &Subscriber{
|
||||||
items: newContainer(subOpts.exclusive),
|
endpoints: endpoints,
|
||||||
}
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(sub)
|
||||||
|
}
|
||||||
|
sub.items = newContainer(sub.exclusive)
|
||||||
|
|
||||||
if err := internal.GetRegistry().Monitor(endpoints, key, sub.items); err != nil {
|
if err := internal.GetRegistry().Monitor(endpoints, key, sub.items); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -55,8 +53,14 @@ func (s *Subscriber) Values() []string {
|
|||||||
// Exclusive means that key value can only be 1:1,
|
// Exclusive means that key value can only be 1:1,
|
||||||
// which means later added value will remove the keys associated with the same value previously.
|
// which means later added value will remove the keys associated with the same value previously.
|
||||||
func Exclusive() SubOption {
|
func Exclusive() SubOption {
|
||||||
return func(opts *subOptions) {
|
return func(sub *Subscriber) {
|
||||||
opts.exclusive = true
|
sub.exclusive = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithSubEtcdAccount(user, pass string) SubOption {
|
||||||
|
return func(sub *Subscriber) {
|
||||||
|
internal.AddAccount(sub.endpoints, user, pass)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tal-tech/go-zero/core/discov/internal"
|
"github.com/tal-tech/go-zero/core/discov/internal"
|
||||||
|
"github.com/tal-tech/go-zero/core/stringx"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -201,11 +202,9 @@ func TestContainer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSubscriber(t *testing.T) {
|
func TestSubscriber(t *testing.T) {
|
||||||
var opt subOptions
|
|
||||||
Exclusive()(&opt)
|
|
||||||
|
|
||||||
sub := new(Subscriber)
|
sub := new(Subscriber)
|
||||||
sub.items = newContainer(opt.exclusive)
|
Exclusive()(sub)
|
||||||
|
sub.items = newContainer(sub.exclusive)
|
||||||
var count int32
|
var count int32
|
||||||
sub.AddListener(func() {
|
sub.AddListener(func() {
|
||||||
atomic.AddInt32(&count, 1)
|
atomic.AddInt32(&count, 1)
|
||||||
@@ -214,3 +213,15 @@ func TestSubscriber(t *testing.T) {
|
|||||||
assert.Empty(t, sub.Values())
|
assert.Empty(t, sub.Values())
|
||||||
assert.Equal(t, int32(1), atomic.LoadInt32(&count))
|
assert.Equal(t, int32(1), atomic.LoadInt32(&count))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestWithSubEtcdAccount(t *testing.T) {
|
||||||
|
endpoints := []string{"localhost:2379"}
|
||||||
|
user := stringx.Rand()
|
||||||
|
WithSubEtcdAccount(user, "bar")(&Subscriber{
|
||||||
|
endpoints: endpoints,
|
||||||
|
})
|
||||||
|
account, ok := internal.GetAccount(endpoints)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, user, account.User)
|
||||||
|
assert.Equal(t, "bar", account.Pass)
|
||||||
|
}
|
||||||
|
|||||||
@@ -9,7 +9,9 @@ type AtomicError struct {
|
|||||||
|
|
||||||
// Set sets the error.
|
// Set sets the error.
|
||||||
func (ae *AtomicError) Set(err error) {
|
func (ae *AtomicError) Set(err error) {
|
||||||
ae.err.Store(err)
|
if err != nil {
|
||||||
|
ae.err.Store(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load returns the error.
|
// Load returns the error.
|
||||||
|
|||||||
@@ -17,6 +17,15 @@ func TestAtomicError(t *testing.T) {
|
|||||||
assert.Equal(t, errDummy, err.Load())
|
assert.Equal(t, errDummy, err.Load())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAtomicErrorSetNil(t *testing.T) {
|
||||||
|
var (
|
||||||
|
errNil error
|
||||||
|
err AtomicError
|
||||||
|
)
|
||||||
|
err.Set(errNil)
|
||||||
|
assert.Equal(t, errNil, err.Load())
|
||||||
|
}
|
||||||
|
|
||||||
func TestAtomicErrorNil(t *testing.T) {
|
func TestAtomicErrorNil(t *testing.T) {
|
||||||
var err AtomicError
|
var err AtomicError
|
||||||
assert.Nil(t, err.Load())
|
assert.Nil(t, err.Load())
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build windows
|
||||||
// +build windows
|
// +build windows
|
||||||
|
|
||||||
package fs
|
package fs
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux || darwin
|
||||||
// +build linux darwin
|
// +build linux darwin
|
||||||
|
|
||||||
package fs
|
package fs
|
||||||
|
|||||||
@@ -395,16 +395,16 @@ func assetEqual(t *testing.T, except, data interface{}) {
|
|||||||
|
|
||||||
func TestStream_AnyMach(t *testing.T) {
|
func TestStream_AnyMach(t *testing.T) {
|
||||||
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
||||||
return 4 == item.(int)
|
return item.(int) == 4
|
||||||
}))
|
}))
|
||||||
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
||||||
return 0 == item.(int)
|
return item.(int) == 0
|
||||||
}))
|
}))
|
||||||
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
||||||
return 2 == item.(int)
|
return item.(int) == 2
|
||||||
}))
|
}))
|
||||||
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
||||||
return 2 == item.(int)
|
return item.(int) == 2
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,6 +2,9 @@ package fx
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"runtime/debug"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,7 +33,8 @@ func DoWithTimeout(fn func() error, timeout time.Duration, opts ...DoOption) err
|
|||||||
go func() {
|
go func() {
|
||||||
defer func() {
|
defer func() {
|
||||||
if p := recover(); p != nil {
|
if p := recover(); p != nil {
|
||||||
panicChan <- p
|
// attach call stack to avoid missing in different goroutine
|
||||||
|
panicChan <- fmt.Sprintf("%+v\n\n%s", p, strings.TrimSpace(string(debug.Stack())))
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
done <- fn()
|
done <- fn()
|
||||||
|
|||||||
@@ -74,12 +74,12 @@ func TestConsistentHashIncrementalTransfer(t *testing.T) {
|
|||||||
laterCh := create()
|
laterCh := create()
|
||||||
laterCh.AddWithWeight(node, 10*(i+1))
|
laterCh.AddWithWeight(node, 10*(i+1))
|
||||||
|
|
||||||
for i := 0; i < requestSize; i++ {
|
for j := 0; j < requestSize; j++ {
|
||||||
key, ok := laterCh.Get(requestSize + i)
|
key, ok := laterCh.Get(requestSize + j)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
assert.NotNil(t, key)
|
assert.NotNil(t, key)
|
||||||
value := key.(string)
|
value := key.(string)
|
||||||
assert.True(t, value == keys[i] || value == node)
|
assert.True(t, value == keys[j] || value == node)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,6 +20,11 @@ func TestMd5(t *testing.T) {
|
|||||||
assert.Equal(t, md5Digest, actual)
|
assert.Equal(t, md5Digest, actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMd5Hex(t *testing.T) {
|
||||||
|
actual := Md5Hex([]byte(text))
|
||||||
|
assert.Equal(t, md5Digest, actual)
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkHashFnv(b *testing.B) {
|
func BenchmarkHashFnv(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
h := fnv.New32()
|
h := fnv.New32()
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/globalsign/mgo/bson"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -106,3 +107,20 @@ func TestMilliTime_UnmarshalJSON(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalWithError(t *testing.T) {
|
||||||
|
var mt MilliTime
|
||||||
|
assert.NotNil(t, mt.UnmarshalJSON([]byte("hello")))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSetBSON(t *testing.T) {
|
||||||
|
data, err := bson.Marshal(time.Now())
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
var raw bson.Raw
|
||||||
|
assert.Nil(t, bson.Unmarshal(data, &raw))
|
||||||
|
|
||||||
|
var mt MilliTime
|
||||||
|
assert.Nil(t, mt.SetBSON(raw))
|
||||||
|
assert.NotNil(t, mt.SetBSON(bson.Raw{}))
|
||||||
|
}
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ func TestTokenLimit_Rescue(t *testing.T) {
|
|||||||
rate = 5
|
rate = 5
|
||||||
burst = 10
|
burst = 10
|
||||||
)
|
)
|
||||||
l := NewTokenLimiter(rate, burst, redis.NewRedis(s.Addr(), redis.NodeType), "tokenlimit")
|
l := NewTokenLimiter(rate, burst, redis.New(s.Addr()), "tokenlimit")
|
||||||
s.Close()
|
s.Close()
|
||||||
|
|
||||||
var allowed int
|
var allowed int
|
||||||
|
|||||||
@@ -31,6 +31,8 @@ var (
|
|||||||
|
|
||||||
// default to be enabled
|
// default to be enabled
|
||||||
enabled = syncx.ForAtomicBool(true)
|
enabled = syncx.ForAtomicBool(true)
|
||||||
|
// default to be enabled
|
||||||
|
logEnabled = syncx.ForAtomicBool(true)
|
||||||
// make it a variable for unit test
|
// make it a variable for unit test
|
||||||
systemOverloadChecker = func(cpuThreshold int64) bool {
|
systemOverloadChecker = func(cpuThreshold int64) bool {
|
||||||
return stat.CpuUsage() >= cpuThreshold
|
return stat.CpuUsage() >= cpuThreshold
|
||||||
@@ -80,6 +82,11 @@ func Disable() {
|
|||||||
enabled.Set(false)
|
enabled.Set(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DisableLog disables the stat logs for load shedding.
|
||||||
|
func DisableLog() {
|
||||||
|
logEnabled.Set(false)
|
||||||
|
}
|
||||||
|
|
||||||
// NewAdaptiveShedder returns an adaptive shedder.
|
// NewAdaptiveShedder returns an adaptive shedder.
|
||||||
// opts can be used to customize the Shedder.
|
// opts can be used to customize the Shedder.
|
||||||
func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
|
func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
|
||||||
|
|||||||
@@ -25,6 +25,7 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAdaptiveShedder(t *testing.T) {
|
func TestAdaptiveShedder(t *testing.T) {
|
||||||
|
DisableLog()
|
||||||
shedder := NewAdaptiveShedder(WithWindow(bucketDuration), WithBuckets(buckets), WithCpuThreshold(100))
|
shedder := NewAdaptiveShedder(WithWindow(bucketDuration), WithBuckets(buckets), WithCpuThreshold(100))
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
var drop int64
|
var drop int64
|
||||||
|
|||||||
@@ -48,6 +48,25 @@ func (s *SheddingStat) IncrementDrop() {
|
|||||||
atomic.AddInt64(&s.drop, 1)
|
atomic.AddInt64(&s.drop, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *SheddingStat) loop(c <-chan time.Time) {
|
||||||
|
for range c {
|
||||||
|
st := s.reset()
|
||||||
|
|
||||||
|
if !logEnabled.True() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
c := stat.CpuUsage()
|
||||||
|
if st.Drop == 0 {
|
||||||
|
logx.Statf("(%s) shedding_stat [1m], cpu: %d, total: %d, pass: %d, drop: %d",
|
||||||
|
s.name, c, st.Total, st.Pass, st.Drop)
|
||||||
|
} else {
|
||||||
|
logx.Statf("(%s) shedding_stat_drop [1m], cpu: %d, total: %d, pass: %d, drop: %d",
|
||||||
|
s.name, c, st.Total, st.Pass, st.Drop)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (s *SheddingStat) reset() snapshot {
|
func (s *SheddingStat) reset() snapshot {
|
||||||
return snapshot{
|
return snapshot{
|
||||||
Total: atomic.SwapInt64(&s.total, 0),
|
Total: atomic.SwapInt64(&s.total, 0),
|
||||||
@@ -59,15 +78,6 @@ func (s *SheddingStat) reset() snapshot {
|
|||||||
func (s *SheddingStat) run() {
|
func (s *SheddingStat) run() {
|
||||||
ticker := time.NewTicker(time.Minute)
|
ticker := time.NewTicker(time.Minute)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
for range ticker.C {
|
|
||||||
c := stat.CpuUsage()
|
s.loop(ticker.C)
|
||||||
st := s.reset()
|
|
||||||
if st.Drop == 0 {
|
|
||||||
logx.Statf("(%s) shedding_stat [1m], cpu: %d, total: %d, pass: %d, drop: %d",
|
|
||||||
s.name, c, st.Total, st.Pass, st.Drop)
|
|
||||||
} else {
|
|
||||||
logx.Statf("(%s) shedding_stat_drop [1m], cpu: %d, total: %d, pass: %d, drop: %d",
|
|
||||||
s.name, c, st.Total, st.Pass, st.Drop)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package load
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
@@ -22,3 +23,32 @@ func TestSheddingStat(t *testing.T) {
|
|||||||
assert.Equal(t, int64(5), result.Pass)
|
assert.Equal(t, int64(5), result.Pass)
|
||||||
assert.Equal(t, int64(7), result.Drop)
|
assert.Equal(t, int64(7), result.Drop)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLoopTrue(t *testing.T) {
|
||||||
|
ch := make(chan time.Time, 1)
|
||||||
|
ch <- time.Now()
|
||||||
|
close(ch)
|
||||||
|
st := new(SheddingStat)
|
||||||
|
logEnabled.Set(true)
|
||||||
|
st.loop(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoopTrueAndDrop(t *testing.T) {
|
||||||
|
ch := make(chan time.Time, 1)
|
||||||
|
ch <- time.Now()
|
||||||
|
close(ch)
|
||||||
|
st := new(SheddingStat)
|
||||||
|
st.IncrementDrop()
|
||||||
|
logEnabled.Set(true)
|
||||||
|
st.loop(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoopFalseAndDrop(t *testing.T) {
|
||||||
|
ch := make(chan time.Time, 1)
|
||||||
|
ch <- time.Now()
|
||||||
|
close(ch)
|
||||||
|
st := new(SheddingStat)
|
||||||
|
st.IncrementDrop()
|
||||||
|
logEnabled.Set(false)
|
||||||
|
st.loop(ch)
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,49 +20,67 @@ func WithDuration(d time.Duration) Logger {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Error(v ...interface{}) {
|
func (l *durationLogger) Error(v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
|
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Errorf(format string, v ...interface{}) {
|
func (l *durationLogger) Errorf(format string, v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
|
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *durationLogger) Errorv(v interface{}) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
l.write(errorLog, levelError, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Info(v ...interface{}) {
|
func (l *durationLogger) Info(v ...interface{}) {
|
||||||
if shouldLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
l.write(infoLog, levelInfo, fmt.Sprint(v...))
|
l.write(infoLog, levelInfo, fmt.Sprint(v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Infof(format string, v ...interface{}) {
|
func (l *durationLogger) Infof(format string, v ...interface{}) {
|
||||||
if shouldLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
|
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *durationLogger) Infov(v interface{}) {
|
||||||
|
if shallLog(InfoLevel) {
|
||||||
|
l.write(infoLog, levelInfo, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Slow(v ...interface{}) {
|
func (l *durationLogger) Slow(v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(slowLog, levelSlow, fmt.Sprint(v...))
|
l.write(slowLog, levelSlow, fmt.Sprint(v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Slowf(format string, v ...interface{}) {
|
func (l *durationLogger) Slowf(format string, v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
|
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *durationLogger) Slowv(v interface{}) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
l.write(slowLog, levelSlow, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *durationLogger) WithDuration(duration time.Duration) Logger {
|
func (l *durationLogger) WithDuration(duration time.Duration) Logger {
|
||||||
l.Duration = timex.ReprOfDuration(duration)
|
l.Duration = timex.ReprOfDuration(duration)
|
||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) write(writer io.Writer, level, content string) {
|
func (l *durationLogger) write(writer io.Writer, level string, val interface{}) {
|
||||||
l.Timestamp = getTimestamp()
|
l.Timestamp = getTimestamp()
|
||||||
l.Level = level
|
l.Level = level
|
||||||
l.Content = content
|
l.Content = val
|
||||||
outputJson(writer, logEntry(*l))
|
outputJson(writer, l)
|
||||||
}
|
}
|
||||||
|
|||||||
62
core/logx/limitedexecutor_test.go
Normal file
62
core/logx/limitedexecutor_test.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/timex"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestLimitedExecutor_logOrDiscard(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
threshold time.Duration
|
||||||
|
lastTime time.Duration
|
||||||
|
discarded uint32
|
||||||
|
executed bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "nil executor",
|
||||||
|
executed: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "regular",
|
||||||
|
threshold: time.Hour,
|
||||||
|
lastTime: timex.Now(),
|
||||||
|
discarded: 10,
|
||||||
|
executed: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "slow",
|
||||||
|
threshold: time.Duration(1),
|
||||||
|
lastTime: -1000,
|
||||||
|
discarded: 10,
|
||||||
|
executed: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
executor := newLimitedExecutor(0)
|
||||||
|
executor.threshold = test.threshold
|
||||||
|
executor.discarded = test.discarded
|
||||||
|
executor.lastTime.Set(test.lastTime)
|
||||||
|
|
||||||
|
var run int32
|
||||||
|
executor.logOrDiscard(func() {
|
||||||
|
atomic.AddInt32(&run, 1)
|
||||||
|
})
|
||||||
|
if test.executed {
|
||||||
|
assert.Equal(t, int32(1), atomic.LoadInt32(&run))
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, int32(0), atomic.LoadInt32(&run))
|
||||||
|
assert.Equal(t, test.discarded+1, atomic.LoadUint32(&executor.discarded))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -65,12 +65,14 @@ var (
|
|||||||
timeFormat = "2006-01-02T15:04:05.000Z07"
|
timeFormat = "2006-01-02T15:04:05.000Z07"
|
||||||
writeConsole bool
|
writeConsole bool
|
||||||
logLevel uint32
|
logLevel uint32
|
||||||
infoLog io.WriteCloser
|
// use uint32 for atomic operations
|
||||||
errorLog io.WriteCloser
|
disableStat uint32
|
||||||
severeLog io.WriteCloser
|
infoLog io.WriteCloser
|
||||||
slowLog io.WriteCloser
|
errorLog io.WriteCloser
|
||||||
statLog io.WriteCloser
|
severeLog io.WriteCloser
|
||||||
stackLog io.Writer
|
slowLog io.WriteCloser
|
||||||
|
statLog io.WriteCloser
|
||||||
|
stackLog io.Writer
|
||||||
|
|
||||||
once sync.Once
|
once sync.Once
|
||||||
initialized uint32
|
initialized uint32
|
||||||
@@ -79,10 +81,10 @@ var (
|
|||||||
|
|
||||||
type (
|
type (
|
||||||
logEntry struct {
|
logEntry struct {
|
||||||
Timestamp string `json:"@timestamp"`
|
Timestamp string `json:"@timestamp"`
|
||||||
Level string `json:"level"`
|
Level string `json:"level"`
|
||||||
Duration string `json:"duration,omitempty"`
|
Duration string `json:"duration,omitempty"`
|
||||||
Content string `json:"content"`
|
Content interface{} `json:"content"`
|
||||||
}
|
}
|
||||||
|
|
||||||
logOptions struct {
|
logOptions struct {
|
||||||
@@ -98,10 +100,13 @@ type (
|
|||||||
Logger interface {
|
Logger interface {
|
||||||
Error(...interface{})
|
Error(...interface{})
|
||||||
Errorf(string, ...interface{})
|
Errorf(string, ...interface{})
|
||||||
|
Errorv(interface{})
|
||||||
Info(...interface{})
|
Info(...interface{})
|
||||||
Infof(string, ...interface{})
|
Infof(string, ...interface{})
|
||||||
|
Infov(interface{})
|
||||||
Slow(...interface{})
|
Slow(...interface{})
|
||||||
Slowf(string, ...interface{})
|
Slowf(string, ...interface{})
|
||||||
|
Slowv(interface{})
|
||||||
WithDuration(time.Duration) Logger
|
WithDuration(time.Duration) Logger
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -133,7 +138,7 @@ func SetUp(c LogConf) error {
|
|||||||
|
|
||||||
// Alert alerts v in alert level, and the message is written to error log.
|
// Alert alerts v in alert level, and the message is written to error log.
|
||||||
func Alert(v string) {
|
func Alert(v string) {
|
||||||
output(errorLog, levelAlert, v)
|
outputText(errorLog, levelAlert, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close closes the logging.
|
// Close closes the logging.
|
||||||
@@ -195,24 +200,29 @@ func Disable() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DisableStat disables the stat logs.
|
||||||
|
func DisableStat() {
|
||||||
|
atomic.StoreUint32(&disableStat, 1)
|
||||||
|
}
|
||||||
|
|
||||||
// Error writes v into error log.
|
// Error writes v into error log.
|
||||||
func Error(v ...interface{}) {
|
func Error(v ...interface{}) {
|
||||||
ErrorCaller(1, v...)
|
ErrorCaller(1, v...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errorf writes v with format into error log.
|
|
||||||
func Errorf(format string, v ...interface{}) {
|
|
||||||
ErrorCallerf(1, format, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorCaller writes v with context into error log.
|
// ErrorCaller writes v with context into error log.
|
||||||
func ErrorCaller(callDepth int, v ...interface{}) {
|
func ErrorCaller(callDepth int, v ...interface{}) {
|
||||||
errorSync(fmt.Sprint(v...), callDepth+callerInnerDepth)
|
errorTextSync(fmt.Sprint(v...), callDepth+callerInnerDepth)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorCallerf writes v with context in format into error log.
|
// ErrorCallerf writes v with context in format into error log.
|
||||||
func ErrorCallerf(callDepth int, format string, v ...interface{}) {
|
func ErrorCallerf(callDepth int, format string, v ...interface{}) {
|
||||||
errorSync(fmt.Sprintf(format, v...), callDepth+callerInnerDepth)
|
errorTextSync(fmt.Sprintf(format, v...), callDepth+callerInnerDepth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorf writes v with format into error log.
|
||||||
|
func Errorf(format string, v ...interface{}) {
|
||||||
|
ErrorCallerf(1, format, v...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorStack writes v along with call stack into error log.
|
// ErrorStack writes v along with call stack into error log.
|
||||||
@@ -227,14 +237,25 @@ func ErrorStackf(format string, v ...interface{}) {
|
|||||||
stackSync(fmt.Sprintf(format, v...))
|
stackSync(fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Errorv writes v into error log with json content.
|
||||||
|
// No call stack attached, because not elegant to pack the messages.
|
||||||
|
func Errorv(v interface{}) {
|
||||||
|
errorAnySync(v)
|
||||||
|
}
|
||||||
|
|
||||||
// Info writes v into access log.
|
// Info writes v into access log.
|
||||||
func Info(v ...interface{}) {
|
func Info(v ...interface{}) {
|
||||||
infoSync(fmt.Sprint(v...))
|
infoTextSync(fmt.Sprint(v...))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Infof writes v with format into access log.
|
// Infof writes v with format into access log.
|
||||||
func Infof(format string, v ...interface{}) {
|
func Infof(format string, v ...interface{}) {
|
||||||
infoSync(fmt.Sprintf(format, v...))
|
infoTextSync(fmt.Sprintf(format, v...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infov writes v into access log with json content.
|
||||||
|
func Infov(v interface{}) {
|
||||||
|
infoAnySync(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Must checks if err is nil, otherwise logs the err and exits.
|
// Must checks if err is nil, otherwise logs the err and exits.
|
||||||
@@ -242,7 +263,7 @@ func Must(err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
msg := formatWithCaller(err.Error(), 3)
|
msg := formatWithCaller(err.Error(), 3)
|
||||||
log.Print(msg)
|
log.Print(msg)
|
||||||
output(severeLog, levelFatal, msg)
|
outputText(severeLog, levelFatal, msg)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -264,12 +285,17 @@ func Severef(format string, v ...interface{}) {
|
|||||||
|
|
||||||
// Slow writes v into slow log.
|
// Slow writes v into slow log.
|
||||||
func Slow(v ...interface{}) {
|
func Slow(v ...interface{}) {
|
||||||
slowSync(fmt.Sprint(v...))
|
slowTextSync(fmt.Sprint(v...))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slowf writes v with format into slow log.
|
// Slowf writes v with format into slow log.
|
||||||
func Slowf(format string, v ...interface{}) {
|
func Slowf(format string, v ...interface{}) {
|
||||||
slowSync(fmt.Sprintf(format, v...))
|
slowTextSync(fmt.Sprintf(format, v...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slowv writes v into slow log with json content.
|
||||||
|
func Slowv(v interface{}) {
|
||||||
|
slowAnySync(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stat writes v into stat log.
|
// Stat writes v into stat log.
|
||||||
@@ -312,8 +338,14 @@ func createOutput(path string) (io.WriteCloser, error) {
|
|||||||
options.gzipEnabled), options.gzipEnabled)
|
options.gzipEnabled), options.gzipEnabled)
|
||||||
}
|
}
|
||||||
|
|
||||||
func errorSync(msg string, callDepth int) {
|
func errorAnySync(v interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
|
outputAny(errorLog, levelError, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorTextSync(msg string, callDepth int) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
outputError(errorLog, msg, callDepth)
|
outputError(errorLog, msg, callDepth)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -362,13 +394,28 @@ func handleOptions(opts []LogOption) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func infoSync(msg string) {
|
func infoAnySync(val interface{}) {
|
||||||
if shouldLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
output(infoLog, levelInfo, msg)
|
outputAny(infoLog, levelInfo, val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func output(writer io.Writer, level, msg string) {
|
func infoTextSync(msg string) {
|
||||||
|
if shallLog(InfoLevel) {
|
||||||
|
outputText(infoLog, levelInfo, msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func outputAny(writer io.Writer, level string, val interface{}) {
|
||||||
|
info := logEntry{
|
||||||
|
Timestamp: getTimestamp(),
|
||||||
|
Level: level,
|
||||||
|
Content: val,
|
||||||
|
}
|
||||||
|
outputJson(writer, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
func outputText(writer io.Writer, level, msg string) {
|
||||||
info := logEntry{
|
info := logEntry{
|
||||||
Timestamp: getTimestamp(),
|
Timestamp: getTimestamp(),
|
||||||
Level: level,
|
Level: level,
|
||||||
@@ -379,7 +426,7 @@ func output(writer io.Writer, level, msg string) {
|
|||||||
|
|
||||||
func outputError(writer io.Writer, msg string, callDepth int) {
|
func outputError(writer io.Writer, msg string, callDepth int) {
|
||||||
content := formatWithCaller(msg, callDepth)
|
content := formatWithCaller(msg, callDepth)
|
||||||
output(writer, levelError, content)
|
outputText(writer, levelError, content)
|
||||||
}
|
}
|
||||||
|
|
||||||
func outputJson(writer io.Writer, info interface{}) {
|
func outputJson(writer io.Writer, info interface{}) {
|
||||||
@@ -481,30 +528,40 @@ func setupWithVolume(c LogConf) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func severeSync(msg string) {
|
func severeSync(msg string) {
|
||||||
if shouldLog(SevereLevel) {
|
if shallLog(SevereLevel) {
|
||||||
output(severeLog, levelSevere, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
outputText(severeLog, levelSevere, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func shouldLog(level uint32) bool {
|
func shallLog(level uint32) bool {
|
||||||
return atomic.LoadUint32(&logLevel) <= level
|
return atomic.LoadUint32(&logLevel) <= level
|
||||||
}
|
}
|
||||||
|
|
||||||
func slowSync(msg string) {
|
func shallLogStat() bool {
|
||||||
if shouldLog(ErrorLevel) {
|
return atomic.LoadUint32(&disableStat) == 0
|
||||||
output(slowLog, levelSlow, msg)
|
}
|
||||||
|
|
||||||
|
func slowAnySync(v interface{}) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
outputAny(slowLog, levelSlow, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func slowTextSync(msg string) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
outputText(slowLog, levelSlow, msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func stackSync(msg string) {
|
func stackSync(msg string) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
output(stackLog, levelError, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
outputText(stackLog, levelError, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func statSync(msg string) {
|
func statSync(msg string) {
|
||||||
if shouldLog(InfoLevel) {
|
if shallLogStat() && shallLog(InfoLevel) {
|
||||||
output(statLog, levelStat, msg)
|
outputText(statLog, levelStat, msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -92,6 +92,30 @@ func TestStructedLogAlert(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogError(t *testing.T) {
|
||||||
|
doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
|
||||||
|
errorLog = writer
|
||||||
|
}, func(v ...interface{}) {
|
||||||
|
Error(v...)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStructedLogErrorf(t *testing.T) {
|
||||||
|
doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
|
||||||
|
errorLog = writer
|
||||||
|
}, func(v ...interface{}) {
|
||||||
|
Errorf("%s", fmt.Sprint(v...))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStructedLogErrorv(t *testing.T) {
|
||||||
|
doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
|
||||||
|
errorLog = writer
|
||||||
|
}, func(v ...interface{}) {
|
||||||
|
Errorv(fmt.Sprint(v...))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogInfo(t *testing.T) {
|
func TestStructedLogInfo(t *testing.T) {
|
||||||
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
|
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
|
||||||
infoLog = writer
|
infoLog = writer
|
||||||
@@ -100,6 +124,22 @@ func TestStructedLogInfo(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogInfof(t *testing.T) {
|
||||||
|
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
|
||||||
|
infoLog = writer
|
||||||
|
}, func(v ...interface{}) {
|
||||||
|
Infof("%s", fmt.Sprint(v...))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStructedLogInfov(t *testing.T) {
|
||||||
|
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
|
||||||
|
infoLog = writer
|
||||||
|
}, func(v ...interface{}) {
|
||||||
|
Infov(fmt.Sprint(v...))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogSlow(t *testing.T) {
|
func TestStructedLogSlow(t *testing.T) {
|
||||||
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
|
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
|
||||||
slowLog = writer
|
slowLog = writer
|
||||||
@@ -116,6 +156,14 @@ func TestStructedLogSlowf(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogSlowv(t *testing.T) {
|
||||||
|
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
|
||||||
|
slowLog = writer
|
||||||
|
}, func(v ...interface{}) {
|
||||||
|
Slowv(fmt.Sprint(v...))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogStat(t *testing.T) {
|
func TestStructedLogStat(t *testing.T) {
|
||||||
doTestStructedLog(t, levelStat, func(writer io.WriteCloser) {
|
doTestStructedLog(t, levelStat, func(writer io.WriteCloser) {
|
||||||
statLog = writer
|
statLog = writer
|
||||||
@@ -246,6 +294,17 @@ func TestDisable(t *testing.T) {
|
|||||||
assert.Nil(t, Close())
|
assert.Nil(t, Close())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDisableStat(t *testing.T) {
|
||||||
|
DisableStat()
|
||||||
|
|
||||||
|
const message = "hello there"
|
||||||
|
writer := new(mockWriter)
|
||||||
|
statLog = writer
|
||||||
|
atomic.StoreUint32(&initialized, 1)
|
||||||
|
Stat(message)
|
||||||
|
assert.Equal(t, 0, writer.builder.Len())
|
||||||
|
}
|
||||||
|
|
||||||
func TestWithGzip(t *testing.T) {
|
func TestWithGzip(t *testing.T) {
|
||||||
fn := WithGzip()
|
fn := WithGzip()
|
||||||
var opt logOptions
|
var opt logOptions
|
||||||
@@ -357,7 +416,9 @@ func doTestStructedLog(t *testing.T, level string, setup func(writer io.WriteClo
|
|||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
assert.Equal(t, level, entry.Level)
|
assert.Equal(t, level, entry.Level)
|
||||||
assert.True(t, strings.Contains(entry.Content, message))
|
val, ok := entry.Content.(string)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.True(t, strings.Contains(val, message))
|
||||||
}
|
}
|
||||||
|
|
||||||
func testSetLevelTwiceWithMode(t *testing.T, mode string) {
|
func testSetLevelTwiceWithMode(t *testing.T, mode string) {
|
||||||
|
|||||||
@@ -47,7 +47,6 @@ type (
|
|||||||
done chan lang.PlaceholderType
|
done chan lang.PlaceholderType
|
||||||
rule RotateRule
|
rule RotateRule
|
||||||
compress bool
|
compress bool
|
||||||
keepDays int
|
|
||||||
// can't use threading.RoutineGroup because of cycle import
|
// can't use threading.RoutineGroup because of cycle import
|
||||||
waitGroup sync.WaitGroup
|
waitGroup sync.WaitGroup
|
||||||
closeOnce sync.Once
|
closeOnce sync.Once
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package logx
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -97,7 +98,13 @@ func TestRotateLoggerRotate(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
err = logger.rotate()
|
err = logger.rotate()
|
||||||
assert.Nil(t, err)
|
switch v := err.(type) {
|
||||||
|
case *os.LinkError:
|
||||||
|
// avoid rename error on docker container
|
||||||
|
assert.Equal(t, syscall.EXDEV, v.Err)
|
||||||
|
default:
|
||||||
|
assert.Nil(t, err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRotateLoggerWrite(t *testing.T) {
|
func TestRotateLoggerWrite(t *testing.T) {
|
||||||
|
|||||||
@@ -44,5 +44,5 @@ func captureOutput(f func()) string {
|
|||||||
func getContent(jsonStr string) string {
|
func getContent(jsonStr string) string {
|
||||||
var entry logEntry
|
var entry logEntry
|
||||||
json.Unmarshal([]byte(jsonStr), &entry)
|
json.Unmarshal([]byte(jsonStr), &entry)
|
||||||
return entry.Content
|
return entry.Content.(string)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/tal-tech/go-zero/core/timex"
|
"github.com/tal-tech/go-zero/core/timex"
|
||||||
"github.com/tal-tech/go-zero/core/trace/tracespec"
|
"go.opentelemetry.io/otel/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
type traceLogger struct {
|
type traceLogger struct {
|
||||||
@@ -18,50 +18,68 @@ type traceLogger struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Error(v ...interface{}) {
|
func (l *traceLogger) Error(v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
|
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Errorf(format string, v ...interface{}) {
|
func (l *traceLogger) Errorf(format string, v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
|
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *traceLogger) Errorv(v interface{}) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
l.write(errorLog, levelError, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Info(v ...interface{}) {
|
func (l *traceLogger) Info(v ...interface{}) {
|
||||||
if shouldLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
l.write(infoLog, levelInfo, fmt.Sprint(v...))
|
l.write(infoLog, levelInfo, fmt.Sprint(v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Infof(format string, v ...interface{}) {
|
func (l *traceLogger) Infof(format string, v ...interface{}) {
|
||||||
if shouldLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
|
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *traceLogger) Infov(v interface{}) {
|
||||||
|
if shallLog(InfoLevel) {
|
||||||
|
l.write(infoLog, levelInfo, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Slow(v ...interface{}) {
|
func (l *traceLogger) Slow(v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(slowLog, levelSlow, fmt.Sprint(v...))
|
l.write(slowLog, levelSlow, fmt.Sprint(v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Slowf(format string, v ...interface{}) {
|
func (l *traceLogger) Slowf(format string, v ...interface{}) {
|
||||||
if shouldLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
|
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *traceLogger) Slowv(v interface{}) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
l.write(slowLog, levelSlow, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *traceLogger) WithDuration(duration time.Duration) Logger {
|
func (l *traceLogger) WithDuration(duration time.Duration) Logger {
|
||||||
l.Duration = timex.ReprOfDuration(duration)
|
l.Duration = timex.ReprOfDuration(duration)
|
||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) write(writer io.Writer, level, content string) {
|
func (l *traceLogger) write(writer io.Writer, level string, val interface{}) {
|
||||||
l.Timestamp = getTimestamp()
|
l.Timestamp = getTimestamp()
|
||||||
l.Level = level
|
l.Level = level
|
||||||
l.Content = content
|
l.Content = val
|
||||||
l.Trace = traceIdFromContext(l.ctx)
|
l.Trace = traceIdFromContext(l.ctx)
|
||||||
l.Span = spanIdFromContext(l.ctx)
|
l.Span = spanIdFromContext(l.ctx)
|
||||||
outputJson(writer, l)
|
outputJson(writer, l)
|
||||||
@@ -75,19 +93,19 @@ func WithContext(ctx context.Context) Logger {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func spanIdFromContext(ctx context.Context) string {
|
func spanIdFromContext(ctx context.Context) string {
|
||||||
t, ok := ctx.Value(tracespec.TracingKey).(tracespec.Trace)
|
spanCtx := trace.SpanContextFromContext(ctx)
|
||||||
if !ok {
|
if spanCtx.HasSpanID() {
|
||||||
return ""
|
return spanCtx.SpanID().String()
|
||||||
}
|
}
|
||||||
|
|
||||||
return t.SpanId()
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func traceIdFromContext(ctx context.Context) string {
|
func traceIdFromContext(ctx context.Context) string {
|
||||||
t, ok := ctx.Value(tracespec.TracingKey).(tracespec.Trace)
|
spanCtx := trace.SpanContextFromContext(ctx)
|
||||||
if !ok {
|
if spanCtx.HasTraceID() {
|
||||||
return ""
|
return spanCtx.TraceID().String()
|
||||||
}
|
}
|
||||||
|
|
||||||
return t.TraceId()
|
return ""
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,71 +9,90 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tal-tech/go-zero/core/trace/tracespec"
|
"go.opentelemetry.io/otel"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
mockTraceID = "mock-trace-id"
|
traceKey = "trace"
|
||||||
mockSpanID = "mock-span-id"
|
spanKey = "span"
|
||||||
)
|
)
|
||||||
|
|
||||||
var mock tracespec.Trace = new(mockTrace)
|
|
||||||
|
|
||||||
func TestTraceLog(t *testing.T) {
|
func TestTraceLog(t *testing.T) {
|
||||||
var buf mockWriter
|
var buf mockWriter
|
||||||
atomic.StoreUint32(&initialized, 1)
|
atomic.StoreUint32(&initialized, 1)
|
||||||
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
|
otp := otel.GetTracerProvider()
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
|
otel.SetTracerProvider(tp)
|
||||||
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
|
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
WithContext(ctx).(*traceLogger).write(&buf, levelInfo, testlog)
|
WithContext(ctx).(*traceLogger).write(&buf, levelInfo, testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), mockTraceID))
|
assert.True(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), mockSpanID))
|
assert.True(t, strings.Contains(buf.String(), spanKey))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceError(t *testing.T) {
|
func TestTraceError(t *testing.T) {
|
||||||
var buf mockWriter
|
var buf mockWriter
|
||||||
atomic.StoreUint32(&initialized, 1)
|
atomic.StoreUint32(&initialized, 1)
|
||||||
errorLog = newLogWriter(log.New(&buf, "", flags))
|
errorLog = newLogWriter(log.New(&buf, "", flags))
|
||||||
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
|
otp := otel.GetTracerProvider()
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
|
otel.SetTracerProvider(tp)
|
||||||
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
|
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
l := WithContext(ctx).(*traceLogger)
|
l := WithContext(ctx).(*traceLogger)
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
l.WithDuration(time.Second).Error(testlog)
|
l.WithDuration(time.Second).Error(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), mockTraceID))
|
assert.True(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), mockSpanID))
|
assert.True(t, strings.Contains(buf.String(), spanKey))
|
||||||
buf.Reset()
|
buf.Reset()
|
||||||
l.WithDuration(time.Second).Errorf(testlog)
|
l.WithDuration(time.Second).Errorf(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), mockTraceID))
|
assert.True(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), mockSpanID))
|
assert.True(t, strings.Contains(buf.String(), spanKey))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceInfo(t *testing.T) {
|
func TestTraceInfo(t *testing.T) {
|
||||||
var buf mockWriter
|
var buf mockWriter
|
||||||
atomic.StoreUint32(&initialized, 1)
|
atomic.StoreUint32(&initialized, 1)
|
||||||
infoLog = newLogWriter(log.New(&buf, "", flags))
|
infoLog = newLogWriter(log.New(&buf, "", flags))
|
||||||
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
|
otp := otel.GetTracerProvider()
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
|
otel.SetTracerProvider(tp)
|
||||||
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
|
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
l := WithContext(ctx).(*traceLogger)
|
l := WithContext(ctx).(*traceLogger)
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
l.WithDuration(time.Second).Info(testlog)
|
l.WithDuration(time.Second).Info(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), mockTraceID))
|
assert.True(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), mockSpanID))
|
assert.True(t, strings.Contains(buf.String(), spanKey))
|
||||||
buf.Reset()
|
buf.Reset()
|
||||||
l.WithDuration(time.Second).Infof(testlog)
|
l.WithDuration(time.Second).Infof(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), mockTraceID))
|
assert.True(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), mockSpanID))
|
assert.True(t, strings.Contains(buf.String(), spanKey))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceSlow(t *testing.T) {
|
func TestTraceSlow(t *testing.T) {
|
||||||
var buf mockWriter
|
var buf mockWriter
|
||||||
atomic.StoreUint32(&initialized, 1)
|
atomic.StoreUint32(&initialized, 1)
|
||||||
slowLog = newLogWriter(log.New(&buf, "", flags))
|
slowLog = newLogWriter(log.New(&buf, "", flags))
|
||||||
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
|
otp := otel.GetTracerProvider()
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
|
otel.SetTracerProvider(tp)
|
||||||
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
|
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
l := WithContext(ctx).(*traceLogger)
|
l := WithContext(ctx).(*traceLogger)
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
l.WithDuration(time.Second).Slow(testlog)
|
l.WithDuration(time.Second).Slow(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), mockTraceID))
|
assert.True(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), mockSpanID))
|
assert.True(t, strings.Contains(buf.String(), spanKey))
|
||||||
buf.Reset()
|
buf.Reset()
|
||||||
l.WithDuration(time.Second).Slowf(testlog)
|
l.WithDuration(time.Second).Slowf(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), mockTraceID))
|
assert.True(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), mockSpanID))
|
assert.True(t, strings.Contains(buf.String(), spanKey))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceWithoutContext(t *testing.T) {
|
func TestTraceWithoutContext(t *testing.T) {
|
||||||
@@ -83,34 +102,10 @@ func TestTraceWithoutContext(t *testing.T) {
|
|||||||
l := WithContext(context.Background()).(*traceLogger)
|
l := WithContext(context.Background()).(*traceLogger)
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
l.WithDuration(time.Second).Info(testlog)
|
l.WithDuration(time.Second).Info(testlog)
|
||||||
assert.False(t, strings.Contains(buf.String(), mockTraceID))
|
assert.False(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.False(t, strings.Contains(buf.String(), mockSpanID))
|
assert.False(t, strings.Contains(buf.String(), spanKey))
|
||||||
buf.Reset()
|
buf.Reset()
|
||||||
l.WithDuration(time.Second).Infof(testlog)
|
l.WithDuration(time.Second).Infof(testlog)
|
||||||
assert.False(t, strings.Contains(buf.String(), mockTraceID))
|
assert.False(t, strings.Contains(buf.String(), traceKey))
|
||||||
assert.False(t, strings.Contains(buf.String(), mockSpanID))
|
assert.False(t, strings.Contains(buf.String(), spanKey))
|
||||||
}
|
|
||||||
|
|
||||||
type mockTrace struct{}
|
|
||||||
|
|
||||||
func (t mockTrace) TraceId() string {
|
|
||||||
return mockTraceID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t mockTrace) SpanId() string {
|
|
||||||
return mockSpanID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t mockTrace) Finish() {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t mockTrace) Fork(ctx context.Context, serviceName, operationName string) (context.Context, tracespec.Trace) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t mockTrace) Follow(ctx context.Context, serviceName, operationName string) (context.Context, tracespec.Trace) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t mockTrace) Visit(fn func(key, val string) bool) {
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,7 +43,8 @@ type (
|
|||||||
UnmarshalOption func(*unmarshalOptions)
|
UnmarshalOption func(*unmarshalOptions)
|
||||||
|
|
||||||
unmarshalOptions struct {
|
unmarshalOptions struct {
|
||||||
fromString bool
|
fromString bool
|
||||||
|
canonicalKey func(key string) string
|
||||||
}
|
}
|
||||||
|
|
||||||
keyCache map[string][]string
|
keyCache map[string][]string
|
||||||
@@ -229,7 +230,7 @@ func (u *Unmarshaler) processFieldPrimitive(field reflect.StructField, value ref
|
|||||||
default:
|
default:
|
||||||
switch v := mapValue.(type) {
|
switch v := mapValue.(type) {
|
||||||
case json.Number:
|
case json.Number:
|
||||||
return u.processFieldPrimitiveWithJsonNumber(field, value, v, opts, fullName)
|
return u.processFieldPrimitiveWithJSONNumber(field, value, v, opts, fullName)
|
||||||
default:
|
default:
|
||||||
if typeKind == valueKind {
|
if typeKind == valueKind {
|
||||||
if err := validateValueInOptions(opts.options(), mapValue); err != nil {
|
if err := validateValueInOptions(opts.options(), mapValue); err != nil {
|
||||||
@@ -244,7 +245,7 @@ func (u *Unmarshaler) processFieldPrimitive(field reflect.StructField, value ref
|
|||||||
return newTypeMismatchError(fullName)
|
return newTypeMismatchError(fullName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *Unmarshaler) processFieldPrimitiveWithJsonNumber(field reflect.StructField, value reflect.Value,
|
func (u *Unmarshaler) processFieldPrimitiveWithJSONNumber(field reflect.StructField, value reflect.Value,
|
||||||
v json.Number, opts *fieldOptionsWithContext, fullName string) error {
|
v json.Number, opts *fieldOptionsWithContext, fullName string) error {
|
||||||
fieldType := field.Type
|
fieldType := field.Type
|
||||||
fieldKind := fieldType.Kind()
|
fieldKind := fieldType.Kind()
|
||||||
@@ -323,7 +324,11 @@ func (u *Unmarshaler) processNamedField(field reflect.StructField, value reflect
|
|||||||
}
|
}
|
||||||
|
|
||||||
fullName = join(fullName, key)
|
fullName = join(fullName, key)
|
||||||
mapValue, hasValue := getValue(m, key)
|
canonicalKey := key
|
||||||
|
if u.opts.canonicalKey != nil {
|
||||||
|
canonicalKey = u.opts.canonicalKey(key)
|
||||||
|
}
|
||||||
|
mapValue, hasValue := getValue(m, canonicalKey)
|
||||||
if hasValue {
|
if hasValue {
|
||||||
return u.processNamedFieldWithValue(field, value, mapValue, key, opts, fullName)
|
return u.processNamedFieldWithValue(field, value, mapValue, key, opts, fullName)
|
||||||
}
|
}
|
||||||
@@ -513,14 +518,14 @@ func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int, baseKind re
|
|||||||
target.Set(reflect.ValueOf(value))
|
target.Set(reflect.ValueOf(value))
|
||||||
ithVal.Set(target.Addr())
|
ithVal.Set(target.Addr())
|
||||||
return nil
|
return nil
|
||||||
} else {
|
|
||||||
if ithVal.Kind() != reflect.TypeOf(value).Kind() {
|
|
||||||
return errTypeMismatch
|
|
||||||
}
|
|
||||||
|
|
||||||
ithVal.Set(reflect.ValueOf(value))
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ithVal.Kind() != reflect.TypeOf(value).Kind() {
|
||||||
|
return errTypeMismatch
|
||||||
|
}
|
||||||
|
|
||||||
|
ithVal.Set(reflect.ValueOf(value))
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -621,6 +626,13 @@ func WithStringValues() UnmarshalOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithCanonicalKeyFunc customizes a Unmarshaler with Canonical Key func
|
||||||
|
func WithCanonicalKeyFunc(f func(string) string) UnmarshalOption {
|
||||||
|
return func(opt *unmarshalOptions) {
|
||||||
|
opt.canonicalKey = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func fillDurationValue(fieldKind reflect.Kind, value reflect.Value, dur string) error {
|
func fillDurationValue(fieldKind reflect.Kind, value reflect.Value, dur string) error {
|
||||||
d, err := time.ParseDuration(dur)
|
d, err := time.ParseDuration(dur)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -209,6 +209,12 @@ func TestRepr(t *testing.T) {
|
|||||||
newMockPtr(),
|
newMockPtr(),
|
||||||
"mockptr",
|
"mockptr",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
&mockOpacity{
|
||||||
|
val: 1,
|
||||||
|
},
|
||||||
|
"{1}",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
true,
|
true,
|
||||||
"true",
|
"true",
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"k8s.io/utils/io"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestUnmarshalYamlBytes(t *testing.T) {
|
func TestUnmarshalYamlBytes(t *testing.T) {
|
||||||
@@ -18,6 +19,22 @@ func TestUnmarshalYamlBytes(t *testing.T) {
|
|||||||
assert.Equal(t, "liao", c.Name)
|
assert.Equal(t, "liao", c.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalYamlBytesErrorInput(t *testing.T) {
|
||||||
|
var c struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
content := []byte(`liao`)
|
||||||
|
assert.NotNil(t, UnmarshalYamlBytes(content, &c))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalYamlBytesEmptyInput(t *testing.T) {
|
||||||
|
var c struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
content := []byte(``)
|
||||||
|
assert.NotNil(t, UnmarshalYamlBytes(content, &c))
|
||||||
|
}
|
||||||
|
|
||||||
func TestUnmarshalYamlBytesOptional(t *testing.T) {
|
func TestUnmarshalYamlBytesOptional(t *testing.T) {
|
||||||
var c struct {
|
var c struct {
|
||||||
Name string
|
Name string
|
||||||
@@ -918,3 +935,18 @@ func TestUnmarshalYamlReaderError(t *testing.T) {
|
|||||||
err := UnmarshalYamlReader(reader, &v)
|
err := UnmarshalYamlReader(reader, &v)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalYamlBadReader(t *testing.T) {
|
||||||
|
var v struct {
|
||||||
|
Any string
|
||||||
|
}
|
||||||
|
|
||||||
|
err := UnmarshalYamlReader(new(badReader), &v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
type badReader struct{}
|
||||||
|
|
||||||
|
func (b *badReader) Read(p []byte) (n int, err error) {
|
||||||
|
return 0, io.ErrLimitReached
|
||||||
|
}
|
||||||
|
|||||||
@@ -112,6 +112,12 @@ func MapReduceWithSource(source <-chan interface{}, mapper MapperFunc, reducer R
|
|||||||
opts ...Option) (interface{}, error) {
|
opts ...Option) (interface{}, error) {
|
||||||
options := buildOptions(opts...)
|
options := buildOptions(opts...)
|
||||||
output := make(chan interface{})
|
output := make(chan interface{})
|
||||||
|
defer func() {
|
||||||
|
for range output {
|
||||||
|
panic("more than one element written in reducer")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
collector := make(chan interface{}, options.workers)
|
collector := make(chan interface{}, options.workers)
|
||||||
done := syncx.NewDoneChan()
|
done := syncx.NewDoneChan()
|
||||||
writer := newGuardedWriter(output, done.Done())
|
writer := newGuardedWriter(output, done.Done())
|
||||||
|
|||||||
@@ -202,6 +202,22 @@ func TestMapReduce(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMapReduceWithReduerWriteMoreThanOnce(t *testing.T) {
|
||||||
|
assert.Panics(t, func() {
|
||||||
|
MapReduce(func(source chan<- interface{}) {
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
source <- i
|
||||||
|
}
|
||||||
|
}, func(item interface{}, writer Writer, cancel func(error)) {
|
||||||
|
writer.Write(item)
|
||||||
|
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) {
|
||||||
|
drain(pipe)
|
||||||
|
writer.Write("one")
|
||||||
|
writer.Write("two")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestMapReduceVoid(t *testing.T) {
|
func TestMapReduceVoid(t *testing.T) {
|
||||||
var value uint32
|
var value uint32
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build windows
|
||||||
// +build windows
|
// +build windows
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux || darwin
|
||||||
// +build linux darwin
|
// +build linux darwin
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build windows
|
||||||
// +build windows
|
// +build windows
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux || darwin
|
||||||
// +build linux darwin
|
// +build linux darwin
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build windows
|
||||||
// +build windows
|
// +build windows
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux || darwin
|
||||||
// +build linux darwin
|
// +build linux darwin
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
|
|||||||
10
core/proc/signals+polyfill.go
Normal file
10
core/proc/signals+polyfill.go
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
//go:build windows
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package proc
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
func Done() <-chan struct{} {
|
||||||
|
return context.Background().Done()
|
||||||
|
}
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux || darwin
|
||||||
// +build linux darwin
|
// +build linux darwin
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
@@ -12,6 +13,8 @@ import (
|
|||||||
|
|
||||||
const timeFormat = "0102150405"
|
const timeFormat = "0102150405"
|
||||||
|
|
||||||
|
var done = make(chan struct{})
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
go func() {
|
go func() {
|
||||||
var profiler Stopper
|
var profiler Stopper
|
||||||
@@ -33,6 +36,13 @@ func init() {
|
|||||||
profiler = nil
|
profiler = nil
|
||||||
}
|
}
|
||||||
case syscall.SIGTERM:
|
case syscall.SIGTERM:
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
// already closed
|
||||||
|
default:
|
||||||
|
close(done)
|
||||||
|
}
|
||||||
|
|
||||||
gracefulStop(signals)
|
gracefulStop(signals)
|
||||||
default:
|
default:
|
||||||
logx.Error("Got unregistered signal:", v)
|
logx.Error("Got unregistered signal:", v)
|
||||||
@@ -40,3 +50,8 @@ func init() {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Done returns the channel that notifies the process quitting.
|
||||||
|
func Done() <-chan struct{} {
|
||||||
|
return done
|
||||||
|
}
|
||||||
|
|||||||
16
core/proc/signals_test.go
Normal file
16
core/proc/signals_test.go
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
package proc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDone(t *testing.T) {
|
||||||
|
select {
|
||||||
|
case <-Done():
|
||||||
|
assert.Fail(t, "should run")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
assert.NotNil(t, Done())
|
||||||
|
}
|
||||||
@@ -23,11 +23,11 @@ func Enabled() bool {
|
|||||||
|
|
||||||
// StartAgent starts a prometheus agent.
|
// StartAgent starts a prometheus agent.
|
||||||
func StartAgent(c Config) {
|
func StartAgent(c Config) {
|
||||||
once.Do(func() {
|
if len(c.Host) == 0 {
|
||||||
if len(c.Host) == 0 {
|
return
|
||||||
return
|
}
|
||||||
}
|
|
||||||
|
|
||||||
|
once.Do(func() {
|
||||||
enabled.Set(true)
|
enabled.Set(true)
|
||||||
threading.GoSafe(func() {
|
threading.GoSafe(func() {
|
||||||
http.Handle(c.Path, promhttp.Handler())
|
http.Handle(c.Path, promhttp.Handler())
|
||||||
|
|||||||
32
core/retry/backoff/backoff.go
Normal file
32
core/retry/backoff/backoff.go
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Func defines the method to calculate how long to retry.
|
||||||
|
type Func func(attempt int) time.Duration
|
||||||
|
|
||||||
|
// LinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
|
||||||
|
func LinearWithJitter(waitBetween time.Duration, jitterFraction float64) Func {
|
||||||
|
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
return func(attempt int) time.Duration {
|
||||||
|
multiplier := jitterFraction * (r.Float64()*2 - 1)
|
||||||
|
return time.Duration(float64(waitBetween) * (1 + multiplier))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interval it waits for a fixed period of time between calls.
|
||||||
|
func Interval(interval time.Duration) Func {
|
||||||
|
return func(attempt int) time.Duration {
|
||||||
|
return interval
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exponential produces increasing intervals for each attempt.
|
||||||
|
func Exponential(scalar time.Duration) Func {
|
||||||
|
return func(attempt int) time.Duration {
|
||||||
|
return scalar * time.Duration((1<<attempt)>>1)
|
||||||
|
}
|
||||||
|
}
|
||||||
30
core/retry/backoff/backoff_test.go
Normal file
30
core/retry/backoff/backoff_test.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWaitBetween(t *testing.T) {
|
||||||
|
fn := Interval(time.Second)
|
||||||
|
assert.EqualValues(t, time.Second, fn(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExponential(t *testing.T) {
|
||||||
|
fn := Exponential(time.Second)
|
||||||
|
assert.EqualValues(t, time.Second, fn(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLinearWithJitter(t *testing.T) {
|
||||||
|
const rounds = 1000000
|
||||||
|
var total time.Duration
|
||||||
|
fn := LinearWithJitter(time.Second, 0.5)
|
||||||
|
for i := 0; i < rounds; i++ {
|
||||||
|
total += fn(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 0.1% tolerance
|
||||||
|
assert.True(t, total/time.Duration(rounds)-time.Second < time.Millisecond)
|
||||||
|
}
|
||||||
42
core/retry/options.go
Normal file
42
core/retry/options.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package retry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/tal-tech/go-zero/core/retry/backoff"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithDisable disables the retry behaviour on this call, or this interceptor.
|
||||||
|
// It's semantically the same to `WithMax(0)`
|
||||||
|
func WithDisable() *CallOption {
|
||||||
|
return WithMax(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMax sets the maximum number of retries on this call, or this interceptor.
|
||||||
|
func WithMax(maxRetries int) *CallOption {
|
||||||
|
return &CallOption{apply: func(options *options) {
|
||||||
|
options.max = maxRetries
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBackoff sets the `BackoffFunc` used to control time between retries.
|
||||||
|
func WithBackoff(backoffFunc backoff.Func) *CallOption {
|
||||||
|
return &CallOption{apply: func(o *options) {
|
||||||
|
o.backoffFunc = backoffFunc
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCodes Allow code to be retried.
|
||||||
|
func WithCodes(retryCodes ...codes.Code) *CallOption {
|
||||||
|
return &CallOption{apply: func(o *options) {
|
||||||
|
o.codes = retryCodes
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPerRetryTimeout timeout for each retry
|
||||||
|
func WithPerRetryTimeout(timeout time.Duration) *CallOption {
|
||||||
|
return &CallOption{apply: func(o *options) {
|
||||||
|
o.perCallTimeout = timeout
|
||||||
|
}}
|
||||||
|
}
|
||||||
91
core/retry/options_test.go
Normal file
91
core/retry/options_test.go
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
package retry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRetryWithDisable(t *testing.T) {
|
||||||
|
opt := &options{}
|
||||||
|
assert.EqualValues(t, &options{}, parseRetryCallOptions(opt, WithDisable()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRetryWithMax(t *testing.T) {
|
||||||
|
n := 5
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
opt := &options{}
|
||||||
|
assert.EqualValues(t, &options{max: i}, parseRetryCallOptions(opt, WithMax(i)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRetryWithBackoff(t *testing.T) {
|
||||||
|
opt := &options{}
|
||||||
|
|
||||||
|
retryCallOptions := parseRetryCallOptions(opt, WithBackoff(func(attempt int) time.Duration {
|
||||||
|
return time.Millisecond
|
||||||
|
}))
|
||||||
|
assert.EqualValues(t, time.Millisecond, retryCallOptions.backoffFunc(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRetryWithCodes(t *testing.T) {
|
||||||
|
opt := &options{}
|
||||||
|
c := []codes.Code{codes.Unknown, codes.NotFound}
|
||||||
|
options := parseRetryCallOptions(opt, WithCodes(c...))
|
||||||
|
assert.EqualValues(t, c, options.codes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRetryWithPerRetryTimeout(t *testing.T) {
|
||||||
|
opt := &options{}
|
||||||
|
options := parseRetryCallOptions(opt, WithPerRetryTimeout(time.Millisecond))
|
||||||
|
assert.EqualValues(t, time.Millisecond, options.perCallTimeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_waitRetryBackoff(t *testing.T) {
|
||||||
|
logx.Disable()
|
||||||
|
|
||||||
|
opt := &options{perCallTimeout: time.Second, backoffFunc: func(attempt int) time.Duration {
|
||||||
|
return time.Second
|
||||||
|
}}
|
||||||
|
logger := logx.WithContext(context.Background())
|
||||||
|
err := waitRetryBackoff(logger, 1, context.Background(), opt)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Millisecond)
|
||||||
|
defer cancelFunc()
|
||||||
|
err = waitRetryBackoff(logger, 1, ctx, opt)
|
||||||
|
assert.ErrorIs(t, err, status.FromContextError(context.DeadlineExceeded).Err())
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_isRetriable(t *testing.T) {
|
||||||
|
assert.False(t, isRetriable(status.FromContextError(context.DeadlineExceeded).Err(), &options{codes: DefaultRetriableCodes}))
|
||||||
|
assert.True(t, isRetriable(status.Error(codes.ResourceExhausted, ""), &options{codes: DefaultRetriableCodes}))
|
||||||
|
assert.False(t, isRetriable(errors.New("error"), &options{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_perCallContext(t *testing.T) {
|
||||||
|
opt := &options{perCallTimeout: time.Second, includeRetryHeader: true}
|
||||||
|
ctx := metadata.NewIncomingContext(context.Background(), map[string][]string{"1": {"1"}})
|
||||||
|
callContext := perCallContext(ctx, opt, 1)
|
||||||
|
md, ok := metadata.FromOutgoingContext(callContext)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.EqualValues(t, metadata.MD{"1": {"1"}, AttemptMetadataKey: {"1"}}, md)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_filterCallOptions(t *testing.T) {
|
||||||
|
grpcEmptyCallOpt := &grpc.EmptyCallOption{}
|
||||||
|
retryCallOpt := &CallOption{}
|
||||||
|
options, retryCallOptions := filterCallOptions([]grpc.CallOption{
|
||||||
|
grpcEmptyCallOpt,
|
||||||
|
retryCallOpt,
|
||||||
|
})
|
||||||
|
assert.EqualValues(t, []grpc.CallOption{grpcEmptyCallOpt}, options)
|
||||||
|
assert.EqualValues(t, []*CallOption{retryCallOpt}, retryCallOptions)
|
||||||
|
}
|
||||||
189
core/retry/retryinterceptor.go
Normal file
189
core/retry/retryinterceptor.go
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
package retry

import (
	"context"
	"strconv"
	"time"

	"github.com/tal-tech/go-zero/core/logx"
	"github.com/tal-tech/go-zero/core/retry/backoff"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// AttemptMetadataKey is the outgoing-metadata key that carries the current
// retry attempt number (set by perCallContext).
const AttemptMetadataKey = "x-retry-attempt"

var (
	// DefaultRetriableCodes default retry code
	DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable}
	// defaultRetryOptions default retry configuration
	defaultRetryOptions = &options{
		max:                0, // disabled
		perCallTimeout:     0, // disabled
		includeRetryHeader: true,
		codes:              DefaultRetriableCodes,
		backoffFunc:        backoff.LinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
	}
)

type (
	// options retry the configuration
	options struct {
		max                int           // maximum number of retries; 0 disables retrying
		perCallTimeout     time.Duration // timeout for each individual attempt; 0 disables it
		includeRetryHeader bool          // whether to send AttemptMetadataKey in outgoing metadata
		codes              []codes.Code  // status codes considered retriable
		backoffFunc        backoff.Func  // computes the wait duration before a given attempt
	}

	// CallOption is a grpc.CallOption that is local to grpc retry.
	CallOption struct {
		grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
		apply                func(opt *options)
	}
)
|
||||||
|
|
||||||
|
func waitRetryBackoff(logger logx.Logger, attempt int, ctx context.Context, retryOptions *options) error {
|
||||||
|
var waitTime time.Duration = 0
|
||||||
|
if attempt > 0 {
|
||||||
|
waitTime = retryOptions.backoffFunc(attempt)
|
||||||
|
}
|
||||||
|
if waitTime > 0 {
|
||||||
|
timer := time.NewTimer(waitTime)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
logger.Infof("grpc retry attempt: %d, backoff for %v", attempt, waitTime)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return status.FromContextError(ctx.Err()).Err()
|
||||||
|
case <-timer.C:
|
||||||
|
// double check
|
||||||
|
err := ctx.Err()
|
||||||
|
if err != nil {
|
||||||
|
return status.FromContextError(err).Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRetriable(err error, retryOptions *options) bool {
|
||||||
|
errCode := status.Code(err)
|
||||||
|
if isContextError(err) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, code := range retryOptions.codes {
|
||||||
|
if code == errCode {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isContextError(err error) bool {
|
||||||
|
code := status.Code(err)
|
||||||
|
return code == codes.DeadlineExceeded || code == codes.Canceled
|
||||||
|
}
|
||||||
|
|
||||||
|
func reuseOrNewWithCallOptions(opt *options, retryCallOptions []*CallOption) *options {
|
||||||
|
if len(retryCallOptions) == 0 {
|
||||||
|
return opt
|
||||||
|
}
|
||||||
|
|
||||||
|
return parseRetryCallOptions(opt, retryCallOptions...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseRetryCallOptions(opt *options, opts ...*CallOption) *options {
|
||||||
|
for _, option := range opts {
|
||||||
|
option.apply(opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return opt
|
||||||
|
}
|
||||||
|
|
||||||
|
// perCallContext derives the context for a single retry attempt. For attempts
// after the first, it applies the per-call timeout (when configured) and, when
// includeRetryHeader is set, records the attempt number in a clone of the
// incoming metadata under AttemptMetadataKey. Attempt 0 gets ctx unchanged.
func perCallContext(ctx context.Context, callOpts *options, attempt int) context.Context {
	if attempt > 0 {
		if callOpts.perCallTimeout != 0 {
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(ctx, callOpts.perCallTimeout)
			// NOTE(review): cancel is discarded because the derived context must
			// outlive this function; the timer is only released when the timeout
			// fires or the parent context ends — confirm this is acceptable.
			_ = cancel
		}
		if callOpts.includeRetryHeader {
			// clone the incoming metadata so the caller's copy isn't mutated
			cloneMd := extractIncomingAndClone(ctx)
			cloneMd.Set(AttemptMetadataKey, strconv.Itoa(attempt))
			ctx = metadata.NewOutgoingContext(ctx, cloneMd)
		}
	}

	return ctx
}
|
||||||
|
|
||||||
|
func extractIncomingAndClone(ctx context.Context) metadata.MD {
|
||||||
|
md, ok := metadata.FromIncomingContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
return metadata.MD{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return md.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []*CallOption) {
|
||||||
|
for _, opt := range callOptions {
|
||||||
|
if co, ok := opt.(*CallOption); ok {
|
||||||
|
retryOptions = append(retryOptions, co)
|
||||||
|
} else {
|
||||||
|
grpcOptions = append(grpcOptions, opt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return grpcOptions, retryOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do executes call with retry support. Retry behavior is driven by the
// *CallOption values mixed into opts; any other grpc.CallOption is forwarded
// to the underlying call. With max == 0 (the default) the call is made
// exactly once with the original options. Otherwise the call is attempted up
// to max+1 times, backing off between attempts, and the last error is
// returned when all attempts fail.
func Do(ctx context.Context, call func(ctx context.Context, opts ...grpc.CallOption) error, opts ...grpc.CallOption) error {
	logger := logx.WithContext(ctx)
	// split retry-specific options from plain gRPC call options
	grpcOpts, retryOpts := filterCallOptions(opts)
	callOpts := reuseOrNewWithCallOptions(defaultRetryOptions, retryOpts)

	// retries disabled: invoke once with the caller's original options
	if callOpts.max == 0 {
		return call(ctx, opts...)
	}

	var lastErr error
	// attempt 0 is the initial call; up to callOpts.max retries follow
	for attempt := 0; attempt <= callOpts.max; attempt++ {
		// wait out the backoff (no wait on attempt 0); abort if ctx ends first
		if err := waitRetryBackoff(logger, attempt, ctx, callOpts); err != nil {
			return err
		}

		callCtx := perCallContext(ctx, callOpts, attempt)
		lastErr = call(callCtx, grpcOpts...)

		if lastErr == nil {
			return nil
		}

		if attempt == 0 {
			logger.Errorf("grpc call failed, got err: %v", lastErr)
		} else {
			logger.Errorf("grpc retry attempt: %d, got err: %v", attempt, lastErr)
		}
		if isContextError(lastErr) {
			if ctx.Err() != nil {
				// the parent context itself is done; no point retrying
				logger.Errorf("grpc retry attempt: %d, parent context error: %v", attempt, ctx.Err())
				return lastErr
			} else if callOpts.perCallTimeout != 0 {
				// only the per-call timeout expired, not the parent; keep retrying
				logger.Errorf("grpc retry attempt: %d, context error from retry call", attempt)
				continue
			}
		}
		if !isRetriable(lastErr, callOpts) {
			return lastErr
		}
	}

	return lastErr
}
|
||||||
24
core/retry/retryinterceptor_test.go
Normal file
24
core/retry/retryinterceptor_test.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
package retry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDo(t *testing.T) {
|
||||||
|
n := 4
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
count := 0
|
||||||
|
err := Do(context.Background(), func(ctx context.Context, opts ...grpc.CallOption) error {
|
||||||
|
count++
|
||||||
|
return status.Error(codes.ResourceExhausted, "ResourceExhausted")
|
||||||
|
}, WithMax(i))
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Equal(t, i+1, count)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build debug
|
||||||
// +build debug
|
// +build debug
|
||||||
|
|
||||||
package search
|
package search
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/logx"
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
"github.com/tal-tech/go-zero/core/prometheus"
|
"github.com/tal-tech/go-zero/core/prometheus"
|
||||||
"github.com/tal-tech/go-zero/core/stat"
|
"github.com/tal-tech/go-zero/core/stat"
|
||||||
|
"github.com/tal-tech/go-zero/core/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -29,6 +30,7 @@ type ServiceConf struct {
|
|||||||
Mode string `json:",default=pro,options=dev|test|rt|pre|pro"`
|
Mode string `json:",default=pro,options=dev|test|rt|pre|pro"`
|
||||||
MetricsUrl string `json:",optional"`
|
MetricsUrl string `json:",optional"`
|
||||||
Prometheus prometheus.Config `json:",optional"`
|
Prometheus prometheus.Config `json:",optional"`
|
||||||
|
Telemetry trace.Config `json:",optional"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// MustSetUp sets up the service, exits on error.
|
// MustSetUp sets up the service, exits on error.
|
||||||
@@ -49,6 +51,12 @@ func (sc ServiceConf) SetUp() error {
|
|||||||
|
|
||||||
sc.initMode()
|
sc.initMode()
|
||||||
prometheus.StartAgent(sc.Prometheus)
|
prometheus.StartAgent(sc.Prometheus)
|
||||||
|
|
||||||
|
if len(sc.Telemetry.Name) == 0 {
|
||||||
|
sc.Telemetry.Name = sc.Name
|
||||||
|
}
|
||||||
|
trace.StartAgent(sc.Telemetry)
|
||||||
|
|
||||||
if len(sc.MetricsUrl) > 0 {
|
if len(sc.MetricsUrl) > 0 {
|
||||||
stat.SetReportWriter(stat.NewRemoteWriter(sc.MetricsUrl))
|
stat.SetReportWriter(stat.NewRemoteWriter(sc.MetricsUrl))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -26,6 +26,7 @@ type (
|
|||||||
}
|
}
|
||||||
|
|
||||||
// A ServiceGroup is a group of services.
|
// A ServiceGroup is a group of services.
|
||||||
|
// Attention: the starting order of the added services is not guaranteed.
|
||||||
ServiceGroup struct {
|
ServiceGroup struct {
|
||||||
services []Service
|
services []Service
|
||||||
stopOnce func()
|
stopOnce func()
|
||||||
@@ -41,7 +42,8 @@ func NewServiceGroup() *ServiceGroup {
|
|||||||
|
|
||||||
// Add adds service into sg.
|
// Add adds service into sg.
|
||||||
func (sg *ServiceGroup) Add(service Service) {
|
func (sg *ServiceGroup) Add(service Service) {
|
||||||
sg.services = append(sg.services, service)
|
// push front, stop with reverse order.
|
||||||
|
sg.services = append([]Service{service}, sg.services...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start starts the ServiceGroup.
|
// Start starts the ServiceGroup.
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build !linux
|
||||||
// +build !linux
|
// +build !linux
|
||||||
|
|
||||||
package stat
|
package stat
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux
|
||||||
// +build linux
|
// +build linux
|
||||||
|
|
||||||
package stat
|
package stat
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux
|
||||||
// +build linux
|
// +build linux
|
||||||
|
|
||||||
package stat
|
package stat
|
||||||
|
|||||||
@@ -7,7 +7,9 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestRefreshCpu(t *testing.T) {
|
func TestRefreshCpu(t *testing.T) {
|
||||||
assert.True(t, RefreshCpu() >= 0)
|
assert.NotPanics(t, func() {
|
||||||
|
RefreshCpu()
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRefreshCpu(b *testing.B) {
|
func BenchmarkRefreshCpu(b *testing.B) {
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build !linux
|
||||||
// +build !linux
|
// +build !linux
|
||||||
|
|
||||||
package internal
|
package internal
|
||||||
|
|||||||
@@ -38,7 +38,9 @@ func init() {
|
|||||||
atomic.StoreInt64(&cpuUsage, usage)
|
atomic.StoreInt64(&cpuUsage, usage)
|
||||||
})
|
})
|
||||||
case <-allTicker.C:
|
case <-allTicker.C:
|
||||||
printUsage()
|
if logEnabled.True() {
|
||||||
|
printUsage()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|||||||
63
core/stores/builder/builder.go
Normal file
63
core/stores/builder/builder.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dbTag is the struct tag key that names a field's database column.
const dbTag = "db"

// RawFieldNames converts a golang struct (or pointer to struct) into a slice
// of column names, one per field, in declaration order. The column name is
// the field's `db` tag when present, otherwise the field name. By default
// each name is wrapped in MySQL backticks; pass true to get the raw
// PostgreSQL-style names instead. Panics when in is not a struct.
func RawFieldNames(in interface{}, postgresSql ...bool) []string {
	v := reflect.ValueOf(in)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	var pg bool
	if len(postgresSql) > 0 {
		pg = postgresSql[0]
	}

	// we only accept structs; report the caller's actual type
	// (the original formatted the reflect.Value itself with %T, which always
	// printed "reflect.Value" and named the wrong function)
	if v.Kind() != reflect.Struct {
		panic(fmt.Errorf("RawFieldNames only accepts structs; got %T", in))
	}

	typ := v.Type()
	out := make([]string, 0, v.NumField())
	for i := 0; i < v.NumField(); i++ {
		// gets us a StructField
		fi := typ.Field(i)
		name := fi.Tag.Get(dbTag)
		if name == "" {
			name = fi.Name
		}
		if pg {
			out = append(out, name)
		} else {
			out = append(out, fmt.Sprintf("`%s`", name))
		}
	}

	return out
}
|
||||||
|
|
||||||
|
// PostgreSqlJoin concatenates the given elements into a string of
// "col = $N" assignments separated by ", ", with placeholders starting
// at $2. Returns the empty string for an empty slice.
func PostgreSqlJoin(elems []string) string {
	var b strings.Builder
	for i, e := range elems {
		if i > 0 {
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "%s = $%d", e, i+2)
	}

	return b.String()
}
|
||||||
24
core/stores/builder/builder_test.go
Normal file
24
core/stores/builder/builder_test.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mockedUser struct {
|
||||||
|
ID string `db:"id" json:"id,omitempty"`
|
||||||
|
UserName string `db:"user_name" json:"userName,omitempty"`
|
||||||
|
Sex int `db:"sex" json:"sex,omitempty"`
|
||||||
|
UUID string `db:"uuid" uuid:"uuid,omitempty"`
|
||||||
|
Age int `db:"age" json:"age"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFieldNames(t *testing.T) {
|
||||||
|
t.Run("new", func(t *testing.T) {
|
||||||
|
var u mockedUser
|
||||||
|
out := RawFieldNames(&u)
|
||||||
|
expected := []string{"`id`", "`user_name`", "`sex`", "`uuid`", "`age`"}
|
||||||
|
assert.Equal(t, expected, out)
|
||||||
|
})
|
||||||
|
}
|
||||||
2
core/stores/cache/cache.go
vendored
2
core/stores/cache/cache.go
vendored
@@ -29,7 +29,7 @@ type (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// New returns a Cache.
|
// New returns a Cache.
|
||||||
func New(c ClusterConf, barrier syncx.SharedCalls, st *Stat, errNotFound error,
|
func New(c ClusterConf, barrier syncx.SingleFlight, st *Stat, errNotFound error,
|
||||||
opts ...Option) Cache {
|
opts ...Option) Cache {
|
||||||
if len(c) == 0 || TotalWeights(c) <= 0 {
|
if len(c) == 0 || TotalWeights(c) <= 0 {
|
||||||
log.Fatal("no cache nodes")
|
log.Fatal("no cache nodes")
|
||||||
|
|||||||
6
core/stores/cache/cache_test.go
vendored
6
core/stores/cache/cache_test.go
vendored
@@ -23,6 +23,7 @@ type mockedNode struct {
|
|||||||
|
|
||||||
func (mc *mockedNode) Del(keys ...string) error {
|
func (mc *mockedNode) Del(keys ...string) error {
|
||||||
var be errorx.BatchError
|
var be errorx.BatchError
|
||||||
|
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
if _, ok := mc.vals[key]; !ok {
|
if _, ok := mc.vals[key]; !ok {
|
||||||
be.Add(mc.errNotFound)
|
be.Add(mc.errNotFound)
|
||||||
@@ -30,6 +31,7 @@ func (mc *mockedNode) Del(keys ...string) error {
|
|||||||
delete(mc.vals, key)
|
delete(mc.vals, key)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return be.Err()
|
return be.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -102,7 +104,7 @@ func TestCache_SetDel(t *testing.T) {
|
|||||||
Weight: 100,
|
Weight: 100,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
c := New(conf, syncx.NewSharedCalls(), NewStat("mock"), errPlaceholder)
|
c := New(conf, syncx.NewSingleFlight(), NewStat("mock"), errPlaceholder)
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))
|
assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))
|
||||||
@@ -140,7 +142,7 @@ func TestCache_OneNode(t *testing.T) {
|
|||||||
Weight: 100,
|
Weight: 100,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
c := New(conf, syncx.NewSharedCalls(), NewStat("mock"), errPlaceholder)
|
c := New(conf, syncx.NewSingleFlight(), NewStat("mock"), errPlaceholder)
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))
|
assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))
|
||||||
|
|||||||
19
core/stores/cache/cachenode.go
vendored
19
core/stores/cache/cachenode.go
vendored
@@ -29,7 +29,7 @@ type cacheNode struct {
|
|||||||
rds *redis.Redis
|
rds *redis.Redis
|
||||||
expiry time.Duration
|
expiry time.Duration
|
||||||
notFoundExpiry time.Duration
|
notFoundExpiry time.Duration
|
||||||
barrier syncx.SharedCalls
|
barrier syncx.SingleFlight
|
||||||
r *rand.Rand
|
r *rand.Rand
|
||||||
lock *sync.Mutex
|
lock *sync.Mutex
|
||||||
unstableExpiry mathx.Unstable
|
unstableExpiry mathx.Unstable
|
||||||
@@ -43,7 +43,7 @@ type cacheNode struct {
|
|||||||
// st is used to stat the cache.
|
// st is used to stat the cache.
|
||||||
// errNotFound defines the error that returned on cache not found.
|
// errNotFound defines the error that returned on cache not found.
|
||||||
// opts are the options that customize the cacheNode.
|
// opts are the options that customize the cacheNode.
|
||||||
func NewNode(rds *redis.Redis, barrier syncx.SharedCalls, st *Stat,
|
func NewNode(rds *redis.Redis, barrier syncx.SingleFlight, st *Stat,
|
||||||
errNotFound error, opts ...Option) Cache {
|
errNotFound error, opts ...Option) Cache {
|
||||||
o := newOptions(opts...)
|
o := newOptions(opts...)
|
||||||
return cacheNode{
|
return cacheNode{
|
||||||
@@ -65,9 +65,18 @@ func (c cacheNode) Del(keys ...string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := c.rds.Del(keys...); err != nil {
|
if len(keys) > 1 && c.rds.Type == redis.ClusterType {
|
||||||
logx.Errorf("failed to clear cache with keys: %q, error: %v", formatKeys(keys), err)
|
for _, key := range keys {
|
||||||
c.asyncRetryDelCache(keys...)
|
if _, err := c.rds.Del(key); err != nil {
|
||||||
|
logx.Errorf("failed to clear cache with key: %q, error: %v", key, err)
|
||||||
|
c.asyncRetryDelCache(key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if _, err := c.rds.Del(keys...); err != nil {
|
||||||
|
logx.Errorf("failed to clear cache with keys: %q, error: %v", formatKeys(keys), err)
|
||||||
|
c.asyncRetryDelCache(keys...)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
30
core/stores/cache/cachenode_test.go
vendored
30
core/stores/cache/cachenode_test.go
vendored
@@ -29,6 +29,7 @@ func init() {
|
|||||||
func TestCacheNode_DelCache(t *testing.T) {
|
func TestCacheNode_DelCache(t *testing.T) {
|
||||||
store, clean, err := redistest.CreateRedis()
|
store, clean, err := redistest.CreateRedis()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
store.Type = redis.ClusterType
|
||||||
defer clean()
|
defer clean()
|
||||||
|
|
||||||
cn := cacheNode{
|
cn := cacheNode{
|
||||||
@@ -49,13 +50,30 @@ func TestCacheNode_DelCache(t *testing.T) {
|
|||||||
assert.Nil(t, cn.Del("first", "second"))
|
assert.Nil(t, cn.Del("first", "second"))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCacheNode_DelCacheWithErrors(t *testing.T) {
|
||||||
|
store, clean, err := redistest.CreateRedis()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
store.Type = redis.ClusterType
|
||||||
|
clean()
|
||||||
|
|
||||||
|
cn := cacheNode{
|
||||||
|
rds: store,
|
||||||
|
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||||
|
lock: new(sync.Mutex),
|
||||||
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
|
stat: NewStat("any"),
|
||||||
|
errNotFound: errTestNotFound,
|
||||||
|
}
|
||||||
|
assert.Nil(t, cn.Del("third", "fourth"))
|
||||||
|
}
|
||||||
|
|
||||||
func TestCacheNode_InvalidCache(t *testing.T) {
|
func TestCacheNode_InvalidCache(t *testing.T) {
|
||||||
s, err := miniredis.Run()
|
s, err := miniredis.Run()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer s.Close()
|
defer s.Close()
|
||||||
|
|
||||||
cn := cacheNode{
|
cn := cacheNode{
|
||||||
rds: redis.NewRedis(s.Addr(), redis.NodeType),
|
rds: redis.New(s.Addr()),
|
||||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||||
lock: new(sync.Mutex),
|
lock: new(sync.Mutex),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
@@ -78,7 +96,7 @@ func TestCacheNode_Take(t *testing.T) {
|
|||||||
cn := cacheNode{
|
cn := cacheNode{
|
||||||
rds: store,
|
rds: store,
|
||||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||||
barrier: syncx.NewSharedCalls(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
lock: new(sync.Mutex),
|
lock: new(sync.Mutex),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
stat: NewStat("any"),
|
stat: NewStat("any"),
|
||||||
@@ -105,7 +123,7 @@ func TestCacheNode_TakeNotFound(t *testing.T) {
|
|||||||
cn := cacheNode{
|
cn := cacheNode{
|
||||||
rds: store,
|
rds: store,
|
||||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||||
barrier: syncx.NewSharedCalls(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
lock: new(sync.Mutex),
|
lock: new(sync.Mutex),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
stat: NewStat("any"),
|
stat: NewStat("any"),
|
||||||
@@ -144,7 +162,7 @@ func TestCacheNode_TakeWithExpire(t *testing.T) {
|
|||||||
cn := cacheNode{
|
cn := cacheNode{
|
||||||
rds: store,
|
rds: store,
|
||||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||||
barrier: syncx.NewSharedCalls(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
lock: new(sync.Mutex),
|
lock: new(sync.Mutex),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
stat: NewStat("any"),
|
stat: NewStat("any"),
|
||||||
@@ -171,7 +189,7 @@ func TestCacheNode_String(t *testing.T) {
|
|||||||
cn := cacheNode{
|
cn := cacheNode{
|
||||||
rds: store,
|
rds: store,
|
||||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||||
barrier: syncx.NewSharedCalls(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
lock: new(sync.Mutex),
|
lock: new(sync.Mutex),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
stat: NewStat("any"),
|
stat: NewStat("any"),
|
||||||
@@ -188,7 +206,7 @@ func TestCacheValueWithBigInt(t *testing.T) {
|
|||||||
cn := cacheNode{
|
cn := cacheNode{
|
||||||
rds: store,
|
rds: store,
|
||||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||||
barrier: syncx.NewSharedCalls(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
lock: new(sync.Mutex),
|
lock: new(sync.Mutex),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
stat: NewStat("any"),
|
stat: NewStat("any"),
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
package clickhouse
|
package clickhouse
|
||||||
|
|
||||||
import (
|
import (
|
||||||
// imports the driver.
|
// imports the driver, don't remove this comment, golint requires.
|
||||||
_ "github.com/ClickHouse/clickhouse-go"
|
_ "github.com/ClickHouse/clickhouse-go"
|
||||||
"github.com/tal-tech/go-zero/core/stores/sqlx"
|
"github.com/tal-tech/go-zero/core/stores/sqlx"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -667,10 +667,12 @@ func TestRedis_HyperLogLog(t *testing.T) {
|
|||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
runOnCluster(t, func(cluster Store) {
|
runOnCluster(t, func(cluster Store) {
|
||||||
_, err := cluster.Pfadd("key")
|
ok, err := cluster.Pfadd("key", "value")
|
||||||
assert.NotNil(t, err)
|
assert.Nil(t, err)
|
||||||
_, err = cluster.Pfcount("key")
|
assert.True(t, ok)
|
||||||
assert.NotNil(t, err)
|
val, err := cluster.Pfcount("key")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, int64(1), val)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/timex"
|
"github.com/tal-tech/go-zero/core/timex"
|
||||||
)
|
)
|
||||||
|
|
||||||
const slowThreshold = time.Millisecond * 500
|
const defaultSlowThreshold = time.Millisecond * 500
|
||||||
|
|
||||||
// ErrNotFound is an alias of mgo.ErrNotFound.
|
// ErrNotFound is an alias of mgo.ErrNotFound.
|
||||||
var ErrNotFound = mgo.ErrNotFound
|
var ErrNotFound = mgo.ErrNotFound
|
||||||
@@ -203,7 +203,7 @@ func (c *decoratedCollection) logDuration(method string, duration time.Duration,
|
|||||||
if e != nil {
|
if e != nil {
|
||||||
logx.Error(err)
|
logx.Error(err)
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
if duration > slowThreshold {
|
if duration > slowThreshold.Load() {
|
||||||
logx.WithDuration(duration).Slowf("[MONGO] mongo(%s) - slowcall - %s - fail(%s) - %s",
|
logx.WithDuration(duration).Slowf("[MONGO] mongo(%s) - slowcall - %s - fail(%s) - %s",
|
||||||
c.name, method, err.Error(), string(content))
|
c.name, method, err.Error(), string(content))
|
||||||
} else {
|
} else {
|
||||||
@@ -211,7 +211,7 @@ func (c *decoratedCollection) logDuration(method string, duration time.Duration,
|
|||||||
c.name, method, err.Error(), string(content))
|
c.name, method, err.Error(), string(content))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if duration > slowThreshold {
|
if duration > slowThreshold.Load() {
|
||||||
logx.WithDuration(duration).Slowf("[MONGO] mongo(%s) - slowcall - %s - ok - %s",
|
logx.WithDuration(duration).Slowf("[MONGO] mongo(%s) - slowcall - %s - ok - %s",
|
||||||
c.name, method, string(content))
|
c.name, method, string(content))
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -5,9 +5,10 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
reflect "reflect"
|
||||||
|
|
||||||
mgo "github.com/globalsign/mgo"
|
mgo "github.com/globalsign/mgo"
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "github.com/golang/mock/gomock"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockMgoCollection is a mock of MgoCollection interface
|
// MockMgoCollection is a mock of MgoCollection interface
|
||||||
|
|||||||
@@ -5,9 +5,10 @@
|
|||||||
package mongo
|
package mongo
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
reflect "reflect"
|
||||||
|
|
||||||
bson "github.com/globalsign/mgo/bson"
|
bson "github.com/globalsign/mgo/bson"
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "github.com/golang/mock/gomock"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockIter is a mock of Iter interface
|
// MockIter is a mock of Iter interface
|
||||||
|
|||||||
@@ -8,23 +8,14 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/breaker"
|
"github.com/tal-tech/go-zero/core/breaker"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
// A Model is a mongo model.
|
||||||
options struct {
|
type Model struct {
|
||||||
timeout time.Duration
|
session *concurrentSession
|
||||||
}
|
db *mgo.Database
|
||||||
|
collection string
|
||||||
// Option defines the method to customize a mongo model.
|
brk breaker.Breaker
|
||||||
Option func(opts *options)
|
opts []Option
|
||||||
|
}
|
||||||
// A Model is a mongo model.
|
|
||||||
Model struct {
|
|
||||||
session *concurrentSession
|
|
||||||
db *mgo.Database
|
|
||||||
collection string
|
|
||||||
brk breaker.Breaker
|
|
||||||
opts []Option
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// MustNewModel returns a Model, exits on errors.
|
// MustNewModel returns a Model, exits on errors.
|
||||||
func MustNewModel(url, collection string, opts ...Option) *Model {
|
func MustNewModel(url, collection string, opts ...Option) *Model {
|
||||||
|
|||||||
14
core/stores/mongo/model_test.go
Normal file
14
core/stores/mongo/model_test.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package mongo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithTimeout(t *testing.T) {
|
||||||
|
o := defaultOptions()
|
||||||
|
WithTimeout(time.Second)(o)
|
||||||
|
assert.Equal(t, time.Second, o.timeout)
|
||||||
|
}
|
||||||
29
core/stores/mongo/options.go
Normal file
29
core/stores/mongo/options.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package mongo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/tal-tech/go-zero/core/syncx"
|
||||||
|
)
|
||||||
|
|
||||||
|
// slowThreshold is the duration above which a mongo call is logged as a slow
// call; it is atomic so SetSlowThreshold can safely adjust it at runtime.
var slowThreshold = syncx.ForAtomicDuration(defaultSlowThreshold)

type (
	// options holds the per-call tunables for a mongo model.
	options struct {
		timeout time.Duration // timeout for acquiring a session; see takeSession
	}

	// Option defines the method to customize a mongo model.
	Option func(opts *options)
)

// SetSlowThreshold sets the slow threshold.
func SetSlowThreshold(threshold time.Duration) {
	slowThreshold.Set(threshold)
}

// defaultOptions returns an options value populated with the package defaults.
func defaultOptions() *options {
	return &options{
		timeout: defaultTimeout,
	}
}
|
||||||
14
core/stores/mongo/options_test.go
Normal file
14
core/stores/mongo/options_test.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package mongo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSetSlowThreshold(t *testing.T) {
|
||||||
|
assert.Equal(t, defaultSlowThreshold, slowThreshold.Load())
|
||||||
|
SetSlowThreshold(time.Second)
|
||||||
|
assert.Equal(t, time.Second, slowThreshold.Load())
|
||||||
|
}
|
||||||
@@ -57,9 +57,7 @@ func (cs *concurrentSession) putSession(session *mgo.Session) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (cs *concurrentSession) takeSession(opts ...Option) (*mgo.Session, error) {
|
func (cs *concurrentSession) takeSession(opts ...Option) (*mgo.Session, error) {
|
||||||
o := &options{
|
o := defaultOptions()
|
||||||
timeout: defaultTimeout,
|
|
||||||
}
|
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
opt(o)
|
opt(o)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,8 +11,8 @@ var (
|
|||||||
// ErrNotFound is an alias of mgo.ErrNotFound.
|
// ErrNotFound is an alias of mgo.ErrNotFound.
|
||||||
ErrNotFound = mgo.ErrNotFound
|
ErrNotFound = mgo.ErrNotFound
|
||||||
|
|
||||||
// can't use one SharedCalls per conn, because multiple conns may share the same cache key.
|
// can't use one SingleFlight per conn, because multiple conns may share the same cache key.
|
||||||
sharedCalls = syncx.NewSharedCalls()
|
sharedCalls = syncx.NewSingleFlight()
|
||||||
stats = cache.NewStat("mongoc")
|
stats = cache.NewStat("mongoc")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -120,7 +120,7 @@ func TestStatCacheFails(t *testing.T) {
|
|||||||
log.SetOutput(ioutil.Discard)
|
log.SetOutput(ioutil.Discard)
|
||||||
defer log.SetOutput(os.Stdout)
|
defer log.SetOutput(os.Stdout)
|
||||||
|
|
||||||
r := redis.NewRedis("localhost:59999", redis.NodeType)
|
r := redis.New("localhost:59999")
|
||||||
cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
|
cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
|
||||||
c := newCollection(dummyConn{}, cach)
|
c := newCollection(dummyConn{}, cach)
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
package postgres
|
package postgres
|
||||||
|
|
||||||
import (
|
import (
|
||||||
// imports the driver.
|
// imports the driver, don't remove this comment, golint requires.
|
||||||
_ "github.com/lib/pq"
|
_ "github.com/lib/pq"
|
||||||
"github.com/tal-tech/go-zero/core/stores/sqlx"
|
"github.com/tal-tech/go-zero/core/stores/sqlx"
|
||||||
)
|
)
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user