mirror of
https://github.com/zeromicro/go-zero.git
synced 2026-05-13 01:40:00 +08:00
Compare commits
236 Commits
tools/goct
...
v1.3.4
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bb33a20bc8 | ||
|
|
5536473a08 | ||
|
|
323b35ed2d | ||
|
|
30958a91f7 | ||
|
|
b94b68a427 | ||
|
|
07145b210e | ||
|
|
321a20add6 | ||
|
|
65098d4737 | ||
|
|
35425f6164 | ||
|
|
a0060ff81b | ||
|
|
289a325757 | ||
|
|
3fbe0f87b7 | ||
|
|
ea98d210fd | ||
|
|
b9bc1fdcf8 | ||
|
|
6dc570bcd7 | ||
|
|
e21997f0d7 | ||
|
|
92c0b7c3c5 | ||
|
|
6d3ed98744 | ||
|
|
fb519fa547 | ||
|
|
e9501c3fb3 | ||
|
|
fd12659729 | ||
|
|
72ebbb9774 | ||
|
|
f1fdd55b38 | ||
|
|
58787746db | ||
|
|
ca88b69d24 | ||
|
|
6b1e15cab1 | ||
|
|
6f86e5bff8 | ||
|
|
3f492df74e | ||
|
|
5e7b1f6bfe | ||
|
|
e80a64fa67 | ||
|
|
95282edb78 | ||
|
|
7b82eda993 | ||
|
|
5d09cd0e7c | ||
|
|
1e717f9f5c | ||
|
|
c6e2b4a43a | ||
|
|
e567a0c718 | ||
|
|
52f060caae | ||
|
|
f486685e99 | ||
|
|
3ae874d75d | ||
|
|
c58eb13328 | ||
|
|
14ca39bc86 | ||
|
|
3ea8a2d4b6 | ||
|
|
6d2b9fd904 | ||
|
|
5451d96a81 | ||
|
|
69c2bad410 | ||
|
|
5383e29ce6 | ||
|
|
51472004a3 | ||
|
|
caf5b7b1f1 | ||
|
|
bef9aa55e6 | ||
|
|
d0a59b13a6 | ||
|
|
469e62067c | ||
|
|
a36d58aac9 | ||
|
|
aa5118c2aa | ||
|
|
974ba5c9aa | ||
|
|
ec1de4f48d | ||
|
|
bab72b7630 | ||
|
|
ac321fc146 | ||
|
|
ae2c76765c | ||
|
|
f21970c117 | ||
|
|
d0a58d1f2d | ||
|
|
3bbc90ec24 | ||
|
|
cef83efd4e | ||
|
|
cc09ab2aba | ||
|
|
f7a60cdc24 | ||
|
|
c3a49ece8d | ||
|
|
1a38eddffe | ||
|
|
5bcee4cf7c | ||
|
|
5c9fae7e62 | ||
|
|
ec3e02624c | ||
|
|
22b157bb6c | ||
|
|
095b603788 | ||
|
|
bc3c9484d1 | ||
|
|
162e9cef86 | ||
|
|
94ddb3380e | ||
|
|
16c61c6657 | ||
|
|
14bf2f33f7 | ||
|
|
305587aa81 | ||
|
|
2cdff97934 | ||
|
|
bbe1249ecb | ||
|
|
e62870e268 | ||
|
|
92b450eb11 | ||
|
|
d58cf7a12a | ||
|
|
036d803fbb | ||
|
|
c6ab11b14f | ||
|
|
9e20b1bbfe | ||
|
|
fadef0ccd9 | ||
|
|
4382ec0e0d | ||
|
|
db99addc64 | ||
|
|
97bf3856c1 | ||
|
|
ff6c6558dd | ||
|
|
5d4e7c84ee | ||
|
|
cb4fcf2c6c | ||
|
|
ee88abce14 | ||
|
|
ecc3653d44 | ||
|
|
ba8ac974aa | ||
|
|
50de01fb49 | ||
|
|
fabea4c448 | ||
|
|
6d9dfc08f9 | ||
|
|
252fabcc4b | ||
|
|
415c4c91fc | ||
|
|
0cc9d4ff8d | ||
|
|
8bc34defc4 | ||
|
|
8dd764679c | ||
|
|
9fe868ade9 | ||
|
|
4e48286838 | ||
|
|
ab01442d46 | ||
|
|
8694e38384 | ||
|
|
d5e550e79b | ||
|
|
affdab660e | ||
|
|
7d5858e83a | ||
|
|
815a6a6485 | ||
|
|
475d17e17d | ||
|
|
8472415472 | ||
|
|
faad6e27e3 | ||
|
|
58a0b17451 | ||
|
|
89eccfdb97 | ||
|
|
78ea0769fd | ||
|
|
e0fa8d820d | ||
|
|
dfd58c213c | ||
|
|
83cacf51b7 | ||
|
|
6dccfa29fd | ||
|
|
7e0b0ab0b1 | ||
|
|
ac18cc470d | ||
|
|
f4471846ff | ||
|
|
9c2d526a11 | ||
|
|
2b9fc26c38 | ||
|
|
321dc2d410 | ||
|
|
500bd87c85 | ||
|
|
e9620c8c05 | ||
|
|
70e51bb352 | ||
|
|
278cd123c8 | ||
|
|
3febb1a5d0 | ||
|
|
d8054d8def | ||
|
|
ec271db7a0 | ||
|
|
bbac994c8a | ||
|
|
c1d9e6a00b | ||
|
|
0aeb49a6b0 | ||
|
|
fe262766b4 | ||
|
|
7181505c8a | ||
|
|
f060a226bc | ||
|
|
93d524b797 | ||
|
|
5c169f4f49 | ||
|
|
d29dfa12e3 | ||
|
|
194f55e08e | ||
|
|
c0f9892fe3 | ||
|
|
227104d7d7 | ||
|
|
448029aa4b | ||
|
|
17e0afeac0 | ||
|
|
18916b5189 | ||
|
|
c11a09be23 | ||
|
|
56e1ecf2f3 | ||
|
|
f9e6013a6c | ||
|
|
b5d1d8b0d1 | ||
|
|
09e6d94f9e | ||
|
|
2a5717d7fb | ||
|
|
85cf662c6f | ||
|
|
3279a7ef0f | ||
|
|
fec908a19b | ||
|
|
f5ed0cda58 | ||
|
|
cc9d16f505 | ||
|
|
c05d74b44c | ||
|
|
32c88b6352 | ||
|
|
7dabec260f | ||
|
|
4feb88f9b5 | ||
|
|
2776caed0e | ||
|
|
c55694d957 | ||
|
|
209ffb934b | ||
|
|
26a33932cd | ||
|
|
d6a692971f | ||
|
|
4624390e54 | ||
|
|
63b7d292c1 | ||
|
|
365c569d7c | ||
|
|
68a81fea8a | ||
|
|
08a8bd7ef7 | ||
|
|
b939ce75ba | ||
|
|
3b7ca86e4f | ||
|
|
60760b52ab | ||
|
|
96c128c58a | ||
|
|
0c35f39a7d | ||
|
|
6a66dde0a1 | ||
|
|
36b9fcba44 | ||
|
|
bf99dda620 | ||
|
|
511dfcb409 | ||
|
|
900bc96420 | ||
|
|
be277a7376 | ||
|
|
f15a4f9188 | ||
|
|
e31128650e | ||
|
|
168740b64d | ||
|
|
cc4c4928e0 | ||
|
|
fba6543b23 | ||
|
|
877eb6ac56 | ||
|
|
259a5a13e7 | ||
|
|
cf7c7cb392 | ||
|
|
86d01e2e99 | ||
|
|
7a28e19a27 | ||
|
|
900ea63d68 | ||
|
|
87ab86cdd0 | ||
|
|
0697494ffd | ||
|
|
ffd69a2f5e | ||
|
|
66f10bb5e6 | ||
|
|
8131a0e777 | ||
|
|
32a557dff6 | ||
|
|
db949e40f1 | ||
|
|
e0454138e0 | ||
|
|
3b07ed1b97 | ||
|
|
daa98f5a27 | ||
|
|
842656aa90 | ||
|
|
aa29036cb3 | ||
|
|
607bae27fa | ||
|
|
7c63676be4 | ||
|
|
9e113909b3 | ||
|
|
bd105474ca | ||
|
|
a078f5d764 | ||
|
|
b215fa3ee6 | ||
|
|
50b1928502 | ||
|
|
493e3bcf4b | ||
|
|
6deb80625d | ||
|
|
6ab051568c | ||
|
|
2732d3cdae | ||
|
|
e8c307e4dc | ||
|
|
84ddc660c4 | ||
|
|
e60e707955 | ||
|
|
cf4321b2d0 | ||
|
|
1993faf2f8 | ||
|
|
0ce85376bf | ||
|
|
a40254156f | ||
|
|
05cc62f5ff | ||
|
|
9c2c90e533 | ||
|
|
822ee2e1c5 | ||
|
|
77482c8946 | ||
|
|
7ef0ab3119 | ||
|
|
8bd89a297a | ||
|
|
bb75cc796e | ||
|
|
0fdd8f54eb | ||
|
|
b1ffc464cd | ||
|
|
50174960e4 |
3
.github/FUNDING.yml
vendored
3
.github/FUNDING.yml
vendored
@@ -9,4 +9,5 @@ community_bridge: # Replace with a single Community Bridge project-name e.g., cl
|
|||||||
liberapay: # Replace with a single Liberapay username
|
liberapay: # Replace with a single Liberapay username
|
||||||
issuehunt: # Replace with a single IssueHunt username
|
issuehunt: # Replace with a single IssueHunt username
|
||||||
otechie: # Replace with a single Otechie username
|
otechie: # Replace with a single Otechie username
|
||||||
custom: https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
|
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
|
||||||
|
ethereum: 0x5052b7f6B937B02563996D23feb69b38D06Ca150 | kevwan
|
||||||
|
|||||||
60
.github/workflows/go.yml
vendored
60
.github/workflows/go.yml
vendored
@@ -7,32 +7,50 @@ on:
|
|||||||
branches: [ master ]
|
branches: [ master ]
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
test-linux:
|
||||||
name: Build
|
name: Linux
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
- name: Set up Go 1.x
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: ^1.15
|
||||||
|
id: go
|
||||||
|
|
||||||
- name: Set up Go 1.x
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/setup-go@v2
|
uses: actions/checkout@v2
|
||||||
with:
|
|
||||||
go-version: ^1.15
|
|
||||||
id: go
|
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Get dependencies
|
||||||
uses: actions/checkout@v2
|
run: |
|
||||||
|
go get -v -t -d ./...
|
||||||
|
|
||||||
- name: Get dependencies
|
- name: Lint
|
||||||
run: |
|
run: |
|
||||||
go get -v -t -d ./...
|
go vet -stdmethods=false $(go list ./...)
|
||||||
|
go install mvdan.cc/gofumpt@latest
|
||||||
|
test -z "$(gofumpt -l -extra .)" || echo "Please run 'gofumpt -l -w -extra .'"
|
||||||
|
|
||||||
- name: Lint
|
- name: Test
|
||||||
run: |
|
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||||
go vet -stdmethods=false $(go list ./...)
|
|
||||||
go install mvdan.cc/gofumpt@latest
|
|
||||||
test -z "$(gofumpt -s -l -extra .)" || echo "Please run 'gofumpt -l -w -extra .'"
|
|
||||||
|
|
||||||
- name: Test
|
- name: Codecov
|
||||||
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
uses: codecov/codecov-action@v2
|
||||||
|
|
||||||
- name: Codecov
|
test-win:
|
||||||
uses: codecov/codecov-action@v2
|
name: Windows
|
||||||
|
runs-on: windows-latest
|
||||||
|
steps:
|
||||||
|
- name: Set up Go 1.x
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: ^1.15
|
||||||
|
|
||||||
|
- name: Checkout codebase
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
run: |
|
||||||
|
go mod verify
|
||||||
|
go mod download
|
||||||
|
go test -v -race ./...
|
||||||
|
cd tools/goctl && go build -v goctl.go
|
||||||
|
|||||||
28
.github/workflows/release.yaml
vendored
Normal file
28
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- "tools/goctl/*"
|
||||||
|
jobs:
|
||||||
|
releases-matrix:
|
||||||
|
name: Release goctl binary
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
# build and publish in parallel: linux/386, linux/amd64, linux/arm64,
|
||||||
|
# windows/386, windows/amd64, windows/arm64, darwin/amd64, darwin/arm64
|
||||||
|
goos: [ linux, windows, darwin ]
|
||||||
|
goarch: [ "386", amd64, arm64 ]
|
||||||
|
exclude:
|
||||||
|
- goarch: "386"
|
||||||
|
goos: darwin
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- uses: zeromicro/go-zero-release-action@master
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
goos: ${{ matrix.goos }}
|
||||||
|
goarch: ${{ matrix.goarch }}
|
||||||
|
goversion: "https://dl.google.com/go/go1.17.5.linux-amd64.tar.gz"
|
||||||
|
project_path: "tools/goctl"
|
||||||
|
binary_name: "goctl"
|
||||||
|
extra_files: tools/goctl/goctl.md
|
||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -17,7 +17,8 @@
|
|||||||
|
|
||||||
# for test purpose
|
# for test purpose
|
||||||
**/adhoc
|
**/adhoc
|
||||||
**/testdata
|
go.work
|
||||||
|
go.work.sum
|
||||||
|
|
||||||
# gitlab ci
|
# gitlab ci
|
||||||
.cache
|
.cache
|
||||||
|
|||||||
2
LICENSE
2
LICENSE
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2020 xiaoheiban_server_go
|
Copyright (c) 2022 zeromicro
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
10
ROADMAP.md
10
ROADMAP.md
@@ -20,9 +20,9 @@ We hope that the items listed below will inspire further engagement from the com
|
|||||||
- [x] Support `goctl bug` to report bugs conveniently
|
- [x] Support `goctl bug` to report bugs conveniently
|
||||||
|
|
||||||
## 2022
|
## 2022
|
||||||
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file
|
- [x] Support `context` in redis related methods for timeout and tracing
|
||||||
- [ ] Add `httpx.Client` with governance, like circuit breaker etc.
|
- [x] Support `context` in sql related methods for timeout and tracing
|
||||||
|
- [x] Support `context` in mongodb related methods for timeout and tracing
|
||||||
|
- [x] Add `httpc.Do` with HTTP call governance, like circuit breaker etc.
|
||||||
- [ ] Support `goctl doctor` command to report potential issues for given service
|
- [ ] Support `goctl doctor` command to report potential issues for given service
|
||||||
- [ ] Support `context` in redis related methods for timeout and tracing
|
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file
|
||||||
- [ ] Support `context` in sql related methods for timeout and tracing
|
|
||||||
- [ ] Support `context` in mongodb related methods for timeout and tracing
|
|
||||||
|
|||||||
@@ -69,11 +69,8 @@ func (f *Filter) Exists(data []byte) (bool, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
if !isSet {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, nil
|
return isSet, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Filter) getLocations(data []byte) []uint {
|
func (f *Filter) getLocations(data []byte) []uint {
|
||||||
|
|||||||
@@ -5,12 +5,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/mathx"
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
"github.com/zeromicro/go-zero/core/proc"
|
"github.com/zeromicro/go-zero/core/proc"
|
||||||
"github.com/zeromicro/go-zero/core/stat"
|
"github.com/zeromicro/go-zero/core/stat"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -171,7 +171,7 @@ func (lt loggedThrottle) allow() (Promise, error) {
|
|||||||
func (lt loggedThrottle) doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error {
|
func (lt loggedThrottle) doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error {
|
||||||
return lt.logError(lt.internalThrottle.doReq(req, fallback, func(err error) bool {
|
return lt.logError(lt.internalThrottle.doReq(req, fallback, func(err error) bool {
|
||||||
accept := acceptable(err)
|
accept := acceptable(err)
|
||||||
if !accept {
|
if !accept && err != nil {
|
||||||
lt.errWin.add(err.Error())
|
lt.errWin.add(err.Error())
|
||||||
}
|
}
|
||||||
return accept
|
return accept
|
||||||
@@ -198,7 +198,7 @@ type errorWindow struct {
|
|||||||
|
|
||||||
func (ew *errorWindow) add(reason string) {
|
func (ew *errorWindow) add(reason string) {
|
||||||
ew.lock.Lock()
|
ew.lock.Lock()
|
||||||
ew.reasons[ew.index] = fmt.Sprintf("%s %s", timex.Time().Format(timeFormat), reason)
|
ew.reasons[ew.index] = fmt.Sprintf("%s %s", time.Now().Format(timeFormat), reason)
|
||||||
ew.index = (ew.index + 1) % numHistoryReasons
|
ew.index = (ew.index + 1) % numHistoryReasons
|
||||||
ew.count = mathx.MinInt(ew.count+1, numHistoryReasons)
|
ew.count = mathx.MinInt(ew.count+1, numHistoryReasons)
|
||||||
ew.lock.Unlock()
|
ew.lock.Unlock()
|
||||||
|
|||||||
@@ -98,13 +98,18 @@ func (c *Cache) Get(key string) (interface{}, bool) {
|
|||||||
|
|
||||||
// Set sets value into c with key.
|
// Set sets value into c with key.
|
||||||
func (c *Cache) Set(key string, value interface{}) {
|
func (c *Cache) Set(key string, value interface{}) {
|
||||||
|
c.SetWithExpire(key, value, c.expire)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWithExpire sets value into c with key and expire with the given value.
|
||||||
|
func (c *Cache) SetWithExpire(key string, value interface{}, expire time.Duration) {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
_, ok := c.data[key]
|
_, ok := c.data[key]
|
||||||
c.data[key] = value
|
c.data[key] = value
|
||||||
c.lruCache.add(key)
|
c.lruCache.add(key)
|
||||||
c.lock.Unlock()
|
c.lock.Unlock()
|
||||||
|
|
||||||
expiry := c.unstableExpiry.AroundDuration(c.expire)
|
expiry := c.unstableExpiry.AroundDuration(expire)
|
||||||
if ok {
|
if ok {
|
||||||
c.timingWheel.MoveTimer(key, expiry)
|
c.timingWheel.MoveTimer(key, expiry)
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ func TestCacheSet(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
cache.Set("first", "first element")
|
cache.Set("first", "first element")
|
||||||
cache.Set("second", "second element")
|
cache.SetWithExpire("second", "second element", time.Second*3)
|
||||||
|
|
||||||
value, ok := cache.Get("first")
|
value, ok := cache.Get("first")
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
|
|||||||
@@ -61,3 +61,41 @@ func TestPutMore(t *testing.T) {
|
|||||||
assert.Equal(t, string(element), string(body.([]byte)))
|
assert.Equal(t, string(element), string(body.([]byte)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPutMoreWithHeaderNotZero(t *testing.T) {
|
||||||
|
elements := [][]byte{
|
||||||
|
[]byte("hello"),
|
||||||
|
[]byte("world"),
|
||||||
|
[]byte("again"),
|
||||||
|
}
|
||||||
|
queue := NewQueue(4)
|
||||||
|
for i := range elements {
|
||||||
|
queue.Put(elements[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
// take 1
|
||||||
|
body, ok := queue.Take()
|
||||||
|
assert.True(t, ok)
|
||||||
|
element, ok := body.([]byte)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, element, []byte("hello"))
|
||||||
|
|
||||||
|
// put more
|
||||||
|
queue.Put([]byte("b4"))
|
||||||
|
queue.Put([]byte("b5")) // will store in elements[0]
|
||||||
|
queue.Put([]byte("b6")) // cause expansion
|
||||||
|
|
||||||
|
results := [][]byte{
|
||||||
|
[]byte("world"),
|
||||||
|
[]byte("again"),
|
||||||
|
[]byte("b4"),
|
||||||
|
[]byte("b5"),
|
||||||
|
[]byte("b6"),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, element := range results {
|
||||||
|
body, ok := queue.Take()
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, string(element), string(body.([]byte)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package collection
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"container/list"
|
"container/list"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -12,6 +13,11 @@ import (
|
|||||||
|
|
||||||
const drainWorkers = 8
|
const drainWorkers = 8
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrClosed = errors.New("TimingWheel is closed already")
|
||||||
|
ErrArgument = errors.New("incorrect task argument")
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// Execute defines the method to execute the task.
|
// Execute defines the method to execute the task.
|
||||||
Execute func(key, value interface{})
|
Execute func(key, value interface{})
|
||||||
@@ -59,14 +65,15 @@ type (
|
|||||||
// NewTimingWheel returns a TimingWheel.
|
// NewTimingWheel returns a TimingWheel.
|
||||||
func NewTimingWheel(interval time.Duration, numSlots int, execute Execute) (*TimingWheel, error) {
|
func NewTimingWheel(interval time.Duration, numSlots int, execute Execute) (*TimingWheel, error) {
|
||||||
if interval <= 0 || numSlots <= 0 || execute == nil {
|
if interval <= 0 || numSlots <= 0 || execute == nil {
|
||||||
return nil, fmt.Errorf("interval: %v, slots: %d, execute: %p", interval, numSlots, execute)
|
return nil, fmt.Errorf("interval: %v, slots: %d, execute: %p",
|
||||||
|
interval, numSlots, execute)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newTimingWheelWithClock(interval, numSlots, execute, timex.NewTicker(interval))
|
return newTimingWheelWithClock(interval, numSlots, execute, timex.NewTicker(interval))
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execute, ticker timex.Ticker) (
|
func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execute,
|
||||||
*TimingWheel, error) {
|
ticker timex.Ticker) (*TimingWheel, error) {
|
||||||
tw := &TimingWheel{
|
tw := &TimingWheel{
|
||||||
interval: interval,
|
interval: interval,
|
||||||
ticker: ticker,
|
ticker: ticker,
|
||||||
@@ -89,47 +96,67 @@ func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execu
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Drain drains all items and executes them.
|
// Drain drains all items and executes them.
|
||||||
func (tw *TimingWheel) Drain(fn func(key, value interface{})) {
|
func (tw *TimingWheel) Drain(fn func(key, value interface{})) error {
|
||||||
tw.drainChannel <- fn
|
select {
|
||||||
|
case tw.drainChannel <- fn:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MoveTimer moves the task with the given key to the given delay.
|
// MoveTimer moves the task with the given key to the given delay.
|
||||||
func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) {
|
func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) error {
|
||||||
if delay <= 0 || key == nil {
|
if delay <= 0 || key == nil {
|
||||||
return
|
return ErrArgument
|
||||||
}
|
}
|
||||||
|
|
||||||
tw.moveChannel <- baseEntry{
|
select {
|
||||||
|
case tw.moveChannel <- baseEntry{
|
||||||
delay: delay,
|
delay: delay,
|
||||||
key: key,
|
key: key,
|
||||||
|
}:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveTimer removes the task with the given key.
|
// RemoveTimer removes the task with the given key.
|
||||||
func (tw *TimingWheel) RemoveTimer(key interface{}) {
|
func (tw *TimingWheel) RemoveTimer(key interface{}) error {
|
||||||
if key == nil {
|
if key == nil {
|
||||||
return
|
return ErrArgument
|
||||||
}
|
}
|
||||||
|
|
||||||
tw.removeChannel <- key
|
select {
|
||||||
|
case tw.removeChannel <- key:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetTimer sets the task value with the given key to the delay.
|
// SetTimer sets the task value with the given key to the delay.
|
||||||
func (tw *TimingWheel) SetTimer(key, value interface{}, delay time.Duration) {
|
func (tw *TimingWheel) SetTimer(key, value interface{}, delay time.Duration) error {
|
||||||
if delay <= 0 || key == nil {
|
if delay <= 0 || key == nil {
|
||||||
return
|
return ErrArgument
|
||||||
}
|
}
|
||||||
|
|
||||||
tw.setChannel <- timingEntry{
|
select {
|
||||||
|
case tw.setChannel <- timingEntry{
|
||||||
baseEntry: baseEntry{
|
baseEntry: baseEntry{
|
||||||
delay: delay,
|
delay: delay,
|
||||||
key: key,
|
key: key,
|
||||||
},
|
},
|
||||||
value: value,
|
value: value,
|
||||||
|
}:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop stops tw.
|
// Stop stops tw. No more actions after stopping a TimingWheel.
|
||||||
func (tw *TimingWheel) Stop() {
|
func (tw *TimingWheel) Stop() {
|
||||||
close(tw.stopChannel)
|
close(tw.stopChannel)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -28,7 +28,6 @@ func TestTimingWheel_Drain(t *testing.T) {
|
|||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
||||||
}, ticker)
|
}, ticker)
|
||||||
defer tw.Stop()
|
|
||||||
tw.SetTimer("first", 3, testStep*4)
|
tw.SetTimer("first", 3, testStep*4)
|
||||||
tw.SetTimer("second", 5, testStep*7)
|
tw.SetTimer("second", 5, testStep*7)
|
||||||
tw.SetTimer("third", 7, testStep*7)
|
tw.SetTimer("third", 7, testStep*7)
|
||||||
@@ -56,6 +55,8 @@ func TestTimingWheel_Drain(t *testing.T) {
|
|||||||
})
|
})
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
assert.Equal(t, 0, count)
|
assert.Equal(t, 0, count)
|
||||||
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.Drain(func(key, value interface{}) {}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimingWheel_SetTimerSoon(t *testing.T) {
|
func TestTimingWheel_SetTimerSoon(t *testing.T) {
|
||||||
@@ -102,6 +103,13 @@ func TestTimingWheel_SetTimerWrongDelay(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTimingWheel_SetTimerAfterClose(t *testing.T) {
|
||||||
|
ticker := timex.NewFakeTicker()
|
||||||
|
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {}, ticker)
|
||||||
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.SetTimer("any", 3, testStep))
|
||||||
|
}
|
||||||
|
|
||||||
func TestTimingWheel_MoveTimer(t *testing.T) {
|
func TestTimingWheel_MoveTimer(t *testing.T) {
|
||||||
run := syncx.NewAtomicBool()
|
run := syncx.NewAtomicBool()
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
@@ -111,7 +119,6 @@ func TestTimingWheel_MoveTimer(t *testing.T) {
|
|||||||
assert.Equal(t, 3, v.(int))
|
assert.Equal(t, 3, v.(int))
|
||||||
ticker.Done()
|
ticker.Done()
|
||||||
}, ticker)
|
}, ticker)
|
||||||
defer tw.Stop()
|
|
||||||
tw.SetTimer("any", 3, testStep*4)
|
tw.SetTimer("any", 3, testStep*4)
|
||||||
tw.MoveTimer("any", testStep*7)
|
tw.MoveTimer("any", testStep*7)
|
||||||
tw.MoveTimer("any", -testStep)
|
tw.MoveTimer("any", -testStep)
|
||||||
@@ -125,6 +132,8 @@ func TestTimingWheel_MoveTimer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
assert.Nil(t, ticker.Wait(waitTime))
|
assert.Nil(t, ticker.Wait(waitTime))
|
||||||
assert.True(t, run.True())
|
assert.True(t, run.True())
|
||||||
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.MoveTimer("any", time.Millisecond))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimingWheel_MoveTimerSoon(t *testing.T) {
|
func TestTimingWheel_MoveTimerSoon(t *testing.T) {
|
||||||
@@ -175,6 +184,7 @@ func TestTimingWheel_RemoveTimer(t *testing.T) {
|
|||||||
ticker.Tick()
|
ticker.Tick()
|
||||||
}
|
}
|
||||||
tw.Stop()
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.RemoveTimer("any"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimingWheel_SetTimer(t *testing.T) {
|
func TestTimingWheel_SetTimer(t *testing.T) {
|
||||||
|
|||||||
73
core/color/color.go
Normal file
73
core/color/color.go
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package color
|
||||||
|
|
||||||
|
import "github.com/fatih/color"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// NoColor is no color for both foreground and background.
|
||||||
|
NoColor Color = iota
|
||||||
|
// FgBlack is the foreground color black.
|
||||||
|
FgBlack
|
||||||
|
// FgRed is the foreground color red.
|
||||||
|
FgRed
|
||||||
|
// FgGreen is the foreground color green.
|
||||||
|
FgGreen
|
||||||
|
// FgYellow is the foreground color yellow.
|
||||||
|
FgYellow
|
||||||
|
// FgBlue is the foreground color blue.
|
||||||
|
FgBlue
|
||||||
|
// FgMagenta is the foreground color magenta.
|
||||||
|
FgMagenta
|
||||||
|
// FgCyan is the foreground color cyan.
|
||||||
|
FgCyan
|
||||||
|
// FgWhite is the foreground color white.
|
||||||
|
FgWhite
|
||||||
|
|
||||||
|
// BgBlack is the background color black.
|
||||||
|
BgBlack
|
||||||
|
// BgRed is the background color red.
|
||||||
|
BgRed
|
||||||
|
// BgGreen is the background color green.
|
||||||
|
BgGreen
|
||||||
|
// BgYellow is the background color yellow.
|
||||||
|
BgYellow
|
||||||
|
// BgBlue is the background color blue.
|
||||||
|
BgBlue
|
||||||
|
// BgMagenta is the background color magenta.
|
||||||
|
BgMagenta
|
||||||
|
// BgCyan is the background color cyan.
|
||||||
|
BgCyan
|
||||||
|
// BgWhite is the background color white.
|
||||||
|
BgWhite
|
||||||
|
)
|
||||||
|
|
||||||
|
var colors = map[Color][]color.Attribute{
|
||||||
|
FgBlack: {color.FgBlack, color.Bold},
|
||||||
|
FgRed: {color.FgRed, color.Bold},
|
||||||
|
FgGreen: {color.FgGreen, color.Bold},
|
||||||
|
FgYellow: {color.FgYellow, color.Bold},
|
||||||
|
FgBlue: {color.FgBlue, color.Bold},
|
||||||
|
FgMagenta: {color.FgMagenta, color.Bold},
|
||||||
|
FgCyan: {color.FgCyan, color.Bold},
|
||||||
|
FgWhite: {color.FgWhite, color.Bold},
|
||||||
|
BgBlack: {color.BgBlack, color.FgHiWhite, color.Bold},
|
||||||
|
BgRed: {color.BgRed, color.FgHiWhite, color.Bold},
|
||||||
|
BgGreen: {color.BgGreen, color.FgHiWhite, color.Bold},
|
||||||
|
BgYellow: {color.BgHiYellow, color.FgHiBlack, color.Bold},
|
||||||
|
BgBlue: {color.BgBlue, color.FgHiWhite, color.Bold},
|
||||||
|
BgMagenta: {color.BgMagenta, color.FgHiWhite, color.Bold},
|
||||||
|
BgCyan: {color.BgCyan, color.FgHiWhite, color.Bold},
|
||||||
|
BgWhite: {color.BgHiWhite, color.FgHiBlack, color.Bold},
|
||||||
|
}
|
||||||
|
|
||||||
|
type Color uint32
|
||||||
|
|
||||||
|
// WithColor returns a string with the given color applied.
|
||||||
|
func WithColor(text string, colour Color) string {
|
||||||
|
c := color.New(colors[colour]...)
|
||||||
|
return c.Sprint(text)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithColorPadding returns a string with the given color applied with leading and trailing spaces.
|
||||||
|
func WithColorPadding(text string, colour Color) string {
|
||||||
|
return WithColor(" "+text+" ", colour)
|
||||||
|
}
|
||||||
17
core/color/color_test.go
Normal file
17
core/color/color_test.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
package color
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithColor(t *testing.T) {
|
||||||
|
output := WithColor("Hello", BgRed)
|
||||||
|
assert.Equal(t, "Hello", output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithColorPadding(t *testing.T) {
|
||||||
|
output := WithColorPadding("Hello", BgRed)
|
||||||
|
assert.Equal(t, " Hello ", output)
|
||||||
|
}
|
||||||
@@ -6,24 +6,26 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/mapping"
|
"github.com/zeromicro/go-zero/core/mapping"
|
||||||
)
|
)
|
||||||
|
|
||||||
var loaders = map[string]func([]byte, interface{}) error{
|
var loaders = map[string]func([]byte, interface{}) error{
|
||||||
".json": LoadConfigFromJsonBytes,
|
".json": LoadFromJsonBytes,
|
||||||
".yaml": LoadConfigFromYamlBytes,
|
".toml": LoadFromTomlBytes,
|
||||||
".yml": LoadConfigFromYamlBytes,
|
".yaml": LoadFromYamlBytes,
|
||||||
|
".yml": LoadFromYamlBytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
|
// Load loads config into v from file, .json, .yaml and .yml are acceptable.
|
||||||
func LoadConfig(file string, v interface{}, opts ...Option) error {
|
func Load(file string, v interface{}, opts ...Option) error {
|
||||||
content, err := ioutil.ReadFile(file)
|
content, err := ioutil.ReadFile(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
loader, ok := loaders[path.Ext(file)]
|
loader, ok := loaders[strings.ToLower(path.Ext(file))]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("unrecognized file type: %s", file)
|
return fmt.Errorf("unrecognized file type: %s", file)
|
||||||
}
|
}
|
||||||
@@ -40,19 +42,42 @@ func LoadConfig(file string, v interface{}, opts ...Option) error {
|
|||||||
return loader(content, v)
|
return loader(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadConfigFromJsonBytes loads config into v from content json bytes.
|
// LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
|
||||||
func LoadConfigFromJsonBytes(content []byte, v interface{}) error {
|
// Deprecated: use Load instead.
|
||||||
|
func LoadConfig(file string, v interface{}, opts ...Option) error {
|
||||||
|
return Load(file, v, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromJsonBytes loads config into v from content json bytes.
|
||||||
|
func LoadFromJsonBytes(content []byte, v interface{}) error {
|
||||||
return mapping.UnmarshalJsonBytes(content, v)
|
return mapping.UnmarshalJsonBytes(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadConfigFromYamlBytes loads config into v from content yaml bytes.
|
// LoadConfigFromJsonBytes loads config into v from content json bytes.
|
||||||
func LoadConfigFromYamlBytes(content []byte, v interface{}) error {
|
// Deprecated: use LoadFromJsonBytes instead.
|
||||||
|
func LoadConfigFromJsonBytes(content []byte, v interface{}) error {
|
||||||
|
return LoadFromJsonBytes(content, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromTomlBytes loads config into v from content toml bytes.
|
||||||
|
func LoadFromTomlBytes(content []byte, v interface{}) error {
|
||||||
|
return mapping.UnmarshalTomlBytes(content, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromYamlBytes loads config into v from content yaml bytes.
|
||||||
|
func LoadFromYamlBytes(content []byte, v interface{}) error {
|
||||||
return mapping.UnmarshalYamlBytes(content, v)
|
return mapping.UnmarshalYamlBytes(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LoadConfigFromYamlBytes loads config into v from content yaml bytes.
|
||||||
|
// Deprecated: use LoadFromYamlBytes instead.
|
||||||
|
func LoadConfigFromYamlBytes(content []byte, v interface{}) error {
|
||||||
|
return LoadFromYamlBytes(content, v)
|
||||||
|
}
|
||||||
|
|
||||||
// MustLoad loads config into v from path, exits on error.
|
// MustLoad loads config into v from path, exits on error.
|
||||||
func MustLoad(path string, v interface{}, opts ...Option) {
|
func MustLoad(path string, v interface{}, opts ...Option) {
|
||||||
if err := LoadConfig(path, v, opts...); err != nil {
|
if err := Load(path, v, opts...); err != nil {
|
||||||
log.Fatalf("error: config file %s, %s", path, err.Error())
|
log.Fatalf("error: config file %s, %s", path, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,14 +11,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestLoadConfig_notExists(t *testing.T) {
|
func TestLoadConfig_notExists(t *testing.T) {
|
||||||
assert.NotNil(t, LoadConfig("not_a_file", nil))
|
assert.NotNil(t, Load("not_a_file", nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLoadConfig_notRecogFile(t *testing.T) {
|
func TestLoadConfig_notRecogFile(t *testing.T) {
|
||||||
filename, err := fs.TempFilenameWithText("hello")
|
filename, err := fs.TempFilenameWithText("hello")
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer os.Remove(filename)
|
defer os.Remove(filename)
|
||||||
assert.NotNil(t, LoadConfig(filename, nil))
|
assert.NotNil(t, Load(filename, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConfigJson(t *testing.T) {
|
func TestConfigJson(t *testing.T) {
|
||||||
@@ -57,6 +57,58 @@ func TestConfigJson(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestConfigToml(t *testing.T) {
|
||||||
|
text := `a = "foo"
|
||||||
|
b = 1
|
||||||
|
c = "${FOO}"
|
||||||
|
d = "abcd!@#$112"
|
||||||
|
`
|
||||||
|
os.Setenv("FOO", "2")
|
||||||
|
defer os.Unsetenv("FOO")
|
||||||
|
tmpfile, err := createTempFile(".toml", text)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(tmpfile)
|
||||||
|
|
||||||
|
var val struct {
|
||||||
|
A string `json:"a"`
|
||||||
|
B int `json:"b"`
|
||||||
|
C string `json:"c"`
|
||||||
|
D string `json:"d"`
|
||||||
|
}
|
||||||
|
MustLoad(tmpfile, &val)
|
||||||
|
assert.Equal(t, "foo", val.A)
|
||||||
|
assert.Equal(t, 1, val.B)
|
||||||
|
assert.Equal(t, "${FOO}", val.C)
|
||||||
|
assert.Equal(t, "abcd!@#$112", val.D)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigTomlEnv(t *testing.T) {
|
||||||
|
text := `a = "foo"
|
||||||
|
b = 1
|
||||||
|
c = "${FOO}"
|
||||||
|
d = "abcd!@#112"
|
||||||
|
`
|
||||||
|
os.Setenv("FOO", "2")
|
||||||
|
defer os.Unsetenv("FOO")
|
||||||
|
tmpfile, err := createTempFile(".toml", text)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(tmpfile)
|
||||||
|
|
||||||
|
var val struct {
|
||||||
|
A string `json:"a"`
|
||||||
|
B int `json:"b"`
|
||||||
|
C string `json:"c"`
|
||||||
|
D string `json:"d"`
|
||||||
|
}
|
||||||
|
|
||||||
|
MustLoad(tmpfile, &val, UseEnv())
|
||||||
|
assert.Equal(t, "foo", val.A)
|
||||||
|
assert.Equal(t, 1, val.B)
|
||||||
|
assert.Equal(t, "2", val.C)
|
||||||
|
assert.Equal(t, "abcd!@#112", val.D)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
func TestConfigJsonEnv(t *testing.T) {
|
func TestConfigJsonEnv(t *testing.T) {
|
||||||
tests := []string{
|
tests := []string{
|
||||||
".json",
|
".json",
|
||||||
|
|||||||
45
core/conf/readme.md
Normal file
45
core/conf/readme.md
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
## How to use
|
||||||
|
|
||||||
|
1. Define a config structure, like below:
|
||||||
|
|
||||||
|
```go
|
||||||
|
RestfulConf struct {
|
||||||
|
Host string `json:",default=0.0.0.0"`
|
||||||
|
Port int
|
||||||
|
LogMode string `json:",options=[file,console]"
|
||||||
|
Verbose bool `json:",optional"`
|
||||||
|
MaxConns int `json:",default=10000"`
|
||||||
|
MaxBytes int64 `json:",default=1048576"`
|
||||||
|
Timeout time.Duration `json:",default=3s"`
|
||||||
|
CpuThreshold int64 `json:",default=900,range=[0:1000]"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Write the yaml or json config file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# most fields are optional or have default values
|
||||||
|
Port: 8080
|
||||||
|
LogMode: console
|
||||||
|
# you can use env settings
|
||||||
|
MaxBytes: ${MAX_BYTES}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Load the config from a file:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// exit on error
|
||||||
|
var config RestfulConf
|
||||||
|
conf.MustLoad(configFile, &config)
|
||||||
|
|
||||||
|
// or handle the error on your own
|
||||||
|
var config RestfulConf
|
||||||
|
if err := conf.Load(configFile, &config); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// enable reading from environments
|
||||||
|
var config RestfulConf
|
||||||
|
conf.MustLoad(configFile, &config, conf.UseEnv())
|
||||||
|
```
|
||||||
|
|
||||||
@@ -2,6 +2,13 @@ package discov
|
|||||||
|
|
||||||
import "errors"
|
import "errors"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// errEmptyEtcdHosts indicates that etcd hosts are empty.
|
||||||
|
errEmptyEtcdHosts = errors.New("empty etcd hosts")
|
||||||
|
// errEmptyEtcdKey indicates that etcd key is empty.
|
||||||
|
errEmptyEtcdKey = errors.New("empty etcd key")
|
||||||
|
)
|
||||||
|
|
||||||
// EtcdConf is the config item with the given key on etcd.
|
// EtcdConf is the config item with the given key on etcd.
|
||||||
type EtcdConf struct {
|
type EtcdConf struct {
|
||||||
Hosts []string
|
Hosts []string
|
||||||
@@ -27,9 +34,9 @@ func (c EtcdConf) HasTLS() bool {
|
|||||||
// Validate validates c.
|
// Validate validates c.
|
||||||
func (c EtcdConf) Validate() error {
|
func (c EtcdConf) Validate() error {
|
||||||
if len(c.Hosts) == 0 {
|
if len(c.Hosts) == 0 {
|
||||||
return errors.New("empty etcd hosts")
|
return errEmptyEtcdHosts
|
||||||
} else if len(c.Key) == 0 {
|
} else if len(c.Key) == 0 {
|
||||||
return errors.New("empty etcd key")
|
return errEmptyEtcdKey
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,10 +11,12 @@ type (
|
|||||||
errorArray []error
|
errorArray []error
|
||||||
)
|
)
|
||||||
|
|
||||||
// Add adds err to be.
|
// Add adds errs to be, nil errors are ignored.
|
||||||
func (be *BatchError) Add(err error) {
|
func (be *BatchError) Add(errs ...error) {
|
||||||
if err != nil {
|
for _, err := range errs {
|
||||||
be.errs = append(be.errs, err)
|
if err != nil {
|
||||||
|
be.errs = append(be.errs, err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,6 +5,9 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// errExceedFileSize indicates that the file size is exceeded.
|
||||||
|
var errExceedFileSize = errors.New("exceed file size")
|
||||||
|
|
||||||
// A RangeReader is used to read a range of content from a file.
|
// A RangeReader is used to read a range of content from a file.
|
||||||
type RangeReader struct {
|
type RangeReader struct {
|
||||||
file *os.File
|
file *os.File
|
||||||
@@ -29,7 +32,7 @@ func (rr *RangeReader) Read(p []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if rr.stop < rr.start || rr.start >= stat.Size() {
|
if rr.stop < rr.start || rr.start >= stat.Size() {
|
||||||
return 0, errors.New("exceed file size")
|
return 0, errExceedFileSize
|
||||||
}
|
}
|
||||||
|
|
||||||
if rr.stop-rr.start < int64(len(p)) {
|
if rr.stop-rr.start < int64(len(p)) {
|
||||||
|
|||||||
49
core/fs/temps_test.go
Normal file
49
core/fs/temps_test.go
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTempFileWithText(t *testing.T) {
|
||||||
|
f, err := TempFileWithText("test")
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if f == nil {
|
||||||
|
t.Error("TempFileWithText returned nil")
|
||||||
|
}
|
||||||
|
if f.Name() == "" {
|
||||||
|
t.Error("TempFileWithText returned empty file name")
|
||||||
|
}
|
||||||
|
defer os.Remove(f.Name())
|
||||||
|
|
||||||
|
bs, err := ioutil.ReadAll(f)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
if len(bs) != 4 {
|
||||||
|
t.Error("TempFileWithText returned wrong file size")
|
||||||
|
}
|
||||||
|
if f.Close() != nil {
|
||||||
|
t.Error("TempFileWithText returned error on close")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTempFilenameWithText(t *testing.T) {
|
||||||
|
f, err := TempFilenameWithText("test")
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if f == "" {
|
||||||
|
t.Error("TempFilenameWithText returned empty file name")
|
||||||
|
}
|
||||||
|
defer os.Remove(f)
|
||||||
|
|
||||||
|
bs, err := ioutil.ReadFile(f)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
if len(bs) != 4 {
|
||||||
|
t.Error("TempFilenameWithText returned wrong file size")
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -51,5 +51,5 @@ func unmarshalUseNumber(decoder *json.Decoder, v interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func formatError(v string, err error) error {
|
func formatError(v string, err error) error {
|
||||||
return fmt.Errorf("string: `%s`, error: `%s`", v, err.Error())
|
return fmt.Errorf("string: `%s`, error: `%w`", v, err)
|
||||||
}
|
}
|
||||||
|
|||||||
87
core/jsonx/json_test.go
Normal file
87
core/jsonx/json_test.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package jsonx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMarshal(t *testing.T) {
|
||||||
|
var v = struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}{
|
||||||
|
Name: "John",
|
||||||
|
Age: 30,
|
||||||
|
}
|
||||||
|
bs, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, `{"name":"John","age":30}`, string(bs))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshal(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30}`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := Unmarshal([]byte(s), &v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", v.Name)
|
||||||
|
assert.Equal(t, 30, v.Age)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalError(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := Unmarshal([]byte(s), &v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromString(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30}`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromString(s, &v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", v.Name)
|
||||||
|
assert.Equal(t, 30, v.Age)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromStringError(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromString(s, &v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromRead(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30}`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromReader(strings.NewReader(s), &v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", v.Name)
|
||||||
|
assert.Equal(t, 30, v.Age)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromReaderError(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromReader(strings.NewReader(s), &v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
@@ -14,8 +14,8 @@ local window = tonumber(ARGV[2])
|
|||||||
local current = redis.call("INCRBY", KEYS[1], 1)
|
local current = redis.call("INCRBY", KEYS[1], 1)
|
||||||
if current == 1 then
|
if current == 1 then
|
||||||
redis.call("expire", KEYS[1], window)
|
redis.call("expire", KEYS[1], window)
|
||||||
return 1
|
end
|
||||||
elseif current < limit then
|
if current < limit then
|
||||||
return 1
|
return 1
|
||||||
elseif current == limit then
|
elseif current == limit then
|
||||||
return 2
|
return 2
|
||||||
|
|||||||
@@ -23,10 +23,9 @@ func TestPeriodLimit_RedisUnavailable(t *testing.T) {
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
seconds = 1
|
seconds = 1
|
||||||
total = 100
|
|
||||||
quota = 5
|
quota = 5
|
||||||
)
|
)
|
||||||
l := NewPeriodLimit(seconds, quota, redis.NewRedis(s.Addr(), redis.NodeType), "periodlimit")
|
l := NewPeriodLimit(seconds, quota, redis.New(s.Addr()), "periodlimit")
|
||||||
s.Close()
|
s.Close()
|
||||||
val, err := l.Take("first")
|
val, err := l.Take("first")
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
@@ -66,3 +65,13 @@ func testPeriodLimit(t *testing.T, opts ...PeriodOption) {
|
|||||||
assert.Equal(t, 1, hitQuota)
|
assert.Equal(t, 1, hitQuota)
|
||||||
assert.Equal(t, total-quota, overQuota)
|
assert.Equal(t, total-quota, overQuota)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestQuotaFull(t *testing.T) {
|
||||||
|
s, err := miniredis.Run()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
l := NewPeriodLimit(1, 1, redis.New(s.Addr()), "periodlimit")
|
||||||
|
val, err := l.Take("first")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, HitQuota, val)
|
||||||
|
}
|
||||||
|
|||||||
26
core/logx/color.go
Normal file
26
core/logx/color.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/color"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithColor is a helper function to add color to a string, only in plain encoding.
|
||||||
|
func WithColor(text string, colour color.Color) string {
|
||||||
|
if atomic.LoadUint32(&encoding) == plainEncodingType {
|
||||||
|
return color.WithColor(text, colour)
|
||||||
|
}
|
||||||
|
|
||||||
|
return text
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithColorPadding is a helper function to add color to a string with leading and trailing spaces,
|
||||||
|
// only in plain encoding.
|
||||||
|
func WithColorPadding(text string, colour color.Color) string {
|
||||||
|
if atomic.LoadUint32(&encoding) == plainEncodingType {
|
||||||
|
return color.WithColorPadding(text, colour)
|
||||||
|
}
|
||||||
|
|
||||||
|
return text
|
||||||
|
}
|
||||||
33
core/logx/color_test.go
Normal file
33
core/logx/color_test.go
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/color"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithColor(t *testing.T) {
|
||||||
|
old := atomic.SwapUint32(&encoding, plainEncodingType)
|
||||||
|
defer atomic.StoreUint32(&encoding, old)
|
||||||
|
|
||||||
|
output := WithColor("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, "hello", output)
|
||||||
|
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
|
output = WithColor("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, "hello", output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithColorPadding(t *testing.T) {
|
||||||
|
old := atomic.SwapUint32(&encoding, plainEncodingType)
|
||||||
|
defer atomic.StoreUint32(&encoding, old)
|
||||||
|
|
||||||
|
output := WithColorPadding("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, " hello ", output)
|
||||||
|
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
|
output = WithColorPadding("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, "hello", output)
|
||||||
|
}
|
||||||
@@ -1,17 +1,13 @@
|
|||||||
package logx
|
package logx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
)
|
)
|
||||||
|
|
||||||
const durationCallerDepth = 3
|
|
||||||
|
|
||||||
type durationLogger logEntry
|
|
||||||
|
|
||||||
// WithDuration returns a Logger which logs the given duration.
|
// WithDuration returns a Logger which logs the given duration.
|
||||||
func WithDuration(d time.Duration) Logger {
|
func WithDuration(d time.Duration) Logger {
|
||||||
return &durationLogger{
|
return &durationLogger{
|
||||||
@@ -19,57 +15,62 @@ func WithDuration(d time.Duration) Logger {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type durationLogger logEntry
|
||||||
|
|
||||||
func (l *durationLogger) Error(v ...interface{}) {
|
func (l *durationLogger) Error(v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.err(fmt.Sprint(v...))
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Errorf(format string, v ...interface{}) {
|
func (l *durationLogger) Errorf(format string, v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.err(fmt.Sprintf(format, v...))
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Errorv(v interface{}) {
|
func (l *durationLogger) Errorv(v interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.err(v)
|
||||||
l.write(errorLog, levelError, v)
|
}
|
||||||
}
|
|
||||||
|
func (l *durationLogger) Errorw(msg string, fields ...LogField) {
|
||||||
|
l.err(msg, fields...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Info(v ...interface{}) {
|
func (l *durationLogger) Info(v ...interface{}) {
|
||||||
if shallLog(InfoLevel) {
|
l.info(fmt.Sprint(v...))
|
||||||
l.write(infoLog, levelInfo, fmt.Sprint(v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Infof(format string, v ...interface{}) {
|
func (l *durationLogger) Infof(format string, v ...interface{}) {
|
||||||
if shallLog(InfoLevel) {
|
l.info(fmt.Sprintf(format, v...))
|
||||||
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Infov(v interface{}) {
|
func (l *durationLogger) Infov(v interface{}) {
|
||||||
if shallLog(InfoLevel) {
|
l.info(v)
|
||||||
l.write(infoLog, levelInfo, v)
|
}
|
||||||
}
|
|
||||||
|
func (l *durationLogger) Infow(msg string, fields ...LogField) {
|
||||||
|
l.info(msg, fields...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Slow(v ...interface{}) {
|
func (l *durationLogger) Slow(v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.slow(fmt.Sprint(v...))
|
||||||
l.write(slowLog, levelSlow, fmt.Sprint(v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Slowf(format string, v ...interface{}) {
|
func (l *durationLogger) Slowf(format string, v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.slow(fmt.Sprintf(format, v...))
|
||||||
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) Slowv(v interface{}) {
|
func (l *durationLogger) Slowv(v interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.slow(v)
|
||||||
l.write(slowLog, levelSlow, v)
|
}
|
||||||
|
|
||||||
|
func (l *durationLogger) Sloww(msg string, fields ...LogField) {
|
||||||
|
l.slow(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *durationLogger) WithContext(ctx context.Context) Logger {
|
||||||
|
return &traceLogger{
|
||||||
|
ctx: ctx,
|
||||||
|
logEntry: logEntry{
|
||||||
|
Duration: l.Duration,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,16 +79,23 @@ func (l *durationLogger) WithDuration(duration time.Duration) Logger {
|
|||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *durationLogger) write(writer io.Writer, level string, val interface{}) {
|
func (l *durationLogger) err(v interface{}, fields ...LogField) {
|
||||||
switch encoding {
|
if shallLog(ErrorLevel) {
|
||||||
case plainEncodingType:
|
fields = append(fields, Field(durationKey, l.Duration))
|
||||||
writePlainAny(writer, level, val, l.Duration)
|
getWriter().Error(v, fields...)
|
||||||
default:
|
}
|
||||||
outputJson(writer, &durationLogger{
|
}
|
||||||
Timestamp: getTimestamp(),
|
|
||||||
Level: level,
|
func (l *durationLogger) info(v interface{}, fields ...LogField) {
|
||||||
Content: val,
|
if shallLog(InfoLevel) {
|
||||||
Duration: l.Duration,
|
fields = append(fields, Field(durationKey, l.Duration))
|
||||||
})
|
getWriter().Info(v, fields...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *durationLogger) slow(v interface{}, fields ...LogField) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
fields = append(fields, Field(durationKey, l.Duration))
|
||||||
|
getWriter().Slow(v, fields...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,86 +1,161 @@
|
|||||||
package logx
|
package logx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
"context"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestWithDurationError(t *testing.T) {
|
func TestWithDurationError(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Error("foo")
|
WithDuration(time.Second).Error("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationErrorf(t *testing.T) {
|
func TestWithDurationErrorf(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Errorf("foo")
|
WithDuration(time.Second).Errorf("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationErrorv(t *testing.T) {
|
func TestWithDurationErrorv(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Errorv("foo")
|
WithDuration(time.Second).Errorv("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithDurationErrorw(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
WithDuration(time.Second).Errorw("foo", Field("foo", "bar"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationInfo(t *testing.T) {
|
func TestWithDurationInfo(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Info("foo")
|
WithDuration(time.Second).Info("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationInfoConsole(t *testing.T) {
|
func TestWithDurationInfoConsole(t *testing.T) {
|
||||||
old := encoding
|
old := atomic.LoadUint32(&encoding)
|
||||||
encoding = plainEncodingType
|
atomic.StoreUint32(&encoding, plainEncodingType)
|
||||||
defer func() {
|
defer func() {
|
||||||
encoding = old
|
atomic.StoreUint32(&encoding, old)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
o := writer.Swap(w)
|
||||||
|
defer writer.Store(o)
|
||||||
|
|
||||||
WithDuration(time.Second).Info("foo")
|
WithDuration(time.Second).Info("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "ms"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "ms"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationInfof(t *testing.T) {
|
func TestWithDurationInfof(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Infof("foo")
|
WithDuration(time.Second).Infof("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationInfov(t *testing.T) {
|
func TestWithDurationInfov(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Infov("foo")
|
WithDuration(time.Second).Infov("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithDurationInfow(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
WithDuration(time.Second).Infow("foo", Field("foo", "bar"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithDurationWithContextInfow(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
otp := otel.GetTracerProvider()
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
|
otel.SetTracerProvider(tp)
|
||||||
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
|
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
|
WithDuration(time.Second).WithContext(ctx).Infow("foo", Field("foo", "bar"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "trace"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "span"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationSlow(t *testing.T) {
|
func TestWithDurationSlow(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Slow("foo")
|
WithDuration(time.Second).Slow("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationSlowf(t *testing.T) {
|
func TestWithDurationSlowf(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).WithDuration(time.Hour).Slowf("foo")
|
WithDuration(time.Second).WithDuration(time.Hour).Slowf("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithDurationSlowv(t *testing.T) {
|
func TestWithDurationSlowv(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).WithDuration(time.Hour).Slowv("foo")
|
WithDuration(time.Second).WithDuration(time.Hour).Slowv("foo")
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithDurationSloww(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
WithDuration(time.Second).WithDuration(time.Hour).Sloww("foo", Field("foo", "bar"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "duration"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package logx
|
package logx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -9,23 +8,27 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestLessLogger_Error(t *testing.T) {
|
func TestLessLogger_Error(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
l := NewLessLogger(500)
|
l := NewLessLogger(500)
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
l.Error("hello")
|
l.Error("hello")
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.Equal(t, 1, strings.Count(builder.String(), "\n"))
|
assert.Equal(t, 1, strings.Count(w.String(), "\n"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLessLogger_Errorf(t *testing.T) {
|
func TestLessLogger_Errorf(t *testing.T) {
|
||||||
var builder strings.Builder
|
w := new(mockWriter)
|
||||||
log.SetOutput(&builder)
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
l := NewLessLogger(500)
|
l := NewLessLogger(500)
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
l.Errorf("hello")
|
l.Errorf("hello")
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.Equal(t, 1, strings.Count(builder.String(), "\n"))
|
assert.Equal(t, 1, strings.Count(w.String(), "\n"))
|
||||||
}
|
}
|
||||||
|
|||||||
38
core/logx/logger.go
Normal file
38
core/logx/logger.go
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Logger represents a logger.
|
||||||
|
type Logger interface {
|
||||||
|
// Error logs a message at error level.
|
||||||
|
Error(...interface{})
|
||||||
|
// Errorf logs a message at error level.
|
||||||
|
Errorf(string, ...interface{})
|
||||||
|
// Errorv logs a message at error level.
|
||||||
|
Errorv(interface{})
|
||||||
|
// Errorw logs a message at error level.
|
||||||
|
Errorw(string, ...LogField)
|
||||||
|
// Info logs a message at info level.
|
||||||
|
Info(...interface{})
|
||||||
|
// Infof logs a message at info level.
|
||||||
|
Infof(string, ...interface{})
|
||||||
|
// Infov logs a message at info level.
|
||||||
|
Infov(interface{})
|
||||||
|
// Infow logs a message at info level.
|
||||||
|
Infow(string, ...LogField)
|
||||||
|
// Slow logs a message at slow level.
|
||||||
|
Slow(...interface{})
|
||||||
|
// Slowf logs a message at slow level.
|
||||||
|
Slowf(string, ...interface{})
|
||||||
|
// Slowv logs a message at slow level.
|
||||||
|
Slowv(interface{})
|
||||||
|
// Sloww logs a message at slow level.
|
||||||
|
Sloww(string, ...LogField)
|
||||||
|
// WithContext returns a new logger with the given context.
|
||||||
|
WithContext(context.Context) Logger
|
||||||
|
// WithDuration returns a new logger with the given duration.
|
||||||
|
WithDuration(time.Duration) Logger
|
||||||
|
}
|
||||||
@@ -1,93 +1,29 @@
|
|||||||
package logx
|
package logx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/iox"
|
|
||||||
"github.com/zeromicro/go-zero/core/sysx"
|
"github.com/zeromicro/go-zero/core/sysx"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const callerDepth = 5
|
||||||
// InfoLevel logs everything
|
|
||||||
InfoLevel = iota
|
|
||||||
// ErrorLevel includes errors, slows, stacks
|
|
||||||
ErrorLevel
|
|
||||||
// SevereLevel only log severe messages
|
|
||||||
SevereLevel
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
jsonEncodingType = iota
|
|
||||||
plainEncodingType
|
|
||||||
|
|
||||||
jsonEncoding = "json"
|
|
||||||
plainEncoding = "plain"
|
|
||||||
plainEncodingSep = '\t'
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
accessFilename = "access.log"
|
|
||||||
errorFilename = "error.log"
|
|
||||||
severeFilename = "severe.log"
|
|
||||||
slowFilename = "slow.log"
|
|
||||||
statFilename = "stat.log"
|
|
||||||
|
|
||||||
consoleMode = "console"
|
|
||||||
volumeMode = "volume"
|
|
||||||
|
|
||||||
levelAlert = "alert"
|
|
||||||
levelInfo = "info"
|
|
||||||
levelError = "error"
|
|
||||||
levelSevere = "severe"
|
|
||||||
levelFatal = "fatal"
|
|
||||||
levelSlow = "slow"
|
|
||||||
levelStat = "stat"
|
|
||||||
|
|
||||||
backupFileDelimiter = "-"
|
|
||||||
callerInnerDepth = 5
|
|
||||||
flags = 0x0
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// ErrLogPathNotSet is an error that indicates the log path is not set.
|
timeFormat = "2006-01-02T15:04:05.000Z07:00"
|
||||||
ErrLogPathNotSet = errors.New("log path must be set")
|
logLevel uint32
|
||||||
// ErrLogNotInitialized is an error that log is not initialized.
|
encoding uint32 = jsonEncodingType
|
||||||
ErrLogNotInitialized = errors.New("log not initialized")
|
|
||||||
// ErrLogServiceNameNotSet is an error that indicates that the service name is not set.
|
|
||||||
ErrLogServiceNameNotSet = errors.New("log service name must be set")
|
|
||||||
|
|
||||||
timeFormat = "2006-01-02T15:04:05.000Z07"
|
|
||||||
writeConsole bool
|
|
||||||
logLevel uint32
|
|
||||||
encoding = jsonEncodingType
|
|
||||||
// use uint32 for atomic operations
|
// use uint32 for atomic operations
|
||||||
disableStat uint32
|
disableStat uint32
|
||||||
infoLog io.WriteCloser
|
|
||||||
errorLog io.WriteCloser
|
|
||||||
severeLog io.WriteCloser
|
|
||||||
slowLog io.WriteCloser
|
|
||||||
statLog io.WriteCloser
|
|
||||||
stackLog io.Writer
|
|
||||||
|
|
||||||
once sync.Once
|
options logOptions
|
||||||
initialized uint32
|
writer = new(atomicWriter)
|
||||||
options logOptions
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@@ -95,109 +31,37 @@ type (
|
|||||||
Timestamp string `json:"@timestamp"`
|
Timestamp string `json:"@timestamp"`
|
||||||
Level string `json:"level"`
|
Level string `json:"level"`
|
||||||
Duration string `json:"duration,omitempty"`
|
Duration string `json:"duration,omitempty"`
|
||||||
|
Caller string `json:"caller,omitempty"`
|
||||||
Content interface{} `json:"content"`
|
Content interface{} `json:"content"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logEntryWithFields map[string]interface{}
|
||||||
|
|
||||||
logOptions struct {
|
logOptions struct {
|
||||||
gzipEnabled bool
|
gzipEnabled bool
|
||||||
logStackCooldownMills int
|
logStackCooldownMills int
|
||||||
keepDays int
|
keepDays int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LogField is a key-value pair that will be added to the log entry.
|
||||||
|
LogField struct {
|
||||||
|
Key string
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
// LogOption defines the method to customize the logging.
|
// LogOption defines the method to customize the logging.
|
||||||
LogOption func(options *logOptions)
|
LogOption func(options *logOptions)
|
||||||
|
|
||||||
// A Logger represents a logger.
|
|
||||||
Logger interface {
|
|
||||||
Error(...interface{})
|
|
||||||
Errorf(string, ...interface{})
|
|
||||||
Errorv(interface{})
|
|
||||||
Info(...interface{})
|
|
||||||
Infof(string, ...interface{})
|
|
||||||
Infov(interface{})
|
|
||||||
Slow(...interface{})
|
|
||||||
Slowf(string, ...interface{})
|
|
||||||
Slowv(interface{})
|
|
||||||
WithDuration(time.Duration) Logger
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MustSetup sets up logging with given config c. It exits on error.
|
|
||||||
func MustSetup(c LogConf) {
|
|
||||||
Must(SetUp(c))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetUp sets up the logx. If already set up, just return nil.
|
|
||||||
// we allow SetUp to be called multiple times, because for example
|
|
||||||
// we need to allow different service frameworks to initialize logx respectively.
|
|
||||||
// the same logic for SetUp
|
|
||||||
func SetUp(c LogConf) error {
|
|
||||||
if len(c.TimeFormat) > 0 {
|
|
||||||
timeFormat = c.TimeFormat
|
|
||||||
}
|
|
||||||
switch c.Encoding {
|
|
||||||
case plainEncoding:
|
|
||||||
encoding = plainEncodingType
|
|
||||||
default:
|
|
||||||
encoding = jsonEncodingType
|
|
||||||
}
|
|
||||||
|
|
||||||
switch c.Mode {
|
|
||||||
case consoleMode:
|
|
||||||
setupWithConsole(c)
|
|
||||||
return nil
|
|
||||||
case volumeMode:
|
|
||||||
return setupWithVolume(c)
|
|
||||||
default:
|
|
||||||
return setupWithFiles(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Alert alerts v in alert level, and the message is written to error log.
|
// Alert alerts v in alert level, and the message is written to error log.
|
||||||
func Alert(v string) {
|
func Alert(v string) {
|
||||||
outputText(errorLog, levelAlert, v)
|
getWriter().Alert(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close closes the logging.
|
// Close closes the logging.
|
||||||
func Close() error {
|
func Close() error {
|
||||||
if writeConsole {
|
if w := writer.Swap(nil); w != nil {
|
||||||
return nil
|
return w.(io.Closer).Close()
|
||||||
}
|
|
||||||
|
|
||||||
if atomic.LoadUint32(&initialized) == 0 {
|
|
||||||
return ErrLogNotInitialized
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic.StoreUint32(&initialized, 0)
|
|
||||||
|
|
||||||
if infoLog != nil {
|
|
||||||
if err := infoLog.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if errorLog != nil {
|
|
||||||
if err := errorLog.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if severeLog != nil {
|
|
||||||
if err := severeLog.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if slowLog != nil {
|
|
||||||
if err := slowLog.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if statLog != nil {
|
|
||||||
if err := statLog.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -205,16 +69,7 @@ func Close() error {
|
|||||||
|
|
||||||
// Disable disables the logging.
|
// Disable disables the logging.
|
||||||
func Disable() {
|
func Disable() {
|
||||||
once.Do(func() {
|
writer.Store(nopWriter{})
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
|
|
||||||
infoLog = iox.NopCloser(ioutil.Discard)
|
|
||||||
errorLog = iox.NopCloser(ioutil.Discard)
|
|
||||||
severeLog = iox.NopCloser(ioutil.Discard)
|
|
||||||
slowLog = iox.NopCloser(ioutil.Discard)
|
|
||||||
statLog = iox.NopCloser(ioutil.Discard)
|
|
||||||
stackLog = ioutil.Discard
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DisableStat disables the stat logs.
|
// DisableStat disables the stat logs.
|
||||||
@@ -224,22 +79,12 @@ func DisableStat() {
|
|||||||
|
|
||||||
// Error writes v into error log.
|
// Error writes v into error log.
|
||||||
func Error(v ...interface{}) {
|
func Error(v ...interface{}) {
|
||||||
ErrorCaller(1, v...)
|
errorTextSync(fmt.Sprint(v...))
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorCaller writes v with context into error log.
|
|
||||||
func ErrorCaller(callDepth int, v ...interface{}) {
|
|
||||||
errorTextSync(fmt.Sprint(v...), callDepth+callerInnerDepth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorCallerf writes v with context in format into error log.
|
|
||||||
func ErrorCallerf(callDepth int, format string, v ...interface{}) {
|
|
||||||
errorTextSync(fmt.Errorf(format, v...).Error(), callDepth+callerInnerDepth)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errorf writes v with format into error log.
|
// Errorf writes v with format into error log.
|
||||||
func Errorf(format string, v ...interface{}) {
|
func Errorf(format string, v ...interface{}) {
|
||||||
ErrorCallerf(1, format, v...)
|
errorTextSync(fmt.Errorf(format, v...).Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorStack writes v along with call stack into error log.
|
// ErrorStack writes v along with call stack into error log.
|
||||||
@@ -260,6 +105,49 @@ func Errorv(v interface{}) {
|
|||||||
errorAnySync(v)
|
errorAnySync(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Errorw writes msg along with fields into error log.
|
||||||
|
func Errorw(msg string, fields ...LogField) {
|
||||||
|
errorFieldsSync(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field returns a LogField for the given key and value.
|
||||||
|
func Field(key string, value interface{}) LogField {
|
||||||
|
switch val := value.(type) {
|
||||||
|
case error:
|
||||||
|
return LogField{Key: key, Value: val.Error()}
|
||||||
|
case []error:
|
||||||
|
var errs []string
|
||||||
|
for _, err := range val {
|
||||||
|
errs = append(errs, err.Error())
|
||||||
|
}
|
||||||
|
return LogField{Key: key, Value: errs}
|
||||||
|
case time.Duration:
|
||||||
|
return LogField{Key: key, Value: fmt.Sprint(val)}
|
||||||
|
case []time.Duration:
|
||||||
|
var durs []string
|
||||||
|
for _, dur := range val {
|
||||||
|
durs = append(durs, fmt.Sprint(dur))
|
||||||
|
}
|
||||||
|
return LogField{Key: key, Value: durs}
|
||||||
|
case []time.Time:
|
||||||
|
var times []string
|
||||||
|
for _, t := range val {
|
||||||
|
times = append(times, fmt.Sprint(t))
|
||||||
|
}
|
||||||
|
return LogField{Key: key, Value: times}
|
||||||
|
case fmt.Stringer:
|
||||||
|
return LogField{Key: key, Value: val.String()}
|
||||||
|
case []fmt.Stringer:
|
||||||
|
var strs []string
|
||||||
|
for _, str := range val {
|
||||||
|
strs = append(strs, str.String())
|
||||||
|
}
|
||||||
|
return LogField{Key: key, Value: strs}
|
||||||
|
default:
|
||||||
|
return LogField{Key: key, Value: val}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Info writes v into access log.
|
// Info writes v into access log.
|
||||||
func Info(v ...interface{}) {
|
func Info(v ...interface{}) {
|
||||||
infoTextSync(fmt.Sprint(v...))
|
infoTextSync(fmt.Sprint(v...))
|
||||||
@@ -275,14 +163,32 @@ func Infov(v interface{}) {
|
|||||||
infoAnySync(v)
|
infoAnySync(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Must checks if err is nil, otherwise logs the err and exits.
|
// Infow writes msg along with fields into access log.
|
||||||
|
func Infow(msg string, fields ...LogField) {
|
||||||
|
infoFieldsSync(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must checks if err is nil, otherwise logs the error and exits.
|
||||||
func Must(err error) {
|
func Must(err error) {
|
||||||
if err != nil {
|
if err == nil {
|
||||||
msg := formatWithCaller(err.Error(), 3)
|
return
|
||||||
log.Print(msg)
|
|
||||||
outputText(severeLog, levelFatal, msg)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
msg := err.Error()
|
||||||
|
log.Print(msg)
|
||||||
|
getWriter().Severe(msg)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustSetup sets up logging with given config c. It exits on error.
|
||||||
|
func MustSetup(c LogConf) {
|
||||||
|
Must(SetUp(c))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset clears the writer and resets the log level.
|
||||||
|
func Reset() Writer {
|
||||||
|
SetLevel(InfoLevel)
|
||||||
|
return writer.Swap(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetLevel sets the logging level. It can be used to suppress some logs.
|
// SetLevel sets the logging level. It can be used to suppress some logs.
|
||||||
@@ -290,6 +196,43 @@ func SetLevel(level uint32) {
|
|||||||
atomic.StoreUint32(&logLevel, level)
|
atomic.StoreUint32(&logLevel, level)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetWriter sets the logging writer. It can be used to customize the logging.
|
||||||
|
// Call Reset before calling SetWriter again.
|
||||||
|
func SetWriter(w Writer) {
|
||||||
|
if writer.Load() == nil {
|
||||||
|
writer.Store(w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUp sets up the logx. If already set up, just return nil.
|
||||||
|
// we allow SetUp to be called multiple times, because for example
|
||||||
|
// we need to allow different service frameworks to initialize logx respectively.
|
||||||
|
// the same logic for SetUp
|
||||||
|
func SetUp(c LogConf) error {
|
||||||
|
setupLogLevel(c)
|
||||||
|
|
||||||
|
if len(c.TimeFormat) > 0 {
|
||||||
|
timeFormat = c.TimeFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
switch c.Encoding {
|
||||||
|
case plainEncoding:
|
||||||
|
atomic.StoreUint32(&encoding, plainEncodingType)
|
||||||
|
default:
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch c.Mode {
|
||||||
|
case fileMode:
|
||||||
|
return setupWithFiles(c)
|
||||||
|
case volumeMode:
|
||||||
|
return setupWithVolume(c)
|
||||||
|
default:
|
||||||
|
setupWithConsole()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Severe writes v into severe log.
|
// Severe writes v into severe log.
|
||||||
func Severe(v ...interface{}) {
|
func Severe(v ...interface{}) {
|
||||||
severeSync(fmt.Sprint(v...))
|
severeSync(fmt.Sprint(v...))
|
||||||
@@ -315,6 +258,11 @@ func Slowv(v interface{}) {
|
|||||||
slowAnySync(v)
|
slowAnySync(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Sloww writes msg along with fields into slow log.
|
||||||
|
func Sloww(msg string, fields ...LogField) {
|
||||||
|
slowFieldsSync(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
// Stat writes v into stat log.
|
// Stat writes v into stat log.
|
||||||
func Stat(v ...interface{}) {
|
func Stat(v ...interface{}) {
|
||||||
statSync(fmt.Sprint(v...))
|
statSync(fmt.Sprint(v...))
|
||||||
@@ -357,52 +305,30 @@ func createOutput(path string) (io.WriteCloser, error) {
|
|||||||
|
|
||||||
func errorAnySync(v interface{}) {
|
func errorAnySync(v interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
outputAny(errorLog, levelError, v)
|
getWriter().Error(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func errorTextSync(msg string, callDepth int) {
|
func errorFieldsSync(content string, fields ...LogField) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
outputError(errorLog, msg, callDepth)
|
getWriter().Error(content, fields...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func formatWithCaller(msg string, callDepth int) string {
|
func errorTextSync(msg string) {
|
||||||
var buf strings.Builder
|
if shallLog(ErrorLevel) {
|
||||||
|
getWriter().Error(msg)
|
||||||
caller := getCaller(callDepth)
|
|
||||||
if len(caller) > 0 {
|
|
||||||
buf.WriteString(caller)
|
|
||||||
buf.WriteByte(' ')
|
|
||||||
}
|
}
|
||||||
|
|
||||||
buf.WriteString(msg)
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func getCaller(callDepth int) string {
|
func getWriter() Writer {
|
||||||
var buf strings.Builder
|
w := writer.Load()
|
||||||
|
if w == nil {
|
||||||
_, file, line, ok := runtime.Caller(callDepth)
|
w = newConsoleWriter()
|
||||||
if ok {
|
writer.Store(w)
|
||||||
short := file
|
|
||||||
for i := len(file) - 1; i > 0; i-- {
|
|
||||||
if file[i] == '/' {
|
|
||||||
short = file[i+1:]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.WriteString(short)
|
|
||||||
buf.WriteByte(':')
|
|
||||||
buf.WriteString(strconv.Itoa(line))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return buf.String()
|
return w
|
||||||
}
|
|
||||||
|
|
||||||
func getTimestamp() string {
|
|
||||||
return timex.Time().Format(timeFormat)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleOptions(opts []LogOption) {
|
func handleOptions(opts []LogOption) {
|
||||||
@@ -413,56 +339,19 @@ func handleOptions(opts []LogOption) {
|
|||||||
|
|
||||||
func infoAnySync(val interface{}) {
|
func infoAnySync(val interface{}) {
|
||||||
if shallLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
outputAny(infoLog, levelInfo, val)
|
getWriter().Info(val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func infoFieldsSync(content string, fields ...LogField) {
|
||||||
|
if shallLog(InfoLevel) {
|
||||||
|
getWriter().Info(content, fields...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func infoTextSync(msg string) {
|
func infoTextSync(msg string) {
|
||||||
if shallLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
outputText(infoLog, levelInfo, msg)
|
getWriter().Info(msg)
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func outputAny(writer io.Writer, level string, val interface{}) {
|
|
||||||
switch encoding {
|
|
||||||
case plainEncodingType:
|
|
||||||
writePlainAny(writer, level, val)
|
|
||||||
default:
|
|
||||||
info := logEntry{
|
|
||||||
Timestamp: getTimestamp(),
|
|
||||||
Level: level,
|
|
||||||
Content: val,
|
|
||||||
}
|
|
||||||
outputJson(writer, info)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func outputText(writer io.Writer, level, msg string) {
|
|
||||||
switch encoding {
|
|
||||||
case plainEncodingType:
|
|
||||||
writePlainText(writer, level, msg)
|
|
||||||
default:
|
|
||||||
info := logEntry{
|
|
||||||
Timestamp: getTimestamp(),
|
|
||||||
Level: level,
|
|
||||||
Content: msg,
|
|
||||||
}
|
|
||||||
outputJson(writer, info)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func outputError(writer io.Writer, msg string, callDepth int) {
|
|
||||||
content := formatWithCaller(msg, callDepth)
|
|
||||||
outputText(writer, levelError, content)
|
|
||||||
}
|
|
||||||
|
|
||||||
func outputJson(writer io.Writer, info interface{}) {
|
|
||||||
if content, err := json.Marshal(info); err != nil {
|
|
||||||
log.Println(err.Error())
|
|
||||||
} else if atomic.LoadUint32(&initialized) == 0 || writer == nil {
|
|
||||||
log.Println(string(content))
|
|
||||||
} else {
|
|
||||||
writer.Write(append(content, '\n'))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -477,72 +366,18 @@ func setupLogLevel(c LogConf) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupWithConsole(c LogConf) {
|
func setupWithConsole() {
|
||||||
once.Do(func() {
|
SetWriter(newConsoleWriter())
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
writeConsole = true
|
|
||||||
setupLogLevel(c)
|
|
||||||
|
|
||||||
infoLog = newLogWriter(log.New(os.Stdout, "", flags))
|
|
||||||
errorLog = newLogWriter(log.New(os.Stderr, "", flags))
|
|
||||||
severeLog = newLogWriter(log.New(os.Stderr, "", flags))
|
|
||||||
slowLog = newLogWriter(log.New(os.Stderr, "", flags))
|
|
||||||
stackLog = newLessWriter(errorLog, options.logStackCooldownMills)
|
|
||||||
statLog = infoLog
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupWithFiles(c LogConf) error {
|
func setupWithFiles(c LogConf) error {
|
||||||
var opts []LogOption
|
w, err := newFileWriter(c)
|
||||||
var err error
|
if err != nil {
|
||||||
|
return err
|
||||||
if len(c.Path) == 0 {
|
|
||||||
return ErrLogPathNotSet
|
|
||||||
}
|
}
|
||||||
|
|
||||||
opts = append(opts, WithCooldownMillis(c.StackCooldownMillis))
|
SetWriter(w)
|
||||||
if c.Compress {
|
return nil
|
||||||
opts = append(opts, WithGzip())
|
|
||||||
}
|
|
||||||
if c.KeepDays > 0 {
|
|
||||||
opts = append(opts, WithKeepDays(c.KeepDays))
|
|
||||||
}
|
|
||||||
|
|
||||||
accessFile := path.Join(c.Path, accessFilename)
|
|
||||||
errorFile := path.Join(c.Path, errorFilename)
|
|
||||||
severeFile := path.Join(c.Path, severeFilename)
|
|
||||||
slowFile := path.Join(c.Path, slowFilename)
|
|
||||||
statFile := path.Join(c.Path, statFilename)
|
|
||||||
|
|
||||||
once.Do(func() {
|
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
handleOptions(opts)
|
|
||||||
setupLogLevel(c)
|
|
||||||
|
|
||||||
if infoLog, err = createOutput(accessFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if errorLog, err = createOutput(errorFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if severeLog, err = createOutput(severeFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if slowLog, err = createOutput(slowFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if statLog, err = createOutput(statFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
stackLog = newLessWriter(errorLog, options.logStackCooldownMills)
|
|
||||||
})
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupWithVolume(c LogConf) error {
|
func setupWithVolume(c LogConf) error {
|
||||||
@@ -556,7 +391,7 @@ func setupWithVolume(c LogConf) error {
|
|||||||
|
|
||||||
func severeSync(msg string) {
|
func severeSync(msg string) {
|
||||||
if shallLog(SevereLevel) {
|
if shallLog(SevereLevel) {
|
||||||
outputText(severeLog, levelSevere, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
getWriter().Severe(fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -570,99 +405,30 @@ func shallLogStat() bool {
|
|||||||
|
|
||||||
func slowAnySync(v interface{}) {
|
func slowAnySync(v interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
outputAny(slowLog, levelSlow, v)
|
getWriter().Slow(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func slowFieldsSync(content string, fields ...LogField) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
getWriter().Slow(content, fields...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func slowTextSync(msg string) {
|
func slowTextSync(msg string) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
outputText(slowLog, levelSlow, msg)
|
getWriter().Slow(msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func stackSync(msg string) {
|
func stackSync(msg string) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
outputText(stackLog, levelError, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
getWriter().Stack(fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func statSync(msg string) {
|
func statSync(msg string) {
|
||||||
if shallLogStat() && shallLog(InfoLevel) {
|
if shallLogStat() && shallLog(InfoLevel) {
|
||||||
outputText(statLog, levelStat, msg)
|
getWriter().Stat(msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func writePlainAny(writer io.Writer, level string, val interface{}, fields ...string) {
|
|
||||||
switch v := val.(type) {
|
|
||||||
case string:
|
|
||||||
writePlainText(writer, level, v, fields...)
|
|
||||||
case error:
|
|
||||||
writePlainText(writer, level, v.Error(), fields...)
|
|
||||||
case fmt.Stringer:
|
|
||||||
writePlainText(writer, level, v.String(), fields...)
|
|
||||||
default:
|
|
||||||
var buf bytes.Buffer
|
|
||||||
buf.WriteString(getTimestamp())
|
|
||||||
buf.WriteByte(plainEncodingSep)
|
|
||||||
buf.WriteString(level)
|
|
||||||
for _, item := range fields {
|
|
||||||
buf.WriteByte(plainEncodingSep)
|
|
||||||
buf.WriteString(item)
|
|
||||||
}
|
|
||||||
buf.WriteByte(plainEncodingSep)
|
|
||||||
if err := json.NewEncoder(&buf).Encode(val); err != nil {
|
|
||||||
log.Println(err.Error())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
buf.WriteByte('\n')
|
|
||||||
if atomic.LoadUint32(&initialized) == 0 || writer == nil {
|
|
||||||
log.Println(buf.String())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := writer.Write(buf.Bytes()); err != nil {
|
|
||||||
log.Println(err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func writePlainText(writer io.Writer, level, msg string, fields ...string) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
buf.WriteString(getTimestamp())
|
|
||||||
buf.WriteByte(plainEncodingSep)
|
|
||||||
buf.WriteString(level)
|
|
||||||
for _, item := range fields {
|
|
||||||
buf.WriteByte(plainEncodingSep)
|
|
||||||
buf.WriteString(item)
|
|
||||||
}
|
|
||||||
buf.WriteByte(plainEncodingSep)
|
|
||||||
buf.WriteString(msg)
|
|
||||||
buf.WriteByte('\n')
|
|
||||||
if atomic.LoadUint32(&initialized) == 0 || writer == nil {
|
|
||||||
log.Println(buf.String())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := writer.Write(buf.Bytes()); err != nil {
|
|
||||||
log.Println(err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type logWriter struct {
|
|
||||||
logger *log.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
func newLogWriter(logger *log.Logger) logWriter {
|
|
||||||
return logWriter{
|
|
||||||
logger: logger,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lw logWriter) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lw logWriter) Write(data []byte) (int, error) {
|
|
||||||
lw.logger.Print(string(data))
|
|
||||||
return len(data), nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -4,10 +4,10 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
"reflect"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -19,8 +19,9 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
s = []byte("Sending #11 notification (id: 1451875113812010473) in #1 connection")
|
s = []byte("Sending #11 notification (id: 1451875113812010473) in #1 connection")
|
||||||
pool = make(chan []byte, 1)
|
pool = make(chan []byte, 1)
|
||||||
|
_ Writer = (*mockWriter)(nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
type mockWriter struct {
|
type mockWriter struct {
|
||||||
@@ -28,10 +29,46 @@ type mockWriter struct {
|
|||||||
builder strings.Builder
|
builder strings.Builder
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mw *mockWriter) Write(data []byte) (int, error) {
|
func (mw *mockWriter) Alert(v interface{}) {
|
||||||
mw.lock.Lock()
|
mw.lock.Lock()
|
||||||
defer mw.lock.Unlock()
|
defer mw.lock.Unlock()
|
||||||
return mw.builder.Write(data)
|
output(&mw.builder, levelAlert, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *mockWriter) Error(v interface{}, fields ...LogField) {
|
||||||
|
mw.lock.Lock()
|
||||||
|
defer mw.lock.Unlock()
|
||||||
|
output(&mw.builder, levelError, v, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *mockWriter) Info(v interface{}, fields ...LogField) {
|
||||||
|
mw.lock.Lock()
|
||||||
|
defer mw.lock.Unlock()
|
||||||
|
output(&mw.builder, levelInfo, v, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *mockWriter) Severe(v interface{}) {
|
||||||
|
mw.lock.Lock()
|
||||||
|
defer mw.lock.Unlock()
|
||||||
|
output(&mw.builder, levelSevere, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *mockWriter) Slow(v interface{}, fields ...LogField) {
|
||||||
|
mw.lock.Lock()
|
||||||
|
defer mw.lock.Unlock()
|
||||||
|
output(&mw.builder, levelSlow, v, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *mockWriter) Stack(v interface{}) {
|
||||||
|
mw.lock.Lock()
|
||||||
|
defer mw.lock.Unlock()
|
||||||
|
output(&mw.builder, levelError, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mw *mockWriter) Stat(v interface{}, fields ...LogField) {
|
||||||
|
mw.lock.Lock()
|
||||||
|
defer mw.lock.Unlock()
|
||||||
|
output(&mw.builder, levelStat, v, fields...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mw *mockWriter) Close() error {
|
func (mw *mockWriter) Close() error {
|
||||||
@@ -56,99 +93,215 @@ func (mw *mockWriter) String() string {
|
|||||||
return mw.builder.String()
|
return mw.builder.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestField(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
f LogField
|
||||||
|
want map[string]interface{}
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "error",
|
||||||
|
f: Field("foo", errors.New("bar")),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors",
|
||||||
|
f: Field("foo", []error{errors.New("bar"), errors.New("baz")}),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": []interface{}{"bar", "baz"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "strings",
|
||||||
|
f: Field("foo", []string{"bar", "baz"}),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": []interface{}{"bar", "baz"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duration",
|
||||||
|
f: Field("foo", time.Second),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": "1s",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "durations",
|
||||||
|
f: Field("foo", []time.Duration{time.Second, 2 * time.Second}),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": []interface{}{"1s", "2s"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "times",
|
||||||
|
f: Field("foo", []time.Time{
|
||||||
|
time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||||
|
time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC),
|
||||||
|
}),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": []interface{}{"2020-01-01 00:00:00 +0000 UTC", "2020-01-02 00:00:00 +0000 UTC"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "stringer",
|
||||||
|
f: Field("foo", ValStringer{val: "bar"}),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "stringers",
|
||||||
|
f: Field("foo", []fmt.Stringer{ValStringer{val: "bar"}, ValStringer{val: "baz"}}),
|
||||||
|
want: map[string]interface{}{
|
||||||
|
"foo": []interface{}{"bar", "baz"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
Infow("foo", test.f)
|
||||||
|
validateFields(t, w.String(), test.want)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestFileLineFileMode(t *testing.T) {
|
func TestFileLineFileMode(t *testing.T) {
|
||||||
writer := new(mockWriter)
|
w := new(mockWriter)
|
||||||
errorLog = writer
|
old := writer.Swap(w)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
defer writer.Store(old)
|
||||||
|
|
||||||
file, line := getFileLine()
|
file, line := getFileLine()
|
||||||
Error("anything")
|
Error("anything")
|
||||||
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
|
||||||
writer.Reset()
|
|
||||||
file, line = getFileLine()
|
file, line = getFileLine()
|
||||||
Errorf("anything %s", "format")
|
Errorf("anything %s", "format")
|
||||||
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFileLineConsoleMode(t *testing.T) {
|
func TestFileLineConsoleMode(t *testing.T) {
|
||||||
writer := new(mockWriter)
|
w := new(mockWriter)
|
||||||
writeConsole = true
|
old := writer.Swap(w)
|
||||||
errorLog = newLogWriter(log.New(writer, "[ERROR] ", flags))
|
defer writer.Store(old)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
file, line := getFileLine()
|
file, line := getFileLine()
|
||||||
Error("anything")
|
Error("anything")
|
||||||
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
|
||||||
writer.Reset()
|
w.Reset()
|
||||||
file, line = getFileLine()
|
file, line = getFileLine()
|
||||||
Errorf("anything %s", "format")
|
Errorf("anything %s", "format")
|
||||||
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogAlert(t *testing.T) {
|
func TestStructedLogAlert(t *testing.T) {
|
||||||
doTestStructedLog(t, levelAlert, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
errorLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelAlert, w, func(v ...interface{}) {
|
||||||
Alert(fmt.Sprint(v...))
|
Alert(fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogError(t *testing.T) {
|
func TestStructedLogError(t *testing.T) {
|
||||||
doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
errorLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelError, w, func(v ...interface{}) {
|
||||||
Error(v...)
|
Error(v...)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogErrorf(t *testing.T) {
|
func TestStructedLogErrorf(t *testing.T) {
|
||||||
doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
errorLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelError, w, func(v ...interface{}) {
|
||||||
Errorf("%s", fmt.Sprint(v...))
|
Errorf("%s", fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogErrorv(t *testing.T) {
|
func TestStructedLogErrorv(t *testing.T) {
|
||||||
doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
errorLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelError, w, func(v ...interface{}) {
|
||||||
Errorv(fmt.Sprint(v...))
|
Errorv(fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogErrorw(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelError, w, func(v ...interface{}) {
|
||||||
|
Errorw(fmt.Sprint(v...), Field("foo", "bar"))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogInfo(t *testing.T) {
|
func TestStructedLogInfo(t *testing.T) {
|
||||||
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) {
|
||||||
Info(v...)
|
Info(v...)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogInfof(t *testing.T) {
|
func TestStructedLogInfof(t *testing.T) {
|
||||||
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) {
|
||||||
Infof("%s", fmt.Sprint(v...))
|
Infof("%s", fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogInfov(t *testing.T) {
|
func TestStructedLogInfov(t *testing.T) {
|
||||||
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) {
|
||||||
Infov(fmt.Sprint(v...))
|
Infov(fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogInfow(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) {
|
||||||
|
Infow(fmt.Sprint(v...), Field("foo", "bar"))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogInfoConsoleAny(t *testing.T) {
|
func TestStructedLogInfoConsoleAny(t *testing.T) {
|
||||||
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
old := encoding
|
|
||||||
encoding = plainEncodingType
|
doTestStructedLogConsole(t, w, func(v ...interface{}) {
|
||||||
|
old := atomic.LoadUint32(&encoding)
|
||||||
|
atomic.StoreUint32(&encoding, plainEncodingType)
|
||||||
defer func() {
|
defer func() {
|
||||||
encoding = old
|
atomic.StoreUint32(&encoding, old)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
Infov(v)
|
Infov(v)
|
||||||
@@ -156,13 +309,15 @@ func TestStructedLogInfoConsoleAny(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogInfoConsoleAnyString(t *testing.T) {
|
func TestStructedLogInfoConsoleAnyString(t *testing.T) {
|
||||||
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
old := encoding
|
|
||||||
encoding = plainEncodingType
|
doTestStructedLogConsole(t, w, func(v ...interface{}) {
|
||||||
|
old := atomic.LoadUint32(&encoding)
|
||||||
|
atomic.StoreUint32(&encoding, plainEncodingType)
|
||||||
defer func() {
|
defer func() {
|
||||||
encoding = old
|
atomic.StoreUint32(&encoding, old)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
Infov(fmt.Sprint(v...))
|
Infov(fmt.Sprint(v...))
|
||||||
@@ -170,13 +325,15 @@ func TestStructedLogInfoConsoleAnyString(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogInfoConsoleAnyError(t *testing.T) {
|
func TestStructedLogInfoConsoleAnyError(t *testing.T) {
|
||||||
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
old := encoding
|
|
||||||
encoding = plainEncodingType
|
doTestStructedLogConsole(t, w, func(v ...interface{}) {
|
||||||
|
old := atomic.LoadUint32(&encoding)
|
||||||
|
atomic.StoreUint32(&encoding, plainEncodingType)
|
||||||
defer func() {
|
defer func() {
|
||||||
encoding = old
|
atomic.StoreUint32(&encoding, old)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
Infov(errors.New(fmt.Sprint(v...)))
|
Infov(errors.New(fmt.Sprint(v...)))
|
||||||
@@ -184,13 +341,15 @@ func TestStructedLogInfoConsoleAnyError(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogInfoConsoleAnyStringer(t *testing.T) {
|
func TestStructedLogInfoConsoleAnyStringer(t *testing.T) {
|
||||||
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
old := encoding
|
|
||||||
encoding = plainEncodingType
|
doTestStructedLogConsole(t, w, func(v ...interface{}) {
|
||||||
|
old := atomic.LoadUint32(&encoding)
|
||||||
|
atomic.StoreUint32(&encoding, plainEncodingType)
|
||||||
defer func() {
|
defer func() {
|
||||||
encoding = old
|
atomic.StoreUint32(&encoding, old)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
Infov(ValStringer{
|
Infov(ValStringer{
|
||||||
@@ -200,13 +359,15 @@ func TestStructedLogInfoConsoleAnyStringer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogInfoConsoleText(t *testing.T) {
|
func TestStructedLogInfoConsoleText(t *testing.T) {
|
||||||
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
old := encoding
|
|
||||||
encoding = plainEncodingType
|
doTestStructedLogConsole(t, w, func(v ...interface{}) {
|
||||||
|
old := atomic.LoadUint32(&encoding)
|
||||||
|
atomic.StoreUint32(&encoding, plainEncodingType)
|
||||||
defer func() {
|
defer func() {
|
||||||
encoding = old
|
atomic.StoreUint32(&encoding, old)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
Info(fmt.Sprint(v...))
|
Info(fmt.Sprint(v...))
|
||||||
@@ -214,69 +375,94 @@ func TestStructedLogInfoConsoleText(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogSlow(t *testing.T) {
|
func TestStructedLogSlow(t *testing.T) {
|
||||||
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
slowLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) {
|
||||||
Slow(v...)
|
Slow(v...)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogSlowf(t *testing.T) {
|
func TestStructedLogSlowf(t *testing.T) {
|
||||||
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
slowLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) {
|
||||||
Slowf(fmt.Sprint(v...))
|
Slowf(fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogSlowv(t *testing.T) {
|
func TestStructedLogSlowv(t *testing.T) {
|
||||||
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
slowLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) {
|
||||||
Slowv(fmt.Sprint(v...))
|
Slowv(fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogSloww(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) {
|
||||||
|
Sloww(fmt.Sprint(v...), Field("foo", time.Second))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogStat(t *testing.T) {
|
func TestStructedLogStat(t *testing.T) {
|
||||||
doTestStructedLog(t, levelStat, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
statLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelStat, w, func(v ...interface{}) {
|
||||||
Stat(v...)
|
Stat(v...)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogStatf(t *testing.T) {
|
func TestStructedLogStatf(t *testing.T) {
|
||||||
doTestStructedLog(t, levelStat, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
statLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelStat, w, func(v ...interface{}) {
|
||||||
Statf(fmt.Sprint(v...))
|
Statf(fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogSevere(t *testing.T) {
|
func TestStructedLogSevere(t *testing.T) {
|
||||||
doTestStructedLog(t, levelSevere, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
severeLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelSevere, w, func(v ...interface{}) {
|
||||||
Severe(v...)
|
Severe(v...)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogSeveref(t *testing.T) {
|
func TestStructedLogSeveref(t *testing.T) {
|
||||||
doTestStructedLog(t, levelSevere, func(writer io.WriteCloser) {
|
w := new(mockWriter)
|
||||||
severeLog = writer
|
old := writer.Swap(w)
|
||||||
}, func(v ...interface{}) {
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelSevere, w, func(v ...interface{}) {
|
||||||
Severef(fmt.Sprint(v...))
|
Severef(fmt.Sprint(v...))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStructedLogWithDuration(t *testing.T) {
|
func TestStructedLogWithDuration(t *testing.T) {
|
||||||
const message = "hello there"
|
const message = "hello there"
|
||||||
writer := new(mockWriter)
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Info(message)
|
WithDuration(time.Second).Info(message)
|
||||||
var entry logEntry
|
var entry logEntry
|
||||||
if err := json.Unmarshal([]byte(writer.builder.String()), &entry); err != nil {
|
if err := json.Unmarshal([]byte(w.String()), &entry); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
assert.Equal(t, levelInfo, entry.Level)
|
assert.Equal(t, levelInfo, entry.Level)
|
||||||
@@ -287,11 +473,12 @@ func TestStructedLogWithDuration(t *testing.T) {
|
|||||||
func TestSetLevel(t *testing.T) {
|
func TestSetLevel(t *testing.T) {
|
||||||
SetLevel(ErrorLevel)
|
SetLevel(ErrorLevel)
|
||||||
const message = "hello there"
|
const message = "hello there"
|
||||||
writer := new(mockWriter)
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
defer writer.Store(old)
|
||||||
|
|
||||||
Info(message)
|
Info(message)
|
||||||
assert.Equal(t, 0, writer.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSetLevelTwiceWithMode(t *testing.T) {
|
func TestSetLevelTwiceWithMode(t *testing.T) {
|
||||||
@@ -300,29 +487,35 @@ func TestSetLevelTwiceWithMode(t *testing.T) {
|
|||||||
"console",
|
"console",
|
||||||
"volumn",
|
"volumn",
|
||||||
}
|
}
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
for _, mode := range testModes {
|
for _, mode := range testModes {
|
||||||
testSetLevelTwiceWithMode(t, mode)
|
testSetLevelTwiceWithMode(t, mode, w)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSetLevelWithDuration(t *testing.T) {
|
func TestSetLevelWithDuration(t *testing.T) {
|
||||||
SetLevel(ErrorLevel)
|
SetLevel(ErrorLevel)
|
||||||
const message = "hello there"
|
const message = "hello there"
|
||||||
writer := new(mockWriter)
|
w := new(mockWriter)
|
||||||
infoLog = writer
|
old := writer.Swap(w)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
defer writer.Store(old)
|
||||||
|
|
||||||
WithDuration(time.Second).Info(message)
|
WithDuration(time.Second).Info(message)
|
||||||
assert.Equal(t, 0, writer.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestErrorfWithWrappedError(t *testing.T) {
|
func TestErrorfWithWrappedError(t *testing.T) {
|
||||||
SetLevel(ErrorLevel)
|
SetLevel(ErrorLevel)
|
||||||
const message = "there"
|
const message = "there"
|
||||||
writer := new(mockWriter)
|
w := new(mockWriter)
|
||||||
errorLog = writer
|
old := writer.Swap(w)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
defer writer.Store(old)
|
||||||
|
|
||||||
Errorf("hello %w", errors.New(message))
|
Errorf("hello %w", errors.New(message))
|
||||||
assert.True(t, strings.Contains(writer.builder.String(), "hello there"))
|
assert.True(t, strings.Contains(w.String(), "hello there"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMustNil(t *testing.T) {
|
func TestMustNil(t *testing.T) {
|
||||||
@@ -330,6 +523,11 @@ func TestMustNil(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSetup(t *testing.T) {
|
func TestSetup(t *testing.T) {
|
||||||
|
defer func() {
|
||||||
|
SetLevel(InfoLevel)
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
|
}()
|
||||||
|
|
||||||
MustSetup(LogConf{
|
MustSetup(LogConf{
|
||||||
ServiceName: "any",
|
ServiceName: "any",
|
||||||
Mode: "console",
|
Mode: "console",
|
||||||
@@ -344,6 +542,17 @@ func TestSetup(t *testing.T) {
|
|||||||
Mode: "volume",
|
Mode: "volume",
|
||||||
Path: os.TempDir(),
|
Path: os.TempDir(),
|
||||||
})
|
})
|
||||||
|
MustSetup(LogConf{
|
||||||
|
ServiceName: "any",
|
||||||
|
Mode: "console",
|
||||||
|
TimeFormat: timeFormat,
|
||||||
|
})
|
||||||
|
MustSetup(LogConf{
|
||||||
|
ServiceName: "any",
|
||||||
|
Mode: "console",
|
||||||
|
Encoding: plainEncoding,
|
||||||
|
})
|
||||||
|
|
||||||
assert.NotNil(t, setupWithVolume(LogConf{}))
|
assert.NotNil(t, setupWithVolume(LogConf{}))
|
||||||
assert.NotNil(t, setupWithFiles(LogConf{}))
|
assert.NotNil(t, setupWithFiles(LogConf{}))
|
||||||
assert.Nil(t, setupWithFiles(LogConf{
|
assert.Nil(t, setupWithFiles(LogConf{
|
||||||
@@ -364,6 +573,8 @@ func TestSetup(t *testing.T) {
|
|||||||
_, err := createOutput("")
|
_, err := createOutput("")
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
Disable()
|
Disable()
|
||||||
|
SetLevel(InfoLevel)
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDisable(t *testing.T) {
|
func TestDisable(t *testing.T) {
|
||||||
@@ -373,7 +584,6 @@ func TestDisable(t *testing.T) {
|
|||||||
WithKeepDays(1)(&opt)
|
WithKeepDays(1)(&opt)
|
||||||
WithGzip()(&opt)
|
WithGzip()(&opt)
|
||||||
assert.Nil(t, Close())
|
assert.Nil(t, Close())
|
||||||
writeConsole = false
|
|
||||||
assert.Nil(t, Close())
|
assert.Nil(t, Close())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -381,11 +591,20 @@ func TestDisableStat(t *testing.T) {
|
|||||||
DisableStat()
|
DisableStat()
|
||||||
|
|
||||||
const message = "hello there"
|
const message = "hello there"
|
||||||
writer := new(mockWriter)
|
w := new(mockWriter)
|
||||||
statLog = writer
|
old := writer.Swap(w)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
defer writer.Store(old)
|
||||||
Stat(message)
|
Stat(message)
|
||||||
assert.Equal(t, 0, writer.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSetWriter(t *testing.T) {
|
||||||
|
Reset()
|
||||||
|
SetWriter(nopWriter{})
|
||||||
|
assert.NotNil(t, writer.Load())
|
||||||
|
assert.True(t, writer.Load() == nopWriter{})
|
||||||
|
SetWriter(new(mockWriter))
|
||||||
|
assert.True(t, writer.Load() == nopWriter{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithGzip(t *testing.T) {
|
func TestWithGzip(t *testing.T) {
|
||||||
@@ -487,15 +706,12 @@ func put(b []byte) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func doTestStructedLog(t *testing.T, level string, setup func(writer io.WriteCloser),
|
func doTestStructedLog(t *testing.T, level string, w *mockWriter, write func(...interface{})) {
|
||||||
write func(...interface{})) {
|
|
||||||
const message = "hello there"
|
const message = "hello there"
|
||||||
writer := new(mockWriter)
|
|
||||||
setup(writer)
|
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
write(message)
|
write(message)
|
||||||
|
fmt.Println(w.String())
|
||||||
var entry logEntry
|
var entry logEntry
|
||||||
if err := json.Unmarshal([]byte(writer.builder.String()), &entry); err != nil {
|
if err := json.Unmarshal([]byte(w.String()), &entry); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
assert.Equal(t, level, entry.Level)
|
assert.Equal(t, level, entry.Level)
|
||||||
@@ -504,18 +720,14 @@ func doTestStructedLog(t *testing.T, level string, setup func(writer io.WriteClo
|
|||||||
assert.True(t, strings.Contains(val, message))
|
assert.True(t, strings.Contains(val, message))
|
||||||
}
|
}
|
||||||
|
|
||||||
func doTestStructedLogConsole(t *testing.T, setup func(writer io.WriteCloser),
|
func doTestStructedLogConsole(t *testing.T, w *mockWriter, write func(...interface{})) {
|
||||||
write func(...interface{})) {
|
|
||||||
const message = "hello there"
|
const message = "hello there"
|
||||||
writer := new(mockWriter)
|
|
||||||
setup(writer)
|
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
write(message)
|
write(message)
|
||||||
println(writer.String())
|
assert.True(t, strings.Contains(w.String(), message))
|
||||||
assert.True(t, strings.Contains(writer.String(), message))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testSetLevelTwiceWithMode(t *testing.T, mode string) {
|
func testSetLevelTwiceWithMode(t *testing.T, mode string, w *mockWriter) {
|
||||||
|
writer.Store(nil)
|
||||||
SetUp(LogConf{
|
SetUp(LogConf{
|
||||||
Mode: mode,
|
Mode: mode,
|
||||||
Level: "error",
|
Level: "error",
|
||||||
@@ -527,17 +739,14 @@ func testSetLevelTwiceWithMode(t *testing.T, mode string) {
|
|||||||
Path: "/dev/null",
|
Path: "/dev/null",
|
||||||
})
|
})
|
||||||
const message = "hello there"
|
const message = "hello there"
|
||||||
writer := new(mockWriter)
|
|
||||||
infoLog = writer
|
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
Info(message)
|
Info(message)
|
||||||
assert.Equal(t, 0, writer.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
Infof(message)
|
Infof(message)
|
||||||
assert.Equal(t, 0, writer.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
ErrorStack(message)
|
ErrorStack(message)
|
||||||
assert.Equal(t, 0, writer.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
ErrorStackf(message)
|
ErrorStackf(message)
|
||||||
assert.Equal(t, 0, writer.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
}
|
}
|
||||||
|
|
||||||
type ValStringer struct {
|
type ValStringer struct {
|
||||||
@@ -547,3 +756,18 @@ type ValStringer struct {
|
|||||||
func (v ValStringer) String() string {
|
func (v ValStringer) String() string {
|
||||||
return v.val
|
return v.val
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateFields(t *testing.T, content string, fields map[string]interface{}) {
|
||||||
|
var m map[string]interface{}
|
||||||
|
if err := json.Unmarshal([]byte(content), &m); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range fields {
|
||||||
|
if reflect.TypeOf(v).Kind() == reflect.Slice {
|
||||||
|
assert.EqualValues(t, v, m[k])
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, v, m[k], content)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
22
core/logx/logwriter.go
Normal file
22
core/logx/logwriter.go
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import "log"
|
||||||
|
|
||||||
|
type logWriter struct {
|
||||||
|
logger *log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLogWriter(logger *log.Logger) logWriter {
|
||||||
|
return logWriter{
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lw logWriter) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lw logWriter) Write(data []byte) (int, error) {
|
||||||
|
lw.logger.Print(string(data))
|
||||||
|
return len(data), nil
|
||||||
|
}
|
||||||
197
core/logx/readme-cn.md
Normal file
197
core/logx/readme-cn.md
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
<IMG align="right" width="150px" src="https://raw.githubusercontent.com/zeromicro/zero-doc/main/doc/images/go-zero.png">
|
||||||
|
|
||||||
|
# logx
|
||||||
|
|
||||||
|
[English](readme.md) | 简体中文
|
||||||
|
|
||||||
|
## logx 配置
|
||||||
|
|
||||||
|
```go
|
||||||
|
type LogConf struct {
|
||||||
|
ServiceName string `json:",optional"`
|
||||||
|
Mode string `json:",default=console,options=[console,file,volume]"`
|
||||||
|
Encoding string `json:",default=json,options=[json,plain]"`
|
||||||
|
TimeFormat string `json:",optional"`
|
||||||
|
Path string `json:",default=logs"`
|
||||||
|
Level string `json:",default=info,options=[info,error,severe]"`
|
||||||
|
Compress bool `json:",optional"`
|
||||||
|
KeepDays int `json:",optional"`
|
||||||
|
StackCooldownMillis int `json:",default=100"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `ServiceName`:设置服务名称,可选。在 `volume` 模式下,该名称用于生成日志文件。在 `rest/zrpc` 服务中,名称将被自动设置为 `rest`或`zrpc` 的名称。
|
||||||
|
- `Mode`:输出日志的模式,默认是 `console`
|
||||||
|
- `console` 模式将日志写到 `stdout/stderr`
|
||||||
|
- `file` 模式将日志写到 `Path` 指定目录的文件中
|
||||||
|
- `volume` 模式在 docker 中使用,将日志写入挂载的卷中
|
||||||
|
- `Encoding`: 指示如何对日志进行编码,默认是 `json`
|
||||||
|
- `json`模式以 json 格式写日志
|
||||||
|
- `plain`模式用纯文本写日志,并带有终端颜色显示
|
||||||
|
- `TimeFormat`:自定义时间格式,可选。默认是 `2006-01-02T15:04:05.000Z07:00`
|
||||||
|
- `Path`:设置日志路径,默认为 `logs`
|
||||||
|
- `Level`: 用于过滤日志的日志级别。默认为 `info`
|
||||||
|
- `info`,所有日志都被写入
|
||||||
|
- `error`, `info` 的日志被丢弃
|
||||||
|
- `severe`, `info` 和 `error` 日志被丢弃,只有 `severe` 日志被写入
|
||||||
|
- `Compress`: 是否压缩日志文件,只在 `file` 模式下工作
|
||||||
|
- `KeepDays`:日志文件被保留多少天,在给定的天数之后,过期的文件将被自动删除。对 `console` 模式没有影响
|
||||||
|
- `StackCooldownMillis`:多少毫秒后再次写入堆栈跟踪。用来避免堆栈跟踪日志过多
|
||||||
|
|
||||||
|
## 打印日志方法
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Logger interface {
|
||||||
|
// Error logs a message at error level.
|
||||||
|
Error(...interface{})
|
||||||
|
// Errorf logs a message at error level.
|
||||||
|
Errorf(string, ...interface{})
|
||||||
|
// Errorv logs a message at error level.
|
||||||
|
Errorv(interface{})
|
||||||
|
// Errorw logs a message at error level.
|
||||||
|
Errorw(string, ...LogField)
|
||||||
|
// Info logs a message at info level.
|
||||||
|
Info(...interface{})
|
||||||
|
// Infof logs a message at info level.
|
||||||
|
Infof(string, ...interface{})
|
||||||
|
// Infov logs a message at info level.
|
||||||
|
Infov(interface{})
|
||||||
|
// Infow logs a message at info level.
|
||||||
|
Infow(string, ...LogField)
|
||||||
|
// Slow logs a message at slow level.
|
||||||
|
Slow(...interface{})
|
||||||
|
// Slowf logs a message at slow level.
|
||||||
|
Slowf(string, ...interface{})
|
||||||
|
// Slowv logs a message at slow level.
|
||||||
|
Slowv(interface{})
|
||||||
|
// Sloww logs a message at slow level.
|
||||||
|
Sloww(string, ...LogField)
|
||||||
|
// WithContext returns a new logger with the given context.
|
||||||
|
WithContext(context.Context) Logger
|
||||||
|
// WithDuration returns a new logger with the given duration.
|
||||||
|
WithDuration(time.Duration) Logger
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `Error`, `Info`, `Slow`: 将任何类型的信息写进日志,使用 `fmt.Sprint(...)` 来转换为 `string`
|
||||||
|
- `Errorf`, `Infof`, `Slowf`: 将指定格式的信息写入日志
|
||||||
|
- `Errorv`, `Infov`, `Slowv`: 将任何类型的信息写入日志,用 `json marshal` 编码
|
||||||
|
- `Errorw`, `Infow`, `Sloww`: 写日志,并带上给定的 `key:value` 字段
|
||||||
|
- `WithContext`:将给定的 ctx 注入日志信息,例如用于记录 `trace-id`和`span-id`
|
||||||
|
- `WithDuration`: 将指定的时间写入日志信息中,字段名为 `duration`
|
||||||
|
|
||||||
|
## 与第三方日志库集成
|
||||||
|
|
||||||
|
- zap
|
||||||
|
- 实现:[https://github.com/zeromicro/zero-contrib/blob/main/logx/zapx/zap.go](https://github.com/zeromicro/zero-contrib/blob/main/logx/zapx/zap.go)
|
||||||
|
- 使用示例:[https://github.com/zeromicro/zero-examples/blob/main/logx/zaplog/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/zaplog/main.go)
|
||||||
|
- logrus
|
||||||
|
- 实现:[https://github.com/zeromicro/zero-contrib/blob/main/logx/logrusx/logrus.go](https://github.com/zeromicro/zero-contrib/blob/main/logx/logrusx/logrus.go)
|
||||||
|
- 使用示例:[https://github.com/zeromicro/zero-examples/blob/main/logx/logrus/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/logrus/main.go)
|
||||||
|
|
||||||
|
对于其它的日志库,请参考上面示例实现,并欢迎提交 `PR` 到 [https://github.com/zeromicro/zero-contrib](https://github.com/zeromicro/zero-contrib)
|
||||||
|
|
||||||
|
## 将日志写到指定的存储
|
||||||
|
|
||||||
|
`logx`定义了两个接口,方便自定义 `logx`,将日志写入任何存储。
|
||||||
|
|
||||||
|
- `logx.NewWriter(w io.Writer)`
|
||||||
|
- `logx.SetWriter(write logx.Writer)`
|
||||||
|
|
||||||
|
例如,如果我们想把日志写进kafka,而不是控制台或文件,我们可以像下面这样做。
|
||||||
|
|
||||||
|
```go
|
||||||
|
type KafkaWriter struct {
|
||||||
|
Pusher *kq.Pusher
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewKafkaWriter(pusher *kq.Pusher) *KafkaWriter {
|
||||||
|
return &KafkaWriter{
|
||||||
|
Pusher: pusher,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *KafkaWriter) Write(p []byte) (n int, err error) {
|
||||||
|
// writing log with newlines, trim them.
|
||||||
|
if err := w.Pusher.Push(strings.TrimSpace(string(p))); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
pusher := kq.NewPusher([]string{"localhost:9092"}, "go-zero")
|
||||||
|
defer pusher.Close()
|
||||||
|
|
||||||
|
writer := logx.NewWriter(NewKafkaWriter(pusher))
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
|
||||||
|
// more code
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
完整代码:[https://github.com/zeromicro/zero-examples/blob/main/logx/tokafka/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/tokafka/main.go)
|
||||||
|
|
||||||
|
## 过滤敏感字段
|
||||||
|
|
||||||
|
如果我们需要防止 `password` 字段被记录下来,我们可以像下面这样实现。
|
||||||
|
|
||||||
|
```go
|
||||||
|
type (
|
||||||
|
Message struct {
|
||||||
|
Name string
|
||||||
|
Password string
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
SensitiveLogger struct {
|
||||||
|
logx.Writer
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewSensitiveLogger(writer logx.Writer) *SensitiveLogger {
|
||||||
|
return &SensitiveLogger{
|
||||||
|
Writer: writer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *SensitiveLogger) Info(msg interface{}, fields ...logx.LogField) {
|
||||||
|
if m, ok := msg.(Message); ok {
|
||||||
|
l.Writer.Info(Message{
|
||||||
|
Name: m.Name,
|
||||||
|
Password: "******",
|
||||||
|
Message: m.Message,
|
||||||
|
}, fields...)
|
||||||
|
} else {
|
||||||
|
l.Writer.Info(msg, fields...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// setup logx to make sure originalWriter not nil,
|
||||||
|
// the injected writer is only for filtering, like a middleware.
|
||||||
|
|
||||||
|
originalWriter := logx.Reset()
|
||||||
|
writer := NewSensitiveLogger(originalWriter)
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
|
||||||
|
logx.Infov(Message{
|
||||||
|
Name: "foo",
|
||||||
|
Password: "shouldNotAppear",
|
||||||
|
Message: "bar",
|
||||||
|
})
|
||||||
|
|
||||||
|
// more code
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
完整代码:[https://github.com/zeromicro/zero-examples/blob/main/logx/filterfields/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/filterfields/main.go)
|
||||||
|
|
||||||
|
## 更多示例
|
||||||
|
|
||||||
|
[https://github.com/zeromicro/zero-examples/tree/main/logx](https://github.com/zeromicro/zero-examples/tree/main/logx)
|
||||||
|
|
||||||
|
## Give a Star! ⭐
|
||||||
|
|
||||||
|
如果你正在使用或者觉得这个项目对你有帮助,请 **star** 支持,感谢!
|
||||||
197
core/logx/readme.md
Normal file
197
core/logx/readme.md
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
<img align="right" width="150px" src="https://raw.githubusercontent.com/zeromicro/zero-doc/main/doc/images/go-zero.png">
|
||||||
|
|
||||||
|
# logx
|
||||||
|
|
||||||
|
English | [简体中文](readme-cn.md)
|
||||||
|
|
||||||
|
## logx configurations
|
||||||
|
|
||||||
|
```go
|
||||||
|
type LogConf struct {
|
||||||
|
ServiceName string `json:",optional"`
|
||||||
|
Mode string `json:",default=console,options=[console,file,volume]"`
|
||||||
|
Encoding string `json:",default=json,options=[json,plain]"`
|
||||||
|
TimeFormat string `json:",optional"`
|
||||||
|
Path string `json:",default=logs"`
|
||||||
|
Level string `json:",default=info,options=[info,error,severe]"`
|
||||||
|
Compress bool `json:",optional"`
|
||||||
|
KeepDays int `json:",optional"`
|
||||||
|
StackCooldownMillis int `json:",default=100"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `ServiceName`: set the service name, optional. on `volume` mode, the name is used to generate the log files. Within `rest/zrpc` services, the name will be set to the name of `rest` or `zrpc` automatically.
|
||||||
|
- `Mode`: the mode to output the logs, default is `console`.
|
||||||
|
- `console` mode writes the logs to `stdout/stderr`.
|
||||||
|
- `file` mode writes the logs to the files specified by `Path`.
|
||||||
|
- `volume` mode is used in docker, to write logs into mounted volumes.
|
||||||
|
- `Encoding`: indicates how to encode the logs, default is `json`.
|
||||||
|
- `json` mode writes the logs in json format.
|
||||||
|
- `plain` mode writes the logs with plain text, with terminal color enabled.
|
||||||
|
- `TimeFormat`: customize the time format, optional. Default is `2006-01-02T15:04:05.000Z07:00`.
|
||||||
|
- `Path`: set the log path, default to `logs`.
|
||||||
|
- `Level`: the logging level to filter logs. Default is `info`.
|
||||||
|
- `info`, all logs are written.
|
||||||
|
- `error`, `info` logs are suppressed.
|
||||||
|
- `severe`, `info` and `error` logs are suppressed, only `severe` logs are written.
|
||||||
|
- `Compress`: whether or not to compress log files, only works with `file` mode.
|
||||||
|
- `KeepDays`: how many days that the log files are kept, after the given days, the outdated files will be deleted automatically. It has no effect on `console` mode.
|
||||||
|
- `StackCooldownMillis`: how many milliseconds to rewrite stacktrace again. It’s used to avoid stacktrace flooding.
|
||||||
|
|
||||||
|
## Logging methods
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Logger interface {
|
||||||
|
// Error logs a message at error level.
|
||||||
|
Error(...interface{})
|
||||||
|
// Errorf logs a message at error level.
|
||||||
|
Errorf(string, ...interface{})
|
||||||
|
// Errorv logs a message at error level.
|
||||||
|
Errorv(interface{})
|
||||||
|
// Errorw logs a message at error level.
|
||||||
|
Errorw(string, ...LogField)
|
||||||
|
// Info logs a message at info level.
|
||||||
|
Info(...interface{})
|
||||||
|
// Infof logs a message at info level.
|
||||||
|
Infof(string, ...interface{})
|
||||||
|
// Infov logs a message at info level.
|
||||||
|
Infov(interface{})
|
||||||
|
// Infow logs a message at info level.
|
||||||
|
Infow(string, ...LogField)
|
||||||
|
// Slow logs a message at slow level.
|
||||||
|
Slow(...interface{})
|
||||||
|
// Slowf logs a message at slow level.
|
||||||
|
Slowf(string, ...interface{})
|
||||||
|
// Slowv logs a message at slow level.
|
||||||
|
Slowv(interface{})
|
||||||
|
// Sloww logs a message at slow level.
|
||||||
|
Sloww(string, ...LogField)
|
||||||
|
// WithContext returns a new logger with the given context.
|
||||||
|
WithContext(context.Context) Logger
|
||||||
|
// WithDuration returns a new logger with the given duration.
|
||||||
|
WithDuration(time.Duration) Logger
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `Error`, `Info`, `Slow`: write any kind of messages into logs, with like `fmt.Sprint(…)`.
|
||||||
|
- `Errorf`, `Infof`, `Slowf`: write messages with given format into logs.
|
||||||
|
- `Errorv`, `Infov`, `Slowv`: write any kind of messages into logs, with json marshalling to encode them.
|
||||||
|
- `Errorw`, `Infow`, `Sloww`: write the string message with given `key:value` fields.
|
||||||
|
- `WithContext`: inject the given ctx into the log messages, typically used to log `trace-id` and `span-id`.
|
||||||
|
- `WithDuration`: write elapsed duration into the log messages, with key `duration`.
|
||||||
|
|
||||||
|
## Integrating with third-party logging libs
|
||||||
|
|
||||||
|
- zap
|
||||||
|
- implementation: [https://github.com/zeromicro/zero-contrib/blob/main/logx/zapx/zap.go](https://github.com/zeromicro/zero-contrib/blob/main/logx/zapx/zap.go)
|
||||||
|
- usage example: [https://github.com/zeromicro/zero-examples/blob/main/logx/zaplog/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/zaplog/main.go)
|
||||||
|
- logrus
|
||||||
|
- implementation: [https://github.com/zeromicro/zero-contrib/blob/main/logx/logrusx/logrus.go](https://github.com/zeromicro/zero-contrib/blob/main/logx/logrusx/logrus.go)
|
||||||
|
- usage example: [https://github.com/zeromicro/zero-examples/blob/main/logx/logrus/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/logrus/main.go)
|
||||||
|
|
||||||
|
For more libs, please implement and PR to [https://github.com/zeromicro/zero-contrib](https://github.com/zeromicro/zero-contrib)
|
||||||
|
|
||||||
|
## Write the logs to specific stores
|
||||||
|
|
||||||
|
`logx` defined two interfaces to let you customize `logx` to write logs into any stores.
|
||||||
|
|
||||||
|
- `logx.NewWriter(w io.Writer)`
|
||||||
|
- `logx.SetWriter(writer logx.Writer)`
|
||||||
|
|
||||||
|
For example, if we want to write the logs into kafka instead of console or files, we can do it like below:
|
||||||
|
|
||||||
|
```go
|
||||||
|
type KafkaWriter struct {
|
||||||
|
Pusher *kq.Pusher
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewKafkaWriter(pusher *kq.Pusher) *KafkaWriter {
|
||||||
|
return &KafkaWriter{
|
||||||
|
Pusher: pusher,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *KafkaWriter) Write(p []byte) (n int, err error) {
|
||||||
|
// writing log with newlines, trim them.
|
||||||
|
if err := w.Pusher.Push(strings.TrimSpace(string(p))); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
pusher := kq.NewPusher([]string{"localhost:9092"}, "go-zero")
|
||||||
|
defer pusher.Close()
|
||||||
|
|
||||||
|
writer := logx.NewWriter(NewKafkaWriter(pusher))
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
|
||||||
|
// more code
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete code: [https://github.com/zeromicro/zero-examples/blob/main/logx/tokafka/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/tokafka/main.go)
|
||||||
|
|
||||||
|
## Filtering sensitive fields
|
||||||
|
|
||||||
|
If we need to prevent the `password` fields from logging, we can do it like below:
|
||||||
|
|
||||||
|
```go
|
||||||
|
type (
|
||||||
|
Message struct {
|
||||||
|
Name string
|
||||||
|
Password string
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
SensitiveLogger struct {
|
||||||
|
logx.Writer
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewSensitiveLogger(writer logx.Writer) *SensitiveLogger {
|
||||||
|
return &SensitiveLogger{
|
||||||
|
Writer: writer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *SensitiveLogger) Info(msg interface{}, fields ...logx.LogField) {
|
||||||
|
if m, ok := msg.(Message); ok {
|
||||||
|
l.Writer.Info(Message{
|
||||||
|
Name: m.Name,
|
||||||
|
Password: "******",
|
||||||
|
Message: m.Message,
|
||||||
|
}, fields...)
|
||||||
|
} else {
|
||||||
|
l.Writer.Info(msg, fields...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// setup logx to make sure originalWriter not nil,
|
||||||
|
// the injected writer is only for filtering, like a middleware.
|
||||||
|
|
||||||
|
originalWriter := logx.Reset()
|
||||||
|
writer := NewSensitiveLogger(originalWriter)
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
|
||||||
|
logx.Infov(Message{
|
||||||
|
Name: "foo",
|
||||||
|
Password: "shouldNotAppear",
|
||||||
|
Message: "bar",
|
||||||
|
})
|
||||||
|
|
||||||
|
// more code
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete code: [https://github.com/zeromicro/zero-examples/blob/main/logx/filterfields/main.go](https://github.com/zeromicro/zero-examples/blob/main/logx/filterfields/main.go)
|
||||||
|
|
||||||
|
## More examples
|
||||||
|
|
||||||
|
[https://github.com/zeromicro/zero-examples/tree/main/logx](https://github.com/zeromicro/zero-examples/tree/main/logx)
|
||||||
|
|
||||||
|
## Give a Star! ⭐
|
||||||
|
|
||||||
|
If you like or are using this project to learn or start your solution, please give it a star. Thanks!
|
||||||
@@ -15,7 +15,6 @@ import (
|
|||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/fs"
|
"github.com/zeromicro/go-zero/core/fs"
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -211,6 +210,12 @@ func (l *RotateLogger) maybeCompressFile(file string) {
|
|||||||
ErrorStack(r)
|
ErrorStack(r)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
if _, err := os.Stat(file); err != nil {
|
||||||
|
// file not exists or other error, ignore compression
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
compressLogFile(file)
|
compressLogFile(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -290,12 +295,12 @@ func (l *RotateLogger) write(v []byte) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func compressLogFile(file string) {
|
func compressLogFile(file string) {
|
||||||
start := timex.Now()
|
start := time.Now()
|
||||||
Infof("compressing log file: %s", file)
|
Infof("compressing log file: %s", file)
|
||||||
if err := gzipFile(file); err != nil {
|
if err := gzipFile(file); err != nil {
|
||||||
Errorf("compress error: %s", err)
|
Errorf("compress error: %s", err)
|
||||||
} else {
|
} else {
|
||||||
Infof("compressed log file: %s, took %s", file, timex.Since(start))
|
Infof("compressed log file: %s, took %s", file, time.Since(start))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -75,10 +75,7 @@ func TestRotateLoggerMayCompressFileTrue(t *testing.T) {
|
|||||||
logger, err := NewLogger(filename, new(DailyRotateRule), true)
|
logger, err := NewLogger(filename, new(DailyRotateRule), true)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
if len(filename) > 0 {
|
if len(filename) > 0 {
|
||||||
defer func() {
|
defer os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
|
||||||
os.Remove(filename)
|
|
||||||
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
logger.maybeCompressFile(filename)
|
logger.maybeCompressFile(filename)
|
||||||
_, err = os.Stat(filename)
|
_, err = os.Stat(filename)
|
||||||
@@ -92,7 +89,6 @@ func TestRotateLoggerRotate(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
if len(filename) > 0 {
|
if len(filename) > 0 {
|
||||||
defer func() {
|
defer func() {
|
||||||
os.Remove(filename)
|
|
||||||
os.Remove(logger.getBackupFilename())
|
os.Remove(logger.getBackupFilename())
|
||||||
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
|
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
|
||||||
}()
|
}()
|
||||||
@@ -102,6 +98,10 @@ func TestRotateLoggerRotate(t *testing.T) {
|
|||||||
case *os.LinkError:
|
case *os.LinkError:
|
||||||
// avoid rename error on docker container
|
// avoid rename error on docker container
|
||||||
assert.Equal(t, syscall.EXDEV, v.Err)
|
assert.Equal(t, syscall.EXDEV, v.Err)
|
||||||
|
case *os.PathError:
|
||||||
|
// ignore remove error for tests,
|
||||||
|
// files are cleaned in GitHub actions.
|
||||||
|
assert.Equal(t, "remove", v.Op)
|
||||||
default:
|
default:
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
}
|
}
|
||||||
@@ -115,12 +115,18 @@ func TestRotateLoggerWrite(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
if len(filename) > 0 {
|
if len(filename) > 0 {
|
||||||
defer func() {
|
defer func() {
|
||||||
os.Remove(filename)
|
|
||||||
os.Remove(logger.getBackupFilename())
|
os.Remove(logger.getBackupFilename())
|
||||||
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
|
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
// the following write calls cannot be changed to Write, because of DATA RACE.
|
||||||
logger.write([]byte(`foo`))
|
logger.write([]byte(`foo`))
|
||||||
rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(dateFormat)
|
rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(dateFormat)
|
||||||
logger.write([]byte(`bar`))
|
logger.write([]byte(`bar`))
|
||||||
|
logger.Close()
|
||||||
|
logger.write([]byte(`baz`))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLogWriterClose(t *testing.T) {
|
||||||
|
assert.Nil(t, newLogWriter(nil).Close())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,20 +29,24 @@ func TestRedirector(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func captureOutput(f func()) string {
|
func captureOutput(f func()) string {
|
||||||
atomic.StoreUint32(&initialized, 1)
|
w := new(mockWriter)
|
||||||
writer := new(mockWriter)
|
old := writer.Swap(w)
|
||||||
infoLog = writer
|
defer writer.Store(old)
|
||||||
|
|
||||||
prevLevel := atomic.LoadUint32(&logLevel)
|
prevLevel := atomic.LoadUint32(&logLevel)
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
f()
|
f()
|
||||||
SetLevel(prevLevel)
|
SetLevel(prevLevel)
|
||||||
|
|
||||||
return writer.builder.String()
|
return w.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func getContent(jsonStr string) string {
|
func getContent(jsonStr string) string {
|
||||||
var entry logEntry
|
var entry logEntry
|
||||||
json.Unmarshal([]byte(jsonStr), &entry)
|
json.Unmarshal([]byte(jsonStr), &entry)
|
||||||
return entry.Content.(string)
|
val, ok := entry.Content.(string)
|
||||||
|
if ok {
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
return ""
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,72 +3,79 @@ package logx
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
"go.opentelemetry.io/otel/trace"
|
"go.opentelemetry.io/otel/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// WithContext sets ctx to log, for keeping tracing information.
|
||||||
|
func WithContext(ctx context.Context) Logger {
|
||||||
|
return &traceLogger{
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type traceLogger struct {
|
type traceLogger struct {
|
||||||
logEntry
|
logEntry
|
||||||
Trace string `json:"trace,omitempty"`
|
ctx context.Context
|
||||||
Span string `json:"span,omitempty"`
|
|
||||||
ctx context.Context
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Error(v ...interface{}) {
|
func (l *traceLogger) Error(v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.err(fmt.Sprint(v...))
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Errorf(format string, v ...interface{}) {
|
func (l *traceLogger) Errorf(format string, v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.err(fmt.Sprintf(format, v...))
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Errorv(v interface{}) {
|
func (l *traceLogger) Errorv(v interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.err(fmt.Sprint(v))
|
||||||
l.write(errorLog, levelError, v)
|
}
|
||||||
}
|
|
||||||
|
func (l *traceLogger) Errorw(msg string, fields ...LogField) {
|
||||||
|
l.err(msg, fields...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Info(v ...interface{}) {
|
func (l *traceLogger) Info(v ...interface{}) {
|
||||||
if shallLog(InfoLevel) {
|
l.info(fmt.Sprint(v...))
|
||||||
l.write(infoLog, levelInfo, fmt.Sprint(v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Infof(format string, v ...interface{}) {
|
func (l *traceLogger) Infof(format string, v ...interface{}) {
|
||||||
if shallLog(InfoLevel) {
|
l.info(fmt.Sprintf(format, v...))
|
||||||
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Infov(v interface{}) {
|
func (l *traceLogger) Infov(v interface{}) {
|
||||||
if shallLog(InfoLevel) {
|
l.info(v)
|
||||||
l.write(infoLog, levelInfo, v)
|
}
|
||||||
}
|
|
||||||
|
func (l *traceLogger) Infow(msg string, fields ...LogField) {
|
||||||
|
l.info(msg, fields...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Slow(v ...interface{}) {
|
func (l *traceLogger) Slow(v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.slow(fmt.Sprint(v...))
|
||||||
l.write(slowLog, levelSlow, fmt.Sprint(v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Slowf(format string, v ...interface{}) {
|
func (l *traceLogger) Slowf(format string, v ...interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.slow(fmt.Sprintf(format, v...))
|
||||||
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) Slowv(v interface{}) {
|
func (l *traceLogger) Slowv(v interface{}) {
|
||||||
if shallLog(ErrorLevel) {
|
l.slow(v)
|
||||||
l.write(slowLog, levelSlow, v)
|
}
|
||||||
|
|
||||||
|
func (l *traceLogger) Sloww(msg string, fields ...LogField) {
|
||||||
|
l.slow(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *traceLogger) WithContext(ctx context.Context) Logger {
|
||||||
|
if ctx == nil {
|
||||||
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
|
l.ctx = ctx
|
||||||
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) WithDuration(duration time.Duration) Logger {
|
func (l *traceLogger) WithDuration(duration time.Duration) Logger {
|
||||||
@@ -76,31 +83,37 @@ func (l *traceLogger) WithDuration(duration time.Duration) Logger {
|
|||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *traceLogger) write(writer io.Writer, level string, val interface{}) {
|
func (l *traceLogger) buildFields(fields ...LogField) []LogField {
|
||||||
|
if len(l.Duration) > 0 {
|
||||||
|
fields = append(fields, Field(durationKey, l.Duration))
|
||||||
|
}
|
||||||
traceID := traceIdFromContext(l.ctx)
|
traceID := traceIdFromContext(l.ctx)
|
||||||
|
if len(traceID) > 0 {
|
||||||
|
fields = append(fields, Field(traceKey, traceID))
|
||||||
|
}
|
||||||
spanID := spanIdFromContext(l.ctx)
|
spanID := spanIdFromContext(l.ctx)
|
||||||
|
if len(spanID) > 0 {
|
||||||
|
fields = append(fields, Field(spanKey, spanID))
|
||||||
|
}
|
||||||
|
|
||||||
switch encoding {
|
return fields
|
||||||
case plainEncodingType:
|
}
|
||||||
writePlainAny(writer, level, val, l.Duration, traceID, spanID)
|
|
||||||
default:
|
func (l *traceLogger) err(v interface{}, fields ...LogField) {
|
||||||
outputJson(writer, &traceLogger{
|
if shallLog(ErrorLevel) {
|
||||||
logEntry: logEntry{
|
getWriter().Error(v, l.buildFields(fields...)...)
|
||||||
Timestamp: getTimestamp(),
|
|
||||||
Level: level,
|
|
||||||
Duration: l.Duration,
|
|
||||||
Content: val,
|
|
||||||
},
|
|
||||||
Trace: traceID,
|
|
||||||
Span: spanID,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithContext sets ctx to log, for keeping tracing information.
|
func (l *traceLogger) info(v interface{}, fields ...LogField) {
|
||||||
func WithContext(ctx context.Context) Logger {
|
if shallLog(InfoLevel) {
|
||||||
return &traceLogger{
|
getWriter().Info(v, l.buildFields(fields...)...)
|
||||||
ctx: ctx,
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *traceLogger) slow(v interface{}, fields ...LogField) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
getWriter().Slow(v, l.buildFields(fields...)...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,7 +2,8 @@ package logx
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"log"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -13,142 +14,195 @@ import (
|
|||||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
traceKey = "trace"
|
|
||||||
spanKey = "span"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestTraceLog(t *testing.T) {
|
func TestTraceLog(t *testing.T) {
|
||||||
var buf mockWriter
|
SetLevel(InfoLevel)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
writer.lock.RLock()
|
||||||
|
defer func() {
|
||||||
|
writer.lock.RUnlock()
|
||||||
|
writer.Store(old)
|
||||||
|
}()
|
||||||
|
|
||||||
otp := otel.GetTracerProvider()
|
otp := otel.GetTracerProvider()
|
||||||
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
otel.SetTracerProvider(tp)
|
otel.SetTracerProvider(tp)
|
||||||
defer otel.SetTracerProvider(otp)
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
ctx, span := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
WithContext(ctx).(*traceLogger).write(&buf, levelInfo, testlog)
|
defer span.End()
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
WithContext(ctx).Info(testlog)
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceError(t *testing.T) {
|
func TestTraceError(t *testing.T) {
|
||||||
var buf mockWriter
|
w := new(mockWriter)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
old := writer.Swap(w)
|
||||||
errorLog = newLogWriter(log.New(&buf, "", flags))
|
writer.lock.RLock()
|
||||||
|
defer func() {
|
||||||
|
writer.lock.RUnlock()
|
||||||
|
writer.Store(old)
|
||||||
|
}()
|
||||||
|
|
||||||
otp := otel.GetTracerProvider()
|
otp := otel.GetTracerProvider()
|
||||||
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
otel.SetTracerProvider(tp)
|
otel.SetTracerProvider(tp)
|
||||||
defer otel.SetTracerProvider(otp)
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
ctx, span := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
l := WithContext(ctx).(*traceLogger)
|
defer span.End()
|
||||||
SetLevel(InfoLevel)
|
|
||||||
|
var nilCtx context.Context
|
||||||
|
l := WithContext(context.Background())
|
||||||
|
l = l.WithContext(nilCtx)
|
||||||
|
l = l.WithContext(ctx)
|
||||||
|
SetLevel(ErrorLevel)
|
||||||
l.WithDuration(time.Second).Error(testlog)
|
l.WithDuration(time.Second).Error(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
w.Reset()
|
||||||
buf.Reset()
|
|
||||||
l.WithDuration(time.Second).Errorf(testlog)
|
l.WithDuration(time.Second).Errorf(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
w.Reset()
|
||||||
buf.Reset()
|
|
||||||
l.WithDuration(time.Second).Errorv(testlog)
|
l.WithDuration(time.Second).Errorv(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
fmt.Println(w.String())
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
validate(t, w.String(), true, true)
|
||||||
|
w.Reset()
|
||||||
|
l.WithDuration(time.Second).Errorw(testlog, Field("foo", "bar"))
|
||||||
|
fmt.Println(w.String())
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
|
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceInfo(t *testing.T) {
|
func TestTraceInfo(t *testing.T) {
|
||||||
var buf mockWriter
|
w := new(mockWriter)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
old := writer.Swap(w)
|
||||||
infoLog = newLogWriter(log.New(&buf, "", flags))
|
writer.lock.RLock()
|
||||||
|
defer func() {
|
||||||
|
writer.lock.RUnlock()
|
||||||
|
writer.Store(old)
|
||||||
|
}()
|
||||||
|
|
||||||
otp := otel.GetTracerProvider()
|
otp := otel.GetTracerProvider()
|
||||||
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
otel.SetTracerProvider(tp)
|
otel.SetTracerProvider(tp)
|
||||||
defer otel.SetTracerProvider(otp)
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
ctx, span := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
l := WithContext(ctx).(*traceLogger)
|
defer span.End()
|
||||||
|
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
|
l := WithContext(ctx)
|
||||||
l.WithDuration(time.Second).Info(testlog)
|
l.WithDuration(time.Second).Info(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
w.Reset()
|
||||||
buf.Reset()
|
|
||||||
l.WithDuration(time.Second).Infof(testlog)
|
l.WithDuration(time.Second).Infof(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
w.Reset()
|
||||||
buf.Reset()
|
|
||||||
l.WithDuration(time.Second).Infov(testlog)
|
l.WithDuration(time.Second).Infov(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
w.Reset()
|
||||||
|
l.WithDuration(time.Second).Infow(testlog, Field("foo", "bar"))
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
|
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceInfoConsole(t *testing.T) {
|
func TestTraceInfoConsole(t *testing.T) {
|
||||||
old := encoding
|
old := atomic.SwapUint32(&encoding, jsonEncodingType)
|
||||||
encoding = plainEncodingType
|
defer atomic.StoreUint32(&encoding, old)
|
||||||
|
|
||||||
|
w := new(mockWriter)
|
||||||
|
o := writer.Swap(w)
|
||||||
|
writer.lock.RLock()
|
||||||
defer func() {
|
defer func() {
|
||||||
encoding = old
|
writer.lock.RUnlock()
|
||||||
|
writer.Store(o)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var buf mockWriter
|
|
||||||
atomic.StoreUint32(&initialized, 1)
|
|
||||||
infoLog = newLogWriter(log.New(&buf, "", flags))
|
|
||||||
otp := otel.GetTracerProvider()
|
otp := otel.GetTracerProvider()
|
||||||
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
otel.SetTracerProvider(tp)
|
otel.SetTracerProvider(tp)
|
||||||
defer otel.SetTracerProvider(otp)
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
ctx, span := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
l := WithContext(ctx).(*traceLogger)
|
defer span.End()
|
||||||
|
|
||||||
|
l := WithContext(ctx)
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
l.WithDuration(time.Second).Info(testlog)
|
l.WithDuration(time.Second).Info(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceIdFromContext(ctx)))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanIdFromContext(ctx)))
|
w.Reset()
|
||||||
buf.Reset()
|
|
||||||
l.WithDuration(time.Second).Infof(testlog)
|
l.WithDuration(time.Second).Infof(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceIdFromContext(ctx)))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanIdFromContext(ctx)))
|
w.Reset()
|
||||||
buf.Reset()
|
|
||||||
l.WithDuration(time.Second).Infov(testlog)
|
l.WithDuration(time.Second).Infov(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceIdFromContext(ctx)))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanIdFromContext(ctx)))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceSlow(t *testing.T) {
|
func TestTraceSlow(t *testing.T) {
|
||||||
var buf mockWriter
|
w := new(mockWriter)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
old := writer.Swap(w)
|
||||||
slowLog = newLogWriter(log.New(&buf, "", flags))
|
writer.lock.RLock()
|
||||||
|
defer func() {
|
||||||
|
writer.lock.RUnlock()
|
||||||
|
writer.Store(old)
|
||||||
|
}()
|
||||||
|
|
||||||
otp := otel.GetTracerProvider()
|
otp := otel.GetTracerProvider()
|
||||||
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
|
||||||
otel.SetTracerProvider(tp)
|
otel.SetTracerProvider(tp)
|
||||||
defer otel.SetTracerProvider(otp)
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
|
ctx, span := tp.Tracer("foo").Start(context.Background(), "bar")
|
||||||
l := WithContext(ctx).(*traceLogger)
|
defer span.End()
|
||||||
|
|
||||||
|
l := WithContext(ctx)
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
l.WithDuration(time.Second).Slow(testlog)
|
l.WithDuration(time.Second).Slow(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
assert.True(t, strings.Contains(w.String(), traceKey))
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
assert.True(t, strings.Contains(w.String(), spanKey))
|
||||||
buf.Reset()
|
w.Reset()
|
||||||
l.WithDuration(time.Second).Slowf(testlog)
|
l.WithDuration(time.Second).Slowf(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
fmt.Println("buf:", w.String())
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
validate(t, w.String(), true, true)
|
||||||
buf.Reset()
|
w.Reset()
|
||||||
l.WithDuration(time.Second).Slowv(testlog)
|
l.WithDuration(time.Second).Slowv(testlog)
|
||||||
assert.True(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), true, true)
|
||||||
assert.True(t, strings.Contains(buf.String(), spanKey))
|
w.Reset()
|
||||||
|
l.WithDuration(time.Second).Sloww(testlog, Field("foo", "bar"))
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
|
assert.True(t, strings.Contains(w.String(), "foo"), w.String())
|
||||||
|
assert.True(t, strings.Contains(w.String(), "bar"), w.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTraceWithoutContext(t *testing.T) {
|
func TestTraceWithoutContext(t *testing.T) {
|
||||||
var buf mockWriter
|
w := new(mockWriter)
|
||||||
atomic.StoreUint32(&initialized, 1)
|
old := writer.Swap(w)
|
||||||
infoLog = newLogWriter(log.New(&buf, "", flags))
|
writer.lock.RLock()
|
||||||
l := WithContext(context.Background()).(*traceLogger)
|
defer func() {
|
||||||
|
writer.lock.RUnlock()
|
||||||
|
writer.Store(old)
|
||||||
|
}()
|
||||||
|
|
||||||
|
l := WithContext(context.Background())
|
||||||
SetLevel(InfoLevel)
|
SetLevel(InfoLevel)
|
||||||
l.WithDuration(time.Second).Info(testlog)
|
l.WithDuration(time.Second).Info(testlog)
|
||||||
assert.False(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), false, false)
|
||||||
assert.False(t, strings.Contains(buf.String(), spanKey))
|
w.Reset()
|
||||||
buf.Reset()
|
|
||||||
l.WithDuration(time.Second).Infof(testlog)
|
l.WithDuration(time.Second).Infof(testlog)
|
||||||
assert.False(t, strings.Contains(buf.String(), traceKey))
|
validate(t, w.String(), false, false)
|
||||||
assert.False(t, strings.Contains(buf.String(), spanKey))
|
}
|
||||||
|
|
||||||
|
func validate(t *testing.T, body string, expectedTrace, expectedSpan bool) {
|
||||||
|
var val mockValue
|
||||||
|
assert.Nil(t, json.Unmarshal([]byte(body), &val), body)
|
||||||
|
assert.Equal(t, expectedTrace, len(val.Trace) > 0, body)
|
||||||
|
assert.Equal(t, expectedSpan, len(val.Span) > 0, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockValue struct {
|
||||||
|
Trace string `json:"trace"`
|
||||||
|
Span string `json:"span"`
|
||||||
}
|
}
|
||||||
|
|||||||
35
core/logx/util.go
Normal file
35
core/logx/util.go
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func getCaller(callDepth int) string {
|
||||||
|
_, file, line, ok := runtime.Caller(callDepth)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return prettyCaller(file, line)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTimestamp() string {
|
||||||
|
return time.Now().Format(timeFormat)
|
||||||
|
}
|
||||||
|
|
||||||
|
func prettyCaller(file string, line int) string {
|
||||||
|
idx := strings.LastIndexByte(file, '/')
|
||||||
|
if idx < 0 {
|
||||||
|
return fmt.Sprintf("%s:%d", file, line)
|
||||||
|
}
|
||||||
|
|
||||||
|
idx = strings.LastIndexByte(file[:idx], '/')
|
||||||
|
if idx < 0 {
|
||||||
|
return fmt.Sprintf("%s:%d", file, line)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%s:%d", file[idx+1:], line)
|
||||||
|
}
|
||||||
72
core/logx/util_test.go
Normal file
72
core/logx/util_test.go
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGetCaller(t *testing.T) {
|
||||||
|
_, file, _, _ := runtime.Caller(0)
|
||||||
|
assert.Contains(t, getCaller(1), filepath.Base(file))
|
||||||
|
assert.True(t, len(getCaller(1<<10)) == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetTimestamp(t *testing.T) {
|
||||||
|
ts := getTimestamp()
|
||||||
|
tm, err := time.Parse(timeFormat, ts)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.True(t, time.Since(tm) < time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrettyCaller(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
file string
|
||||||
|
line int
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "regular",
|
||||||
|
file: "logx_test.go",
|
||||||
|
line: 123,
|
||||||
|
want: "logx_test.go:123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "relative",
|
||||||
|
file: "adhoc/logx_test.go",
|
||||||
|
line: 123,
|
||||||
|
want: "adhoc/logx_test.go:123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "long path",
|
||||||
|
file: "github.com/zeromicro/go-zero/core/logx/util_test.go",
|
||||||
|
line: 12,
|
||||||
|
want: "logx/util_test.go:12",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "local path",
|
||||||
|
file: "/Users/kevin/go-zero/core/logx/util_test.go",
|
||||||
|
line: 1234,
|
||||||
|
want: "logx/util_test.go:1234",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
assert.Equal(t, test.want, prettyCaller(test.file, test.line))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkGetCaller(b *testing.B) {
|
||||||
|
b.ReportAllocs()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
getCaller(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
61
core/logx/vars.go
Normal file
61
core/logx/vars.go
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// InfoLevel logs everything
|
||||||
|
InfoLevel uint32 = iota
|
||||||
|
// ErrorLevel includes errors, slows, stacks
|
||||||
|
ErrorLevel
|
||||||
|
// SevereLevel only log severe messages
|
||||||
|
SevereLevel
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
jsonEncodingType = iota
|
||||||
|
plainEncodingType
|
||||||
|
|
||||||
|
jsonEncoding = "json"
|
||||||
|
plainEncoding = "plain"
|
||||||
|
plainEncodingSep = '\t'
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
accessFilename = "access.log"
|
||||||
|
errorFilename = "error.log"
|
||||||
|
severeFilename = "severe.log"
|
||||||
|
slowFilename = "slow.log"
|
||||||
|
statFilename = "stat.log"
|
||||||
|
|
||||||
|
consoleMode = "console"
|
||||||
|
fileMode = "file"
|
||||||
|
volumeMode = "volume"
|
||||||
|
|
||||||
|
levelAlert = "alert"
|
||||||
|
levelInfo = "info"
|
||||||
|
levelError = "error"
|
||||||
|
levelSevere = "severe"
|
||||||
|
levelFatal = "fatal"
|
||||||
|
levelSlow = "slow"
|
||||||
|
levelStat = "stat"
|
||||||
|
|
||||||
|
backupFileDelimiter = "-"
|
||||||
|
flags = 0x0
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
callerKey = "caller"
|
||||||
|
contentKey = "content"
|
||||||
|
durationKey = "duration"
|
||||||
|
levelKey = "level"
|
||||||
|
spanKey = "span"
|
||||||
|
timestampKey = "@timestamp"
|
||||||
|
traceKey = "trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrLogPathNotSet is an error that indicates the log path is not set.
|
||||||
|
ErrLogPathNotSet = errors.New("log path must be set")
|
||||||
|
// ErrLogServiceNameNotSet is an error that indicates that the service name is not set.
|
||||||
|
ErrLogServiceNameNotSet = errors.New("log service name must be set")
|
||||||
|
)
|
||||||
348
core/logx/writer.go
Normal file
348
core/logx/writer.go
Normal file
@@ -0,0 +1,348 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/color"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
Writer interface {
|
||||||
|
Alert(v interface{})
|
||||||
|
Close() error
|
||||||
|
Error(v interface{}, fields ...LogField)
|
||||||
|
Info(v interface{}, fields ...LogField)
|
||||||
|
Severe(v interface{})
|
||||||
|
Slow(v interface{}, fields ...LogField)
|
||||||
|
Stack(v interface{})
|
||||||
|
Stat(v interface{}, fields ...LogField)
|
||||||
|
}
|
||||||
|
|
||||||
|
atomicWriter struct {
|
||||||
|
writer Writer
|
||||||
|
lock sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
concreteWriter struct {
|
||||||
|
infoLog io.WriteCloser
|
||||||
|
errorLog io.WriteCloser
|
||||||
|
severeLog io.WriteCloser
|
||||||
|
slowLog io.WriteCloser
|
||||||
|
statLog io.WriteCloser
|
||||||
|
stackLog io.Writer
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewWriter creates a new Writer with the given io.Writer.
|
||||||
|
func NewWriter(w io.Writer) Writer {
|
||||||
|
lw := newLogWriter(log.New(w, "", flags))
|
||||||
|
|
||||||
|
return &concreteWriter{
|
||||||
|
infoLog: lw,
|
||||||
|
errorLog: lw,
|
||||||
|
severeLog: lw,
|
||||||
|
slowLog: lw,
|
||||||
|
statLog: lw,
|
||||||
|
stackLog: lw,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *atomicWriter) Load() Writer {
|
||||||
|
w.lock.RLock()
|
||||||
|
defer w.lock.RUnlock()
|
||||||
|
return w.writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *atomicWriter) Store(v Writer) {
|
||||||
|
w.lock.Lock()
|
||||||
|
w.writer = v
|
||||||
|
w.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *atomicWriter) Swap(v Writer) Writer {
|
||||||
|
w.lock.Lock()
|
||||||
|
old := w.writer
|
||||||
|
w.writer = v
|
||||||
|
w.lock.Unlock()
|
||||||
|
return old
|
||||||
|
}
|
||||||
|
|
||||||
|
func newConsoleWriter() Writer {
|
||||||
|
outLog := newLogWriter(log.New(os.Stdout, "", flags))
|
||||||
|
errLog := newLogWriter(log.New(os.Stderr, "", flags))
|
||||||
|
return &concreteWriter{
|
||||||
|
infoLog: outLog,
|
||||||
|
errorLog: errLog,
|
||||||
|
severeLog: errLog,
|
||||||
|
slowLog: errLog,
|
||||||
|
stackLog: newLessWriter(errLog, options.logStackCooldownMills),
|
||||||
|
statLog: outLog,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newFileWriter(c LogConf) (Writer, error) {
|
||||||
|
var err error
|
||||||
|
var opts []LogOption
|
||||||
|
var infoLog io.WriteCloser
|
||||||
|
var errorLog io.WriteCloser
|
||||||
|
var severeLog io.WriteCloser
|
||||||
|
var slowLog io.WriteCloser
|
||||||
|
var statLog io.WriteCloser
|
||||||
|
var stackLog io.Writer
|
||||||
|
|
||||||
|
if len(c.Path) == 0 {
|
||||||
|
return nil, ErrLogPathNotSet
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = append(opts, WithCooldownMillis(c.StackCooldownMillis))
|
||||||
|
if c.Compress {
|
||||||
|
opts = append(opts, WithGzip())
|
||||||
|
}
|
||||||
|
if c.KeepDays > 0 {
|
||||||
|
opts = append(opts, WithKeepDays(c.KeepDays))
|
||||||
|
}
|
||||||
|
|
||||||
|
accessFile := path.Join(c.Path, accessFilename)
|
||||||
|
errorFile := path.Join(c.Path, errorFilename)
|
||||||
|
severeFile := path.Join(c.Path, severeFilename)
|
||||||
|
slowFile := path.Join(c.Path, slowFilename)
|
||||||
|
statFile := path.Join(c.Path, statFilename)
|
||||||
|
|
||||||
|
handleOptions(opts)
|
||||||
|
setupLogLevel(c)
|
||||||
|
|
||||||
|
if infoLog, err = createOutput(accessFile); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if errorLog, err = createOutput(errorFile); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if severeLog, err = createOutput(severeFile); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if slowLog, err = createOutput(slowFile); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if statLog, err = createOutput(statFile); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stackLog = newLessWriter(errorLog, options.logStackCooldownMills)
|
||||||
|
|
||||||
|
return &concreteWriter{
|
||||||
|
infoLog: infoLog,
|
||||||
|
errorLog: errorLog,
|
||||||
|
severeLog: severeLog,
|
||||||
|
slowLog: slowLog,
|
||||||
|
statLog: statLog,
|
||||||
|
stackLog: stackLog,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Alert(v interface{}) {
|
||||||
|
output(w.errorLog, levelAlert, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Close() error {
|
||||||
|
if err := w.infoLog.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.errorLog.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.severeLog.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.slowLog.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.statLog.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Error(v interface{}, fields ...LogField) {
|
||||||
|
output(w.errorLog, levelError, v, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Info(v interface{}, fields ...LogField) {
|
||||||
|
output(w.infoLog, levelInfo, v, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Severe(v interface{}) {
|
||||||
|
output(w.severeLog, levelFatal, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Slow(v interface{}, fields ...LogField) {
|
||||||
|
output(w.slowLog, levelSlow, v, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Stack(v interface{}) {
|
||||||
|
output(w.stackLog, levelError, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *concreteWriter) Stat(v interface{}, fields ...LogField) {
|
||||||
|
output(w.statLog, levelStat, v, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
type nopWriter struct{}
|
||||||
|
|
||||||
|
func (n nopWriter) Alert(_ interface{}) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nopWriter) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nopWriter) Error(_ interface{}, _ ...LogField) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nopWriter) Info(_ interface{}, _ ...LogField) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nopWriter) Severe(_ interface{}) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nopWriter) Slow(_ interface{}, _ ...LogField) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nopWriter) Stack(_ interface{}) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nopWriter) Stat(_ interface{}, _ ...LogField) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildFields(fields ...LogField) []string {
|
||||||
|
var items []string
|
||||||
|
|
||||||
|
for _, field := range fields {
|
||||||
|
items = append(items, fmt.Sprintf("%s=%v", field.Key, field.Value))
|
||||||
|
}
|
||||||
|
|
||||||
|
return items
|
||||||
|
}
|
||||||
|
|
||||||
|
func output(writer io.Writer, level string, val interface{}, fields ...LogField) {
|
||||||
|
fields = append(fields, Field(callerKey, getCaller(callerDepth)))
|
||||||
|
|
||||||
|
switch atomic.LoadUint32(&encoding) {
|
||||||
|
case plainEncodingType:
|
||||||
|
writePlainAny(writer, level, val, buildFields(fields...)...)
|
||||||
|
default:
|
||||||
|
entry := make(logEntryWithFields)
|
||||||
|
for _, field := range fields {
|
||||||
|
entry[field.Key] = field.Value
|
||||||
|
}
|
||||||
|
entry[timestampKey] = getTimestamp()
|
||||||
|
entry[levelKey] = level
|
||||||
|
entry[contentKey] = val
|
||||||
|
writeJson(writer, entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapLevelWithColor(level string) string {
|
||||||
|
var colour color.Color
|
||||||
|
switch level {
|
||||||
|
case levelAlert:
|
||||||
|
colour = color.FgRed
|
||||||
|
case levelError:
|
||||||
|
colour = color.FgRed
|
||||||
|
case levelFatal:
|
||||||
|
colour = color.FgRed
|
||||||
|
case levelInfo:
|
||||||
|
colour = color.FgBlue
|
||||||
|
case levelSlow:
|
||||||
|
colour = color.FgYellow
|
||||||
|
case levelStat:
|
||||||
|
colour = color.FgGreen
|
||||||
|
}
|
||||||
|
|
||||||
|
if colour == color.NoColor {
|
||||||
|
return level
|
||||||
|
}
|
||||||
|
|
||||||
|
return color.WithColorPadding(level, colour)
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeJson(writer io.Writer, info interface{}) {
|
||||||
|
if content, err := json.Marshal(info); err != nil {
|
||||||
|
log.Println(err.Error())
|
||||||
|
} else if writer == nil {
|
||||||
|
log.Println(string(content))
|
||||||
|
} else {
|
||||||
|
writer.Write(append(content, '\n'))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func writePlainAny(writer io.Writer, level string, val interface{}, fields ...string) {
|
||||||
|
level = wrapLevelWithColor(level)
|
||||||
|
|
||||||
|
switch v := val.(type) {
|
||||||
|
case string:
|
||||||
|
writePlainText(writer, level, v, fields...)
|
||||||
|
case error:
|
||||||
|
writePlainText(writer, level, v.Error(), fields...)
|
||||||
|
case fmt.Stringer:
|
||||||
|
writePlainText(writer, level, v.String(), fields...)
|
||||||
|
default:
|
||||||
|
var buf strings.Builder
|
||||||
|
buf.WriteString(getTimestamp())
|
||||||
|
buf.WriteByte(plainEncodingSep)
|
||||||
|
buf.WriteString(level)
|
||||||
|
buf.WriteByte(plainEncodingSep)
|
||||||
|
if err := json.NewEncoder(&buf).Encode(val); err != nil {
|
||||||
|
log.Println(err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, item := range fields {
|
||||||
|
buf.WriteByte(plainEncodingSep)
|
||||||
|
buf.WriteString(item)
|
||||||
|
}
|
||||||
|
buf.WriteByte('\n')
|
||||||
|
if writer == nil {
|
||||||
|
log.Println(buf.String())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := fmt.Fprint(writer, buf.String()); err != nil {
|
||||||
|
log.Println(err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func writePlainText(writer io.Writer, level, msg string, fields ...string) {
|
||||||
|
var buf strings.Builder
|
||||||
|
buf.WriteString(getTimestamp())
|
||||||
|
buf.WriteByte(plainEncodingSep)
|
||||||
|
buf.WriteString(level)
|
||||||
|
buf.WriteByte(plainEncodingSep)
|
||||||
|
buf.WriteString(msg)
|
||||||
|
for _, item := range fields {
|
||||||
|
buf.WriteByte(plainEncodingSep)
|
||||||
|
buf.WriteString(item)
|
||||||
|
}
|
||||||
|
buf.WriteByte('\n')
|
||||||
|
if writer == nil {
|
||||||
|
log.Println(buf.String())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := fmt.Fprint(writer, buf.String()); err != nil {
|
||||||
|
log.Println(err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
179
core/logx/writer_test.go
Normal file
179
core/logx/writer_test.go
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewWriter(t *testing.T) {
|
||||||
|
const literal = "foo bar"
|
||||||
|
var buf bytes.Buffer
|
||||||
|
w := NewWriter(&buf)
|
||||||
|
w.Info(literal)
|
||||||
|
assert.Contains(t, buf.String(), literal)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConsoleWriter(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
w := newConsoleWriter()
|
||||||
|
lw := newLogWriter(log.New(&buf, "", 0))
|
||||||
|
w.(*concreteWriter).errorLog = lw
|
||||||
|
w.Alert("foo bar 1")
|
||||||
|
var val mockedEntry
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &val); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, levelAlert, val.Level)
|
||||||
|
assert.Equal(t, "foo bar 1", val.Content)
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
w.(*concreteWriter).errorLog = lw
|
||||||
|
w.Error("foo bar 2")
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &val); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, levelError, val.Level)
|
||||||
|
assert.Equal(t, "foo bar 2", val.Content)
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
w.(*concreteWriter).infoLog = lw
|
||||||
|
w.Info("foo bar 3")
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &val); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, levelInfo, val.Level)
|
||||||
|
assert.Equal(t, "foo bar 3", val.Content)
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
w.(*concreteWriter).severeLog = lw
|
||||||
|
w.Severe("foo bar 4")
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &val); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, levelFatal, val.Level)
|
||||||
|
assert.Equal(t, "foo bar 4", val.Content)
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
w.(*concreteWriter).slowLog = lw
|
||||||
|
w.Slow("foo bar 5")
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &val); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, levelSlow, val.Level)
|
||||||
|
assert.Equal(t, "foo bar 5", val.Content)
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
w.(*concreteWriter).statLog = lw
|
||||||
|
w.Stat("foo bar 6")
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &val); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, levelStat, val.Level)
|
||||||
|
assert.Equal(t, "foo bar 6", val.Content)
|
||||||
|
|
||||||
|
w.(*concreteWriter).infoLog = hardToCloseWriter{}
|
||||||
|
assert.NotNil(t, w.Close())
|
||||||
|
w.(*concreteWriter).infoLog = easyToCloseWriter{}
|
||||||
|
w.(*concreteWriter).errorLog = hardToCloseWriter{}
|
||||||
|
assert.NotNil(t, w.Close())
|
||||||
|
w.(*concreteWriter).errorLog = easyToCloseWriter{}
|
||||||
|
w.(*concreteWriter).severeLog = hardToCloseWriter{}
|
||||||
|
assert.NotNil(t, w.Close())
|
||||||
|
w.(*concreteWriter).severeLog = easyToCloseWriter{}
|
||||||
|
w.(*concreteWriter).slowLog = hardToCloseWriter{}
|
||||||
|
assert.NotNil(t, w.Close())
|
||||||
|
w.(*concreteWriter).slowLog = easyToCloseWriter{}
|
||||||
|
w.(*concreteWriter).statLog = hardToCloseWriter{}
|
||||||
|
assert.NotNil(t, w.Close())
|
||||||
|
w.(*concreteWriter).statLog = easyToCloseWriter{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNopWriter(t *testing.T) {
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
var w nopWriter
|
||||||
|
w.Alert("foo")
|
||||||
|
w.Error("foo")
|
||||||
|
w.Info("foo")
|
||||||
|
w.Severe("foo")
|
||||||
|
w.Stack("foo")
|
||||||
|
w.Stat("foo")
|
||||||
|
w.Slow("foo")
|
||||||
|
w.Close()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJson(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
log.SetOutput(&buf)
|
||||||
|
writeJson(nil, "foo")
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
buf.Reset()
|
||||||
|
writeJson(nil, make(chan int))
|
||||||
|
assert.Contains(t, buf.String(), "unsupported type")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWritePlainAny(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
log.SetOutput(&buf)
|
||||||
|
writePlainAny(nil, levelInfo, "foo")
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
writePlainAny(nil, levelError, make(chan int))
|
||||||
|
assert.Contains(t, buf.String(), "unsupported type")
|
||||||
|
writePlainAny(nil, levelSlow, 100)
|
||||||
|
assert.Contains(t, buf.String(), "100")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
writePlainAny(hardToWriteWriter{}, levelStat, 100)
|
||||||
|
assert.Contains(t, buf.String(), "write error")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
writePlainAny(hardToWriteWriter{}, levelSevere, "foo")
|
||||||
|
assert.Contains(t, buf.String(), "write error")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
writePlainAny(hardToWriteWriter{}, levelAlert, "foo")
|
||||||
|
assert.Contains(t, buf.String(), "write error")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
writePlainAny(hardToWriteWriter{}, levelFatal, "foo")
|
||||||
|
assert.Contains(t, buf.String(), "write error")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockedEntry struct {
|
||||||
|
Level string `json:"level"`
|
||||||
|
Content string `json:"content"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type easyToCloseWriter struct{}
|
||||||
|
|
||||||
|
func (h easyToCloseWriter) Write(_ []byte) (_ int, _ error) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h easyToCloseWriter) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type hardToCloseWriter struct{}
|
||||||
|
|
||||||
|
func (h hardToCloseWriter) Write(_ []byte) (_ int, _ error) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h hardToCloseWriter) Close() error {
|
||||||
|
return errors.New("close error")
|
||||||
|
}
|
||||||
|
|
||||||
|
type hardToWriteWriter struct{}
|
||||||
|
|
||||||
|
func (h hardToWriteWriter) Write(_ []byte) (_ int, _ error) {
|
||||||
|
return 0, errors.New("write error")
|
||||||
|
}
|
||||||
186
core/mapping/marshaler.go
Normal file
186
core/mapping/marshaler.go
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
package mapping
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
emptyTag = ""
|
||||||
|
tagKVSeparator = ":"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshal marshals the given val and returns the map that contains the fields.
|
||||||
|
// optional=another is not implemented, and it's hard to implement and not common used.
|
||||||
|
func Marshal(val interface{}) (map[string]map[string]interface{}, error) {
|
||||||
|
ret := make(map[string]map[string]interface{})
|
||||||
|
tp := reflect.TypeOf(val)
|
||||||
|
if tp.Kind() == reflect.Ptr {
|
||||||
|
tp = tp.Elem()
|
||||||
|
}
|
||||||
|
rv := reflect.ValueOf(val)
|
||||||
|
if rv.Kind() == reflect.Ptr {
|
||||||
|
rv = rv.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < tp.NumField(); i++ {
|
||||||
|
field := tp.Field(i)
|
||||||
|
value := rv.Field(i)
|
||||||
|
if err := processMember(field, value, ret); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTag(field reflect.StructField) (string, bool) {
|
||||||
|
tag := string(field.Tag)
|
||||||
|
if i := strings.Index(tag, tagKVSeparator); i >= 0 {
|
||||||
|
return strings.TrimSpace(tag[:i]), true
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.TrimSpace(tag), false
|
||||||
|
}
|
||||||
|
|
||||||
|
func processMember(field reflect.StructField, value reflect.Value,
|
||||||
|
collector map[string]map[string]interface{}) error {
|
||||||
|
var key string
|
||||||
|
var opt *fieldOptions
|
||||||
|
var err error
|
||||||
|
tag, ok := getTag(field)
|
||||||
|
if !ok {
|
||||||
|
tag = emptyTag
|
||||||
|
key = field.Name
|
||||||
|
} else {
|
||||||
|
key, opt, err = parseKeyAndOptions(tag, field)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = validate(field, value, opt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
val := value.Interface()
|
||||||
|
if opt != nil && opt.FromString {
|
||||||
|
val = fmt.Sprint(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
m, ok := collector[tag]
|
||||||
|
if ok {
|
||||||
|
m[key] = val
|
||||||
|
} else {
|
||||||
|
m = map[string]interface{}{
|
||||||
|
key: val,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
collector[tag] = m
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validate(field reflect.StructField, value reflect.Value, opt *fieldOptions) error {
|
||||||
|
if opt == nil || !opt.Optional {
|
||||||
|
if err := validateOptional(field, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.Optional && value.IsZero() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(opt.Options) > 0 {
|
||||||
|
if err := validateOptions(value, opt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.Range != nil {
|
||||||
|
if err := validateRange(value, opt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateOptional(field reflect.StructField, value reflect.Value) error {
|
||||||
|
switch field.Type.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
if value.IsNil() {
|
||||||
|
return fmt.Errorf("field %q is nil", field.Name)
|
||||||
|
}
|
||||||
|
case reflect.Array, reflect.Slice, reflect.Map:
|
||||||
|
if value.IsNil() || value.Len() == 0 {
|
||||||
|
return fmt.Errorf("field %q is empty", field.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateOptions(value reflect.Value, opt *fieldOptions) error {
|
||||||
|
var found bool
|
||||||
|
val := fmt.Sprint(value.Interface())
|
||||||
|
for i := range opt.Options {
|
||||||
|
if opt.Options[i] == val {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
return fmt.Errorf("field %q not in options", val)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateRange(value reflect.Value, opt *fieldOptions) error {
|
||||||
|
var val float64
|
||||||
|
switch v := value.Interface().(type) {
|
||||||
|
case int:
|
||||||
|
val = float64(v)
|
||||||
|
case int8:
|
||||||
|
val = float64(v)
|
||||||
|
case int16:
|
||||||
|
val = float64(v)
|
||||||
|
case int32:
|
||||||
|
val = float64(v)
|
||||||
|
case int64:
|
||||||
|
val = float64(v)
|
||||||
|
case uint:
|
||||||
|
val = float64(v)
|
||||||
|
case uint8:
|
||||||
|
val = float64(v)
|
||||||
|
case uint16:
|
||||||
|
val = float64(v)
|
||||||
|
case uint32:
|
||||||
|
val = float64(v)
|
||||||
|
case uint64:
|
||||||
|
val = float64(v)
|
||||||
|
case float32:
|
||||||
|
val = float64(v)
|
||||||
|
case float64:
|
||||||
|
val = v
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown support type for range %q", value.Type().String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// validates [left, right], [left, right), (left, right], (left, right)
|
||||||
|
if val < opt.Range.left ||
|
||||||
|
(!opt.Range.leftInclude && val == opt.Range.left) ||
|
||||||
|
val > opt.Range.right ||
|
||||||
|
(!opt.Range.rightInclude && val == opt.Range.right) {
|
||||||
|
return fmt.Errorf("%v out of range", value.Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
274
core/mapping/marshaler_test.go
Normal file
274
core/mapping/marshaler_test.go
Normal file
@@ -0,0 +1,274 @@
|
|||||||
|
package mapping
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMarshal(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Name string `path:"name"`
|
||||||
|
Address string `json:"address,options=[beijing,shanghai]"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
Anonymous bool
|
||||||
|
}{
|
||||||
|
Name: "kevin",
|
||||||
|
Address: "shanghai",
|
||||||
|
Age: 20,
|
||||||
|
Anonymous: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "kevin", m["path"]["name"])
|
||||||
|
assert.Equal(t, "shanghai", m["json"]["address"])
|
||||||
|
assert.Equal(t, 20, m["json"]["age"].(int))
|
||||||
|
assert.True(t, m[emptyTag]["Anonymous"].(bool))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_Ptr(t *testing.T) {
|
||||||
|
v := &struct {
|
||||||
|
Name string `path:"name"`
|
||||||
|
Address string `json:"address,options=[beijing,shanghai]"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
Anonymous bool
|
||||||
|
}{
|
||||||
|
Name: "kevin",
|
||||||
|
Address: "shanghai",
|
||||||
|
Age: 20,
|
||||||
|
Anonymous: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "kevin", m["path"]["name"])
|
||||||
|
assert.Equal(t, "shanghai", m["json"]["address"])
|
||||||
|
assert.Equal(t, 20, m["json"]["age"].(int))
|
||||||
|
assert.True(t, m[emptyTag]["Anonymous"].(bool))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_OptionalPtr(t *testing.T) {
|
||||||
|
var val = 1
|
||||||
|
v := struct {
|
||||||
|
Age *int `json:"age"`
|
||||||
|
}{
|
||||||
|
Age: &val,
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 1, *m["json"]["age"].(*int))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_OptionalPtrNil(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Age *int `json:"age"`
|
||||||
|
}{}
|
||||||
|
|
||||||
|
_, err := Marshal(v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_BadOptions(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Name string `json:"name,options"`
|
||||||
|
}{
|
||||||
|
Name: "kevin",
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := Marshal(v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_NotInOptions(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Name string `json:"name,options=[a,b]"`
|
||||||
|
}{
|
||||||
|
Name: "kevin",
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := Marshal(v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_NotInOptionsOptional(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Name string `json:"name,options=[a,b],optional"`
|
||||||
|
}{}
|
||||||
|
|
||||||
|
_, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_NotInOptionsOptionalWrongValue(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Name string `json:"name,options=[a,b],optional"`
|
||||||
|
}{
|
||||||
|
Name: "kevin",
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := Marshal(v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_Nested(t *testing.T) {
|
||||||
|
type address struct {
|
||||||
|
Country string `json:"country"`
|
||||||
|
City string `json:"city"`
|
||||||
|
}
|
||||||
|
v := struct {
|
||||||
|
Name string `json:"name,options=[kevin,wan]"`
|
||||||
|
Address address `json:"address"`
|
||||||
|
}{
|
||||||
|
Name: "kevin",
|
||||||
|
Address: address{
|
||||||
|
Country: "China",
|
||||||
|
City: "Shanghai",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "kevin", m["json"]["name"])
|
||||||
|
assert.Equal(t, "China", m["json"]["address"].(address).Country)
|
||||||
|
assert.Equal(t, "Shanghai", m["json"]["address"].(address).City)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_NestedPtr(t *testing.T) {
|
||||||
|
type address struct {
|
||||||
|
Country string `json:"country"`
|
||||||
|
City string `json:"city"`
|
||||||
|
}
|
||||||
|
v := struct {
|
||||||
|
Name string `json:"name,options=[kevin,wan]"`
|
||||||
|
Address *address `json:"address"`
|
||||||
|
}{
|
||||||
|
Name: "kevin",
|
||||||
|
Address: &address{
|
||||||
|
Country: "China",
|
||||||
|
City: "Shanghai",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "kevin", m["json"]["name"])
|
||||||
|
assert.Equal(t, "China", m["json"]["address"].(*address).Country)
|
||||||
|
assert.Equal(t, "Shanghai", m["json"]["address"].(*address).City)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_Slice(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Name []string `json:"name"`
|
||||||
|
}{
|
||||||
|
Name: []string{"kevin", "wan"},
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.ElementsMatch(t, []string{"kevin", "wan"}, m["json"]["name"].([]string))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_SliceNil(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Name []string `json:"name"`
|
||||||
|
}{
|
||||||
|
Name: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := Marshal(v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_Range(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Int int `json:"int,range=[1:3]"`
|
||||||
|
Int8 int8 `json:"int8,range=[1:3)"`
|
||||||
|
Int16 int16 `json:"int16,range=(1:3]"`
|
||||||
|
Int32 int32 `json:"int32,range=(1:3)"`
|
||||||
|
Int64 int64 `json:"int64,range=(1:3)"`
|
||||||
|
Uint uint `json:"uint,range=[1:3]"`
|
||||||
|
Uint8 uint8 `json:"uint8,range=[1:3)"`
|
||||||
|
Uint16 uint16 `json:"uint16,range=(1:3]"`
|
||||||
|
Uint32 uint32 `json:"uint32,range=(1:3)"`
|
||||||
|
Uint64 uint64 `json:"uint64,range=(1:3)"`
|
||||||
|
Float32 float32 `json:"float32,range=(1:3)"`
|
||||||
|
Float64 float64 `json:"float64,range=(1:3)"`
|
||||||
|
}{
|
||||||
|
Int: 1,
|
||||||
|
Int8: 1,
|
||||||
|
Int16: 2,
|
||||||
|
Int32: 2,
|
||||||
|
Int64: 2,
|
||||||
|
Uint: 1,
|
||||||
|
Uint8: 1,
|
||||||
|
Uint16: 2,
|
||||||
|
Uint32: 2,
|
||||||
|
Uint64: 2,
|
||||||
|
Float32: 2,
|
||||||
|
Float64: 2,
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 1, m["json"]["int"].(int))
|
||||||
|
assert.Equal(t, int8(1), m["json"]["int8"].(int8))
|
||||||
|
assert.Equal(t, int16(2), m["json"]["int16"].(int16))
|
||||||
|
assert.Equal(t, int32(2), m["json"]["int32"].(int32))
|
||||||
|
assert.Equal(t, int64(2), m["json"]["int64"].(int64))
|
||||||
|
assert.Equal(t, uint(1), m["json"]["uint"].(uint))
|
||||||
|
assert.Equal(t, uint8(1), m["json"]["uint8"].(uint8))
|
||||||
|
assert.Equal(t, uint16(2), m["json"]["uint16"].(uint16))
|
||||||
|
assert.Equal(t, uint32(2), m["json"]["uint32"].(uint32))
|
||||||
|
assert.Equal(t, uint64(2), m["json"]["uint64"].(uint64))
|
||||||
|
assert.Equal(t, float32(2), m["json"]["float32"].(float32))
|
||||||
|
assert.Equal(t, float64(2), m["json"]["float64"].(float64))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_RangeOut(t *testing.T) {
|
||||||
|
tests := []interface{}{
|
||||||
|
struct {
|
||||||
|
Int int `json:"int,range=[1:3]"`
|
||||||
|
}{
|
||||||
|
Int: 4,
|
||||||
|
},
|
||||||
|
struct {
|
||||||
|
Int int `json:"int,range=(1:3]"`
|
||||||
|
}{
|
||||||
|
Int: 1,
|
||||||
|
},
|
||||||
|
struct {
|
||||||
|
Int int `json:"int,range=[1:3)"`
|
||||||
|
}{
|
||||||
|
Int: 3,
|
||||||
|
},
|
||||||
|
struct {
|
||||||
|
Int int `json:"int,range=(1:3)"`
|
||||||
|
}{
|
||||||
|
Int: 3,
|
||||||
|
},
|
||||||
|
struct {
|
||||||
|
Bool bool `json:"bool,range=(1:3)"`
|
||||||
|
}{
|
||||||
|
Bool: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
_, err := Marshal(test)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshal_FromString(t *testing.T) {
|
||||||
|
v := struct {
|
||||||
|
Age int `json:"age,string"`
|
||||||
|
}{
|
||||||
|
Age: 10,
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "10", m["json"]["age"].(string))
|
||||||
|
}
|
||||||
29
core/mapping/tomlunmarshaler.go
Normal file
29
core/mapping/tomlunmarshaler.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package mapping
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/pelletier/go-toml/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnmarshalTomlBytes unmarshals TOML bytes into the given v.
|
||||||
|
func UnmarshalTomlBytes(content []byte, v interface{}) error {
|
||||||
|
return UnmarshalTomlReader(bytes.NewReader(content), v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalTomlReader unmarshals TOML from the given io.Reader into the given v.
|
||||||
|
func UnmarshalTomlReader(r io.Reader, v interface{}) error {
|
||||||
|
var val interface{}
|
||||||
|
if err := toml.NewDecoder(r).Decode(&val); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := json.NewEncoder(&buf).Encode(val); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return UnmarshalJsonReader(&buf, v)
|
||||||
|
}
|
||||||
41
core/mapping/tomlunmarshaler_test.go
Normal file
41
core/mapping/tomlunmarshaler_test.go
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
package mapping
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestUnmarshalToml(t *testing.T) {
|
||||||
|
const input = `a = "foo"
|
||||||
|
b = 1
|
||||||
|
c = "${FOO}"
|
||||||
|
d = "abcd!@#$112"
|
||||||
|
`
|
||||||
|
var val struct {
|
||||||
|
A string `json:"a"`
|
||||||
|
B int `json:"b"`
|
||||||
|
C string `json:"c"`
|
||||||
|
D string `json:"d"`
|
||||||
|
}
|
||||||
|
assert.Nil(t, UnmarshalTomlBytes([]byte(input), &val))
|
||||||
|
assert.Equal(t, "foo", val.A)
|
||||||
|
assert.Equal(t, 1, val.B)
|
||||||
|
assert.Equal(t, "${FOO}", val.C)
|
||||||
|
assert.Equal(t, "abcd!@#$112", val.D)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalTomlErrorToml(t *testing.T) {
|
||||||
|
const input = `foo"
|
||||||
|
b = 1
|
||||||
|
c = "${FOO}"
|
||||||
|
d = "abcd!@#$112"
|
||||||
|
`
|
||||||
|
var val struct {
|
||||||
|
A string `json:"a"`
|
||||||
|
B int `json:"b"`
|
||||||
|
C string `json:"c"`
|
||||||
|
D string `json:"d"`
|
||||||
|
}
|
||||||
|
assert.NotNil(t, UnmarshalTomlBytes([]byte(input), &val))
|
||||||
|
}
|
||||||
@@ -97,10 +97,6 @@ func (u *Unmarshaler) unmarshalWithFullName(m Valuer, v interface{}, fullName st
|
|||||||
numFields := rte.NumField()
|
numFields := rte.NumField()
|
||||||
for i := 0; i < numFields; i++ {
|
for i := 0; i < numFields; i++ {
|
||||||
field := rte.Field(i)
|
field := rte.Field(i)
|
||||||
if usingDifferentKeys(u.key, field) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := u.processField(field, rve.Field(i), m, fullName); err != nil {
|
if err := u.processField(field, rve.Field(i), m, fullName); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -275,6 +271,10 @@ func (u *Unmarshaler) processFieldPrimitiveWithJSONNumber(field reflect.StructFi
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if iValue < 0 {
|
||||||
|
return fmt.Errorf("unmarshal %q with bad value %q", fullName, v.String())
|
||||||
|
}
|
||||||
|
|
||||||
value.SetUint(uint64(iValue))
|
value.SetUint(uint64(iValue))
|
||||||
case reflect.Float32, reflect.Float64:
|
case reflect.Float32, reflect.Float64:
|
||||||
fValue, err := v.Float64()
|
fValue, err := v.Float64()
|
||||||
@@ -448,7 +448,15 @@ func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, map
|
|||||||
dereffedBaseType := Deref(baseType)
|
dereffedBaseType := Deref(baseType)
|
||||||
dereffedBaseKind := dereffedBaseType.Kind()
|
dereffedBaseKind := dereffedBaseType.Kind()
|
||||||
refValue := reflect.ValueOf(mapValue)
|
refValue := reflect.ValueOf(mapValue)
|
||||||
|
if refValue.IsNil() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
conv := reflect.MakeSlice(reflect.SliceOf(baseType), refValue.Len(), refValue.Cap())
|
conv := reflect.MakeSlice(reflect.SliceOf(baseType), refValue.Len(), refValue.Cap())
|
||||||
|
if refValue.Len() == 0 {
|
||||||
|
value.Set(conv)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
var valid bool
|
var valid bool
|
||||||
for i := 0; i < refValue.Len(); i++ {
|
for i := 0; i < refValue.Len(); i++ {
|
||||||
@@ -488,10 +496,20 @@ func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, map
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *Unmarshaler) fillSliceFromString(fieldType reflect.Type, value reflect.Value, mapValue interface{}) error {
|
func (u *Unmarshaler) fillSliceFromString(fieldType reflect.Type, value reflect.Value,
|
||||||
|
mapValue interface{}) error {
|
||||||
var slice []interface{}
|
var slice []interface{}
|
||||||
if err := jsonx.UnmarshalFromString(mapValue.(string), &slice); err != nil {
|
switch v := mapValue.(type) {
|
||||||
return err
|
case fmt.Stringer:
|
||||||
|
if err := jsonx.UnmarshalFromString(v.String(), &slice); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case string:
|
||||||
|
if err := jsonx.UnmarshalFromString(v, &slice); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return errUnsupportedType
|
||||||
}
|
}
|
||||||
|
|
||||||
baseFieldType := Deref(fieldType.Elem())
|
baseFieldType := Deref(fieldType.Elem())
|
||||||
@@ -723,10 +741,10 @@ func fillWithSameType(field reflect.StructField, value reflect.Value, mapValue i
|
|||||||
if field.Type.Kind() == reflect.Ptr {
|
if field.Type.Kind() == reflect.Ptr {
|
||||||
baseType := Deref(field.Type)
|
baseType := Deref(field.Type)
|
||||||
target := reflect.New(baseType).Elem()
|
target := reflect.New(baseType).Elem()
|
||||||
target.Set(reflect.ValueOf(mapValue))
|
setSameKindValue(baseType, target, mapValue)
|
||||||
value.Set(target.Addr())
|
value.Set(target.Addr())
|
||||||
} else {
|
} else {
|
||||||
value.Set(reflect.ValueOf(mapValue))
|
setSameKindValue(field.Type, value, mapValue)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -801,3 +819,11 @@ func readKeys(key string) []string {
|
|||||||
|
|
||||||
return keys
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func setSameKindValue(targetType reflect.Type, target reflect.Value, value interface{}) {
|
||||||
|
if reflect.ValueOf(value).Type().AssignableTo(targetType) {
|
||||||
|
target.Set(reflect.ValueOf(value))
|
||||||
|
} else {
|
||||||
|
target.Set(reflect.ValueOf(value).Convert(targetType))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -198,6 +198,49 @@ func TestUnmarshalIntWithDefault(t *testing.T) {
|
|||||||
assert.Equal(t, 1, in.Int)
|
assert.Equal(t, 1, in.Int)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalBoolSliceRequired(t *testing.T) {
|
||||||
|
type inner struct {
|
||||||
|
Bools []bool `key:"bools"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var in inner
|
||||||
|
assert.NotNil(t, UnmarshalKey(map[string]interface{}{}, &in))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalBoolSliceNil(t *testing.T) {
|
||||||
|
type inner struct {
|
||||||
|
Bools []bool `key:"bools,optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var in inner
|
||||||
|
assert.Nil(t, UnmarshalKey(map[string]interface{}{}, &in))
|
||||||
|
assert.Nil(t, in.Bools)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalBoolSliceNilExplicit(t *testing.T) {
|
||||||
|
type inner struct {
|
||||||
|
Bools []bool `key:"bools,optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var in inner
|
||||||
|
assert.Nil(t, UnmarshalKey(map[string]interface{}{
|
||||||
|
"bools": nil,
|
||||||
|
}, &in))
|
||||||
|
assert.Nil(t, in.Bools)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalBoolSliceEmpty(t *testing.T) {
|
||||||
|
type inner struct {
|
||||||
|
Bools []bool `key:"bools,optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var in inner
|
||||||
|
assert.Nil(t, UnmarshalKey(map[string]interface{}{
|
||||||
|
"bools": []bool{},
|
||||||
|
}, &in))
|
||||||
|
assert.Empty(t, in.Bools)
|
||||||
|
}
|
||||||
|
|
||||||
func TestUnmarshalBoolSliceWithDefault(t *testing.T) {
|
func TestUnmarshalBoolSliceWithDefault(t *testing.T) {
|
||||||
type inner struct {
|
type inner struct {
|
||||||
Bools []bool `key:"bools,default=[true,false]"`
|
Bools []bool `key:"bools,default=[true,false]"`
|
||||||
@@ -330,28 +373,34 @@ func TestUnmarshalFloat(t *testing.T) {
|
|||||||
|
|
||||||
func TestUnmarshalInt64Slice(t *testing.T) {
|
func TestUnmarshalInt64Slice(t *testing.T) {
|
||||||
var v struct {
|
var v struct {
|
||||||
Ages []int64 `key:"ages"`
|
Ages []int64 `key:"ages"`
|
||||||
|
Slice []int64 `key:"slice"`
|
||||||
}
|
}
|
||||||
m := map[string]interface{}{
|
m := map[string]interface{}{
|
||||||
"ages": []int64{1, 2},
|
"ages": []int64{1, 2},
|
||||||
|
"slice": []interface{}{},
|
||||||
}
|
}
|
||||||
|
|
||||||
ast := assert.New(t)
|
ast := assert.New(t)
|
||||||
ast.Nil(UnmarshalKey(m, &v))
|
ast.Nil(UnmarshalKey(m, &v))
|
||||||
ast.ElementsMatch([]int64{1, 2}, v.Ages)
|
ast.ElementsMatch([]int64{1, 2}, v.Ages)
|
||||||
|
ast.Equal([]int64{}, v.Slice)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalIntSlice(t *testing.T) {
|
func TestUnmarshalIntSlice(t *testing.T) {
|
||||||
var v struct {
|
var v struct {
|
||||||
Ages []int `key:"ages"`
|
Ages []int `key:"ages"`
|
||||||
|
Slice []int `key:"slice"`
|
||||||
}
|
}
|
||||||
m := map[string]interface{}{
|
m := map[string]interface{}{
|
||||||
"ages": []int{1, 2},
|
"ages": []int{1, 2},
|
||||||
|
"slice": []interface{}{},
|
||||||
}
|
}
|
||||||
|
|
||||||
ast := assert.New(t)
|
ast := assert.New(t)
|
||||||
ast.Nil(UnmarshalKey(m, &v))
|
ast.Nil(UnmarshalKey(m, &v))
|
||||||
ast.ElementsMatch([]int{1, 2}, v.Ages)
|
ast.ElementsMatch([]int{1, 2}, v.Ages)
|
||||||
|
ast.Equal([]int{}, v.Slice)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalString(t *testing.T) {
|
func TestUnmarshalString(t *testing.T) {
|
||||||
@@ -938,6 +987,43 @@ func TestUnmarshalWithStringOptionsCorrect(t *testing.T) {
|
|||||||
ast.Equal("2", in.Correct)
|
ast.Equal("2", in.Correct)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalOptionsOptional(t *testing.T) {
|
||||||
|
type inner struct {
|
||||||
|
Value string `key:"value,options=first|second,optional"`
|
||||||
|
OptionalValue string `key:"optional_value,options=first|second,optional"`
|
||||||
|
Foo string `key:"foo,options=[bar,baz]"`
|
||||||
|
Correct string `key:"correct,options=1|2"`
|
||||||
|
}
|
||||||
|
m := map[string]interface{}{
|
||||||
|
"value": "first",
|
||||||
|
"foo": "bar",
|
||||||
|
"correct": "2",
|
||||||
|
}
|
||||||
|
|
||||||
|
var in inner
|
||||||
|
ast := assert.New(t)
|
||||||
|
ast.Nil(UnmarshalKey(m, &in))
|
||||||
|
ast.Equal("first", in.Value)
|
||||||
|
ast.Equal("", in.OptionalValue)
|
||||||
|
ast.Equal("bar", in.Foo)
|
||||||
|
ast.Equal("2", in.Correct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalOptionsOptionalWrongValue(t *testing.T) {
|
||||||
|
type inner struct {
|
||||||
|
Value string `key:"value,options=first|second,optional"`
|
||||||
|
OptionalValue string `key:"optional_value,options=first|second,optional"`
|
||||||
|
WrongValue string `key:"wrong_value,options=first|second,optional"`
|
||||||
|
}
|
||||||
|
m := map[string]interface{}{
|
||||||
|
"value": "first",
|
||||||
|
"wrong_value": "third",
|
||||||
|
}
|
||||||
|
|
||||||
|
var in inner
|
||||||
|
assert.NotNil(t, UnmarshalKey(m, &in))
|
||||||
|
}
|
||||||
|
|
||||||
func TestUnmarshalStringOptionsWithStringOptionsNotString(t *testing.T) {
|
func TestUnmarshalStringOptionsWithStringOptionsNotString(t *testing.T) {
|
||||||
type inner struct {
|
type inner struct {
|
||||||
Value string `key:"value,options=first|second"`
|
Value string `key:"value,options=first|second"`
|
||||||
@@ -2611,6 +2697,116 @@ func TestUnmarshalJsonWithoutKey(t *testing.T) {
|
|||||||
assert.Equal(t, "2", res.B)
|
assert.Equal(t, "2", res.B)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonUintNegative(t *testing.T) {
|
||||||
|
payload := `{"a": -1}`
|
||||||
|
var res struct {
|
||||||
|
A uint `json:"a"`
|
||||||
|
}
|
||||||
|
reader := strings.NewReader(payload)
|
||||||
|
err := UnmarshalJsonReader(reader, &res)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonDefinedInt(t *testing.T) {
|
||||||
|
type Int int
|
||||||
|
var res struct {
|
||||||
|
A Int `json:"a"`
|
||||||
|
}
|
||||||
|
payload := `{"a": -1}`
|
||||||
|
reader := strings.NewReader(payload)
|
||||||
|
err := UnmarshalJsonReader(reader, &res)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, Int(-1), res.A)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonDefinedString(t *testing.T) {
|
||||||
|
type String string
|
||||||
|
var res struct {
|
||||||
|
A String `json:"a"`
|
||||||
|
}
|
||||||
|
payload := `{"a": "foo"}`
|
||||||
|
reader := strings.NewReader(payload)
|
||||||
|
err := UnmarshalJsonReader(reader, &res)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, String("foo"), res.A)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonDefinedStringPtr(t *testing.T) {
|
||||||
|
type String string
|
||||||
|
var res struct {
|
||||||
|
A *String `json:"a"`
|
||||||
|
}
|
||||||
|
payload := `{"a": "foo"}`
|
||||||
|
reader := strings.NewReader(payload)
|
||||||
|
err := UnmarshalJsonReader(reader, &res)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, String("foo"), *res.A)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonReaderComplex(t *testing.T) {
|
||||||
|
type (
|
||||||
|
MyInt int
|
||||||
|
MyTxt string
|
||||||
|
MyTxtArray []string
|
||||||
|
|
||||||
|
Req struct {
|
||||||
|
MyInt MyInt `json:"my_int"` // int.. ok
|
||||||
|
MyTxtArray MyTxtArray `json:"my_txt_array"`
|
||||||
|
MyTxt MyTxt `json:"my_txt"` // but string is not assignable
|
||||||
|
Int int `json:"int"`
|
||||||
|
Txt string `json:"txt"`
|
||||||
|
}
|
||||||
|
)
|
||||||
|
body := `{
|
||||||
|
"my_int": 100,
|
||||||
|
"my_txt_array": [
|
||||||
|
"a",
|
||||||
|
"b"
|
||||||
|
],
|
||||||
|
"my_txt": "my_txt",
|
||||||
|
"int": 200,
|
||||||
|
"txt": "txt"
|
||||||
|
}`
|
||||||
|
var req Req
|
||||||
|
err := UnmarshalJsonReader(strings.NewReader(body), &req)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, MyInt(100), req.MyInt)
|
||||||
|
assert.Equal(t, MyTxt("my_txt"), req.MyTxt)
|
||||||
|
assert.EqualValues(t, MyTxtArray([]string{"a", "b"}), req.MyTxtArray)
|
||||||
|
assert.Equal(t, 200, req.Int)
|
||||||
|
assert.Equal(t, "txt", req.Txt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonReaderArrayBool(t *testing.T) {
|
||||||
|
payload := `{"id": false}`
|
||||||
|
var res struct {
|
||||||
|
ID []string `json:"id"`
|
||||||
|
}
|
||||||
|
reader := strings.NewReader(payload)
|
||||||
|
err := UnmarshalJsonReader(reader, &res)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonReaderArrayInt(t *testing.T) {
|
||||||
|
payload := `{"id": 123}`
|
||||||
|
var res struct {
|
||||||
|
ID []string `json:"id"`
|
||||||
|
}
|
||||||
|
reader := strings.NewReader(payload)
|
||||||
|
err := UnmarshalJsonReader(reader, &res)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalJsonReaderArrayString(t *testing.T) {
|
||||||
|
payload := `{"id": "123"}`
|
||||||
|
var res struct {
|
||||||
|
ID []string `json:"id"`
|
||||||
|
}
|
||||||
|
reader := strings.NewReader(payload)
|
||||||
|
err := UnmarshalJsonReader(reader, &res)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkDefaultValue(b *testing.B) {
|
func BenchmarkDefaultValue(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
var a struct {
|
var a struct {
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
|
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
@@ -14,7 +13,7 @@ const yamlTagKey = "json"
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
// ErrUnsupportedType is an error that indicates the config format is not supported.
|
// ErrUnsupportedType is an error that indicates the config format is not supported.
|
||||||
ErrUnsupportedType = errors.New("only map-like configs are suported")
|
ErrUnsupportedType = errors.New("only map-like configs are supported")
|
||||||
|
|
||||||
yamlUnmarshaler = NewUnmarshaler(yamlTagKey)
|
yamlUnmarshaler = NewUnmarshaler(yamlTagKey)
|
||||||
)
|
)
|
||||||
@@ -29,39 +28,6 @@ func UnmarshalYamlReader(reader io.Reader, v interface{}) error {
|
|||||||
return unmarshalYamlReader(reader, v, yamlUnmarshaler)
|
return unmarshalYamlReader(reader, v, yamlUnmarshaler)
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmarshalYamlBytes(content []byte, v interface{}, unmarshaler *Unmarshaler) error {
|
|
||||||
var o interface{}
|
|
||||||
if err := yamlUnmarshal(content, &o); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if m, ok := o.(map[string]interface{}); ok {
|
|
||||||
return unmarshaler.Unmarshal(m, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ErrUnsupportedType
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalYamlReader(reader io.Reader, v interface{}, unmarshaler *Unmarshaler) error {
|
|
||||||
content, err := ioutil.ReadAll(reader)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return unmarshalYamlBytes(content, v, unmarshaler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// yamlUnmarshal YAML to map[string]interface{} instead of map[interface{}]interface{}.
|
|
||||||
func yamlUnmarshal(in []byte, out interface{}) error {
|
|
||||||
var res interface{}
|
|
||||||
if err := yaml.Unmarshal(in, &res); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
*out.(*interface{}) = cleanupMapValue(res)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func cleanupInterfaceMap(in map[interface{}]interface{}) map[string]interface{} {
|
func cleanupInterfaceMap(in map[interface{}]interface{}) map[string]interface{} {
|
||||||
res := make(map[string]interface{})
|
res := make(map[string]interface{})
|
||||||
for k, v := range in {
|
for k, v := range in {
|
||||||
@@ -96,3 +62,40 @@ func cleanupMapValue(v interface{}) interface{} {
|
|||||||
return Repr(v)
|
return Repr(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func unmarshal(unmarshaler *Unmarshaler, o interface{}, v interface{}) error {
|
||||||
|
if m, ok := o.(map[string]interface{}); ok {
|
||||||
|
return unmarshaler.Unmarshal(m, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ErrUnsupportedType
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalYamlBytes(content []byte, v interface{}, unmarshaler *Unmarshaler) error {
|
||||||
|
var o interface{}
|
||||||
|
if err := yamlUnmarshal(content, &o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshal(unmarshaler, o, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalYamlReader(reader io.Reader, v interface{}, unmarshaler *Unmarshaler) error {
|
||||||
|
var res interface{}
|
||||||
|
if err := yaml.NewDecoder(reader).Decode(&res); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshal(unmarshaler, cleanupMapValue(res), v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// yamlUnmarshal YAML to map[string]interface{} instead of map[interface{}]interface{}.
|
||||||
|
func yamlUnmarshal(in []byte, out interface{}) error {
|
||||||
|
var res interface{}
|
||||||
|
if err := yaml.Unmarshal(in, &res); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*out.(*interface{}) = cleanupMapValue(res)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -926,14 +926,17 @@ func TestUnmarshalYamlBytesError(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalYamlReaderError(t *testing.T) {
|
func TestUnmarshalYamlReaderError(t *testing.T) {
|
||||||
payload := `abcd: cdef`
|
|
||||||
reader := strings.NewReader(payload)
|
|
||||||
var v struct {
|
var v struct {
|
||||||
Any string
|
Any string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
reader := strings.NewReader(`abcd: cdef`)
|
||||||
err := UnmarshalYamlReader(reader, &v)
|
err := UnmarshalYamlReader(reader, &v)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
|
reader = strings.NewReader("chenquan")
|
||||||
|
err = UnmarshalYamlReader(reader, &v)
|
||||||
|
assert.ErrorIs(t, err, ErrUnsupportedType)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalYamlBadReader(t *testing.T) {
|
func TestUnmarshalYamlBadReader(t *testing.T) {
|
||||||
@@ -1011,6 +1014,6 @@ func TestUnmarshalYamlMapRune(t *testing.T) {
|
|||||||
|
|
||||||
type badReader struct{}
|
type badReader struct{}
|
||||||
|
|
||||||
func (b *badReader) Read(p []byte) (n int, err error) {
|
func (b *badReader) Read(_ []byte) (n int, err error) {
|
||||||
return 0, io.ErrLimitReached
|
return 0, io.ErrLimitReached
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -102,12 +102,12 @@ func ForEach(generate GenerateFunc, mapper ForEachFunc, opts ...Option) {
|
|||||||
options := buildOptions(opts...)
|
options := buildOptions(opts...)
|
||||||
panicChan := &onceChan{channel: make(chan interface{})}
|
panicChan := &onceChan{channel: make(chan interface{})}
|
||||||
source := buildSource(generate, panicChan)
|
source := buildSource(generate, panicChan)
|
||||||
collector := make(chan interface{}, options.workers)
|
collector := make(chan interface{})
|
||||||
done := make(chan lang.PlaceholderType)
|
done := make(chan lang.PlaceholderType)
|
||||||
|
|
||||||
go executeMappers(mapperContext{
|
go executeMappers(mapperContext{
|
||||||
ctx: options.ctx,
|
ctx: options.ctx,
|
||||||
mapper: func(item interface{}, writer Writer) {
|
mapper: func(item interface{}, _ Writer) {
|
||||||
mapper(item)
|
mapper(item)
|
||||||
},
|
},
|
||||||
source: source,
|
source: source,
|
||||||
|
|||||||
@@ -1,16 +1,23 @@
|
|||||||
package proc
|
package proc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDumpGoroutines(t *testing.T) {
|
func TestDumpGoroutines(t *testing.T) {
|
||||||
var buf strings.Builder
|
var buf strings.Builder
|
||||||
log.SetOutput(&buf)
|
w := logx.NewWriter(&buf)
|
||||||
|
o := logx.Reset()
|
||||||
|
logx.SetWriter(w)
|
||||||
|
defer func() {
|
||||||
|
logx.Reset()
|
||||||
|
logx.SetWriter(o)
|
||||||
|
}()
|
||||||
|
|
||||||
dumpGoroutines()
|
dumpGoroutines()
|
||||||
assert.True(t, strings.Contains(buf.String(), ".dump"))
|
assert.True(t, strings.Contains(buf.String(), ".dump"))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,16 +1,24 @@
|
|||||||
package proc
|
package proc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestProfile(t *testing.T) {
|
func TestProfile(t *testing.T) {
|
||||||
var buf strings.Builder
|
var buf strings.Builder
|
||||||
log.SetOutput(&buf)
|
w := logx.NewWriter(&buf)
|
||||||
|
o := logx.Reset()
|
||||||
|
logx.SetWriter(w)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
logx.Reset()
|
||||||
|
logx.SetWriter(o)
|
||||||
|
}()
|
||||||
|
|
||||||
profiler := StartProfile()
|
profiler := StartProfile()
|
||||||
// start again should not work
|
// start again should not work
|
||||||
assert.NotNil(t, StartProfile())
|
assert.NotNil(t, StartProfile())
|
||||||
|
|||||||
@@ -1,3 +1,6 @@
|
|||||||
|
//go:build linux || darwin
|
||||||
|
// +build linux darwin
|
||||||
|
|
||||||
package proc
|
package proc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ const (
|
|||||||
mega = 1024 * 1024
|
mega = 1024 * 1024
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DisplayStats prints the goroutine, memory, GC stats with given interval, default to 5 seconds.
|
||||||
func DisplayStats(interval ...time.Duration) {
|
func DisplayStats(interval ...time.Duration) {
|
||||||
duration := defaultInterval
|
duration := defaultInterval
|
||||||
for _, val := range interval {
|
for _, val := range interval {
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/proc"
|
"github.com/zeromicro/go-zero/core/proc"
|
||||||
"github.com/zeromicro/go-zero/core/sysx"
|
"github.com/zeromicro/go-zero/core/sysx"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -47,7 +46,7 @@ func Report(msg string) {
|
|||||||
if fn != nil {
|
if fn != nil {
|
||||||
reported := lessExecutor.DoOrDiscard(func() {
|
reported := lessExecutor.DoOrDiscard(func() {
|
||||||
var builder strings.Builder
|
var builder strings.Builder
|
||||||
fmt.Fprintf(&builder, "%s\n", timex.Time().Format(timeFormat))
|
fmt.Fprintf(&builder, "%s\n", time.Now().Format(timeFormat))
|
||||||
if len(clusterName) > 0 {
|
if len(clusterName) > 0 {
|
||||||
fmt.Fprintf(&builder, "cluster: %s\n", clusterName)
|
fmt.Fprintf(&builder, "cluster: %s\n", clusterName)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,78 +1,129 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/iox"
|
"github.com/zeromicro/go-zero/core/iox"
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
const cgroupDir = "/sys/fs/cgroup"
|
const (
|
||||||
|
cgroupDir = "/sys/fs/cgroup"
|
||||||
|
cpuStatFile = cgroupDir + "/cpu.stat"
|
||||||
|
cpusetFile = cgroupDir + "/cpuset.cpus.effective"
|
||||||
|
)
|
||||||
|
|
||||||
type cgroup struct {
|
var (
|
||||||
|
isUnifiedOnce sync.Once
|
||||||
|
isUnified bool
|
||||||
|
inUserNS bool
|
||||||
|
nsOnce sync.Once
|
||||||
|
)
|
||||||
|
|
||||||
|
type cgroup interface {
|
||||||
|
cpuQuotaUs() (int64, error)
|
||||||
|
cpuPeriodUs() (uint64, error)
|
||||||
|
cpus() ([]uint64, error)
|
||||||
|
usageAllCpus() (uint64, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func currentCgroup() (cgroup, error) {
|
||||||
|
if isCgroup2UnifiedMode() {
|
||||||
|
return currentCgroupV2()
|
||||||
|
}
|
||||||
|
|
||||||
|
return currentCgroupV1()
|
||||||
|
}
|
||||||
|
|
||||||
|
type cgroupV1 struct {
|
||||||
cgroups map[string]string
|
cgroups map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cgroup) acctUsageAllCpus() (uint64, error) {
|
func (c *cgroupV1) cpuQuotaUs() (int64, error) {
|
||||||
data, err := iox.ReadText(path.Join(c.cgroups["cpuacct"], "cpuacct.usage"))
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return parseUint(string(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cgroup) acctUsagePerCpu() ([]uint64, error) {
|
|
||||||
data, err := iox.ReadText(path.Join(c.cgroups["cpuacct"], "cpuacct.usage_percpu"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var usage []uint64
|
|
||||||
for _, v := range strings.Fields(string(data)) {
|
|
||||||
u, err := parseUint(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
usage = append(usage, u)
|
|
||||||
}
|
|
||||||
|
|
||||||
return usage, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cgroup) cpuQuotaUs() (int64, error) {
|
|
||||||
data, err := iox.ReadText(path.Join(c.cgroups["cpu"], "cpu.cfs_quota_us"))
|
data, err := iox.ReadText(path.Join(c.cgroups["cpu"], "cpu.cfs_quota_us"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return strconv.ParseInt(string(data), 10, 64)
|
return strconv.ParseInt(data, 10, 64)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cgroup) cpuPeriodUs() (uint64, error) {
|
func (c *cgroupV1) cpuPeriodUs() (uint64, error) {
|
||||||
data, err := iox.ReadText(path.Join(c.cgroups["cpu"], "cpu.cfs_period_us"))
|
data, err := iox.ReadText(path.Join(c.cgroups["cpu"], "cpu.cfs_period_us"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return parseUint(string(data))
|
return parseUint(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cgroup) cpus() ([]uint64, error) {
|
func (c *cgroupV1) cpus() ([]uint64, error) {
|
||||||
data, err := iox.ReadText(path.Join(c.cgroups["cpuset"], "cpuset.cpus"))
|
data, err := iox.ReadText(path.Join(c.cgroups["cpuset"], "cpuset.cpus"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return parseUints(string(data))
|
return parseUints(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func currentCgroup() (*cgroup, error) {
|
func (c *cgroupV1) usageAllCpus() (uint64, error) {
|
||||||
|
data, err := iox.ReadText(path.Join(c.cgroups["cpuacct"], "cpuacct.usage"))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return parseUint(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
type cgroupV2 struct {
|
||||||
|
cgroups map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cgroupV2) cpuQuotaUs() (int64, error) {
|
||||||
|
data, err := iox.ReadText(path.Join(cgroupDir, "cpu.cfs_quota_us"))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return strconv.ParseInt(data, 10, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cgroupV2) cpuPeriodUs() (uint64, error) {
|
||||||
|
data, err := iox.ReadText(path.Join(cgroupDir, "cpu.cfs_period_us"))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return parseUint(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cgroupV2) cpus() ([]uint64, error) {
|
||||||
|
data, err := iox.ReadText(cpusetFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return parseUints(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cgroupV2) usageAllCpus() (uint64, error) {
|
||||||
|
usec, err := parseUint(c.cgroups["usage_usec"])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return usec * uint64(time.Microsecond), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func currentCgroupV1() (cgroup, error) {
|
||||||
cgroupFile := fmt.Sprintf("/proc/%d/cgroup", os.Getpid())
|
cgroupFile := fmt.Sprintf("/proc/%d/cgroup", os.Getpid())
|
||||||
lines, err := iox.ReadTextLines(cgroupFile, iox.WithoutBlank())
|
lines, err := iox.ReadTextLines(cgroupFile, iox.WithoutBlank())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -100,11 +151,51 @@ func currentCgroup() (*cgroup, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &cgroup{
|
return &cgroupV1{
|
||||||
cgroups: cgroups,
|
cgroups: cgroups,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func currentCgroupV2() (cgroup, error) {
|
||||||
|
lines, err := iox.ReadTextLines(cpuStatFile, iox.WithoutBlank())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cgroups := make(map[string]string)
|
||||||
|
for _, line := range lines {
|
||||||
|
cols := strings.Fields(line)
|
||||||
|
if len(cols) != 2 {
|
||||||
|
return nil, fmt.Errorf("invalid cgroupV2 line: %s", line)
|
||||||
|
}
|
||||||
|
|
||||||
|
cgroups[cols[0]] = cols[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cgroupV2{
|
||||||
|
cgroups: cgroups,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
|
||||||
|
func isCgroup2UnifiedMode() bool {
|
||||||
|
isUnifiedOnce.Do(func() {
|
||||||
|
var st unix.Statfs_t
|
||||||
|
err := unix.Statfs(cgroupDir, &st)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) && runningInUserNS() {
|
||||||
|
// ignore the "not found" error if running in userns
|
||||||
|
isUnified = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
panic(fmt.Sprintf("cannot statfs cgroup root: %s", err))
|
||||||
|
}
|
||||||
|
isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC
|
||||||
|
})
|
||||||
|
|
||||||
|
return isUnified
|
||||||
|
}
|
||||||
|
|
||||||
func parseUint(s string) (uint64, error) {
|
func parseUint(s string) (uint64, error) {
|
||||||
v, err := strconv.ParseInt(s, 10, 64)
|
v, err := strconv.ParseInt(s, 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -166,3 +257,36 @@ func parseUints(val string) ([]uint64, error) {
|
|||||||
|
|
||||||
return sets, nil
|
return sets, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// runningInUserNS detects whether we are currently running in a user namespace.
|
||||||
|
func runningInUserNS() bool {
|
||||||
|
nsOnce.Do(func() {
|
||||||
|
file, err := os.Open("/proc/self/uid_map")
|
||||||
|
if err != nil {
|
||||||
|
// This kernel-provided file only exists if user namespaces are supported
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
buf := bufio.NewReader(file)
|
||||||
|
l, _, err := buf.ReadLine()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
line := string(l)
|
||||||
|
var a, b, c int64
|
||||||
|
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We assume we are in the initial user namespace if we have a full
|
||||||
|
* range - 4294967295 uids starting at uid 0.
|
||||||
|
*/
|
||||||
|
if a == 0 && b == 0 && c == 4294967295 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
inUserNS = true
|
||||||
|
})
|
||||||
|
|
||||||
|
return inUserNS
|
||||||
|
}
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ var (
|
|||||||
|
|
||||||
// if /proc not present, ignore the cpu calculation, like wsl linux
|
// if /proc not present, ignore the cpu calculation, like wsl linux
|
||||||
func init() {
|
func init() {
|
||||||
cpus, err := perCpuUsage()
|
cpus, err := cpuSets()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logx.Error(err)
|
logx.Error(err)
|
||||||
return
|
return
|
||||||
@@ -117,15 +117,6 @@ func cpuSets() ([]uint64, error) {
|
|||||||
return cg.cpus()
|
return cg.cpus()
|
||||||
}
|
}
|
||||||
|
|
||||||
func perCpuUsage() ([]uint64, error) {
|
|
||||||
cg, err := currentCgroup()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cg.acctUsagePerCpu()
|
|
||||||
}
|
|
||||||
|
|
||||||
func systemCpuUsage() (uint64, error) {
|
func systemCpuUsage() (uint64, error) {
|
||||||
lines, err := iox.ReadTextLines("/proc/stat", iox.WithoutBlank())
|
lines, err := iox.ReadTextLines("/proc/stat", iox.WithoutBlank())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -157,10 +148,10 @@ func systemCpuUsage() (uint64, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func totalCpuUsage() (usage uint64, err error) {
|
func totalCpuUsage() (usage uint64, err error) {
|
||||||
var cg *cgroup
|
var cg cgroup
|
||||||
if cg, err = currentCgroup(); err != nil {
|
if cg, err = currentCgroup(); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
return cg.acctUsageAllCpus()
|
return cg.usageAllCpus()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,7 +10,10 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
)
|
)
|
||||||
|
|
||||||
const httpTimeout = time.Second * 5
|
const (
|
||||||
|
httpTimeout = time.Second * 5
|
||||||
|
jsonContentType = "application/json; charset=utf-8"
|
||||||
|
)
|
||||||
|
|
||||||
// ErrWriteFailed is an error that indicates failed to submit a StatReport.
|
// ErrWriteFailed is an error that indicates failed to submit a StatReport.
|
||||||
var ErrWriteFailed = errors.New("submit failed")
|
var ErrWriteFailed = errors.New("submit failed")
|
||||||
@@ -36,7 +39,7 @@ func (rw *RemoteWriter) Write(report *StatReport) error {
|
|||||||
client := &http.Client{
|
client := &http.Client{
|
||||||
Timeout: httpTimeout,
|
Timeout: httpTimeout,
|
||||||
}
|
}
|
||||||
resp, err := client.Post(rw.endpoint, "application/json", bytes.NewBuffer(bs))
|
resp, err := client.Post(rw.endpoint, jsonContentType, bytes.NewReader(bs))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -30,18 +30,32 @@ func RawFieldNames(in interface{}, postgresSql ...bool) []string {
|
|||||||
for i := 0; i < v.NumField(); i++ {
|
for i := 0; i < v.NumField(); i++ {
|
||||||
// gets us a StructField
|
// gets us a StructField
|
||||||
fi := typ.Field(i)
|
fi := typ.Field(i)
|
||||||
if tagv := fi.Tag.Get(dbTag); tagv != "" {
|
tagv := fi.Tag.Get(dbTag)
|
||||||
if pg {
|
switch tagv {
|
||||||
out = append(out, tagv)
|
case "-":
|
||||||
} else {
|
continue
|
||||||
out = append(out, fmt.Sprintf("`%s`", tagv))
|
case "":
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if pg {
|
if pg {
|
||||||
out = append(out, fi.Name)
|
out = append(out, fi.Name)
|
||||||
} else {
|
} else {
|
||||||
out = append(out, fmt.Sprintf("`%s`", fi.Name))
|
out = append(out, fmt.Sprintf("`%s`", fi.Name))
|
||||||
}
|
}
|
||||||
|
default:
|
||||||
|
// get tag name with the tag opton, e.g.:
|
||||||
|
// `db:"id"`
|
||||||
|
// `db:"id,type=char,length=16"`
|
||||||
|
// `db:",type=char,length=16"`
|
||||||
|
if strings.Contains(tagv, ",") {
|
||||||
|
tagv = strings.TrimSpace(strings.Split(tagv, ",")[0])
|
||||||
|
}
|
||||||
|
if len(tagv) == 0 {
|
||||||
|
tagv = fi.Name
|
||||||
|
}
|
||||||
|
if pg {
|
||||||
|
out = append(out, tagv)
|
||||||
|
} else {
|
||||||
|
out = append(out, fmt.Sprintf("`%s`", tagv))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -22,3 +22,20 @@ func TestFieldNames(t *testing.T) {
|
|||||||
assert.Equal(t, expected, out)
|
assert.Equal(t, expected, out)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type mockedUserWithOptions struct {
|
||||||
|
ID string `db:"id" json:"id,omitempty"`
|
||||||
|
UserName string `db:"user_name,type=varchar,length=255" json:"userName,omitempty"`
|
||||||
|
Sex int `db:"sex" json:"sex,omitempty"`
|
||||||
|
UUID string `db:",type=varchar,length=16" uuid:"uuid,omitempty"`
|
||||||
|
Age int `db:"age" json:"age"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFieldNamesWithTagOptions(t *testing.T) {
|
||||||
|
t.Run("new", func(t *testing.T) {
|
||||||
|
var u mockedUserWithOptions
|
||||||
|
out := RawFieldNames(&u)
|
||||||
|
expected := []string{"`id`", "`user_name`", "`sex`", "`UUID`", "`age`"}
|
||||||
|
assert.Equal(t, expected, out)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
104
core/stores/cache/cache.go
vendored
104
core/stores/cache/cache.go
vendored
@@ -1,6 +1,8 @@
|
|||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"time"
|
"time"
|
||||||
@@ -13,13 +15,37 @@ import (
|
|||||||
type (
|
type (
|
||||||
// Cache interface is used to define the cache implementation.
|
// Cache interface is used to define the cache implementation.
|
||||||
Cache interface {
|
Cache interface {
|
||||||
|
// Del deletes cached values with keys.
|
||||||
Del(keys ...string) error
|
Del(keys ...string) error
|
||||||
Get(key string, v interface{}) error
|
// DelCtx deletes cached values with keys.
|
||||||
|
DelCtx(ctx context.Context, keys ...string) error
|
||||||
|
// Get gets the cache with key and fills into v.
|
||||||
|
Get(key string, val interface{}) error
|
||||||
|
// GetCtx gets the cache with key and fills into v.
|
||||||
|
GetCtx(ctx context.Context, key string, val interface{}) error
|
||||||
|
// IsNotFound checks if the given error is the defined errNotFound.
|
||||||
IsNotFound(err error) bool
|
IsNotFound(err error) bool
|
||||||
Set(key string, v interface{}) error
|
// Set sets the cache with key and v, using c.expiry.
|
||||||
SetWithExpire(key string, v interface{}, expire time.Duration) error
|
Set(key string, val interface{}) error
|
||||||
Take(v interface{}, key string, query func(v interface{}) error) error
|
// SetCtx sets the cache with key and v, using c.expiry.
|
||||||
TakeWithExpire(v interface{}, key string, query func(v interface{}, expire time.Duration) error) error
|
SetCtx(ctx context.Context, key string, val interface{}) error
|
||||||
|
// SetWithExpire sets the cache with key and v, using given expire.
|
||||||
|
SetWithExpire(key string, val interface{}, expire time.Duration) error
|
||||||
|
// SetWithExpireCtx sets the cache with key and v, using given expire.
|
||||||
|
SetWithExpireCtx(ctx context.Context, key string, val interface{}, expire time.Duration) error
|
||||||
|
// Take takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using c.expiry, then return the result.
|
||||||
|
Take(val interface{}, key string, query func(val interface{}) error) error
|
||||||
|
// TakeCtx takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using c.expiry, then return the result.
|
||||||
|
TakeCtx(ctx context.Context, val interface{}, key string, query func(val interface{}) error) error
|
||||||
|
// TakeWithExpire takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using given expire, then return the result.
|
||||||
|
TakeWithExpire(val interface{}, key string, query func(val interface{}, expire time.Duration) error) error
|
||||||
|
// TakeWithExpireCtx takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using given expire, then return the result.
|
||||||
|
TakeWithExpireCtx(ctx context.Context, val interface{}, key string,
|
||||||
|
query func(val interface{}, expire time.Duration) error) error
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheCluster struct {
|
cacheCluster struct {
|
||||||
@@ -51,7 +77,13 @@ func New(c ClusterConf, barrier syncx.SingleFlight, st *Stat, errNotFound error,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Del deletes cached values with keys.
|
||||||
func (cc cacheCluster) Del(keys ...string) error {
|
func (cc cacheCluster) Del(keys ...string) error {
|
||||||
|
return cc.DelCtx(context.Background(), keys...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelCtx deletes cached values with keys.
|
||||||
|
func (cc cacheCluster) DelCtx(ctx context.Context, keys ...string) error {
|
||||||
switch len(keys) {
|
switch len(keys) {
|
||||||
case 0:
|
case 0:
|
||||||
return nil
|
return nil
|
||||||
@@ -62,7 +94,7 @@ func (cc cacheCluster) Del(keys ...string) error {
|
|||||||
return cc.errNotFound
|
return cc.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.(Cache).Del(key)
|
return c.(Cache).DelCtx(ctx, key)
|
||||||
default:
|
default:
|
||||||
var be errorx.BatchError
|
var be errorx.BatchError
|
||||||
nodes := make(map[interface{}][]string)
|
nodes := make(map[interface{}][]string)
|
||||||
@@ -76,7 +108,7 @@ func (cc cacheCluster) Del(keys ...string) error {
|
|||||||
nodes[c] = append(nodes[c], key)
|
nodes[c] = append(nodes[c], key)
|
||||||
}
|
}
|
||||||
for c, ks := range nodes {
|
for c, ks := range nodes {
|
||||||
if err := c.(Cache).Del(ks...); err != nil {
|
if err := c.(Cache).DelCtx(ctx, ks...); err != nil {
|
||||||
be.Add(err)
|
be.Add(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -85,52 +117,86 @@ func (cc cacheCluster) Del(keys ...string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc cacheCluster) Get(key string, v interface{}) error {
|
// Get gets the cache with key and fills into v.
|
||||||
|
func (cc cacheCluster) Get(key string, val interface{}) error {
|
||||||
|
return cc.GetCtx(context.Background(), key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCtx gets the cache with key and fills into v.
|
||||||
|
func (cc cacheCluster) GetCtx(ctx context.Context, key string, val interface{}) error {
|
||||||
c, ok := cc.dispatcher.Get(key)
|
c, ok := cc.dispatcher.Get(key)
|
||||||
if !ok {
|
if !ok {
|
||||||
return cc.errNotFound
|
return cc.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.(Cache).Get(key, v)
|
return c.(Cache).GetCtx(ctx, key, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsNotFound checks if the given error is the defined errNotFound.
|
||||||
func (cc cacheCluster) IsNotFound(err error) bool {
|
func (cc cacheCluster) IsNotFound(err error) bool {
|
||||||
return err == cc.errNotFound
|
return errors.Is(err, cc.errNotFound)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc cacheCluster) Set(key string, v interface{}) error {
|
// Set sets the cache with key and v, using c.expiry.
|
||||||
|
func (cc cacheCluster) Set(key string, val interface{}) error {
|
||||||
|
return cc.SetCtx(context.Background(), key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCtx sets the cache with key and v, using c.expiry.
|
||||||
|
func (cc cacheCluster) SetCtx(ctx context.Context, key string, val interface{}) error {
|
||||||
c, ok := cc.dispatcher.Get(key)
|
c, ok := cc.dispatcher.Get(key)
|
||||||
if !ok {
|
if !ok {
|
||||||
return cc.errNotFound
|
return cc.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.(Cache).Set(key, v)
|
return c.(Cache).SetCtx(ctx, key, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc cacheCluster) SetWithExpire(key string, v interface{}, expire time.Duration) error {
|
// SetWithExpire sets the cache with key and v, using given expire.
|
||||||
|
func (cc cacheCluster) SetWithExpire(key string, val interface{}, expire time.Duration) error {
|
||||||
|
return cc.SetWithExpireCtx(context.Background(), key, val, expire)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWithExpireCtx sets the cache with key and v, using given expire.
|
||||||
|
func (cc cacheCluster) SetWithExpireCtx(ctx context.Context, key string, val interface{}, expire time.Duration) error {
|
||||||
c, ok := cc.dispatcher.Get(key)
|
c, ok := cc.dispatcher.Get(key)
|
||||||
if !ok {
|
if !ok {
|
||||||
return cc.errNotFound
|
return cc.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.(Cache).SetWithExpire(key, v, expire)
|
return c.(Cache).SetWithExpireCtx(ctx, key, val, expire)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc cacheCluster) Take(v interface{}, key string, query func(v interface{}) error) error {
|
// Take takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using c.expiry, then return the result.
|
||||||
|
func (cc cacheCluster) Take(val interface{}, key string, query func(val interface{}) error) error {
|
||||||
|
return cc.TakeCtx(context.Background(), val, key, query)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TakeCtx takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using c.expiry, then return the result.
|
||||||
|
func (cc cacheCluster) TakeCtx(ctx context.Context, val interface{}, key string, query func(val interface{}) error) error {
|
||||||
c, ok := cc.dispatcher.Get(key)
|
c, ok := cc.dispatcher.Get(key)
|
||||||
if !ok {
|
if !ok {
|
||||||
return cc.errNotFound
|
return cc.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.(Cache).Take(v, key, query)
|
return c.(Cache).TakeCtx(ctx, val, key, query)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc cacheCluster) TakeWithExpire(v interface{}, key string,
|
// TakeWithExpire takes the result from cache first, if not found,
|
||||||
query func(v interface{}, expire time.Duration) error) error {
|
// query from DB and set cache using given expire, then return the result.
|
||||||
|
func (cc cacheCluster) TakeWithExpire(val interface{}, key string, query func(val interface{}, expire time.Duration) error) error {
|
||||||
|
return cc.TakeWithExpireCtx(context.Background(), val, key, query)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TakeWithExpireCtx takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using given expire, then return the result.
|
||||||
|
func (cc cacheCluster) TakeWithExpireCtx(ctx context.Context, val interface{}, key string, query func(val interface{}, expire time.Duration) error) error {
|
||||||
c, ok := cc.dispatcher.Get(key)
|
c, ok := cc.dispatcher.Get(key)
|
||||||
if !ok {
|
if !ok {
|
||||||
return cc.errNotFound
|
return cc.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.(Cache).TakeWithExpire(v, key, query)
|
return c.(Cache).TakeWithExpireCtx(ctx, val, key, query)
|
||||||
}
|
}
|
||||||
|
|||||||
102
core/stores/cache/cache_test.go
vendored
102
core/stores/cache/cache_test.go
vendored
@@ -1,7 +1,9 @@
|
|||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -16,12 +18,18 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/syncx"
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var _ Cache = (*mockedNode)(nil)
|
||||||
|
|
||||||
type mockedNode struct {
|
type mockedNode struct {
|
||||||
vals map[string][]byte
|
vals map[string][]byte
|
||||||
errNotFound error
|
errNotFound error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *mockedNode) Del(keys ...string) error {
|
func (mc *mockedNode) Del(keys ...string) error {
|
||||||
|
return mc.DelCtx(context.Background(), keys...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mc *mockedNode) DelCtx(_ context.Context, keys ...string) error {
|
||||||
var be errorx.BatchError
|
var be errorx.BatchError
|
||||||
|
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
@@ -35,21 +43,29 @@ func (mc *mockedNode) Del(keys ...string) error {
|
|||||||
return be.Err()
|
return be.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *mockedNode) Get(key string, v interface{}) error {
|
func (mc *mockedNode) Get(key string, val interface{}) error {
|
||||||
|
return mc.GetCtx(context.Background(), key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mc *mockedNode) GetCtx(ctx context.Context, key string, val interface{}) error {
|
||||||
bs, ok := mc.vals[key]
|
bs, ok := mc.vals[key]
|
||||||
if ok {
|
if ok {
|
||||||
return json.Unmarshal(bs, v)
|
return json.Unmarshal(bs, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
return mc.errNotFound
|
return mc.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *mockedNode) IsNotFound(err error) bool {
|
func (mc *mockedNode) IsNotFound(err error) bool {
|
||||||
return err == mc.errNotFound
|
return errors.Is(err, mc.errNotFound)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *mockedNode) Set(key string, v interface{}) error {
|
func (mc *mockedNode) Set(key string, val interface{}) error {
|
||||||
data, err := json.Marshal(v)
|
return mc.SetCtx(context.Background(), key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mc *mockedNode) SetCtx(ctx context.Context, key string, val interface{}) error {
|
||||||
|
data, err := json.Marshal(val)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -58,25 +74,37 @@ func (mc *mockedNode) Set(key string, v interface{}) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *mockedNode) SetWithExpire(key string, v interface{}, expire time.Duration) error {
|
func (mc *mockedNode) SetWithExpire(key string, val interface{}, expire time.Duration) error {
|
||||||
return mc.Set(key, v)
|
return mc.SetWithExpireCtx(context.Background(), key, val, expire)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *mockedNode) Take(v interface{}, key string, query func(v interface{}) error) error {
|
func (mc *mockedNode) SetWithExpireCtx(ctx context.Context, key string, val interface{}, expire time.Duration) error {
|
||||||
|
return mc.Set(key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mc *mockedNode) Take(val interface{}, key string, query func(val interface{}) error) error {
|
||||||
|
return mc.TakeCtx(context.Background(), val, key, query)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mc *mockedNode) TakeCtx(ctx context.Context, val interface{}, key string, query func(val interface{}) error) error {
|
||||||
if _, ok := mc.vals[key]; ok {
|
if _, ok := mc.vals[key]; ok {
|
||||||
return mc.Get(key, v)
|
return mc.GetCtx(ctx, key, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := query(v); err != nil {
|
if err := query(val); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return mc.Set(key, v)
|
return mc.SetCtx(ctx, key, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *mockedNode) TakeWithExpire(v interface{}, key string, query func(v interface{}, expire time.Duration) error) error {
|
func (mc *mockedNode) TakeWithExpire(val interface{}, key string, query func(val interface{}, expire time.Duration) error) error {
|
||||||
return mc.Take(v, key, func(v interface{}) error {
|
return mc.TakeWithExpireCtx(context.Background(), val, key, query)
|
||||||
return query(v, 0)
|
}
|
||||||
|
|
||||||
|
func (mc *mockedNode) TakeWithExpireCtx(ctx context.Context, val interface{}, key string, query func(val interface{}, expire time.Duration) error) error {
|
||||||
|
return mc.Take(val, key, func(val interface{}) error {
|
||||||
|
return query(val, 0)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -113,18 +141,18 @@ func TestCache_SetDel(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
var v int
|
var val int
|
||||||
assert.Nil(t, c.Get(fmt.Sprintf("key/%d", i), &v))
|
assert.Nil(t, c.Get(fmt.Sprintf("key/%d", i), &val))
|
||||||
assert.Equal(t, i, v)
|
assert.Equal(t, i, val)
|
||||||
}
|
}
|
||||||
assert.Nil(t, c.Del())
|
assert.Nil(t, c.Del())
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
assert.Nil(t, c.Del(fmt.Sprintf("key/%d", i)))
|
assert.Nil(t, c.Del(fmt.Sprintf("key/%d", i)))
|
||||||
}
|
}
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
var v int
|
var val int
|
||||||
assert.True(t, c.IsNotFound(c.Get(fmt.Sprintf("key/%d", i), &v)))
|
assert.True(t, c.IsNotFound(c.Get(fmt.Sprintf("key/%d", i), &val)))
|
||||||
assert.Equal(t, 0, v)
|
assert.Equal(t, 0, val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,18 +179,18 @@ func TestCache_OneNode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
var v int
|
var val int
|
||||||
assert.Nil(t, c.Get(fmt.Sprintf("key/%d", i), &v))
|
assert.Nil(t, c.Get(fmt.Sprintf("key/%d", i), &val))
|
||||||
assert.Equal(t, i, v)
|
assert.Equal(t, i, val)
|
||||||
}
|
}
|
||||||
assert.Nil(t, c.Del())
|
assert.Nil(t, c.Del())
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
assert.Nil(t, c.Del(fmt.Sprintf("key/%d", i)))
|
assert.Nil(t, c.Del(fmt.Sprintf("key/%d", i)))
|
||||||
}
|
}
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
var v int
|
var val int
|
||||||
assert.True(t, c.IsNotFound(c.Get(fmt.Sprintf("key/%d", i), &v)))
|
assert.True(t, c.IsNotFound(c.Get(fmt.Sprintf("key/%d", i), &val)))
|
||||||
assert.Equal(t, 0, v)
|
assert.Equal(t, 0, val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -202,9 +230,9 @@ func TestCache_Balance(t *testing.T) {
|
|||||||
assert.True(t, entropy > .95, fmt.Sprintf("entropy should be greater than 0.95, but got %.2f", entropy))
|
assert.True(t, entropy > .95, fmt.Sprintf("entropy should be greater than 0.95, but got %.2f", entropy))
|
||||||
|
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
var v int
|
var val int
|
||||||
assert.Nil(t, c.Get(strconv.Itoa(i), &v))
|
assert.Nil(t, c.Get(strconv.Itoa(i), &val))
|
||||||
assert.Equal(t, i, v)
|
assert.Equal(t, i, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < total/10; i++ {
|
for i := 0; i < total/10; i++ {
|
||||||
@@ -216,14 +244,14 @@ func TestCache_Balance(t *testing.T) {
|
|||||||
for i := 0; i < total/10; i++ {
|
for i := 0; i < total/10; i++ {
|
||||||
var val int
|
var val int
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
assert.Nil(t, c.Take(&val, strconv.Itoa(i*10), func(v interface{}) error {
|
assert.Nil(t, c.Take(&val, strconv.Itoa(i*10), func(val interface{}) error {
|
||||||
*v.(*int) = i
|
*val.(*int) = i
|
||||||
count++
|
count++
|
||||||
return nil
|
return nil
|
||||||
}))
|
}))
|
||||||
} else {
|
} else {
|
||||||
assert.Nil(t, c.TakeWithExpire(&val, strconv.Itoa(i*10), func(v interface{}, expire time.Duration) error {
|
assert.Nil(t, c.TakeWithExpire(&val, strconv.Itoa(i*10), func(val interface{}, expire time.Duration) error {
|
||||||
*v.(*int) = i
|
*val.(*int) = i
|
||||||
count++
|
count++
|
||||||
return nil
|
return nil
|
||||||
}))
|
}))
|
||||||
@@ -244,10 +272,10 @@ func TestCacheNoNode(t *testing.T) {
|
|||||||
assert.NotNil(t, c.Get("foo", nil))
|
assert.NotNil(t, c.Get("foo", nil))
|
||||||
assert.NotNil(t, c.Set("foo", nil))
|
assert.NotNil(t, c.Set("foo", nil))
|
||||||
assert.NotNil(t, c.SetWithExpire("foo", nil, time.Second))
|
assert.NotNil(t, c.SetWithExpire("foo", nil, time.Second))
|
||||||
assert.NotNil(t, c.Take(nil, "foo", func(v interface{}) error {
|
assert.NotNil(t, c.Take(nil, "foo", func(val interface{}) error {
|
||||||
return nil
|
return nil
|
||||||
}))
|
}))
|
||||||
assert.NotNil(t, c.TakeWithExpire(nil, "foo", func(v interface{}, duration time.Duration) error {
|
assert.NotNil(t, c.TakeWithExpire(nil, "foo", func(val interface{}, duration time.Duration) error {
|
||||||
return nil
|
return nil
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
@@ -255,8 +283,8 @@ func TestCacheNoNode(t *testing.T) {
|
|||||||
func calcEntropy(m map[int]int, total int) float64 {
|
func calcEntropy(m map[int]int, total int) float64 {
|
||||||
var entropy float64
|
var entropy float64
|
||||||
|
|
||||||
for _, v := range m {
|
for _, val := range m {
|
||||||
proba := float64(v) / float64(total)
|
proba := float64(val) / float64(total)
|
||||||
entropy -= proba * math.Log2(proba)
|
entropy -= proba * math.Log2(proba)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
115
core/stores/cache/cachenode.go
vendored
115
core/stores/cache/cachenode.go
vendored
@@ -1,6 +1,7 @@
|
|||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
@@ -61,30 +62,39 @@ func NewNode(rds *redis.Redis, barrier syncx.SingleFlight, st *Stat,
|
|||||||
|
|
||||||
// Del deletes cached values with keys.
|
// Del deletes cached values with keys.
|
||||||
func (c cacheNode) Del(keys ...string) error {
|
func (c cacheNode) Del(keys ...string) error {
|
||||||
|
return c.DelCtx(context.Background(), keys...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelCtx deletes cached values with keys.
|
||||||
|
func (c cacheNode) DelCtx(ctx context.Context, keys ...string) error {
|
||||||
if len(keys) == 0 {
|
if len(keys) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger := logx.WithContext(ctx)
|
||||||
if len(keys) > 1 && c.rds.Type == redis.ClusterType {
|
if len(keys) > 1 && c.rds.Type == redis.ClusterType {
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
if _, err := c.rds.Del(key); err != nil {
|
if _, err := c.rds.DelCtx(ctx, key); err != nil {
|
||||||
logx.Errorf("failed to clear cache with key: %q, error: %v", key, err)
|
logger.Errorf("failed to clear cache with key: %q, error: %v", key, err)
|
||||||
c.asyncRetryDelCache(key)
|
c.asyncRetryDelCache(key)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else if _, err := c.rds.DelCtx(ctx, keys...); err != nil {
|
||||||
if _, err := c.rds.Del(keys...); err != nil {
|
logger.Errorf("failed to clear cache with keys: %q, error: %v", formatKeys(keys), err)
|
||||||
logx.Errorf("failed to clear cache with keys: %q, error: %v", formatKeys(keys), err)
|
c.asyncRetryDelCache(keys...)
|
||||||
c.asyncRetryDelCache(keys...)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get gets the cache with key and fills into v.
|
// Get gets the cache with key and fills into v.
|
||||||
func (c cacheNode) Get(key string, v interface{}) error {
|
func (c cacheNode) Get(key string, val interface{}) error {
|
||||||
err := c.doGetCache(key, v)
|
return c.GetCtx(context.Background(), key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCtx gets the cache with key and fills into v.
|
||||||
|
func (c cacheNode) GetCtx(ctx context.Context, key string, val interface{}) error {
|
||||||
|
err := c.doGetCache(ctx, key, val)
|
||||||
if err == errPlaceholder {
|
if err == errPlaceholder {
|
||||||
return c.errNotFound
|
return c.errNotFound
|
||||||
}
|
}
|
||||||
@@ -94,22 +104,33 @@ func (c cacheNode) Get(key string, v interface{}) error {
|
|||||||
|
|
||||||
// IsNotFound checks if the given error is the defined errNotFound.
|
// IsNotFound checks if the given error is the defined errNotFound.
|
||||||
func (c cacheNode) IsNotFound(err error) bool {
|
func (c cacheNode) IsNotFound(err error) bool {
|
||||||
return err == c.errNotFound
|
return errors.Is(err, c.errNotFound)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set sets the cache with key and v, using c.expiry.
|
// Set sets the cache with key and v, using c.expiry.
|
||||||
func (c cacheNode) Set(key string, v interface{}) error {
|
func (c cacheNode) Set(key string, val interface{}) error {
|
||||||
return c.SetWithExpire(key, v, c.aroundDuration(c.expiry))
|
return c.SetCtx(context.Background(), key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCtx sets the cache with key and v, using c.expiry.
|
||||||
|
func (c cacheNode) SetCtx(ctx context.Context, key string, val interface{}) error {
|
||||||
|
return c.SetWithExpireCtx(ctx, key, val, c.aroundDuration(c.expiry))
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetWithExpire sets the cache with key and v, using given expire.
|
// SetWithExpire sets the cache with key and v, using given expire.
|
||||||
func (c cacheNode) SetWithExpire(key string, v interface{}, expire time.Duration) error {
|
func (c cacheNode) SetWithExpire(key string, val interface{}, expire time.Duration) error {
|
||||||
data, err := jsonx.Marshal(v)
|
return c.SetWithExpireCtx(context.Background(), key, val, expire)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWithExpireCtx sets the cache with key and v, using given expire.
|
||||||
|
func (c cacheNode) SetWithExpireCtx(ctx context.Context, key string, val interface{},
|
||||||
|
expire time.Duration) error {
|
||||||
|
data, err := jsonx.Marshal(val)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.rds.Setex(key, string(data), int(expire.Seconds()))
|
return c.rds.SetexCtx(ctx, key, string(data), int(expire.Seconds()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns a string that represents the cacheNode.
|
// String returns a string that represents the cacheNode.
|
||||||
@@ -119,21 +140,35 @@ func (c cacheNode) String() string {
|
|||||||
|
|
||||||
// Take takes the result from cache first, if not found,
|
// Take takes the result from cache first, if not found,
|
||||||
// query from DB and set cache using c.expiry, then return the result.
|
// query from DB and set cache using c.expiry, then return the result.
|
||||||
func (c cacheNode) Take(v interface{}, key string, query func(v interface{}) error) error {
|
func (c cacheNode) Take(val interface{}, key string, query func(val interface{}) error) error {
|
||||||
return c.doTake(v, key, query, func(v interface{}) error {
|
return c.TakeCtx(context.Background(), val, key, query)
|
||||||
return c.Set(key, v)
|
}
|
||||||
|
|
||||||
|
// TakeCtx takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using c.expiry, then return the result.
|
||||||
|
func (c cacheNode) TakeCtx(ctx context.Context, val interface{}, key string,
|
||||||
|
query func(val interface{}) error) error {
|
||||||
|
return c.doTake(ctx, val, key, query, func(v interface{}) error {
|
||||||
|
return c.SetCtx(ctx, key, v)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakeWithExpire takes the result from cache first, if not found,
|
// TakeWithExpire takes the result from cache first, if not found,
|
||||||
// query from DB and set cache using given expire, then return the result.
|
// query from DB and set cache using given expire, then return the result.
|
||||||
func (c cacheNode) TakeWithExpire(v interface{}, key string, query func(v interface{},
|
func (c cacheNode) TakeWithExpire(val interface{}, key string, query func(val interface{},
|
||||||
expire time.Duration) error) error {
|
expire time.Duration) error) error {
|
||||||
|
return c.TakeWithExpireCtx(context.Background(), val, key, query)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TakeWithExpireCtx takes the result from cache first, if not found,
|
||||||
|
// query from DB and set cache using given expire, then return the result.
|
||||||
|
func (c cacheNode) TakeWithExpireCtx(ctx context.Context, val interface{}, key string,
|
||||||
|
query func(val interface{}, expire time.Duration) error) error {
|
||||||
expire := c.aroundDuration(c.expiry)
|
expire := c.aroundDuration(c.expiry)
|
||||||
return c.doTake(v, key, func(v interface{}) error {
|
return c.doTake(ctx, val, key, func(v interface{}) error {
|
||||||
return query(v, expire)
|
return query(v, expire)
|
||||||
}, func(v interface{}) error {
|
}, func(v interface{}) error {
|
||||||
return c.SetWithExpire(key, v, expire)
|
return c.SetWithExpireCtx(ctx, key, v, expire)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,9 +183,9 @@ func (c cacheNode) asyncRetryDelCache(keys ...string) {
|
|||||||
}, keys...)
|
}, keys...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c cacheNode) doGetCache(key string, v interface{}) error {
|
func (c cacheNode) doGetCache(ctx context.Context, key string, v interface{}) error {
|
||||||
c.stat.IncrementTotal()
|
c.stat.IncrementTotal()
|
||||||
data, err := c.rds.Get(key)
|
data, err := c.rds.GetCtx(ctx, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.stat.IncrementMiss()
|
c.stat.IncrementMiss()
|
||||||
return err
|
return err
|
||||||
@@ -166,13 +201,14 @@ func (c cacheNode) doGetCache(key string, v interface{}) error {
|
|||||||
return errPlaceholder
|
return errPlaceholder
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.processCache(key, data, v)
|
return c.processCache(ctx, key, data, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c cacheNode) doTake(v interface{}, key string, query func(v interface{}) error,
|
func (c cacheNode) doTake(ctx context.Context, v interface{}, key string,
|
||||||
cacheVal func(v interface{}) error) error {
|
query func(v interface{}) error, cacheVal func(v interface{}) error) error {
|
||||||
|
logger := logx.WithContext(ctx)
|
||||||
val, fresh, err := c.barrier.DoEx(key, func() (interface{}, error) {
|
val, fresh, err := c.barrier.DoEx(key, func() (interface{}, error) {
|
||||||
if err := c.doGetCache(key, v); err != nil {
|
if err := c.doGetCache(ctx, key, v); err != nil {
|
||||||
if err == errPlaceholder {
|
if err == errPlaceholder {
|
||||||
return nil, c.errNotFound
|
return nil, c.errNotFound
|
||||||
} else if err != c.errNotFound {
|
} else if err != c.errNotFound {
|
||||||
@@ -183,8 +219,8 @@ func (c cacheNode) doTake(v interface{}, key string, query func(v interface{}) e
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err = query(v); err == c.errNotFound {
|
if err = query(v); err == c.errNotFound {
|
||||||
if err = c.setCacheWithNotFound(key); err != nil {
|
if err = c.setCacheWithNotFound(ctx, key); err != nil {
|
||||||
logx.Error(err)
|
logger.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, c.errNotFound
|
return nil, c.errNotFound
|
||||||
@@ -194,7 +230,7 @@ func (c cacheNode) doTake(v interface{}, key string, query func(v interface{}) e
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err = cacheVal(v); err != nil {
|
if err = cacheVal(v); err != nil {
|
||||||
logx.Error(err)
|
logger.Error(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -207,14 +243,18 @@ func (c cacheNode) doTake(v interface{}, key string, query func(v interface{}) e
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// got the result from previous ongoing query
|
// got the result from previous ongoing query.
|
||||||
|
// why not call IncrementTotal at the beginning of this function?
|
||||||
|
// because a shared error is returned, and we don't want to count.
|
||||||
|
// for example, if the db is down, the query will be failed, we count
|
||||||
|
// the shared errors with one db failure.
|
||||||
c.stat.IncrementTotal()
|
c.stat.IncrementTotal()
|
||||||
c.stat.IncrementHit()
|
c.stat.IncrementHit()
|
||||||
|
|
||||||
return jsonx.Unmarshal(val.([]byte), v)
|
return jsonx.Unmarshal(val.([]byte), v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c cacheNode) processCache(key, data string, v interface{}) error {
|
func (c cacheNode) processCache(ctx context.Context, key, data string, v interface{}) error {
|
||||||
err := jsonx.Unmarshal([]byte(data), v)
|
err := jsonx.Unmarshal([]byte(data), v)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -222,10 +262,11 @@ func (c cacheNode) processCache(key, data string, v interface{}) error {
|
|||||||
|
|
||||||
report := fmt.Sprintf("unmarshal cache, node: %s, key: %s, value: %s, error: %v",
|
report := fmt.Sprintf("unmarshal cache, node: %s, key: %s, value: %s, error: %v",
|
||||||
c.rds.Addr, key, data, err)
|
c.rds.Addr, key, data, err)
|
||||||
logx.Error(report)
|
logger := logx.WithContext(ctx)
|
||||||
|
logger.Error(report)
|
||||||
stat.Report(report)
|
stat.Report(report)
|
||||||
if _, e := c.rds.Del(key); e != nil {
|
if _, e := c.rds.DelCtx(ctx, key); e != nil {
|
||||||
logx.Errorf("delete invalid cache, node: %s, key: %s, value: %s, error: %v",
|
logger.Errorf("delete invalid cache, node: %s, key: %s, value: %s, error: %v",
|
||||||
c.rds.Addr, key, data, e)
|
c.rds.Addr, key, data, e)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -233,6 +274,6 @@ func (c cacheNode) processCache(key, data string, v interface{}) error {
|
|||||||
return c.errNotFound
|
return c.errNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c cacheNode) setCacheWithNotFound(key string) error {
|
func (c cacheNode) setCacheWithNotFound(ctx context.Context, key string) error {
|
||||||
return c.rds.Setex(key, notFoundPlaceholder, int(c.aroundDuration(c.notFoundExpiry).Seconds()))
|
return c.rds.SetexCtx(ctx, key, notFoundPlaceholder, int(c.aroundDuration(c.notFoundExpiry).Seconds()))
|
||||||
}
|
}
|
||||||
|
|||||||
14
core/stores/cache/cachenode_test.go
vendored
14
core/stores/cache/cachenode_test.go
vendored
@@ -88,7 +88,7 @@ func TestCacheNode_InvalidCache(t *testing.T) {
|
|||||||
assert.Equal(t, miniredis.ErrKeyNotFound, err)
|
assert.Equal(t, miniredis.ErrKeyNotFound, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheNode_Take(t *testing.T) {
|
func TestCacheNode_SetWithExpire(t *testing.T) {
|
||||||
store, clean, err := redistest.CreateRedis()
|
store, clean, err := redistest.CreateRedis()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer clean()
|
defer clean()
|
||||||
@@ -100,8 +100,18 @@ func TestCacheNode_Take(t *testing.T) {
|
|||||||
lock: new(sync.Mutex),
|
lock: new(sync.Mutex),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
stat: NewStat("any"),
|
stat: NewStat("any"),
|
||||||
errNotFound: errTestNotFound,
|
errNotFound: errors.New("any"),
|
||||||
}
|
}
|
||||||
|
assert.NotNil(t, cn.SetWithExpire("key", make(chan int), time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheNode_Take(t *testing.T) {
|
||||||
|
store, clean, err := redistest.CreateRedis()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer clean()
|
||||||
|
|
||||||
|
cn := NewNode(store, syncx.NewSingleFlight(), NewStat("any"), errTestNotFound,
|
||||||
|
WithExpiry(time.Second), WithNotFoundExpiry(time.Second))
|
||||||
var str string
|
var str string
|
||||||
err = cn.Take(&str, "any", func(v interface{}) error {
|
err = cn.Take(&str, "any", func(v interface{}) error {
|
||||||
*v.(*string) = "value"
|
*v.(*string) = "value"
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package clickhouse
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
// imports the driver, don't remove this comment, golint requires.
|
// imports the driver, don't remove this comment, golint requires.
|
||||||
_ "github.com/ClickHouse/clickhouse-go"
|
_ "github.com/ClickHouse/clickhouse-go/v2"
|
||||||
"github.com/zeromicro/go-zero/core/stores/sqlx"
|
"github.com/zeromicro/go-zero/core/stores/sqlx"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -490,6 +490,29 @@ func TestRedis_SetExNx(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRedis_Getset(t *testing.T) {
|
||||||
|
store := clusterStore{dispatcher: hash.NewConsistentHash()}
|
||||||
|
_, err := store.GetSet("hello", "world")
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
|
runOnCluster(t, func(client Store) {
|
||||||
|
val, err := client.GetSet("hello", "world")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "", val)
|
||||||
|
val, err = client.Get("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "world", val)
|
||||||
|
val, err = client.GetSet("hello", "newworld")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "world", val)
|
||||||
|
val, err = client.Get("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "newworld", val)
|
||||||
|
_, err = client.Del("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestRedis_SetGetDelHashField(t *testing.T) {
|
func TestRedis_SetGetDelHashField(t *testing.T) {
|
||||||
store := clusterStore{dispatcher: hash.NewConsistentHash()}
|
store := clusterStore{dispatcher: hash.NewConsistentHash()}
|
||||||
err := store.Hset("key", "field", "value")
|
err := store.Hset("key", "field", "value")
|
||||||
|
|||||||
91
core/stores/mon/bulkinserter.go
Normal file
91
core/stores/mon/bulkinserter.go
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/executors"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
flushInterval = time.Second
|
||||||
|
maxBulkRows = 1000
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// ResultHandler is a handler that used to handle results.
|
||||||
|
ResultHandler func(*mongo.InsertManyResult, error)
|
||||||
|
|
||||||
|
// A BulkInserter is used to insert bulk of mongo records.
|
||||||
|
BulkInserter struct {
|
||||||
|
executor *executors.PeriodicalExecutor
|
||||||
|
inserter *dbInserter
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewBulkInserter returns a BulkInserter.
|
||||||
|
func NewBulkInserter(coll *mongo.Collection, interval ...time.Duration) *BulkInserter {
|
||||||
|
inserter := &dbInserter{
|
||||||
|
collection: coll,
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := flushInterval
|
||||||
|
if len(interval) > 0 {
|
||||||
|
duration = interval[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
return &BulkInserter{
|
||||||
|
executor: executors.NewPeriodicalExecutor(duration, inserter),
|
||||||
|
inserter: inserter,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush flushes the inserter, writes all pending records.
|
||||||
|
func (bi *BulkInserter) Flush() {
|
||||||
|
bi.executor.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert inserts doc.
|
||||||
|
func (bi *BulkInserter) Insert(doc interface{}) {
|
||||||
|
bi.executor.Add(doc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetResultHandler sets the result handler.
|
||||||
|
func (bi *BulkInserter) SetResultHandler(handler ResultHandler) {
|
||||||
|
bi.executor.Sync(func() {
|
||||||
|
bi.inserter.resultHandler = handler
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type dbInserter struct {
|
||||||
|
collection *mongo.Collection
|
||||||
|
documents []interface{}
|
||||||
|
resultHandler ResultHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *dbInserter) AddTask(doc interface{}) bool {
|
||||||
|
in.documents = append(in.documents, doc)
|
||||||
|
return len(in.documents) >= maxBulkRows
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *dbInserter) Execute(objs interface{}) {
|
||||||
|
docs := objs.([]interface{})
|
||||||
|
if len(docs) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := in.collection.InsertMany(context.Background(), docs)
|
||||||
|
if in.resultHandler != nil {
|
||||||
|
in.resultHandler(result, err)
|
||||||
|
} else if err != nil {
|
||||||
|
logx.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *dbInserter) RemoveAll() interface{} {
|
||||||
|
documents := in.documents
|
||||||
|
in.documents = nil
|
||||||
|
return documents
|
||||||
|
}
|
||||||
27
core/stores/mon/bulkinserter_test.go
Normal file
27
core/stores/mon/bulkinserter_test.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"go.mongodb.org/mongo-driver/bson"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo/integration/mtest"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBulkInserter(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "ok", Value: 1}}...))
|
||||||
|
bulk := NewBulkInserter(mt.Coll)
|
||||||
|
bulk.SetResultHandler(func(result *mongo.InsertManyResult, err error) {
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 2, len(result.InsertedIDs))
|
||||||
|
})
|
||||||
|
bulk.Insert(bson.D{{Key: "foo", Value: "bar"}})
|
||||||
|
bulk.Insert(bson.D{{Key: "foo", Value: "baz"}})
|
||||||
|
bulk.Flush()
|
||||||
|
})
|
||||||
|
}
|
||||||
51
core/stores/mon/clientmanager.go
Normal file
51
core/stores/mon/clientmanager.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
mopt "go.mongodb.org/mongo-driver/mongo/options"
|
||||||
|
)
|
||||||
|
|
||||||
|
const defaultTimeout = time.Second
|
||||||
|
|
||||||
|
var clientManager = syncx.NewResourceManager()
|
||||||
|
|
||||||
|
// ClosableClient wraps *mongo.Client and provides a Close method.
|
||||||
|
type ClosableClient struct {
|
||||||
|
*mongo.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close disconnects the underlying *mongo.Client.
|
||||||
|
func (cs *ClosableClient) Close() error {
|
||||||
|
return cs.Client.Disconnect(context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inject injects a *mongo.Client into the client manager.
|
||||||
|
// Typically, this is used to inject a *mongo.Client for test purpose.
|
||||||
|
func Inject(key string, client *mongo.Client) {
|
||||||
|
clientManager.Inject(key, &ClosableClient{client})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getClient(url string) (*mongo.Client, error) {
|
||||||
|
val, err := clientManager.GetResource(url, func() (io.Closer, error) {
|
||||||
|
cli, err := mongo.Connect(context.Background(), mopt.Client().ApplyURI(url))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
concurrentSess := &ClosableClient{
|
||||||
|
Client: cli,
|
||||||
|
}
|
||||||
|
|
||||||
|
return concurrentSess, nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return val.(*ClosableClient).Client, nil
|
||||||
|
}
|
||||||
20
core/stores/mon/clientmanager_test.go
Normal file
20
core/stores/mon/clientmanager_test.go
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo/integration/mtest"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestClientManger_getClient(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
Inject(mtest.ClusterURI(), mt.Client)
|
||||||
|
cli, err := getClient(mtest.ClusterURI())
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, mt.Client, cli)
|
||||||
|
})
|
||||||
|
}
|
||||||
558
core/stores/mon/collection.go
Normal file
558
core/stores/mon/collection.go
Normal file
@@ -0,0 +1,558 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/breaker"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
mopt "go.mongodb.org/mongo-driver/mongo/options"
|
||||||
|
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultSlowThreshold = time.Millisecond * 500
|
||||||
|
// spanName is the span name of the mongo calls.
|
||||||
|
spanName = "mongo"
|
||||||
|
|
||||||
|
// mongodb method names
|
||||||
|
aggregate = "Aggregate"
|
||||||
|
bulkWrite = "BulkWrite"
|
||||||
|
countDocuments = "CountDocuments"
|
||||||
|
deleteMany = "DeleteMany"
|
||||||
|
deleteOne = "DeleteOne"
|
||||||
|
distinct = "Distinct"
|
||||||
|
estimatedDocumentCount = "EstimatedDocumentCount"
|
||||||
|
find = "Find"
|
||||||
|
findOne = "FindOne"
|
||||||
|
findOneAndDelete = "FindOneAndDelete"
|
||||||
|
findOneAndReplace = "FindOneAndReplace"
|
||||||
|
findOneAndUpdate = "FindOneAndUpdate"
|
||||||
|
insertMany = "InsertMany"
|
||||||
|
insertOne = "InsertOne"
|
||||||
|
replaceOne = "ReplaceOne"
|
||||||
|
updateByID = "UpdateByID"
|
||||||
|
updateMany = "UpdateMany"
|
||||||
|
updateOne = "UpdateOne"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrNotFound is an alias of mongo.ErrNoDocuments
|
||||||
|
var ErrNotFound = mongo.ErrNoDocuments
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Collection defines a MongoDB collection.
|
||||||
|
Collection interface {
|
||||||
|
// Aggregate executes an aggregation pipeline.
|
||||||
|
Aggregate(ctx context.Context, pipeline interface{}, opts ...*mopt.AggregateOptions) (
|
||||||
|
*mongo.Cursor, error)
|
||||||
|
// BulkWrite performs a bulk write operation.
|
||||||
|
BulkWrite(ctx context.Context, models []mongo.WriteModel, opts ...*mopt.BulkWriteOptions) (
|
||||||
|
*mongo.BulkWriteResult, error)
|
||||||
|
// Clone creates a copy of this collection with the same settings.
|
||||||
|
Clone(opts ...*mopt.CollectionOptions) (*mongo.Collection, error)
|
||||||
|
// CountDocuments returns the number of documents in the collection that match the filter.
|
||||||
|
CountDocuments(ctx context.Context, filter interface{}, opts ...*mopt.CountOptions) (int64, error)
|
||||||
|
// Database returns the database that this collection is a part of.
|
||||||
|
Database() *mongo.Database
|
||||||
|
// DeleteMany deletes documents from the collection that match the filter.
|
||||||
|
DeleteMany(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (
|
||||||
|
*mongo.DeleteResult, error)
|
||||||
|
// DeleteOne deletes at most one document from the collection that matches the filter.
|
||||||
|
DeleteOne(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (
|
||||||
|
*mongo.DeleteResult, error)
|
||||||
|
// Distinct returns a list of distinct values for the given key across the collection.
|
||||||
|
Distinct(ctx context.Context, fieldName string, filter interface{},
|
||||||
|
opts ...*mopt.DistinctOptions) ([]interface{}, error)
|
||||||
|
// Drop drops this collection from database.
|
||||||
|
Drop(ctx context.Context) error
|
||||||
|
// EstimatedDocumentCount returns an estimate of the count of documents in a collection
|
||||||
|
// using collection metadata.
|
||||||
|
EstimatedDocumentCount(ctx context.Context, opts ...*mopt.EstimatedDocumentCountOptions) (int64, error)
|
||||||
|
// Find finds the documents matching the provided filter.
|
||||||
|
Find(ctx context.Context, filter interface{}, opts ...*mopt.FindOptions) (*mongo.Cursor, error)
|
||||||
|
// FindOne returns up to one document that matches the provided filter.
|
||||||
|
FindOne(ctx context.Context, filter interface{}, opts ...*mopt.FindOneOptions) (
|
||||||
|
*mongo.SingleResult, error)
|
||||||
|
// FindOneAndDelete returns at most one document that matches the filter. If the filter
|
||||||
|
// matches multiple documents, only the first document is deleted.
|
||||||
|
FindOneAndDelete(ctx context.Context, filter interface{}, opts ...*mopt.FindOneAndDeleteOptions) (
|
||||||
|
*mongo.SingleResult, error)
|
||||||
|
// FindOneAndReplace returns at most one document that matches the filter. If the filter
|
||||||
|
// matches multiple documents, FindOneAndReplace returns the first document in the
|
||||||
|
// collection that matches the filter.
|
||||||
|
FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{},
|
||||||
|
opts ...*mopt.FindOneAndReplaceOptions) (*mongo.SingleResult, error)
|
||||||
|
// FindOneAndUpdate returns at most one document that matches the filter. If the filter
|
||||||
|
// matches multiple documents, FindOneAndUpdate returns the first document in the
|
||||||
|
// collection that matches the filter.
|
||||||
|
FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.FindOneAndUpdateOptions) (*mongo.SingleResult, error)
|
||||||
|
// Indexes returns the index view for this collection.
|
||||||
|
Indexes() mongo.IndexView
|
||||||
|
// InsertMany inserts the provided documents.
|
||||||
|
InsertMany(ctx context.Context, documents []interface{}, opts ...*mopt.InsertManyOptions) (
|
||||||
|
*mongo.InsertManyResult, error)
|
||||||
|
// InsertOne inserts the provided document.
|
||||||
|
InsertOne(ctx context.Context, document interface{}, opts ...*mopt.InsertOneOptions) (
|
||||||
|
*mongo.InsertOneResult, error)
|
||||||
|
// ReplaceOne replaces at most one document that matches the filter.
|
||||||
|
ReplaceOne(ctx context.Context, filter interface{}, replacement interface{},
|
||||||
|
opts ...*mopt.ReplaceOptions) (*mongo.UpdateResult, error)
|
||||||
|
// UpdateByID updates a single document matching the provided filter.
|
||||||
|
UpdateByID(ctx context.Context, id interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
|
||||||
|
// UpdateMany updates the provided documents.
|
||||||
|
UpdateMany(ctx context.Context, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
|
||||||
|
// UpdateOne updates a single document matching the provided filter.
|
||||||
|
UpdateOne(ctx context.Context, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
|
||||||
|
// Watch returns a change stream cursor used to receive notifications of changes to the collection.
|
||||||
|
Watch(ctx context.Context, pipeline interface{}, opts ...*mopt.ChangeStreamOptions) (
|
||||||
|
*mongo.ChangeStream, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
decoratedCollection struct {
|
||||||
|
*mongo.Collection
|
||||||
|
name string
|
||||||
|
brk breaker.Breaker
|
||||||
|
}
|
||||||
|
|
||||||
|
keepablePromise struct {
|
||||||
|
promise breaker.Promise
|
||||||
|
log func(error)
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func newCollection(collection *mongo.Collection, brk breaker.Breaker) Collection {
|
||||||
|
return &decoratedCollection{
|
||||||
|
Collection: collection,
|
||||||
|
name: collection.Name(),
|
||||||
|
brk: brk,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) Aggregate(ctx context.Context, pipeline interface{},
|
||||||
|
opts ...*mopt.AggregateOptions) (cur *mongo.Cursor, err error) {
|
||||||
|
ctx, span := startSpan(ctx, aggregate)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
starTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, aggregate, starTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
cur, err = c.Collection.Aggregate(ctx, pipeline, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) BulkWrite(ctx context.Context, models []mongo.WriteModel,
|
||||||
|
opts ...*mopt.BulkWriteOptions) (res *mongo.BulkWriteResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, bulkWrite)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, bulkWrite, startTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.BulkWrite(ctx, models, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) CountDocuments(ctx context.Context, filter interface{},
|
||||||
|
opts ...*mopt.CountOptions) (count int64, err error) {
|
||||||
|
ctx, span := startSpan(ctx, countDocuments)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, countDocuments, startTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
count, err = c.Collection.CountDocuments(ctx, filter, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) DeleteMany(ctx context.Context, filter interface{},
|
||||||
|
opts ...*mopt.DeleteOptions) (res *mongo.DeleteResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, deleteMany)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, deleteMany, startTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.DeleteMany(ctx, filter, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) DeleteOne(ctx context.Context, filter interface{},
|
||||||
|
opts ...*mopt.DeleteOptions) (res *mongo.DeleteResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, deleteOne)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, deleteOne, startTime, err, filter)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.DeleteOne(ctx, filter, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) Distinct(ctx context.Context, fieldName string, filter interface{},
|
||||||
|
opts ...*mopt.DistinctOptions) (val []interface{}, err error) {
|
||||||
|
ctx, span := startSpan(ctx, distinct)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, distinct, startTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
val, err = c.Collection.Distinct(ctx, fieldName, filter, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) EstimatedDocumentCount(ctx context.Context,
|
||||||
|
opts ...*mopt.EstimatedDocumentCountOptions) (val int64, err error) {
|
||||||
|
ctx, span := startSpan(ctx, estimatedDocumentCount)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, estimatedDocumentCount, startTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
val, err = c.Collection.EstimatedDocumentCount(ctx, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) Find(ctx context.Context, filter interface{},
|
||||||
|
opts ...*mopt.FindOptions) (cur *mongo.Cursor, err error) {
|
||||||
|
ctx, span := startSpan(ctx, find)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, find, startTime, err, filter)
|
||||||
|
}()
|
||||||
|
|
||||||
|
cur, err = c.Collection.Find(ctx, filter, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) FindOne(ctx context.Context, filter interface{},
|
||||||
|
opts ...*mopt.FindOneOptions) (res *mongo.SingleResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, findOne)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, findOne, startTime, err, filter)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res = c.Collection.FindOne(ctx, filter, opts...)
|
||||||
|
err = res.Err()
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) FindOneAndDelete(ctx context.Context, filter interface{},
|
||||||
|
opts ...*mopt.FindOneAndDeleteOptions) (res *mongo.SingleResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, findOneAndDelete)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, findOneAndDelete, startTime, err, filter)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res = c.Collection.FindOneAndDelete(ctx, filter, opts...)
|
||||||
|
err = res.Err()
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) FindOneAndReplace(ctx context.Context, filter interface{},
|
||||||
|
replacement interface{}, opts ...*mopt.FindOneAndReplaceOptions) (
|
||||||
|
res *mongo.SingleResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, findOneAndReplace)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, findOneAndReplace, startTime, err, filter, replacement)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res = c.Collection.FindOneAndReplace(ctx, filter, replacement, opts...)
|
||||||
|
err = res.Err()
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.FindOneAndUpdateOptions) (res *mongo.SingleResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, findOneAndUpdate)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, findOneAndUpdate, startTime, err, filter, update)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res = c.Collection.FindOneAndUpdate(ctx, filter, update, opts...)
|
||||||
|
err = res.Err()
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) InsertMany(ctx context.Context, documents []interface{},
|
||||||
|
opts ...*mopt.InsertManyOptions) (res *mongo.InsertManyResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, insertMany)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, insertMany, startTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.InsertMany(ctx, documents, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) InsertOne(ctx context.Context, document interface{},
|
||||||
|
opts ...*mopt.InsertOneOptions) (res *mongo.InsertOneResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, insertOne)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, insertOne, startTime, err, document)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.InsertOne(ctx, document, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter interface{}, replacement interface{},
|
||||||
|
opts ...*mopt.ReplaceOptions) (res *mongo.UpdateResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, replaceOne)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, replaceOne, startTime, err, filter, replacement)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.ReplaceOne(ctx, filter, replacement, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) UpdateByID(ctx context.Context, id interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, updateByID)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, updateByID, startTime, err, id, update)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.UpdateByID(ctx, id, update, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) UpdateMany(ctx context.Context, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, updateMany)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDurationSimple(ctx, updateMany, startTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.UpdateMany(ctx, filter, update, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) UpdateOne(ctx context.Context, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
|
||||||
|
ctx, span := startSpan(ctx, updateOne)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = c.brk.DoWithAcceptable(func() error {
|
||||||
|
startTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
c.logDuration(ctx, updateOne, startTime, err, filter, update)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = c.Collection.UpdateOne(ctx, filter, update, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) logDuration(ctx context.Context, method string, startTime time.Duration, err error,
|
||||||
|
docs ...interface{}) {
|
||||||
|
duration := timex.Since(startTime)
|
||||||
|
logger := logx.WithContext(ctx).WithDuration(duration)
|
||||||
|
|
||||||
|
content, e := json.Marshal(docs)
|
||||||
|
if e != nil {
|
||||||
|
logger.Error(err)
|
||||||
|
} else if err != nil {
|
||||||
|
if duration > slowThreshold.Load() {
|
||||||
|
logger.Slowf("[MONGO] mongo(%s) - slowcall - %s - fail(%s) - %s",
|
||||||
|
c.name, method, err.Error(), string(content))
|
||||||
|
} else {
|
||||||
|
logger.Infof("mongo(%s) - %s - fail(%s) - %s",
|
||||||
|
c.name, method, err.Error(), string(content))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if duration > slowThreshold.Load() {
|
||||||
|
logger.Slowf("[MONGO] mongo(%s) - slowcall - %s - ok - %s",
|
||||||
|
c.name, method, string(content))
|
||||||
|
} else {
|
||||||
|
logger.Infof("mongo(%s) - %s - ok - %s", c.name, method, string(content))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *decoratedCollection) logDurationSimple(ctx context.Context, method string, startTime time.Duration, err error) {
|
||||||
|
logDuration(ctx, c.name, method, startTime, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p keepablePromise) accept(err error) error {
|
||||||
|
p.promise.Accept()
|
||||||
|
p.log(err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p keepablePromise) keep(err error) error {
|
||||||
|
if acceptable(err) {
|
||||||
|
p.promise.Accept()
|
||||||
|
} else {
|
||||||
|
p.promise.Reject(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
p.log(err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func acceptable(err error) bool {
|
||||||
|
return err == nil || err == mongo.ErrNoDocuments || err == mongo.ErrNilValue ||
|
||||||
|
err == mongo.ErrNilDocument || err == mongo.ErrNilCursor || err == mongo.ErrEmptySlice ||
|
||||||
|
// session errors
|
||||||
|
err == session.ErrSessionEnded || err == session.ErrNoTransactStarted ||
|
||||||
|
err == session.ErrTransactInProgress || err == session.ErrAbortAfterCommit ||
|
||||||
|
err == session.ErrAbortTwice || err == session.ErrCommitAfterAbort ||
|
||||||
|
err == session.ErrUnackWCUnsupported || err == session.ErrSnapshotTransaction
|
||||||
|
}
|
||||||
--- new file: core/stores/mon/collection_test.go (648 lines) ---
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/breaker"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
|
"go.mongodb.org/mongo-driver/bson"
|
||||||
|
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo/integration/mtest"
|
||||||
|
mopt "go.mongodb.org/mongo-driver/mongo/options"
|
||||||
|
)
|
||||||
|
|
||||||
|
// errDummy is the sentinel error returned by the dropBreaker test double.
var errDummy = errors.New("dummy")
|
||||||
|
|
||||||
|
// init silences logging for the whole test package.
func init() {
	logx.Disable()
}
|
||||||
|
|
||||||
|
// TestKeepPromise_accept checks that accept passes the error through
// unchanged, whether nil or not.
func TestKeepPromise_accept(t *testing.T) {
	p := new(mockPromise)
	kp := keepablePromise{
		promise: p,
		log:     func(error) {},
	}
	assert.Nil(t, kp.accept(nil))
	assert.Equal(t, ErrNotFound, kp.accept(ErrNotFound))
}
|
||||||
|
|
||||||
|
// TestKeepPromise_keep table-tests keep: acceptable errors (nil, ErrNotFound)
// resolve the promise as accepted, others as rejected with the error text.
func TestKeepPromise_keep(t *testing.T) {
	tests := []struct {
		err      error
		accepted bool
		reason   string
	}{
		{
			err:      nil,
			accepted: true,
			reason:   "",
		},
		{
			err:      ErrNotFound,
			accepted: true,
			reason:   "",
		},
		{
			err:      errors.New("any"),
			accepted: false,
			reason:   "any",
		},
	}

	for _, test := range tests {
		t.Run(stringx.RandId(), func(t *testing.T) {
			p := new(mockPromise)
			kp := keepablePromise{
				promise: p,
				log:     func(error) {},
			}
			assert.Equal(t, test.err, kp.keep(test.err))
			assert.Equal(t, test.accepted, p.accepted)
			assert.Equal(t, test.reason, p.reason)
		})
	}
}
|
||||||
|
|
||||||
|
// TestNewCollection checks that newCollection caches the mock collection's
// name on the decorated wrapper.
func TestNewCollection(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()
	mt.Run("test", func(mt *mtest.T) {
		coll := mt.Coll
		assert.NotNil(t, coll)
		col := newCollection(coll, breaker.GetBreaker("localhost"))
		assert.Equal(t, t.Name()+"/test", col.(*decoratedCollection).name)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_Aggregate checks Aggregate against a mocked cursor response.
func TestCollection_Aggregate(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()
	mt.Run("test", func(mt *mtest.T) {
		coll := mt.Coll
		assert.NotNil(t, coll)
		col := newCollection(coll, breaker.GetBreaker("localhost"))
		ns := mt.Coll.Database().Name() + "." + mt.Coll.Name()
		// empty first batch is enough for the call to succeed
		aggRes := mtest.CreateCursorResponse(1, ns, mtest.FirstBatch)
		mt.AddMockResponses(aggRes)
		assert.Equal(t, t.Name()+"/test", col.(*decoratedCollection).name)
		cursor, err := col.Aggregate(context.Background(), mongo.Pipeline{}, mopt.Aggregate())
		assert.Nil(t, err)
		cursor.Close(context.Background())
	})
}
|
||||||
|
|
||||||
|
// TestCollection_BulkWrite checks BulkWrite against a mocked success response,
// then verifies a tripped breaker surfaces errDummy without hitting the driver.
func TestCollection_BulkWrite(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "ok", Value: 1}}...))
		res, err := c.BulkWrite(context.Background(), []mongo.WriteModel{
			mongo.NewInsertOneModel().SetDocument(bson.D{{Key: "foo", Value: 1}})},
		)
		assert.Nil(t, err)
		assert.NotNil(t, res)
		// dropBreaker rejects every request with errDummy
		c.brk = new(dropBreaker)
		_, err = c.BulkWrite(context.Background(), []mongo.WriteModel{
			mongo.NewInsertOneModel().SetDocument(bson.D{{Key: "foo", Value: 1}})},
		)
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_CountDocuments checks CountDocuments against a mocked cursor
// response, then verifies a tripped breaker surfaces errDummy.
func TestCollection_CountDocuments(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateCursorResponse(
			1,
			"DBName.CollectionName",
			mtest.FirstBatch,
			bson.D{
				{Key: "n", Value: 1},
			}))
		res, err := c.CountDocuments(context.Background(), bson.D{})
		assert.Nil(t, err)
		assert.Equal(t, int64(1), res)

		c.brk = new(dropBreaker)
		_, err = c.CountDocuments(context.Background(), bson.D{{Key: "foo", Value: 1}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestDecoratedCollection_DeleteMany checks DeleteMany against a mocked
// success response, then verifies a tripped breaker surfaces errDummy.
func TestDecoratedCollection_DeleteMany(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
		res, err := c.DeleteMany(context.Background(), bson.D{})
		assert.Nil(t, err)
		assert.Equal(t, int64(1), res.DeletedCount)

		c.brk = new(dropBreaker)
		_, err = c.DeleteMany(context.Background(), bson.D{{Key: "foo", Value: 1}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_Distinct checks Distinct against a mocked "values" response,
// then verifies a tripped breaker surfaces errDummy.
func TestCollection_Distinct(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(bson.D{{Key: "ok", Value: 1}, {Key: "values", Value: []int{1}}})
		resp, err := c.Distinct(context.Background(), "foo", bson.D{})
		assert.Nil(t, err)
		assert.Equal(t, 1, len(resp))

		c.brk = new(dropBreaker)
		_, err = c.Distinct(context.Background(), "foo", bson.D{{Key: "foo", Value: 1}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_EstimatedDocumentCount checks EstimatedDocumentCount against
// a mocked count response, then verifies a tripped breaker surfaces errDummy.
func TestCollection_EstimatedDocumentCount(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(bson.D{{Key: "ok", Value: 1}, {Key: "n", Value: 1}})
		res, err := c.EstimatedDocumentCount(context.Background())
		assert.Nil(t, err)
		assert.Equal(t, int64(1), res)

		c.brk = new(dropBreaker)
		_, err = c.EstimatedDocumentCount(context.Background())
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollectionFind checks Find through a two-batch mocked cursor (first
// batch, getMore, killCursors), decodes both documents, then verifies a
// tripped breaker surfaces errDummy.
func TestCollectionFind(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		// first batch returns "John"
		find := mtest.CreateCursorResponse(
			1,
			"DBName.CollectionName",
			mtest.FirstBatch,
			bson.D{
				{Key: "name", Value: "John"},
			})
		// getMore returns "Mary"
		getMore := mtest.CreateCursorResponse(
			1,
			"DBName.CollectionName",
			mtest.NextBatch,
			bson.D{
				{Key: "name", Value: "Mary"},
			})
		// cursor id 0 terminates iteration
		killCursors := mtest.CreateCursorResponse(
			0,
			"DBName.CollectionName",
			mtest.NextBatch)
		mt.AddMockResponses(find, getMore, killCursors)
		filter := bson.D{{Key: "x", Value: 1}}
		cursor, err := c.Find(context.Background(), filter, mopt.Find())
		assert.Nil(t, err)
		defer cursor.Close(context.Background())

		var val []struct {
			ID   primitive.ObjectID `bson:"_id"`
			Name string             `bson:"name"`
		}
		assert.Nil(t, cursor.All(context.Background(), &val))
		assert.Equal(t, 2, len(val))
		assert.Equal(t, "John", val[0].Name)
		assert.Equal(t, "Mary", val[1].Name)

		c.brk = new(dropBreaker)
		_, err = c.Find(context.Background(), filter, mopt.Find())
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollectionFindOne checks FindOne through a mocked cursor, decodes the
// single document, then verifies a tripped breaker surfaces errDummy.
func TestCollectionFindOne(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		find := mtest.CreateCursorResponse(
			1,
			"DBName.CollectionName",
			mtest.FirstBatch,
			bson.D{
				{Key: "name", Value: "John"},
			})
		getMore := mtest.CreateCursorResponse(
			1,
			"DBName.CollectionName",
			mtest.NextBatch,
			bson.D{
				{Key: "name", Value: "Mary"},
			})
		killCursors := mtest.CreateCursorResponse(
			0,
			"DBName.CollectionName",
			mtest.NextBatch)
		mt.AddMockResponses(find, getMore, killCursors)
		filter := bson.D{{Key: "x", Value: 1}}
		resp, err := c.FindOne(context.Background(), filter)
		assert.Nil(t, err)
		var val struct {
			ID   primitive.ObjectID `bson:"_id"`
			Name string             `bson:"name"`
		}
		assert.Nil(t, resp.Decode(&val))
		assert.Equal(t, "John", val.Name)

		c.brk = new(dropBreaker)
		_, err = c.FindOne(context.Background(), filter)
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_FindOneAndDelete checks that an empty mocked response yields
// mongo.ErrNoDocuments, that a "value" response decodes, and that a tripped
// breaker surfaces errDummy.
func TestCollection_FindOneAndDelete(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		filter := bson.D{}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{}...))
		_, err := c.FindOneAndDelete(context.Background(), filter, mopt.FindOneAndDelete())
		assert.Equal(t, mongo.ErrNoDocuments, err)

		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "name", Value: "John"}}},
		}...))
		resp, err := c.FindOneAndDelete(context.Background(), filter, mopt.FindOneAndDelete())
		assert.Nil(t, err)
		var val struct {
			Name string `bson:"name"`
		}
		assert.Nil(t, resp.Decode(&val))
		assert.Equal(t, "John", val.Name)

		c.brk = new(dropBreaker)
		_, err = c.FindOneAndDelete(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_FindOneAndReplace checks the no-document, success-decode,
// and tripped-breaker paths of FindOneAndReplace.
func TestCollection_FindOneAndReplace(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{}...))
		filter := bson.D{{Key: "x", Value: 1}}
		replacement := bson.D{{Key: "x", Value: 2}}
		opts := mopt.FindOneAndReplace().SetUpsert(true)
		_, err := c.FindOneAndReplace(context.Background(), filter, replacement, opts)
		assert.Equal(t, mongo.ErrNoDocuments, err)
		mt.AddMockResponses(bson.D{{Key: "ok", Value: 1}, {Key: "value", Value: bson.D{
			{Key: "name", Value: "John"},
		}}})
		resp, err := c.FindOneAndReplace(context.Background(), filter, replacement, opts)
		assert.Nil(t, err)
		var val struct {
			Name string `bson:"name"`
		}
		assert.Nil(t, resp.Decode(&val))
		assert.Equal(t, "John", val.Name)

		c.brk = new(dropBreaker)
		_, err = c.FindOneAndReplace(context.Background(), filter, replacement, opts)
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_FindOneAndUpdate checks the no-document, success-decode,
// and tripped-breaker paths of FindOneAndUpdate.
func TestCollection_FindOneAndUpdate(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(bson.D{{Key: "ok", Value: 1}})
		filter := bson.D{{Key: "x", Value: 1}}
		update := bson.D{{Key: "$x", Value: 2}}
		opts := mopt.FindOneAndUpdate().SetUpsert(true)
		_, err := c.FindOneAndUpdate(context.Background(), filter, update, opts)
		assert.Equal(t, mongo.ErrNoDocuments, err)

		mt.AddMockResponses(bson.D{{Key: "ok", Value: 1}, {Key: "value", Value: bson.D{
			{Key: "name", Value: "John"},
		}}})
		resp, err := c.FindOneAndUpdate(context.Background(), filter, update, opts)
		assert.Nil(t, err)
		var val struct {
			Name string `bson:"name"`
		}
		assert.Nil(t, resp.Decode(&val))
		assert.Equal(t, "John", val.Name)

		c.brk = new(dropBreaker)
		_, err = c.FindOneAndUpdate(context.Background(), filter, update, opts)
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_InsertOne checks InsertOne against a mocked success response,
// then verifies a tripped breaker surfaces errDummy.
func TestCollection_InsertOne(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "ok", Value: 1}}...))
		res, err := c.InsertOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Nil(t, err)
		assert.NotNil(t, res)

		c.brk = new(dropBreaker)
		_, err = c.InsertOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_InsertMany checks InsertMany returns an ID per inserted
// document, then verifies a tripped breaker surfaces errDummy.
func TestCollection_InsertMany(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "ok", Value: 1}}...))
		res, err := c.InsertMany(context.Background(), []interface{}{
			bson.D{{Key: "foo", Value: "bar"}},
			bson.D{{Key: "foo", Value: "baz"}},
		})
		assert.Nil(t, err)
		assert.NotNil(t, res)
		assert.Equal(t, 2, len(res.InsertedIDs))

		c.brk = new(dropBreaker)
		_, err = c.InsertMany(context.Background(), []interface{}{bson.D{{Key: "foo", Value: "bar"}}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollection_Remove checks DeleteOne against a mocked success response,
// then verifies a tripped breaker surfaces errDummy.
func TestCollection_Remove(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
		res, err := c.DeleteOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Nil(t, err)
		assert.Equal(t, int64(1), res.DeletedCount)

		c.brk = new(dropBreaker)
		_, err = c.DeleteOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
// TestCollectionRemoveAll checks DeleteMany against a mocked success
// response, then verifies a tripped breaker surfaces errDummy.
func TestCollectionRemoveAll(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		c := decoratedCollection{
			Collection: mt.Coll,
			brk:        breaker.NewBreaker(),
		}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
		res, err := c.DeleteMany(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Nil(t, err)
		assert.Equal(t, int64(1), res.DeletedCount)

		c.brk = new(dropBreaker)
		_, err = c.DeleteMany(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Equal(t, errDummy, err)
	})
}
|
||||||
|
|
||||||
|
func TestCollection_ReplaceOne(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
c := decoratedCollection{
|
||||||
|
Collection: mt.Coll,
|
||||||
|
brk: breaker.NewBreaker(),
|
||||||
|
}
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
|
||||||
|
res, err := c.ReplaceOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}},
|
||||||
|
bson.D{{Key: "foo", Value: "baz"}},
|
||||||
|
)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, int64(1), res.MatchedCount)
|
||||||
|
|
||||||
|
c.brk = new(dropBreaker)
|
||||||
|
_, err = c.ReplaceOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}},
|
||||||
|
bson.D{{Key: "foo", Value: "baz"}})
|
||||||
|
assert.Equal(t, errDummy, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCollection_UpdateOne(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
c := decoratedCollection{
|
||||||
|
Collection: mt.Coll,
|
||||||
|
brk: breaker.NewBreaker(),
|
||||||
|
}
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
|
||||||
|
resp, err := c.UpdateOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}},
|
||||||
|
bson.D{{Key: "$set", Value: bson.D{{Key: "baz", Value: "qux"}}}})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, int64(1), resp.MatchedCount)
|
||||||
|
|
||||||
|
c.brk = new(dropBreaker)
|
||||||
|
_, err = c.UpdateOne(context.Background(), bson.D{{Key: "foo", Value: "bar"}},
|
||||||
|
bson.D{{Key: "$set", Value: bson.D{{Key: "baz", Value: "qux"}}}})
|
||||||
|
assert.Equal(t, errDummy, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCollection_UpdateByID(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
c := decoratedCollection{
|
||||||
|
Collection: mt.Coll,
|
||||||
|
brk: breaker.NewBreaker(),
|
||||||
|
}
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
|
||||||
|
resp, err := c.UpdateByID(context.Background(), primitive.NewObjectID(),
|
||||||
|
bson.D{{Key: "$set", Value: bson.D{{Key: "baz", Value: "qux"}}}})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, int64(1), resp.MatchedCount)
|
||||||
|
|
||||||
|
c.brk = new(dropBreaker)
|
||||||
|
_, err = c.UpdateByID(context.Background(), primitive.NewObjectID(),
|
||||||
|
bson.D{{Key: "$set", Value: bson.D{{Key: "baz", Value: "qux"}}}})
|
||||||
|
assert.Equal(t, errDummy, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCollection_UpdateMany(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
c := decoratedCollection{
|
||||||
|
Collection: mt.Coll,
|
||||||
|
brk: breaker.NewBreaker(),
|
||||||
|
}
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
|
||||||
|
resp, err := c.UpdateMany(context.Background(), bson.D{{Key: "foo", Value: "bar"}},
|
||||||
|
bson.D{{Key: "$set", Value: bson.D{{Key: "baz", Value: "qux"}}}})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, int64(1), resp.MatchedCount)
|
||||||
|
|
||||||
|
c.brk = new(dropBreaker)
|
||||||
|
_, err = c.UpdateMany(context.Background(), bson.D{{Key: "foo", Value: "bar"}},
|
||||||
|
bson.D{{Key: "$set", Value: bson.D{{Key: "baz", Value: "qux"}}}})
|
||||||
|
assert.Equal(t, errDummy, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_DecoratedCollectionLogDuration(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
c := decoratedCollection{
|
||||||
|
Collection: mt.Coll,
|
||||||
|
brk: breaker.NewBreaker(),
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf strings.Builder
|
||||||
|
w := logx.NewWriter(&buf)
|
||||||
|
o := logx.Reset()
|
||||||
|
logx.SetWriter(w)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
logx.Reset()
|
||||||
|
logx.SetWriter(o)
|
||||||
|
}()
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration(context.Background(), "foo", time.Millisecond, nil, "bar")
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
assert.Contains(t, buf.String(), "bar")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration(context.Background(), "foo", time.Millisecond, errors.New("bar"), make(chan int))
|
||||||
|
assert.Contains(t, buf.String(), "bar")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration(context.Background(), "foo", slowThreshold.Load()+time.Millisecond, errors.New("bar"))
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
assert.Contains(t, buf.String(), "slowcall")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration(context.Background(), "foo", slowThreshold.Load()+time.Millisecond, nil)
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
assert.Contains(t, buf.String(), "slowcall")
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockPromise struct {
|
||||||
|
accepted bool
|
||||||
|
reason string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mockPromise) Accept() {
|
||||||
|
p.accepted = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mockPromise) Reject(reason string) {
|
||||||
|
p.reason = reason
|
||||||
|
}
|
||||||
|
|
||||||
|
type dropBreaker struct{}
|
||||||
|
|
||||||
|
func (d *dropBreaker) Name() string {
|
||||||
|
return "dummy"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dropBreaker) Allow() (breaker.Promise, error) {
|
||||||
|
return nil, errDummy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dropBreaker) Do(_ func() error) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dropBreaker) DoWithAcceptable(_ func() error, _ breaker.Acceptable) error {
|
||||||
|
return errDummy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dropBreaker) DoWithFallback(_ func() error, _ func(err error) error) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dropBreaker) DoWithFallbackAcceptable(_ func() error, _ func(err error) error,
|
||||||
|
_ breaker.Acceptable) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
258
core/stores/mon/model.go
Normal file
258
core/stores/mon/model.go
Normal file
@@ -0,0 +1,258 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/breaker"
|
||||||
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
mopt "go.mongodb.org/mongo-driver/mongo/options"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
startSession = "StartSession"
|
||||||
|
abortTransaction = "AbortTransaction"
|
||||||
|
commitTransaction = "CommitTransaction"
|
||||||
|
withTransaction = "WithTransaction"
|
||||||
|
endSession = "EndSession"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Model is a mongodb store model that represents a collection.
|
||||||
|
Model struct {
|
||||||
|
Collection
|
||||||
|
name string
|
||||||
|
cli *mongo.Client
|
||||||
|
brk breaker.Breaker
|
||||||
|
opts []Option
|
||||||
|
}
|
||||||
|
|
||||||
|
wrappedSession struct {
|
||||||
|
mongo.Session
|
||||||
|
name string
|
||||||
|
brk breaker.Breaker
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// MustNewModel returns a Model, exits on errors.
|
||||||
|
func MustNewModel(uri, db, collection string, opts ...Option) *Model {
|
||||||
|
model, err := NewModel(uri, db, collection, opts...)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return model
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewModel returns a Model.
|
||||||
|
func NewModel(uri, db, collection string, opts ...Option) (*Model, error) {
|
||||||
|
cli, err := getClient(uri)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
name := strings.Join([]string{uri, collection}, "/")
|
||||||
|
brk := breaker.GetBreaker(uri)
|
||||||
|
coll := newCollection(cli.Database(db).Collection(collection), brk)
|
||||||
|
return newModel(name, cli, coll, brk, opts...), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newModel(name string, cli *mongo.Client, coll Collection, brk breaker.Breaker,
|
||||||
|
opts ...Option) *Model {
|
||||||
|
return &Model{
|
||||||
|
name: name,
|
||||||
|
Collection: coll,
|
||||||
|
cli: cli,
|
||||||
|
brk: brk,
|
||||||
|
opts: opts,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartSession starts a new session.
|
||||||
|
func (m *Model) StartSession(opts ...*mopt.SessionOptions) (sess mongo.Session, err error) {
|
||||||
|
err = m.brk.DoWithAcceptable(func() error {
|
||||||
|
starTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
logDuration(context.Background(), m.name, startSession, starTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
session, sessionErr := m.cli.StartSession(opts...)
|
||||||
|
if sessionErr != nil {
|
||||||
|
return sessionErr
|
||||||
|
}
|
||||||
|
|
||||||
|
sess = &wrappedSession{
|
||||||
|
Session: session,
|
||||||
|
name: m.name,
|
||||||
|
brk: m.brk,
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate executes an aggregation pipeline.
|
||||||
|
func (m *Model) Aggregate(ctx context.Context, v, pipeline interface{}, opts ...*mopt.AggregateOptions) error {
|
||||||
|
cur, err := m.Collection.Aggregate(ctx, pipeline, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer cur.Close(ctx)
|
||||||
|
|
||||||
|
return cur.All(ctx, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteMany deletes documents that match the filter.
|
||||||
|
func (m *Model) DeleteMany(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (int64, error) {
|
||||||
|
res, err := m.Collection.DeleteMany(ctx, filter, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.DeletedCount, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOne deletes the first document that matches the filter.
|
||||||
|
func (m *Model) DeleteOne(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (int64, error) {
|
||||||
|
res, err := m.Collection.DeleteOne(ctx, filter, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.DeletedCount, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find finds documents that match the filter.
|
||||||
|
func (m *Model) Find(ctx context.Context, v, filter interface{}, opts ...*mopt.FindOptions) error {
|
||||||
|
cur, err := m.Collection.Find(ctx, filter, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer cur.Close(ctx)
|
||||||
|
|
||||||
|
return cur.All(ctx, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOne finds the first document that matches the filter.
|
||||||
|
func (m *Model) FindOne(ctx context.Context, v, filter interface{}, opts ...*mopt.FindOneOptions) error {
|
||||||
|
res, err := m.Collection.FindOne(ctx, filter, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOneAndDelete finds a single document and deletes it.
|
||||||
|
func (m *Model) FindOneAndDelete(ctx context.Context, v, filter interface{},
|
||||||
|
opts ...*mopt.FindOneAndDeleteOptions) error {
|
||||||
|
res, err := m.Collection.FindOneAndDelete(ctx, filter, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOneAndReplace finds a single document and replaces it.
|
||||||
|
func (m *Model) FindOneAndReplace(ctx context.Context, v, filter interface{}, replacement interface{},
|
||||||
|
opts ...*mopt.FindOneAndReplaceOptions) error {
|
||||||
|
res, err := m.Collection.FindOneAndReplace(ctx, filter, replacement, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOneAndUpdate finds a single document and updates it.
|
||||||
|
func (m *Model) FindOneAndUpdate(ctx context.Context, v, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.FindOneAndUpdateOptions) error {
|
||||||
|
res, err := m.Collection.FindOneAndUpdate(ctx, filter, update, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AbortTransaction implements the mongo.Session interface.
|
||||||
|
func (w *wrappedSession) AbortTransaction(ctx context.Context) (err error) {
|
||||||
|
ctx, span := startSpan(ctx, abortTransaction)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return w.brk.DoWithAcceptable(func() error {
|
||||||
|
starTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
logDuration(ctx, w.name, abortTransaction, starTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return w.Session.AbortTransaction(ctx)
|
||||||
|
}, acceptable)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CommitTransaction implements the mongo.Session interface.
|
||||||
|
func (w *wrappedSession) CommitTransaction(ctx context.Context) (err error) {
|
||||||
|
ctx, span := startSpan(ctx, commitTransaction)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return w.brk.DoWithAcceptable(func() error {
|
||||||
|
starTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
logDuration(ctx, w.name, commitTransaction, starTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return w.Session.CommitTransaction(ctx)
|
||||||
|
}, acceptable)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTransaction implements the mongo.Session interface.
|
||||||
|
func (w *wrappedSession) WithTransaction(
|
||||||
|
ctx context.Context,
|
||||||
|
fn func(sessCtx mongo.SessionContext) (interface{}, error),
|
||||||
|
opts ...*mopt.TransactionOptions,
|
||||||
|
) (res interface{}, err error) {
|
||||||
|
ctx, span := startSpan(ctx, withTransaction)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = w.brk.DoWithAcceptable(func() error {
|
||||||
|
starTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
logDuration(ctx, w.name, withTransaction, starTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
res, err = w.Session.WithTransaction(ctx, fn, opts...)
|
||||||
|
return err
|
||||||
|
}, acceptable)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndSession implements the mongo.Session interface.
|
||||||
|
func (w *wrappedSession) EndSession(ctx context.Context) {
|
||||||
|
var err error
|
||||||
|
ctx, span := startSpan(ctx, endSession)
|
||||||
|
defer func() {
|
||||||
|
endSpan(span, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = w.brk.DoWithAcceptable(func() error {
|
||||||
|
starTime := timex.Now()
|
||||||
|
defer func() {
|
||||||
|
logDuration(ctx, w.name, endSession, starTime, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
w.Session.EndSession(ctx)
|
||||||
|
return nil
|
||||||
|
}, acceptable)
|
||||||
|
}
|
||||||
243
core/stores/mon/model_test.go
Normal file
243
core/stores/mon/model_test.go
Normal file
@@ -0,0 +1,243 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"go.mongodb.org/mongo-driver/bson"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo/integration/mtest"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestModel_StartSession(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
sess, err := m.StartSession()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer sess.EndSession(context.Background())
|
||||||
|
|
||||||
|
_, err = sess.WithTransaction(context.Background(), func(sessCtx mongo.SessionContext) (interface{}, error) {
|
||||||
|
_ = sessCtx.StartTransaction()
|
||||||
|
sessCtx.Client().Database("1")
|
||||||
|
sessCtx.EndSession(context.Background())
|
||||||
|
return nil, nil
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.NoError(t, sess.CommitTransaction(context.Background()))
|
||||||
|
assert.Error(t, sess.AbortTransaction(context.Background()))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_Aggregate(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
find := mtest.CreateCursorResponse(
|
||||||
|
1,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.FirstBatch,
|
||||||
|
bson.D{
|
||||||
|
{Key: "name", Value: "John"},
|
||||||
|
})
|
||||||
|
getMore := mtest.CreateCursorResponse(
|
||||||
|
1,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.NextBatch,
|
||||||
|
bson.D{
|
||||||
|
{Key: "name", Value: "Mary"},
|
||||||
|
})
|
||||||
|
killCursors := mtest.CreateCursorResponse(
|
||||||
|
0,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.NextBatch)
|
||||||
|
mt.AddMockResponses(find, getMore, killCursors)
|
||||||
|
var result []interface{}
|
||||||
|
err := m.Aggregate(context.Background(), &result, mongo.Pipeline{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 2, len(result))
|
||||||
|
assert.Equal(t, "John", result[0].(bson.D).Map()["name"])
|
||||||
|
assert.Equal(t, "Mary", result[1].(bson.D).Map()["name"])
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
assert.Equal(t, errDummy, m.Aggregate(context.Background(), &result, mongo.Pipeline{}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_DeleteMany(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
|
||||||
|
val, err := m.DeleteMany(context.Background(), bson.D{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, int64(1), val)
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
_, err = m.DeleteMany(context.Background(), bson.D{})
|
||||||
|
assert.Equal(t, errDummy, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_DeleteOne(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
|
||||||
|
val, err := m.DeleteOne(context.Background(), bson.D{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, int64(1), val)
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
_, err = m.DeleteOne(context.Background(), bson.D{})
|
||||||
|
assert.Equal(t, errDummy, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_Find(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
find := mtest.CreateCursorResponse(
|
||||||
|
1,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.FirstBatch,
|
||||||
|
bson.D{
|
||||||
|
{Key: "name", Value: "John"},
|
||||||
|
})
|
||||||
|
getMore := mtest.CreateCursorResponse(
|
||||||
|
1,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.NextBatch,
|
||||||
|
bson.D{
|
||||||
|
{Key: "name", Value: "Mary"},
|
||||||
|
})
|
||||||
|
killCursors := mtest.CreateCursorResponse(
|
||||||
|
0,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.NextBatch)
|
||||||
|
mt.AddMockResponses(find, getMore, killCursors)
|
||||||
|
var result []interface{}
|
||||||
|
err := m.Find(context.Background(), &result, bson.D{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 2, len(result))
|
||||||
|
assert.Equal(t, "John", result[0].(bson.D).Map()["name"])
|
||||||
|
assert.Equal(t, "Mary", result[1].(bson.D).Map()["name"])
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
assert.Equal(t, errDummy, m.Find(context.Background(), &result, bson.D{}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_FindOne(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
find := mtest.CreateCursorResponse(
|
||||||
|
1,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.FirstBatch,
|
||||||
|
bson.D{
|
||||||
|
{Key: "name", Value: "John"},
|
||||||
|
})
|
||||||
|
killCursors := mtest.CreateCursorResponse(
|
||||||
|
0,
|
||||||
|
"DBName.CollectionName",
|
||||||
|
mtest.NextBatch)
|
||||||
|
mt.AddMockResponses(find, killCursors)
|
||||||
|
var result bson.D
|
||||||
|
err := m.FindOne(context.Background(), &result, bson.D{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", result.Map()["name"])
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
assert.Equal(t, errDummy, m.FindOne(context.Background(), &result, bson.D{}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_FindOneAndDelete(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
|
||||||
|
{Key: "value", Value: bson.D{{Key: "name", Value: "John"}}},
|
||||||
|
}...))
|
||||||
|
var result bson.D
|
||||||
|
err := m.FindOneAndDelete(context.Background(), &result, bson.D{})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", result.Map()["name"])
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
assert.Equal(t, errDummy, m.FindOneAndDelete(context.Background(), &result, bson.D{}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_FindOneAndReplace(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
|
||||||
|
{Key: "value", Value: bson.D{{Key: "name", Value: "John"}}},
|
||||||
|
}...))
|
||||||
|
var result bson.D
|
||||||
|
err := m.FindOneAndReplace(context.Background(), &result, bson.D{}, bson.D{
|
||||||
|
{Key: "name", Value: "Mary"},
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", result.Map()["name"])
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
assert.Equal(t, errDummy, m.FindOneAndReplace(context.Background(), &result, bson.D{}, bson.D{
|
||||||
|
{Key: "name", Value: "Mary"},
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_FindOneAndUpdate(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
m := createModel(mt)
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
|
||||||
|
{Key: "value", Value: bson.D{{Key: "name", Value: "John"}}},
|
||||||
|
}...))
|
||||||
|
var result bson.D
|
||||||
|
err := m.FindOneAndUpdate(context.Background(), &result, bson.D{}, bson.D{
|
||||||
|
{Key: "$set", Value: bson.D{{Key: "name", Value: "Mary"}}},
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", result.Map()["name"])
|
||||||
|
|
||||||
|
triggerBreaker(m)
|
||||||
|
assert.Equal(t, errDummy, m.FindOneAndUpdate(context.Background(), &result, bson.D{}, bson.D{
|
||||||
|
{Key: "$set", Value: bson.D{{Key: "name", Value: "Mary"}}},
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func createModel(mt *mtest.T) *Model {
|
||||||
|
Inject(mt.Name(), mt.Client)
|
||||||
|
return MustNewModel(mt.Name(), mt.DB.Name(), mt.Coll.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
func triggerBreaker(m *Model) {
|
||||||
|
m.Collection.(*decoratedCollection).brk = new(dropBreaker)
|
||||||
|
}
|
||||||
29
core/stores/mon/options.go
Normal file
29
core/stores/mon/options.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
|
)
|
||||||
|
|
||||||
|
var slowThreshold = syncx.ForAtomicDuration(defaultSlowThreshold)
|
||||||
|
|
||||||
|
type (
|
||||||
|
options struct {
|
||||||
|
timeout time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// Option defines the method to customize a mongo model.
|
||||||
|
Option func(opts *options)
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetSlowThreshold sets the slow threshold.
|
||||||
|
func SetSlowThreshold(threshold time.Duration) {
|
||||||
|
slowThreshold.Set(threshold)
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultOptions() *options {
|
||||||
|
return &options{
|
||||||
|
timeout: defaultTimeout,
|
||||||
|
}
|
||||||
|
}
|
||||||
18
core/stores/mon/options_test.go
Normal file
18
core/stores/mon/options_test.go
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSetSlowThreshold(t *testing.T) {
|
||||||
|
assert.Equal(t, defaultSlowThreshold, slowThreshold.Load())
|
||||||
|
SetSlowThreshold(time.Second)
|
||||||
|
assert.Equal(t, time.Second, slowThreshold.Load())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDefaultOptions(t *testing.T) {
|
||||||
|
assert.Equal(t, defaultTimeout, defaultOptions().timeout)
|
||||||
|
}
|
||||||
37
core/stores/mon/trace.go
Normal file
37
core/stores/mon/trace.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/trace"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/codes"
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
var mongoCmdAttributeKey = attribute.Key("mongo.cmd")
|
||||||
|
|
||||||
|
func startSpan(ctx context.Context, cmd string) (context.Context, oteltrace.Span) {
|
||||||
|
tracer := otel.GetTracerProvider().Tracer(trace.TraceName)
|
||||||
|
ctx, span := tracer.Start(ctx,
|
||||||
|
spanName,
|
||||||
|
oteltrace.WithSpanKind(oteltrace.SpanKindClient),
|
||||||
|
)
|
||||||
|
span.SetAttributes(mongoCmdAttributeKey.String(cmd))
|
||||||
|
return ctx, span
|
||||||
|
}
|
||||||
|
|
||||||
|
func endSpan(span oteltrace.Span, err error) {
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
if err == nil || err == mongo.ErrNoDocuments ||
|
||||||
|
err == mongo.ErrNilValue || err == mongo.ErrNilDocument {
|
||||||
|
span.SetStatus(codes.Ok, "")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
span.SetStatus(codes.Error, err.Error())
|
||||||
|
span.RecordError(err)
|
||||||
|
}
|
||||||
27
core/stores/mon/util.go
Normal file
27
core/stores/mon/util.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
|
)
|
||||||
|
|
||||||
|
const mongoAddrSep = ","
|
||||||
|
|
||||||
|
// FormatAddr formats mongo hosts to a string.
|
||||||
|
func FormatAddr(hosts []string) string {
|
||||||
|
return strings.Join(hosts, mongoAddrSep)
|
||||||
|
}
|
||||||
|
|
||||||
|
func logDuration(ctx context.Context, name, method string, startTime time.Duration, err error) {
|
||||||
|
duration := timex.Since(startTime)
|
||||||
|
logger := logx.WithContext(ctx).WithDuration(duration)
|
||||||
|
if err != nil {
|
||||||
|
logger.Infof("mongo(%s) - %s - fail(%s)", name, method, err.Error())
|
||||||
|
} else {
|
||||||
|
logger.Infof("mongo(%s) - %s - ok", name, method)
|
||||||
|
}
|
||||||
|
}
|
||||||
63
core/stores/mon/util_test.go
Normal file
63
core/stores/mon/util_test.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package mon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFormatAddrs(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
addrs []string
|
||||||
|
expect string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
addrs: []string{"a", "b"},
|
||||||
|
expect: "a,b",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
addrs: []string{"a", "b", "c"},
|
||||||
|
expect: "a,b,c",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
addrs: []string{},
|
||||||
|
expect: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
addrs: nil,
|
||||||
|
expect: "",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
assert.Equal(t, test.expect, FormatAddr(test.addrs))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_logDuration(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
w := logx.NewWriter(&buf)
|
||||||
|
o := logx.Reset()
|
||||||
|
logx.SetWriter(w)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
logx.Reset()
|
||||||
|
logx.SetWriter(o)
|
||||||
|
}()
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
logDuration(context.Background(), "foo", "bar", time.Millisecond, nil)
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
assert.Contains(t, buf.String(), "bar")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
logDuration(context.Background(), "foo", "bar", time.Millisecond, errors.New("bar"))
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
assert.Contains(t, buf.String(), "bar")
|
||||||
|
assert.Contains(t, buf.String(), "fail")
|
||||||
|
}
|
||||||
281
core/stores/monc/cachedmodel.go
Normal file
281
core/stores/monc/cachedmodel.go
Normal file
@@ -0,0 +1,281 @@
|
|||||||
|
package monc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/stores/cache"
|
||||||
|
"github.com/zeromicro/go-zero/core/stores/mon"
|
||||||
|
"github.com/zeromicro/go-zero/core/stores/redis"
|
||||||
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo"
|
||||||
|
mopt "go.mongodb.org/mongo-driver/mongo/options"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNotFound is an alias of mongo.ErrNoDocuments.
|
||||||
|
ErrNotFound = mongo.ErrNoDocuments
|
||||||
|
|
||||||
|
// can't use one SingleFlight per conn, because multiple conns may share the same cache key.
|
||||||
|
singleFlight = syncx.NewSingleFlight()
|
||||||
|
stats = cache.NewStat("monc")
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Model is a mongo model that built with cache capability.
|
||||||
|
type Model struct {
|
||||||
|
*mon.Model
|
||||||
|
cache cache.Cache
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustNewModel returns a Model with a cache cluster, exits on errors.
func MustNewModel(uri, db, collection string, c cache.CacheConf, opts ...cache.Option) *Model {
	model, err := NewModel(uri, db, collection, c, opts...)
	if err != nil {
		// Must* constructors are for startup code: terminate on failure.
		log.Fatal(err)
	}

	return model
}
|
||||||
|
|
||||||
|
// MustNewNodeModel returns a Model with a cache node, exits on errors.
func MustNewNodeModel(uri, db, collection string, rds *redis.Redis, opts ...cache.Option) *Model {
	model, err := NewNodeModel(uri, db, collection, rds, opts...)
	if err != nil {
		// Must* constructors are for startup code: terminate on failure.
		log.Fatal(err)
	}

	return model
}
|
||||||
|
|
||||||
|
// NewModel returns a Model with a cache cluster.
|
||||||
|
func NewModel(uri, db, collection string, conf cache.CacheConf, opts ...cache.Option) (*Model, error) {
|
||||||
|
c := cache.New(conf, singleFlight, stats, mongo.ErrNoDocuments, opts...)
|
||||||
|
return NewModelWithCache(uri, db, collection, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewModelWithCache returns a Model with a custom cache.
func NewModelWithCache(uri, db, collection string, c cache.Cache) (*Model, error) {
	// delegate to the unexported constructor shared by all New* variants
	return newModel(uri, db, collection, c)
}
|
||||||
|
|
||||||
|
// NewNodeModel returns a Model with a cache node.
|
||||||
|
func NewNodeModel(uri, db, collection string, rds *redis.Redis, opts ...cache.Option) (*Model, error) {
|
||||||
|
c := cache.NewNode(rds, singleFlight, stats, mongo.ErrNoDocuments, opts...)
|
||||||
|
return NewModelWithCache(uri, db, collection, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newModel returns a Model with the given cache.
|
||||||
|
func newModel(uri, db, collection string, c cache.Cache) (*Model, error) {
|
||||||
|
model, err := mon.NewModel(uri, db, collection)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Model{
|
||||||
|
Model: model,
|
||||||
|
cache: c,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelCache deletes the cache with given keys.
func (mm *Model) DelCache(ctx context.Context, keys ...string) error {
	// forward directly to the underlying cache; accepts multiple keys at once
	return mm.cache.DelCtx(ctx, keys...)
}
|
||||||
|
|
||||||
|
// DeleteOne deletes the document with given filter, and remove it from cache.
|
||||||
|
func (mm *Model) DeleteOne(ctx context.Context, key string, filter interface{},
|
||||||
|
opts ...*mopt.DeleteOptions) (int64, error) {
|
||||||
|
val, err := mm.Model.DeleteOne(ctx, filter, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := mm.DelCache(ctx, key); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOneNoCache deletes the document with given filter.
func (mm *Model) DeleteOneNoCache(ctx context.Context, filter interface{},
	opts ...*mopt.DeleteOptions) (int64, error) {
	// bypasses the cache entirely; any cached copy is left untouched
	return mm.Model.DeleteOne(ctx, filter, opts...)
}
|
||||||
|
|
||||||
|
// FindOne unmarshals a record into v with given key and query.
|
||||||
|
func (mm *Model) FindOne(ctx context.Context, key string, v, filter interface{},
|
||||||
|
opts ...*mopt.FindOneOptions) error {
|
||||||
|
return mm.cache.TakeCtx(ctx, v, key, func(v interface{}) error {
|
||||||
|
return mm.Model.FindOne(ctx, v, filter, opts...)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOneNoCache unmarshals a record into v with query, without cache.
func (mm *Model) FindOneNoCache(ctx context.Context, v, filter interface{},
	opts ...*mopt.FindOneOptions) error {
	// bypasses the cache entirely; always queries mongo
	return mm.Model.FindOne(ctx, v, filter, opts...)
}
|
||||||
|
|
||||||
|
// FindOneAndDelete deletes the document with given filter, and unmarshals it into v.
|
||||||
|
func (mm *Model) FindOneAndDelete(ctx context.Context, key string, v, filter interface{},
|
||||||
|
opts ...*mopt.FindOneAndDeleteOptions) error {
|
||||||
|
if err := mm.Model.FindOneAndDelete(ctx, v, filter, opts...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return mm.DelCache(ctx, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOneAndDeleteNoCache deletes the document with given filter, and unmarshals it into v.
func (mm *Model) FindOneAndDeleteNoCache(ctx context.Context, v, filter interface{},
	opts ...*mopt.FindOneAndDeleteOptions) error {
	// bypasses the cache entirely; any cached copy is left untouched
	return mm.Model.FindOneAndDelete(ctx, v, filter, opts...)
}
|
||||||
|
|
||||||
|
// FindOneAndReplace replaces the document with given filter with replacement, and unmarshals it into v.
|
||||||
|
func (mm *Model) FindOneAndReplace(ctx context.Context, key string, v, filter interface{},
|
||||||
|
replacement interface{}, opts ...*mopt.FindOneAndReplaceOptions) error {
|
||||||
|
if err := mm.Model.FindOneAndReplace(ctx, v, filter, replacement, opts...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return mm.DelCache(ctx, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOneAndReplaceNoCache replaces the document with given filter with replacement, and unmarshals it into v.
func (mm *Model) FindOneAndReplaceNoCache(ctx context.Context, v, filter interface{},
	replacement interface{}, opts ...*mopt.FindOneAndReplaceOptions) error {
	// bypasses the cache entirely; any cached copy is left untouched
	return mm.Model.FindOneAndReplace(ctx, v, filter, replacement, opts...)
}
|
||||||
|
|
||||||
|
// FindOneAndUpdate updates the document with given filter with update, and unmarshals it into v.
|
||||||
|
func (mm *Model) FindOneAndUpdate(ctx context.Context, key string, v, filter interface{},
|
||||||
|
update interface{}, opts ...*mopt.FindOneAndUpdateOptions) error {
|
||||||
|
if err := mm.Model.FindOneAndUpdate(ctx, v, filter, update, opts...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return mm.DelCache(ctx, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindOneAndUpdateNoCache updates the document with given filter with update, and unmarshals it into v.
func (mm *Model) FindOneAndUpdateNoCache(ctx context.Context, v, filter interface{},
	update interface{}, opts ...*mopt.FindOneAndUpdateOptions) error {
	// bypasses the cache entirely; any cached copy is left untouched
	return mm.Model.FindOneAndUpdate(ctx, v, filter, update, opts...)
}
|
||||||
|
|
||||||
|
// GetCache unmarshals the cache into v with given key.
// NOTE(review): unlike the mongo operations, this takes no context — it uses
// the cache's default context internally.
func (mm *Model) GetCache(key string, v interface{}) error {
	return mm.cache.Get(key, v)
}
|
||||||
|
|
||||||
|
// InsertOne inserts a single document into the collection, and remove the cache placeholder.
|
||||||
|
func (mm *Model) InsertOne(ctx context.Context, key string, document interface{},
|
||||||
|
opts ...*mopt.InsertOneOptions) (*mongo.InsertOneResult, error) {
|
||||||
|
res, err := mm.Model.InsertOne(ctx, document, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = mm.DelCache(ctx, key); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOneNoCache inserts a single document into the collection.
func (mm *Model) InsertOneNoCache(ctx context.Context, document interface{},
	opts ...*mopt.InsertOneOptions) (*mongo.InsertOneResult, error) {
	// bypasses the cache entirely; placeholders are left untouched
	return mm.Model.InsertOne(ctx, document, opts...)
}
|
||||||
|
|
||||||
|
// ReplaceOne replaces a single document in the collection, and remove the cache.
|
||||||
|
func (mm *Model) ReplaceOne(ctx context.Context, key string, filter interface{}, replacement interface{},
|
||||||
|
opts ...*mopt.ReplaceOptions) (*mongo.UpdateResult, error) {
|
||||||
|
res, err := mm.Model.ReplaceOne(ctx, filter, replacement, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = mm.DelCache(ctx, key); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceOneNoCache replaces a single document in the collection.
func (mm *Model) ReplaceOneNoCache(ctx context.Context, filter interface{}, replacement interface{},
	opts ...*mopt.ReplaceOptions) (*mongo.UpdateResult, error) {
	// bypasses the cache entirely; any cached copy is left untouched
	return mm.Model.ReplaceOne(ctx, filter, replacement, opts...)
}
|
||||||
|
|
||||||
|
// SetCache sets the cache with given key and value.
// NOTE(review): unlike the mongo operations, this takes no context — it uses
// the cache's default context internally.
func (mm *Model) SetCache(key string, v interface{}) error {
	return mm.cache.Set(key, v)
}
|
||||||
|
|
||||||
|
// UpdateByID updates the document with given id with update, and remove the cache.
|
||||||
|
func (mm *Model) UpdateByID(ctx context.Context, key string, id interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) {
|
||||||
|
res, err := mm.Model.UpdateByID(ctx, id, update, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = mm.DelCache(ctx, key); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateByIDNoCache updates the document with given id with update.
func (mm *Model) UpdateByIDNoCache(ctx context.Context, id interface{}, update interface{},
	opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) {
	// bypasses the cache entirely; any cached copy is left untouched
	return mm.Model.UpdateByID(ctx, id, update, opts...)
}
|
||||||
|
|
||||||
|
// UpdateMany updates the documents that match filter with update, and remove the cache.
|
||||||
|
func (mm *Model) UpdateMany(ctx context.Context, keys []string, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) {
|
||||||
|
res, err := mm.Model.UpdateMany(ctx, filter, update, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = mm.DelCache(ctx, keys...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateManyNoCache updates the documents that match filter with update.
func (mm *Model) UpdateManyNoCache(ctx context.Context, filter interface{}, update interface{},
	opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) {
	// bypasses the cache entirely; any cached copies are left untouched
	return mm.Model.UpdateMany(ctx, filter, update, opts...)
}
|
||||||
|
|
||||||
|
// UpdateOne updates the first document that matches filter with update, and remove the cache.
|
||||||
|
func (mm *Model) UpdateOne(ctx context.Context, key string, filter interface{}, update interface{},
|
||||||
|
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) {
|
||||||
|
res, err := mm.Model.UpdateOne(ctx, filter, update, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = mm.DelCache(ctx, key); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOneNoCache updates the first document that matches filter with update.
func (mm *Model) UpdateOneNoCache(ctx context.Context, filter interface{}, update interface{},
	opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) {
	// bypasses the cache entirely; any cached copy is left untouched
	return mm.Model.UpdateOne(ctx, filter, update, opts...)
}
|
||||||
581
core/stores/monc/cachedmodel_test.go
Normal file
581
core/stores/monc/cachedmodel_test.go
Normal file
@@ -0,0 +1,581 @@
|
|||||||
|
package monc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/alicebob/miniredis/v2"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/stores/cache"
|
||||||
|
"github.com/zeromicro/go-zero/core/stores/mon"
|
||||||
|
"github.com/zeromicro/go-zero/core/stores/redis"
|
||||||
|
"go.mongodb.org/mongo-driver/bson"
|
||||||
|
"go.mongodb.org/mongo-driver/mongo/integration/mtest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestNewModel verifies that newModel surfaces connection errors from mon.NewModel.
func TestNewModel(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		// "foo" is not a valid mongo URI, so model construction must fail
		_, err := newModel("foo", mt.DB.Name(), mt.Coll.Name(), nil)
		assert.NotNil(mt, err)
	})
}

// TestModel_DelCache verifies that DelCache removes multiple keys in one call.
func TestModel_DelCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		assert.Nil(t, m.cache.Set("bar", "baz"))
		assert.Nil(t, m.DelCache(context.Background(), "foo", "bar"))
		var v string
		// both keys should now miss
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		assert.True(t, m.cache.IsNotFound(m.cache.Get("bar", &v)))
	})
}
|
||||||
|
|
||||||
|
// TestModel_DeleteOne covers the success path, a mongo error (no queued mock
// response), and a cache-deletion error via mockedCache.
func TestModel_DeleteOne(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		val, err := m.DeleteOne(context.Background(), "foo", bson.D{{Key: "foo", Value: "bar"}})
		assert.Nil(t, err)
		assert.Equal(t, int64(1), val)
		var v string
		// the cache key must be evicted after a successful delete
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		// no mock response queued: the mongo call itself fails
		_, err = m.DeleteOne(context.Background(), "foo", bson.D{{Key: "foo", Value: "bar"}})
		assert.NotNil(t, err)

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
		_, err = m.DeleteOne(context.Background(), "foo", bson.D{{Key: "foo", Value: "bar"}})
		assert.Equal(t, errMocked, err)
	})
}

// TestModel_DeleteOneNoCache verifies the delete happens without evicting the cache.
func TestModel_DeleteOneNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "n", Value: 1}}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		val, err := m.DeleteOneNoCache(context.Background(), bson.D{{Key: "foo", Value: "bar"}})
		assert.Nil(t, err)
		assert.Equal(t, int64(1), val)
		var v string
		// the cached value must survive a NoCache delete
		assert.Nil(t, m.cache.Get("foo", &v))
	})
}
|
||||||
|
|
||||||
|
// TestModel_FindOne verifies a cache-miss lookup populates v from the mocked cursor.
func TestModel_FindOne(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		resp := mtest.CreateCursorResponse(
			1,
			"DBName.CollectionName",
			mtest.FirstBatch,
			bson.D{
				{Key: "foo", Value: "bar"},
			})
		mt.AddMockResponses(resp)
		m := createModel(t, mt)
		var v struct {
			Foo string `bson:"foo"`
		}
		assert.Nil(t, m.FindOne(context.Background(), "foo", &v, bson.D{}))
		assert.Equal(t, "bar", v.Foo)
		// presumably confirms the key is writable after the Take; verify intent against FindOne
		assert.Nil(t, m.cache.Set("foo", "bar"))
	})
}

// TestModel_FindOneNoCache verifies the direct mongo lookup path.
func TestModel_FindOneNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		resp := mtest.CreateCursorResponse(
			1,
			"DBName.CollectionName",
			mtest.FirstBatch,
			bson.D{
				{Key: "foo", Value: "bar"},
			})
		mt.AddMockResponses(resp)
		m := createModel(t, mt)
		v := struct {
			Foo string `bson:"foo"`
		}{}
		assert.Nil(t, m.FindOneNoCache(context.Background(), &v, bson.D{}))
		assert.Equal(t, "bar", v.Foo)
	})
}
|
||||||
|
|
||||||
|
// TestModel_FindOneAndDelete covers the success path, a mongo error (no queued
// mock response), and a cache-deletion error via mockedCache.
func TestModel_FindOneAndDelete(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		v := struct {
			Foo string `bson:"foo"`
		}{}
		assert.Nil(t, m.FindOneAndDelete(context.Background(), "foo", &v, bson.D{}))
		assert.Equal(t, "bar", v.Foo)
		// the cache key must be evicted after a successful delete
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		// no mock response queued: the mongo call itself fails
		assert.NotNil(t, m.FindOneAndDelete(context.Background(), "foo", &v, bson.D{}))

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		assert.Equal(t, errMocked, m.FindOneAndDelete(context.Background(), "foo", &v, bson.D{}))
	})
}

// TestModel_FindOneAndDeleteNoCache verifies the cache-free variant.
func TestModel_FindOneAndDeleteNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		v := struct {
			Foo string `bson:"foo"`
		}{}
		assert.Nil(t, m.FindOneAndDeleteNoCache(context.Background(), &v, bson.D{}))
		assert.Equal(t, "bar", v.Foo)
	})
}
|
||||||
|
|
||||||
|
// TestModel_FindOneAndReplace covers the success path, a mongo error (no
// queued mock response), and a cache-deletion error via mockedCache.
func TestModel_FindOneAndReplace(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		v := struct {
			Foo string `bson:"foo"`
		}{}
		assert.Nil(t, m.FindOneAndReplace(context.Background(), "foo", &v, bson.D{}, bson.D{
			{Key: "name", Value: "Mary"},
		}))
		assert.Equal(t, "bar", v.Foo)
		// the cache key must be evicted after a successful replace
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		// no mock response queued: the mongo call itself fails
		assert.NotNil(t, m.FindOneAndReplace(context.Background(), "foo", &v, bson.D{}, bson.D{
			{Key: "name", Value: "Mary"},
		}))

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		assert.Equal(t, errMocked, m.FindOneAndReplace(context.Background(), "foo", &v, bson.D{}, bson.D{
			{Key: "name", Value: "Mary"},
		}))
	})
}

// TestModel_FindOneAndReplaceNoCache verifies the cache-free variant.
func TestModel_FindOneAndReplaceNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		v := struct {
			Foo string `bson:"foo"`
		}{}
		assert.Nil(t, m.FindOneAndReplaceNoCache(context.Background(), &v, bson.D{}, bson.D{
			{Key: "name", Value: "Mary"},
		}))
		assert.Equal(t, "bar", v.Foo)
	})
}
|
||||||
|
|
||||||
|
// TestModel_FindOneAndUpdate covers the success path, a mongo error (no
// queued mock response), and a cache-deletion error via mockedCache.
func TestModel_FindOneAndUpdate(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		v := struct {
			Foo string `bson:"foo"`
		}{}
		assert.Nil(t, m.FindOneAndUpdate(context.Background(), "foo", &v, bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "name", Value: "Mary"}}},
		}))
		assert.Equal(t, "bar", v.Foo)
		// the cache key must be evicted after a successful update
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		// no mock response queued: the mongo call itself fails
		assert.NotNil(t, m.FindOneAndUpdate(context.Background(), "foo", &v, bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "name", Value: "Mary"}}},
		}))

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		assert.Equal(t, errMocked, m.FindOneAndUpdate(context.Background(), "foo", &v, bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "name", Value: "Mary"}}},
		}))
	})
}

// TestModel_FindOneAndUpdateNoCache verifies the cache-free variant.
func TestModel_FindOneAndUpdateNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		v := struct {
			Foo string `bson:"foo"`
		}{}
		assert.Nil(t, m.FindOneAndUpdateNoCache(context.Background(), &v, bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "name", Value: "Mary"}}},
		}))
		assert.Equal(t, "bar", v.Foo)
	})
}
|
||||||
|
|
||||||
|
// TestModel_GetCache verifies a set value round-trips through the cache.
func TestModel_GetCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		m := createModel(t, mt)
		assert.NotNil(t, m.cache)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		var s string
		assert.Nil(t, m.cache.Get("foo", &s))
		assert.Equal(t, "bar", s)
	})
}

// TestModel_InsertOne covers the success path, a mongo error (no queued mock
// response), and a cache-deletion error via mockedCache.
func TestModel_InsertOne(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		resp, err := m.InsertOne(context.Background(), "foo", bson.D{
			{Key: "name", Value: "Mary"},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
		var v string
		// the placeholder under the key must be evicted after insert
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		// no mock response queued: the mongo call itself fails
		_, err = m.InsertOne(context.Background(), "foo", bson.D{
			{Key: "name", Value: "Mary"},
		})
		assert.NotNil(t, err)

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		_, err = m.InsertOne(context.Background(), "foo", bson.D{
			{Key: "name", Value: "Mary"},
		})
		assert.Equal(t, errMocked, err)
	})
}

// TestModel_InsertOneNoCache verifies the cache-free variant.
func TestModel_InsertOneNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		resp, err := m.InsertOneNoCache(context.Background(), bson.D{
			{Key: "name", Value: "Mary"},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
	})
}
|
||||||
|
|
||||||
|
// TestModel_ReplaceOne covers the success path, a mongo error (no queued mock
// response), and a cache-deletion error via mockedCache.
func TestModel_ReplaceOne(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		resp, err := m.ReplaceOne(context.Background(), "foo", bson.D{}, bson.D{
			{Key: "foo", Value: "baz"},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
		var v string
		// the cache key must be evicted after a successful replace
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		// no mock response queued: the mongo call itself fails
		_, err = m.ReplaceOne(context.Background(), "foo", bson.D{}, bson.D{
			{Key: "foo", Value: "baz"},
		})
		assert.NotNil(t, err)

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		_, err = m.ReplaceOne(context.Background(), "foo", bson.D{}, bson.D{
			{Key: "foo", Value: "baz"},
		})
		assert.Equal(t, errMocked, err)
	})
}

// TestModel_ReplaceOneNoCache verifies the cache-free variant.
func TestModel_ReplaceOneNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		resp, err := m.ReplaceOneNoCache(context.Background(), bson.D{}, bson.D{
			{Key: "foo", Value: "baz"},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
	})
}

// TestModel_SetCache verifies SetCache/GetCache round-trip through the Model helpers.
func TestModel_SetCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		m := createModel(t, mt)
		assert.Nil(t, m.SetCache("foo", "bar"))
		var v string
		assert.Nil(t, m.GetCache("foo", &v))
		assert.Equal(t, "bar", v)
	})
}
|
||||||
|
|
||||||
|
// TestModel_UpdateByID covers the success path, a mongo error (no queued mock
// response), and a cache-deletion error via mockedCache.
func TestModel_UpdateByID(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		resp, err := m.UpdateByID(context.Background(), "foo", bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
		var v string
		// the cache key must be evicted after a successful update
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		// no mock response queued: the mongo call itself fails
		_, err = m.UpdateByID(context.Background(), "foo", bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.NotNil(t, err)

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		_, err = m.UpdateByID(context.Background(), "foo", bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.Equal(t, errMocked, err)
	})
}

// TestModel_UpdateByIDNoCache verifies the cache-free variant.
func TestModel_UpdateByIDNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		resp, err := m.UpdateByIDNoCache(context.Background(), bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
	})
}
|
||||||
|
|
||||||
|
// TestModel_UpdateMany covers the success path with multiple cache keys, a
// mongo error (no queued mock response), and a cache-deletion error via mockedCache.
func TestModel_UpdateMany(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		assert.Nil(t, m.cache.Set("foo", "bar"))
		assert.Nil(t, m.cache.Set("bar", "baz"))
		resp, err := m.UpdateMany(context.Background(), []string{"foo", "bar"}, bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
		var v string
		// every key in the slice must be evicted after a successful update
		assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
		assert.True(t, m.cache.IsNotFound(m.cache.Get("bar", &v)))
		// no mock response queued: the mongo call itself fails
		_, err = m.UpdateMany(context.Background(), []string{"foo", "bar"}, bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.NotNil(t, err)

		// mockedCache forces DelCache to fail even though mongo succeeds
		m.cache = mockedCache{m.cache}
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		_, err = m.UpdateMany(context.Background(), []string{"foo", "bar"}, bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.Equal(t, errMocked, err)
	})
}

// TestModel_UpdateManyNoCache verifies the cache-free variant.
func TestModel_UpdateManyNoCache(t *testing.T) {
	mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
	defer mt.Close()

	mt.Run("test", func(mt *mtest.T) {
		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
			{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
		}...))
		m := createModel(t, mt)
		resp, err := m.UpdateManyNoCache(context.Background(), bson.D{}, bson.D{
			{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
		})
		assert.Nil(t, err)
		assert.NotNil(t, resp)
	})
}
|
||||||
|
|
||||||
|
func TestModel_UpdateOne(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
|
||||||
|
{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
|
||||||
|
}...))
|
||||||
|
m := createModel(t, mt)
|
||||||
|
assert.Nil(t, m.cache.Set("foo", "bar"))
|
||||||
|
resp, err := m.UpdateOne(context.Background(), "foo", bson.D{}, bson.D{
|
||||||
|
{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.NotNil(t, resp)
|
||||||
|
var v string
|
||||||
|
assert.True(t, m.cache.IsNotFound(m.cache.Get("foo", &v)))
|
||||||
|
_, err = m.UpdateOne(context.Background(), "foo", bson.D{}, bson.D{
|
||||||
|
{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
|
||||||
|
})
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
|
||||||
|
{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
|
||||||
|
}...))
|
||||||
|
m.cache = mockedCache{m.cache}
|
||||||
|
_, err = m.UpdateOne(context.Background(), "foo", bson.D{}, bson.D{
|
||||||
|
{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
|
||||||
|
})
|
||||||
|
assert.Equal(t, errMocked, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModel_UpdateOneNoCache(t *testing.T) {
|
||||||
|
mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
|
||||||
|
defer mt.Close()
|
||||||
|
|
||||||
|
mt.Run("test", func(mt *mtest.T) {
|
||||||
|
mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{
|
||||||
|
{Key: "value", Value: bson.D{{Key: "foo", Value: "bar"}}},
|
||||||
|
}...))
|
||||||
|
m := createModel(t, mt)
|
||||||
|
resp, err := m.UpdateOneNoCache(context.Background(), bson.D{}, bson.D{
|
||||||
|
{Key: "$set", Value: bson.D{{Key: "foo", Value: "baz"}}},
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.NotNil(t, resp)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func createModel(t *testing.T, mt *mtest.T) *Model {
|
||||||
|
s, err := miniredis.Run()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
mon.Inject(mt.Name(), mt.Client)
|
||||||
|
if atomic.AddInt32(&index, 1)%2 == 0 {
|
||||||
|
return MustNewNodeModel(mt.Name(), mt.DB.Name(), mt.Coll.Name(), redis.New(s.Addr()))
|
||||||
|
} else {
|
||||||
|
return MustNewModel(mt.Name(), mt.DB.Name(), mt.Coll.Name(), cache.CacheConf{
|
||||||
|
cache.NodeConf{
|
||||||
|
RedisConf: redis.RedisConf{
|
||||||
|
Host: s.Addr(),
|
||||||
|
Type: redis.NodeType,
|
||||||
|
},
|
||||||
|
Weight: 100,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
errMocked = errors.New("mocked error")
|
||||||
|
index int32
|
||||||
|
)
|
||||||
|
|
||||||
|
type mockedCache struct {
|
||||||
|
cache.Cache
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m mockedCache) DelCtx(_ context.Context, _ ...string) error {
|
||||||
|
return errMocked
|
||||||
|
}
|
||||||
@@ -2,7 +2,9 @@ package mongo
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/globalsign/mgo"
|
"github.com/globalsign/mgo"
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
@@ -266,6 +268,46 @@ func TestCollectionUpsert(t *testing.T) {
|
|||||||
assert.Equal(t, errDummy, err)
|
assert.Equal(t, errDummy, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func Test_logDuration(t *testing.T) {
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
defer ctrl.Finish()
|
||||||
|
|
||||||
|
col := internal.NewMockMgoCollection(ctrl)
|
||||||
|
c := decoratedCollection{
|
||||||
|
collection: col,
|
||||||
|
brk: breaker.NewBreaker(),
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf strings.Builder
|
||||||
|
w := logx.NewWriter(&buf)
|
||||||
|
o := logx.Reset()
|
||||||
|
logx.SetWriter(w)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
logx.Reset()
|
||||||
|
logx.SetWriter(o)
|
||||||
|
}()
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration("foo", time.Millisecond, nil, "bar")
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
assert.Contains(t, buf.String(), "bar")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration("foo", time.Millisecond, errors.New("bar"), make(chan int))
|
||||||
|
assert.Contains(t, buf.String(), "bar")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration("foo", slowThreshold.Load()+time.Millisecond, errors.New("bar"))
|
||||||
|
assert.Contains(t, buf.String(), "bar")
|
||||||
|
assert.Contains(t, buf.String(), "slowcall")
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
c.logDuration("foo", slowThreshold.Load()+time.Millisecond, nil)
|
||||||
|
assert.Contains(t, buf.String(), "foo")
|
||||||
|
assert.Contains(t, buf.String(), "slowcall")
|
||||||
|
}
|
||||||
|
|
||||||
type mockPromise struct {
|
type mockPromise struct {
|
||||||
accepted bool
|
accepted bool
|
||||||
reason string
|
reason string
|
||||||
|
|||||||
@@ -12,8 +12,8 @@ var (
|
|||||||
ErrNotFound = mgo.ErrNotFound
|
ErrNotFound = mgo.ErrNotFound
|
||||||
|
|
||||||
// can't use one SingleFlight per conn, because multiple conns may share the same cache key.
|
// can't use one SingleFlight per conn, because multiple conns may share the same cache key.
|
||||||
sharedCalls = syncx.NewSingleFlight()
|
singleFlight = syncx.NewSingleFlight()
|
||||||
stats = cache.NewStat("mongoc")
|
stats = cache.NewStat("mongoc")
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ func TestCollection_Count(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer clean()
|
defer clean()
|
||||||
|
|
||||||
cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
|
cach := cache.NewNode(r, singleFlight, stats, mgo.ErrNotFound)
|
||||||
c := newCollection(dummyConn{}, cach)
|
c := newCollection(dummyConn{}, cach)
|
||||||
val, err := c.Count("any")
|
val, err := c.Count("any")
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -98,7 +98,7 @@ func TestStat(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer clean()
|
defer clean()
|
||||||
|
|
||||||
cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
|
cach := cache.NewNode(r, singleFlight, stats, mgo.ErrNotFound)
|
||||||
c := newCollection(dummyConn{}, cach).(*cachedCollection)
|
c := newCollection(dummyConn{}, cach).(*cachedCollection)
|
||||||
|
|
||||||
for i := 0; i < 10; i++ {
|
for i := 0; i < 10; i++ {
|
||||||
@@ -121,7 +121,7 @@ func TestStatCacheFails(t *testing.T) {
|
|||||||
defer log.SetOutput(os.Stdout)
|
defer log.SetOutput(os.Stdout)
|
||||||
|
|
||||||
r := redis.New("localhost:59999")
|
r := redis.New("localhost:59999")
|
||||||
cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
|
cach := cache.NewNode(r, singleFlight, stats, mgo.ErrNotFound)
|
||||||
c := newCollection(dummyConn{}, cach)
|
c := newCollection(dummyConn{}, cach)
|
||||||
|
|
||||||
for i := 0; i < 20; i++ {
|
for i := 0; i < 20; i++ {
|
||||||
@@ -142,7 +142,7 @@ func TestStatDbFails(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer clean()
|
defer clean()
|
||||||
|
|
||||||
cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
|
cach := cache.NewNode(r, singleFlight, stats, mgo.ErrNotFound)
|
||||||
c := newCollection(dummyConn{}, cach).(*cachedCollection)
|
c := newCollection(dummyConn{}, cach).(*cachedCollection)
|
||||||
|
|
||||||
for i := 0; i < 20; i++ {
|
for i := 0; i < 20; i++ {
|
||||||
@@ -164,7 +164,7 @@ func TestStatFromMemory(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer clean()
|
defer clean()
|
||||||
|
|
||||||
cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
|
cach := cache.NewNode(r, singleFlight, stats, mgo.ErrNotFound)
|
||||||
c := newCollection(dummyConn{}, cach).(*cachedCollection)
|
c := newCollection(dummyConn{}, cach).(*cachedCollection)
|
||||||
|
|
||||||
var all sync.WaitGroup
|
var all sync.WaitGroup
|
||||||
|
|||||||
@@ -38,7 +38,7 @@ func MustNewModel(url, collection string, c cache.CacheConf, opts ...cache.Optio
|
|||||||
|
|
||||||
// NewModel returns a Model with a cache cluster.
|
// NewModel returns a Model with a cache cluster.
|
||||||
func NewModel(url, collection string, conf cache.CacheConf, opts ...cache.Option) (*Model, error) {
|
func NewModel(url, collection string, conf cache.CacheConf, opts ...cache.Option) (*Model, error) {
|
||||||
c := cache.New(conf, sharedCalls, stats, mgo.ErrNotFound, opts...)
|
c := cache.New(conf, singleFlight, stats, mgo.ErrNotFound, opts...)
|
||||||
return NewModelWithCache(url, collection, c)
|
return NewModelWithCache(url, collection, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -51,7 +51,7 @@ func NewModelWithCache(url, collection string, c cache.Cache) (*Model, error) {
|
|||||||
|
|
||||||
// NewNodeModel returns a Model with a cache node.
|
// NewNodeModel returns a Model with a cache node.
|
||||||
func NewNodeModel(url, collection string, rds *redis.Redis, opts ...cache.Option) (*Model, error) {
|
func NewNodeModel(url, collection string, rds *redis.Redis, opts ...cache.Option) (*Model, error) {
|
||||||
c := cache.NewNode(rds, sharedCalls, stats, mgo.ErrNotFound, opts...)
|
c := cache.NewNode(rds, singleFlight, stats, mgo.ErrNotFound, opts...)
|
||||||
return NewModelWithCache(url, collection, c)
|
return NewModelWithCache(url, collection, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
141
core/stores/redis/hook.go
Normal file
141
core/stores/redis/hook.go
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
package redis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
red "github.com/go-redis/redis/v8"
|
||||||
|
"github.com/zeromicro/go-zero/core/errorx"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
"github.com/zeromicro/go-zero/core/mapping"
|
||||||
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
|
"github.com/zeromicro/go-zero/core/trace"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/codes"
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// spanName is the span name of the redis calls.
|
||||||
|
const spanName = "redis"
|
||||||
|
|
||||||
|
var (
|
||||||
|
startTimeKey = contextKey("startTime")
|
||||||
|
durationHook = hook{tracer: otel.GetTracerProvider().Tracer(trace.TraceName)}
|
||||||
|
redisCmdsAttributeKey = attribute.Key("redis.cmds")
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
contextKey string
|
||||||
|
hook struct {
|
||||||
|
tracer oteltrace.Tracer
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (h hook) BeforeProcess(ctx context.Context, cmd red.Cmder) (context.Context, error) {
|
||||||
|
return h.startSpan(context.WithValue(ctx, startTimeKey, timex.Now()), cmd), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h hook) AfterProcess(ctx context.Context, cmd red.Cmder) error {
|
||||||
|
err := cmd.Err()
|
||||||
|
h.endSpan(ctx, err)
|
||||||
|
|
||||||
|
val := ctx.Value(startTimeKey)
|
||||||
|
if val == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
start, ok := val.(time.Duration)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := timex.Since(start)
|
||||||
|
if duration > slowThreshold.Load() {
|
||||||
|
logDuration(ctx, cmd, duration)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h hook) BeforeProcessPipeline(ctx context.Context, cmds []red.Cmder) (context.Context, error) {
|
||||||
|
if len(cmds) == 0 {
|
||||||
|
return ctx, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return h.startSpan(context.WithValue(ctx, startTimeKey, timex.Now()), cmds...), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h hook) AfterProcessPipeline(ctx context.Context, cmds []red.Cmder) error {
|
||||||
|
if len(cmds) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
batchError := errorx.BatchError{}
|
||||||
|
for _, cmd := range cmds {
|
||||||
|
err := cmd.Err()
|
||||||
|
if err == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
batchError.Add(err)
|
||||||
|
}
|
||||||
|
h.endSpan(ctx, batchError.Err())
|
||||||
|
|
||||||
|
val := ctx.Value(startTimeKey)
|
||||||
|
if val == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
start, ok := val.(time.Duration)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := timex.Since(start)
|
||||||
|
if duration > slowThreshold.Load()*time.Duration(len(cmds)) {
|
||||||
|
logDuration(ctx, cmds[0], duration)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func logDuration(ctx context.Context, cmd red.Cmder, duration time.Duration) {
|
||||||
|
var buf strings.Builder
|
||||||
|
for i, arg := range cmd.Args() {
|
||||||
|
if i > 0 {
|
||||||
|
buf.WriteByte(' ')
|
||||||
|
}
|
||||||
|
buf.WriteString(mapping.Repr(arg))
|
||||||
|
}
|
||||||
|
logx.WithContext(ctx).WithDuration(duration).Slowf("[REDIS] slowcall on executing: %s", buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h hook) startSpan(ctx context.Context, cmds ...red.Cmder) context.Context {
|
||||||
|
ctx, span := h.tracer.Start(ctx,
|
||||||
|
spanName,
|
||||||
|
oteltrace.WithSpanKind(oteltrace.SpanKindClient),
|
||||||
|
)
|
||||||
|
|
||||||
|
cmdStrs := make([]string, 0, len(cmds))
|
||||||
|
for _, cmd := range cmds {
|
||||||
|
cmdStrs = append(cmdStrs, cmd.Name())
|
||||||
|
}
|
||||||
|
span.SetAttributes(redisCmdsAttributeKey.StringSlice(cmdStrs))
|
||||||
|
|
||||||
|
return ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h hook) endSpan(ctx context.Context, err error) {
|
||||||
|
span := oteltrace.SpanFromContext(ctx)
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
if err == nil || err == red.Nil {
|
||||||
|
span.SetStatus(codes.Ok, "")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
span.SetStatus(codes.Error, err.Error())
|
||||||
|
span.RecordError(err)
|
||||||
|
}
|
||||||
173
core/stores/redis/hook_test.go
Normal file
173
core/stores/redis/hook_test.go
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
package redis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
red "github.com/go-redis/redis/v8"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
ztrace "github.com/zeromicro/go-zero/core/trace"
|
||||||
|
tracesdk "go.opentelemetry.io/otel/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHookProcessCase1(t *testing.T) {
|
||||||
|
ztrace.StartAgent(ztrace.Config{
|
||||||
|
Name: "go-zero-test",
|
||||||
|
Endpoint: "http://localhost:14268/api/traces",
|
||||||
|
Batcher: "jaeger",
|
||||||
|
Sampler: 1.0,
|
||||||
|
})
|
||||||
|
|
||||||
|
writer := log.Writer()
|
||||||
|
var buf strings.Builder
|
||||||
|
log.SetOutput(&buf)
|
||||||
|
defer log.SetOutput(writer)
|
||||||
|
|
||||||
|
ctx, err := durationHook.BeforeProcess(context.Background(), red.NewCmd(context.Background()))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Nil(t, durationHook.AfterProcess(ctx, red.NewCmd(context.Background())))
|
||||||
|
assert.False(t, strings.Contains(buf.String(), "slow"))
|
||||||
|
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessCase2(t *testing.T) {
|
||||||
|
ztrace.StartAgent(ztrace.Config{
|
||||||
|
Name: "go-zero-test",
|
||||||
|
Endpoint: "http://localhost:14268/api/traces",
|
||||||
|
Batcher: "jaeger",
|
||||||
|
Sampler: 1.0,
|
||||||
|
})
|
||||||
|
|
||||||
|
w, restore := injectLog()
|
||||||
|
defer restore()
|
||||||
|
|
||||||
|
ctx, err := durationHook.BeforeProcess(context.Background(), red.NewCmd(context.Background()))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
|
||||||
|
|
||||||
|
time.Sleep(slowThreshold.Load() + time.Millisecond)
|
||||||
|
|
||||||
|
assert.Nil(t, durationHook.AfterProcess(ctx, red.NewCmd(context.Background(), "foo", "bar")))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "slow"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "trace"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "span"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessCase3(t *testing.T) {
|
||||||
|
writer := log.Writer()
|
||||||
|
var buf strings.Builder
|
||||||
|
log.SetOutput(&buf)
|
||||||
|
defer log.SetOutput(writer)
|
||||||
|
|
||||||
|
assert.Nil(t, durationHook.AfterProcess(context.Background(), red.NewCmd(context.Background())))
|
||||||
|
assert.True(t, buf.Len() == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessCase4(t *testing.T) {
|
||||||
|
writer := log.Writer()
|
||||||
|
var buf strings.Builder
|
||||||
|
log.SetOutput(&buf)
|
||||||
|
defer log.SetOutput(writer)
|
||||||
|
|
||||||
|
ctx := context.WithValue(context.Background(), startTimeKey, "foo")
|
||||||
|
assert.Nil(t, durationHook.AfterProcess(ctx, red.NewCmd(context.Background())))
|
||||||
|
assert.True(t, buf.Len() == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessPipelineCase1(t *testing.T) {
|
||||||
|
writer := log.Writer()
|
||||||
|
var buf strings.Builder
|
||||||
|
log.SetOutput(&buf)
|
||||||
|
defer log.SetOutput(writer)
|
||||||
|
|
||||||
|
ctx, err := durationHook.BeforeProcessPipeline(context.Background(), []red.Cmder{red.NewCmd(context.Background())})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
|
||||||
|
|
||||||
|
assert.Nil(t, durationHook.AfterProcessPipeline(ctx, []red.Cmder{
|
||||||
|
red.NewCmd(context.Background()),
|
||||||
|
}))
|
||||||
|
assert.False(t, strings.Contains(buf.String(), "slow"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessPipelineCase2(t *testing.T) {
|
||||||
|
ztrace.StartAgent(ztrace.Config{
|
||||||
|
Name: "go-zero-test",
|
||||||
|
Endpoint: "http://localhost:14268/api/traces",
|
||||||
|
Batcher: "jaeger",
|
||||||
|
Sampler: 1.0,
|
||||||
|
})
|
||||||
|
|
||||||
|
w, restore := injectLog()
|
||||||
|
defer restore()
|
||||||
|
|
||||||
|
ctx, err := durationHook.BeforeProcessPipeline(context.Background(), []red.Cmder{red.NewCmd(context.Background())})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, "redis", tracesdk.SpanFromContext(ctx).(interface{ Name() string }).Name())
|
||||||
|
|
||||||
|
time.Sleep(slowThreshold.Load() + time.Millisecond)
|
||||||
|
|
||||||
|
assert.Nil(t, durationHook.AfterProcessPipeline(ctx, []red.Cmder{
|
||||||
|
red.NewCmd(context.Background(), "foo", "bar"),
|
||||||
|
}))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "slow"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "trace"))
|
||||||
|
assert.True(t, strings.Contains(w.String(), "span"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessPipelineCase3(t *testing.T) {
|
||||||
|
w, restore := injectLog()
|
||||||
|
defer restore()
|
||||||
|
|
||||||
|
assert.Nil(t, durationHook.AfterProcessPipeline(context.Background(), []red.Cmder{
|
||||||
|
red.NewCmd(context.Background()),
|
||||||
|
}))
|
||||||
|
assert.True(t, len(w.String()) == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessPipelineCase4(t *testing.T) {
|
||||||
|
w, restore := injectLog()
|
||||||
|
defer restore()
|
||||||
|
|
||||||
|
ctx := context.WithValue(context.Background(), startTimeKey, "foo")
|
||||||
|
assert.Nil(t, durationHook.AfterProcessPipeline(ctx, []red.Cmder{
|
||||||
|
red.NewCmd(context.Background()),
|
||||||
|
}))
|
||||||
|
assert.True(t, len(w.String()) == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHookProcessPipelineCase5(t *testing.T) {
|
||||||
|
writer := log.Writer()
|
||||||
|
var buf strings.Builder
|
||||||
|
log.SetOutput(&buf)
|
||||||
|
defer log.SetOutput(writer)
|
||||||
|
|
||||||
|
ctx := context.WithValue(context.Background(), startTimeKey, "foo")
|
||||||
|
assert.Nil(t, durationHook.AfterProcessPipeline(ctx, []red.Cmder{red.NewCmd(context.Background())}))
|
||||||
|
assert.True(t, buf.Len() == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func injectLog() (r *strings.Builder, restore func()) {
|
||||||
|
var buf strings.Builder
|
||||||
|
w := logx.NewWriter(&buf)
|
||||||
|
o := logx.Reset()
|
||||||
|
logx.SetWriter(w)
|
||||||
|
|
||||||
|
return &buf, func() {
|
||||||
|
logx.Reset()
|
||||||
|
logx.SetWriter(o)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
package redis
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
red "github.com/go-redis/redis"
|
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
|
||||||
"github.com/zeromicro/go-zero/core/mapping"
|
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
|
||||||
)
|
|
||||||
|
|
||||||
func checkDuration(proc func(red.Cmder) error) func(red.Cmder) error {
|
|
||||||
return func(cmd red.Cmder) error {
|
|
||||||
start := timex.Now()
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
duration := timex.Since(start)
|
|
||||||
if duration > slowThreshold.Load() {
|
|
||||||
var buf strings.Builder
|
|
||||||
for i, arg := range cmd.Args() {
|
|
||||||
if i > 0 {
|
|
||||||
buf.WriteByte(' ')
|
|
||||||
}
|
|
||||||
buf.WriteString(mapping.Repr(arg))
|
|
||||||
}
|
|
||||||
logx.WithDuration(duration).Slowf("[REDIS] slowcall on executing: %s", buf.String())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return proc(cmd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,7 @@
|
|||||||
package redis
|
package redis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
@@ -9,8 +10,9 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alicebob/miniredis/v2"
|
"github.com/alicebob/miniredis/v2"
|
||||||
red "github.com/go-redis/redis"
|
red "github.com/go-redis/redis/v8"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -385,30 +387,33 @@ func TestRedis_Mget(t *testing.T) {
|
|||||||
|
|
||||||
func TestRedis_SetBit(t *testing.T) {
|
func TestRedis_SetBit(t *testing.T) {
|
||||||
runOnRedis(t, func(client *Redis) {
|
runOnRedis(t, func(client *Redis) {
|
||||||
err := New(client.Addr, badType()).SetBit("key", 1, 1)
|
_, err := New(client.Addr, badType()).SetBit("key", 1, 1)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
err = client.SetBit("key", 1, 1)
|
val, err := client.SetBit("key", 1, 1)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 0, val)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRedis_GetBit(t *testing.T) {
|
func TestRedis_GetBit(t *testing.T) {
|
||||||
runOnRedis(t, func(client *Redis) {
|
runOnRedis(t, func(client *Redis) {
|
||||||
err := client.SetBit("key", 2, 1)
|
val, err := client.SetBit("key", 2, 1)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 0, val)
|
||||||
_, err = New(client.Addr, badType()).GetBit("key", 2)
|
_, err = New(client.Addr, badType()).GetBit("key", 2)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
val, err := client.GetBit("key", 2)
|
v, err := client.GetBit("key", 2)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, 1, val)
|
assert.Equal(t, 1, v)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRedis_BitCount(t *testing.T) {
|
func TestRedis_BitCount(t *testing.T) {
|
||||||
runOnRedis(t, func(client *Redis) {
|
runOnRedis(t, func(client *Redis) {
|
||||||
for i := 0; i < 11; i++ {
|
for i := 0; i < 11; i++ {
|
||||||
err := client.SetBit("key", int64(i), 1)
|
val, err := client.SetBit("key", int64(i), 1)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 0, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := New(client.Addr, badType()).BitCount("key", 0, -1)
|
_, err := New(client.Addr, badType()).BitCount("key", 0, -1)
|
||||||
@@ -699,6 +704,28 @@ func TestRedis_Set(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRedis_GetSet(t *testing.T) {
|
||||||
|
runOnRedis(t, func(client *Redis) {
|
||||||
|
_, err := New(client.Addr, badType()).GetSet("hello", "world")
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
val, err := client.GetSet("hello", "world")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "", val)
|
||||||
|
val, err = client.Get("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "world", val)
|
||||||
|
val, err = client.GetSet("hello", "newworld")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "world", val)
|
||||||
|
val, err = client.Get("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "newworld", val)
|
||||||
|
ret, err := client.Del("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 1, ret)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestRedis_SetGetDel(t *testing.T) {
|
func TestRedis_SetGetDel(t *testing.T) {
|
||||||
runOnRedis(t, func(client *Redis) {
|
runOnRedis(t, func(client *Redis) {
|
||||||
err := New(client.Addr, badType()).Set("hello", "world")
|
err := New(client.Addr, badType()).Set("hello", "world")
|
||||||
@@ -963,13 +990,14 @@ func TestRedis_SortedSet(t *testing.T) {
|
|||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
client.Zadd("second", 2, "aa")
|
client.Zadd("second", 2, "aa")
|
||||||
client.Zadd("third", 3, "bbb")
|
client.Zadd("third", 3, "bbb")
|
||||||
val, err = client.Zunionstore("union", ZStore{
|
val, err = client.Zunionstore("union", &ZStore{
|
||||||
|
Keys: []string{"second", "third"},
|
||||||
Weights: []float64{1, 2},
|
Weights: []float64{1, 2},
|
||||||
Aggregate: "SUM",
|
Aggregate: "SUM",
|
||||||
}, "second", "third")
|
})
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, int64(2), val)
|
assert.Equal(t, int64(2), val)
|
||||||
_, err = New(client.Addr, badType()).Zunionstore("union", ZStore{})
|
_, err = New(client.Addr, badType()).Zunionstore("union", &ZStore{})
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
vals, err = client.Zrange("union", 0, 10000)
|
vals, err = client.Zrange("union", 0, 10000)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -987,9 +1015,9 @@ func TestRedis_Pipelined(t *testing.T) {
|
|||||||
}))
|
}))
|
||||||
err := client.Pipelined(
|
err := client.Pipelined(
|
||||||
func(pipe Pipeliner) error {
|
func(pipe Pipeliner) error {
|
||||||
pipe.Incr("pipelined_counter")
|
pipe.Incr(context.Background(), "pipelined_counter")
|
||||||
pipe.Expire("pipelined_counter", time.Hour)
|
pipe.Expire(context.Background(), "pipelined_counter", time.Hour)
|
||||||
pipe.ZAdd("zadd", Z{Score: 12, Member: "zadd"})
|
pipe.ZAdd(context.Background(), "zadd", &Z{Score: 12, Member: "zadd"})
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
@@ -1135,6 +1163,8 @@ func TestRedis_WithPass(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func runOnRedis(t *testing.T, fn func(client *Redis)) {
|
func runOnRedis(t *testing.T, fn func(client *Redis)) {
|
||||||
|
logx.Disable()
|
||||||
|
|
||||||
s, err := miniredis.Run()
|
s, err := miniredis.Run()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer func() {
|
defer func() {
|
||||||
@@ -1153,6 +1183,8 @@ func runOnRedis(t *testing.T, fn func(client *Redis)) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func runOnRedisTLS(t *testing.T, fn func(client *Redis)) {
|
func runOnRedisTLS(t *testing.T, fn func(client *Redis)) {
|
||||||
|
logx.Disable()
|
||||||
|
|
||||||
s, err := miniredis.RunTLS(&tls.Config{
|
s, err := miniredis.RunTLS(&tls.Config{
|
||||||
Certificates: make([]tls.Certificate, 1),
|
Certificates: make([]tls.Certificate, 1),
|
||||||
InsecureSkipVerify: true,
|
InsecureSkipVerify: true,
|
||||||
@@ -1182,6 +1214,6 @@ type mockedNode struct {
|
|||||||
RedisNode
|
RedisNode
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n mockedNode) BLPop(timeout time.Duration, keys ...string) *red.StringSliceCmd {
|
func (n mockedNode) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *red.StringSliceCmd {
|
||||||
return red.NewStringSliceCmd("foo", "bar")
|
return red.NewStringSliceCmd(context.Background(), "foo", "bar")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package redis
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
red "github.com/go-redis/redis"
|
red "github.com/go-redis/redis/v8"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user