mirror of
https://github.com/zeromicro/go-zero.git
synced 2026-05-12 01:10:00 +08:00
Compare commits
682 Commits
v1.6.0
...
copilot/fi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
02191e0d99 | ||
|
|
28b12ad9cc | ||
|
|
87dd9671be | ||
|
|
4e52d77ad8 | ||
|
|
1fc2cfb859 | ||
|
|
942cdae41d | ||
|
|
e9c3607bc6 | ||
|
|
d1603e9166 | ||
|
|
e30317e9c4 | ||
|
|
568f9ce007 | ||
|
|
dcb309065a | ||
|
|
bf8e17a686 | ||
|
|
b2ebbfce62 | ||
|
|
2b10a6a223 | ||
|
|
80c320b46e | ||
|
|
bea9d150a1 | ||
|
|
3f756a2cbf | ||
|
|
bbe5bbb0c0 | ||
|
|
5ad2278a69 | ||
|
|
77763fe748 | ||
|
|
538c4fb5c7 | ||
|
|
315fb2fe0a | ||
|
|
e382887eb8 | ||
|
|
cf21cb2b0b | ||
|
|
61e8894c31 | ||
|
|
7a6c3c8129 | ||
|
|
875fec3e1a | ||
|
|
60128c2100 | ||
|
|
ce6d0e3ea7 | ||
|
|
fa85c84af3 | ||
|
|
440884105e | ||
|
|
271f10598f | ||
|
|
cf55a88ce3 | ||
|
|
c1c786b14a | ||
|
|
988fb9d9bf | ||
|
|
d212c81bca | ||
|
|
bc43df2641 | ||
|
|
351b8cb37b | ||
|
|
0d681a2e29 | ||
|
|
5ea027c5de | ||
|
|
5de6112dcd | ||
|
|
4fb51723b7 | ||
|
|
06502d1115 | ||
|
|
3854d6dd00 | ||
|
|
895854913a | ||
|
|
ef753b8857 | ||
|
|
9c16fede73 | ||
|
|
ce11adb5e4 | ||
|
|
894e8b1218 | ||
|
|
2ec7e432dd | ||
|
|
870e8352c1 | ||
|
|
de42f27e03 | ||
|
|
955b8016aa | ||
|
|
d728a3b2d9 | ||
|
|
0c205a71fc | ||
|
|
a8c0199d96 | ||
|
|
032a266ec4 | ||
|
|
40b75fbb9b | ||
|
|
afad55045b | ||
|
|
5f54f06ee5 | ||
|
|
20f56ae1d0 | ||
|
|
73d6fcfccd | ||
|
|
20d20ef861 | ||
|
|
a37422b504 | ||
|
|
a81d898408 | ||
|
|
a5d42e20d5 | ||
|
|
4bdb07f225 | ||
|
|
3e6ec9b83d | ||
|
|
f0a3d213dc | ||
|
|
94562ded74 | ||
|
|
d68cf4920c | ||
|
|
31b749ab67 | ||
|
|
3834319278 | ||
|
|
1c9d339361 | ||
|
|
b7f601c912 | ||
|
|
1ebbc6f0c7 | ||
|
|
b41b1b00df | ||
|
|
f36e5fed35 | ||
|
|
2583673c8b | ||
|
|
00e67b9d20 | ||
|
|
9fd1f29845 | ||
|
|
130e1ba963 | ||
|
|
a2b98dbcf7 | ||
|
|
b46d507a1d | ||
|
|
3152581d0d | ||
|
|
46e466f037 | ||
|
|
151b3d1085 | ||
|
|
ea53fe41de | ||
|
|
d9df08b079 | ||
|
|
569c00ad09 | ||
|
|
9da76fbf04 | ||
|
|
b69db5e09d | ||
|
|
ee6b7cee79 | ||
|
|
d150248c52 | ||
|
|
610a7345dc | ||
|
|
b0b31f3993 | ||
|
|
82a937d517 | ||
|
|
93c11a7eb7 | ||
|
|
63ec989376 | ||
|
|
bf75027889 | ||
|
|
d505fae979 | ||
|
|
25f37ca750 | ||
|
|
0be63c3625 | ||
|
|
b011a072c7 | ||
|
|
3c9b6335fb | ||
|
|
bf6ef5f033 | ||
|
|
ff890628b0 | ||
|
|
cc79e3d842 | ||
|
|
f11b78ced9 | ||
|
|
1d2b0d7ab8 | ||
|
|
da987e1270 | ||
|
|
12e03c8843 | ||
|
|
8cf4f95bd7 | ||
|
|
ba0febf308 | ||
|
|
c9ff6a10d3 | ||
|
|
a71e56de52 | ||
|
|
bae8d4f4c8 | ||
|
|
8c6266f338 | ||
|
|
95d5b81f44 | ||
|
|
bca7bbc142 | ||
|
|
df9a52664b | ||
|
|
937cf0db96 | ||
|
|
75cebb65f8 | ||
|
|
410f56e73a | ||
|
|
017909a3ab | ||
|
|
0d31e6c375 | ||
|
|
0ba86b1849 | ||
|
|
4cacc4d9d3 | ||
|
|
a99c14da4a | ||
|
|
985582264a | ||
|
|
8364e341e1 | ||
|
|
0f2b589d4d | ||
|
|
19fec36d24 | ||
|
|
f037bf344d | ||
|
|
d99cf35b07 | ||
|
|
f459f1b5ff | ||
|
|
0140fd417b | ||
|
|
7969e0ca38 | ||
|
|
91c885b5b0 | ||
|
|
d4cccca387 | ||
|
|
4b2095ed03 | ||
|
|
1229eeb2d2 | ||
|
|
9142b146c5 | ||
|
|
8a1b2d5aed | ||
|
|
da5d39e6ca | ||
|
|
68c5a17c67 | ||
|
|
b53f9f5f2d | ||
|
|
36d57626b6 | ||
|
|
4e36ba832f | ||
|
|
a44954a771 | ||
|
|
f3edd4b880 | ||
|
|
2de3e397ff | ||
|
|
a435eb56f2 | ||
|
|
d80761c147 | ||
|
|
e7bd0d8b60 | ||
|
|
b109b3ef4c | ||
|
|
e3c371ac89 | ||
|
|
15eb6f4f6d | ||
|
|
4d3681b71c | ||
|
|
a682bda0bb | ||
|
|
45b27ad93a | ||
|
|
292a8302a1 | ||
|
|
91ab1f6d2b | ||
|
|
5048c350ae | ||
|
|
94edc32f3e | ||
|
|
ec989b2e2a | ||
|
|
82fe802e81 | ||
|
|
072d68f897 | ||
|
|
2e91ba5811 | ||
|
|
5564c43197 | ||
|
|
e55158b0f7 | ||
|
|
69aa7fe346 | ||
|
|
c3820a95c1 | ||
|
|
493f3bad0f | ||
|
|
eb0d5ad3a4 | ||
|
|
14192050ae | ||
|
|
9193e771e3 | ||
|
|
808b4e496a | ||
|
|
e416d01f8d | ||
|
|
789c5de873 | ||
|
|
52078a0c14 | ||
|
|
7ef13116a0 | ||
|
|
6b8053410a | ||
|
|
81c6928445 | ||
|
|
761c2dd716 | ||
|
|
aeceb3cfbe | ||
|
|
15ea07aad1 | ||
|
|
98bebbc74f | ||
|
|
eafd11d949 | ||
|
|
b251ce346e | ||
|
|
812140ba36 | ||
|
|
44735e949c | ||
|
|
bf313c3c56 | ||
|
|
94e7753262 | ||
|
|
9c478626d2 | ||
|
|
801c283478 | ||
|
|
2a54faf997 | ||
|
|
ecd98f3653 | ||
|
|
61641581eb | ||
|
|
6f2730d5ae | ||
|
|
0eff777b62 | ||
|
|
cafbf535f7 | ||
|
|
6edfce63e3 | ||
|
|
cdb0098b18 | ||
|
|
620c7f9693 | ||
|
|
dba444a382 | ||
|
|
b24fb3ebf7 | ||
|
|
967f0926eb | ||
|
|
e68c683df9 | ||
|
|
247985a065 | ||
|
|
80573af0d8 | ||
|
|
c0394b631a | ||
|
|
68d1aba377 | ||
|
|
3315e60272 | ||
|
|
327ef73700 | ||
|
|
eb11521655 | ||
|
|
4c37545e55 | ||
|
|
2f47c1fba4 | ||
|
|
16d54d0ace | ||
|
|
9925bcbf99 | ||
|
|
38a5ecb796 | ||
|
|
af78fc7c5f | ||
|
|
790302b486 | ||
|
|
6a0672b801 | ||
|
|
560c61612c | ||
|
|
6a988dc4a9 | ||
|
|
15842c3c7a | ||
|
|
f2914a74df | ||
|
|
f113d512e8 | ||
|
|
7a4818da59 | ||
|
|
48d0709ca6 | ||
|
|
f747585518 | ||
|
|
507ff96546 | ||
|
|
651eabb4c6 | ||
|
|
e6b4372056 | ||
|
|
24073969a1 | ||
|
|
ca797ed22c | ||
|
|
e347d3f8f8 | ||
|
|
396393b336 | ||
|
|
1f0531b254 | ||
|
|
77fb271a06 | ||
|
|
af7cf79963 | ||
|
|
7926d396d7 | ||
|
|
080cd3df84 | ||
|
|
c4e1a6a2d8 | ||
|
|
4e71e95e44 | ||
|
|
84db9bcd15 | ||
|
|
b28f79ac11 | ||
|
|
e134e77b2b | ||
|
|
f669d84ce8 | ||
|
|
9213b8ac27 | ||
|
|
ae09d0e56d | ||
|
|
0bc4206d08 | ||
|
|
39ce17bfd2 | ||
|
|
d415ba39e2 | ||
|
|
c71829c8de | ||
|
|
a32f6d7642 | ||
|
|
64e8c94198 | ||
|
|
7d05a4bc93 | ||
|
|
44504e8df7 | ||
|
|
114311e51b | ||
|
|
4307ce45fc | ||
|
|
37b54d1fc7 | ||
|
|
00e0db5def | ||
|
|
cbcacf31c1 | ||
|
|
238c92aaa9 | ||
|
|
520d2a2075 | ||
|
|
1023800b02 | ||
|
|
030c859171 | ||
|
|
e6d1b47a43 | ||
|
|
6138f85470 | ||
|
|
bf883101d7 | ||
|
|
33011c7ed1 | ||
|
|
17d98f69e0 | ||
|
|
b650c8c425 | ||
|
|
3d931d7030 | ||
|
|
68da9ed51a | ||
|
|
b25c45b352 | ||
|
|
f05234a967 | ||
|
|
12071d17b4 | ||
|
|
11c47d23df | ||
|
|
024f285f86 | ||
|
|
fa4674611a | ||
|
|
730c3c5246 | ||
|
|
2c9310ac3a | ||
|
|
74ba0bcd50 | ||
|
|
5f4190b6c6 | ||
|
|
e1787b4ccb | ||
|
|
4ac8b492ef | ||
|
|
cdd068575c | ||
|
|
e89e2d8a75 | ||
|
|
acd2b94bd9 | ||
|
|
6a0c8047f4 | ||
|
|
cfe03ea9e1 | ||
|
|
48d21ef8ad | ||
|
|
28a001c5f9 | ||
|
|
22a41cacc7 | ||
|
|
fcc246933c | ||
|
|
5c3679ffe7 | ||
|
|
eaa01ccb9f | ||
|
|
b8206fb46a | ||
|
|
1c3876810e | ||
|
|
1d9159ea39 | ||
|
|
2159d112c3 | ||
|
|
f57874a51f | ||
|
|
8625864d43 | ||
|
|
8f9ba3ec11 | ||
|
|
a1d9bc08f0 | ||
|
|
b9d7f1cc77 | ||
|
|
6700910f64 | ||
|
|
9c4ed394a7 | ||
|
|
fd07a9c6e4 | ||
|
|
b8c239630c | ||
|
|
672ea55736 | ||
|
|
f7097866bf | ||
|
|
796b2bd1b0 | ||
|
|
e1e5fb2071 | ||
|
|
89ecb50005 | ||
|
|
dbed1ea042 | ||
|
|
ad291daf78 | ||
|
|
13746a3706 | ||
|
|
f03b13f632 | ||
|
|
f6f64b1286 | ||
|
|
300a415f5d | ||
|
|
c5de546f8a | ||
|
|
cad243905f | ||
|
|
7c8f41d577 | ||
|
|
cbd118d55f | ||
|
|
9d2a1b8b0a | ||
|
|
f6ada979aa | ||
|
|
53a74759a5 | ||
|
|
1940f7bd58 | ||
|
|
18cb3141ba | ||
|
|
f822c9a94f | ||
|
|
1a3dc75874 | ||
|
|
796dd5b6e2 | ||
|
|
94e476ade7 | ||
|
|
2a74996e1b | ||
|
|
f52af1ebf9 | ||
|
|
24450f18bb | ||
|
|
f1a45d8a23 | ||
|
|
9aebba1566 | ||
|
|
4998479f9a | ||
|
|
873d1351ee | ||
|
|
afcbca8f24 | ||
|
|
5b8126c2cf | ||
|
|
4dfaf35151 | ||
|
|
00cd77c92b | ||
|
|
2145a7a93c | ||
|
|
d5302f2dbe | ||
|
|
6181594bc8 | ||
|
|
11c10e51ff | ||
|
|
3f03126d27 | ||
|
|
d43adc2823 | ||
|
|
656222b572 | ||
|
|
077b6072fa | ||
|
|
0cafb1164b | ||
|
|
90afa08367 | ||
|
|
c92f788292 | ||
|
|
e94be9b302 | ||
|
|
e713d9013d | ||
|
|
24d6150073 | ||
|
|
44cddec5c3 | ||
|
|
47d13e5ef8 | ||
|
|
896e1a2abb | ||
|
|
075817a8dd | ||
|
|
29400f6814 | ||
|
|
34f536264f | ||
|
|
9d9c7e0fe0 | ||
|
|
e220d3a4cb | ||
|
|
193dcf90bc | ||
|
|
03756c9166 | ||
|
|
c1f12c5784 | ||
|
|
2883111af5 | ||
|
|
2758c4e842 | ||
|
|
4196ddb3e3 | ||
|
|
e24d797226 | ||
|
|
d4349fa958 | ||
|
|
da2c14d45f | ||
|
|
64e3aeda55 | ||
|
|
dedba17219 | ||
|
|
c6348b9855 | ||
|
|
8689a6247e | ||
|
|
ff6ee25d23 | ||
|
|
5213243bbb | ||
|
|
2588a36555 | ||
|
|
c2421beb25 | ||
|
|
dfe8a81c76 | ||
|
|
ee643a945e | ||
|
|
eeda6efae7 | ||
|
|
caf0e64beb | ||
|
|
0e61303cb0 | ||
|
|
f651d7cf6c | ||
|
|
05da2c560b | ||
|
|
8ae0f287d6 | ||
|
|
8f7aff558f | ||
|
|
6e08d478fe | ||
|
|
944ac383d2 | ||
|
|
0eec33f14b | ||
|
|
9de04ee035 | ||
|
|
cf5b080fbe | ||
|
|
4a14164be1 | ||
|
|
5dd6f2a43a | ||
|
|
a00c956776 | ||
|
|
c02fb3acab | ||
|
|
9f8455ddb3 | ||
|
|
775b105ab2 | ||
|
|
ec86f22cd6 | ||
|
|
e776b5d8ab | ||
|
|
2026d4410b | ||
|
|
f8437e6364 | ||
|
|
bd2033eb35 | ||
|
|
fed835bc25 | ||
|
|
c9cbd74bf3 | ||
|
|
27ea106293 | ||
|
|
657923b9d5 | ||
|
|
8dbec6a800 | ||
|
|
490559434a | ||
|
|
4a62d084a9 | ||
|
|
2f9b6cf8ec | ||
|
|
01bbc78bac | ||
|
|
a012a9138f | ||
|
|
4ec9cac82b | ||
|
|
8d9746e794 | ||
|
|
8f83705199 | ||
|
|
f1ed7bd75d | ||
|
|
7a20608756 | ||
|
|
5cfff95e95 | ||
|
|
1e1cc1a0d9 | ||
|
|
0a1440a839 | ||
|
|
23980d29c3 | ||
|
|
424119d796 | ||
|
|
97c7835d9e | ||
|
|
7954ad3759 | ||
|
|
e8c9b0ddf8 | ||
|
|
70112e59cb | ||
|
|
7ba5ced2d9 | ||
|
|
962b36d745 | ||
|
|
57060cc6d7 | ||
|
|
e0c16059d9 | ||
|
|
a0d954dfab | ||
|
|
a5ece25c07 | ||
|
|
0cac41a38b | ||
|
|
f10084a3f5 | ||
|
|
040fee5669 | ||
|
|
42b3bae65a | ||
|
|
7c730b97d8 | ||
|
|
057bae92ab | ||
|
|
74331a45c9 | ||
|
|
9d551d507f | ||
|
|
02dd81c05c | ||
|
|
3095ba2b1f | ||
|
|
2afa60132c | ||
|
|
e71ed7294b | ||
|
|
95822281bf | ||
|
|
588e10daef | ||
|
|
62ba01120e | ||
|
|
527de1c50e | ||
|
|
abfe62a2d7 | ||
|
|
36f4cf97ff | ||
|
|
b3cd8a32ed | ||
|
|
a9d27cda8a | ||
|
|
04116f647d | ||
|
|
a8ccda0c06 | ||
|
|
bfddb9dae4 | ||
|
|
b337ae36e5 | ||
|
|
5e5123caa3 | ||
|
|
d371ab5479 | ||
|
|
1b9b61f505 | ||
|
|
e1f15efb3b | ||
|
|
1540bdc4c9 | ||
|
|
95b32b5779 | ||
|
|
815a4f7eed | ||
|
|
4b0bacc9c6 | ||
|
|
e9dc96af17 | ||
|
|
62c88a84d1 | ||
|
|
36088ea0d4 | ||
|
|
164f5aa86c | ||
|
|
07d07cdd23 | ||
|
|
0efe99af66 | ||
|
|
927f8bc821 | ||
|
|
2a7ada993b | ||
|
|
682460c1c8 | ||
|
|
a66ae0d4c4 | ||
|
|
d1f24ab70f | ||
|
|
d0983948b5 | ||
|
|
3343fc2cdb | ||
|
|
3866b5741a | ||
|
|
5fbe8ff5c4 | ||
|
|
6f763f71f9 | ||
|
|
80377f18e7 | ||
|
|
8690859c7d | ||
|
|
d744038198 | ||
|
|
58ad8cac8a | ||
|
|
74886a151e | ||
|
|
c5eda1f155 | ||
|
|
b5b7c054ca | ||
|
|
6c8073b691 | ||
|
|
64d430d424 | ||
|
|
f138cc792e | ||
|
|
b20ec8aedb | ||
|
|
a53254fa91 | ||
|
|
08563482e5 | ||
|
|
968727412d | ||
|
|
6f3d094eba | ||
|
|
2d3ebb9b62 | ||
|
|
8c0bb27136 | ||
|
|
cf987295df | ||
|
|
8c92b3af7d | ||
|
|
5dd9342703 | ||
|
|
3ef59f6a71 | ||
|
|
f12802abc7 | ||
|
|
6f0fe67804 | ||
|
|
f44f0e7e62 | ||
|
|
cdd95296db | ||
|
|
3e794cf991 | ||
|
|
bbce95e7e1 | ||
|
|
0449450c64 | ||
|
|
9f9a12ea57 | ||
|
|
cc2a7e97f9 | ||
|
|
09d7af76af | ||
|
|
c233a66601 | ||
|
|
94fa12560c | ||
|
|
7d90f906f5 | ||
|
|
f372b98d96 | ||
|
|
459d3025c5 | ||
|
|
e9e55125a9 | ||
|
|
159ecb7386 | ||
|
|
69bb746a1d | ||
|
|
d184f96b13 | ||
|
|
c7dacb0146 | ||
|
|
2207477b60 | ||
|
|
105ab590ff | ||
|
|
2f4c58ed73 | ||
|
|
1631aa02ad | ||
|
|
4df10eef5d | ||
|
|
3d552ea7a8 | ||
|
|
74b87ac9fd | ||
|
|
ba1d6e3664 | ||
|
|
2096cd5749 | ||
|
|
2eb2fa26f6 | ||
|
|
bc4187ca90 | ||
|
|
b7be25b98b | ||
|
|
dd01695d45 | ||
|
|
25821bdee6 | ||
|
|
b624b966f0 | ||
|
|
df96262235 | ||
|
|
2629636f64 | ||
|
|
708ad207d7 | ||
|
|
b53ba76a99 | ||
|
|
be7f93924a | ||
|
|
45be48a4ee | ||
|
|
e08ba2fee8 | ||
|
|
a5d2b971a1 | ||
|
|
9763c8b143 | ||
|
|
4e3f1776dc | ||
|
|
e38036cea2 | ||
|
|
8e97c5819f | ||
|
|
0ee44c7064 | ||
|
|
a1bacd3fc8 | ||
|
|
c98d5fdaf4 | ||
|
|
2ee43b41b8 | ||
|
|
8367af3416 | ||
|
|
03b6e377d7 | ||
|
|
ec41880476 | ||
|
|
5263805b3b | ||
|
|
a7363f0c21 | ||
|
|
52e5d85221 | ||
|
|
88aab8f635 | ||
|
|
1f63cbe9c6 | ||
|
|
0dfaf135dd | ||
|
|
914bcdcf2b | ||
|
|
e38cb0118d | ||
|
|
cb8161c799 | ||
|
|
c4dac2095f | ||
|
|
25a807afb2 | ||
|
|
6be37ad533 | ||
|
|
28cb2c5804 | ||
|
|
0f1d4c6bca | ||
|
|
bfe8335cb2 | ||
|
|
3c10ce0115 | ||
|
|
1303e0fe6f | ||
|
|
9c17499757 | ||
|
|
8ceb2885db | ||
|
|
00944894b4 | ||
|
|
609fb3d59e | ||
|
|
01c330abe7 | ||
|
|
2ccef5bb4f | ||
|
|
10f1d93e2a | ||
|
|
dd518c8eac | ||
|
|
97cf2421de | ||
|
|
786a80131e | ||
|
|
93d257f9f5 | ||
|
|
f79535057f | ||
|
|
a905f4c20c | ||
|
|
3331954a78 | ||
|
|
f54c2e384f | ||
|
|
4b83f2ebd0 | ||
|
|
1c572ee16b | ||
|
|
b3402430e8 | ||
|
|
076f5de7d9 | ||
|
|
303a74559a | ||
|
|
c08e741d7a | ||
|
|
06d2c07fce | ||
|
|
b6f00a5789 | ||
|
|
dace520654 | ||
|
|
44d347d48a | ||
|
|
408827d876 | ||
|
|
9e33b557b1 | ||
|
|
368caa7608 | ||
|
|
7822a4c1cb | ||
|
|
0441f84606 | ||
|
|
81d72b5010 | ||
|
|
7ba8adfc74 | ||
|
|
ffd2a78623 | ||
|
|
1b9b3cada7 | ||
|
|
38c8f9cf21 | ||
|
|
54dbb05bb9 | ||
|
|
9a671f6059 | ||
|
|
80aab0b3f8 | ||
|
|
2d0286646f | ||
|
|
d012fe97b1 | ||
|
|
7ca13bc25e | ||
|
|
9c20f10743 | ||
|
|
6ec38ec056 | ||
|
|
28c742a1e1 | ||
|
|
b3b6cfe947 | ||
|
|
a8ef7b51eb | ||
|
|
124968114a | ||
|
|
04ed821b65 | ||
|
|
15599ac0a0 | ||
|
|
0cf6971664 | ||
|
|
47c4f2831c | ||
|
|
2b18dd1764 | ||
|
|
27c4908342 | ||
|
|
48625fa381 | ||
|
|
83a776a190 | ||
|
|
431f9af43e | ||
|
|
8c2f4c1899 | ||
|
|
919477ffe4 | ||
|
|
400386459c | ||
|
|
ebe0801d2f | ||
|
|
b76d85f204 | ||
|
|
28ba57afb3 | ||
|
|
d6873047ce | ||
|
|
54c0f2e5cf | ||
|
|
7795231cc6 | ||
|
|
4835e4fe51 | ||
|
|
daef970091 | ||
|
|
05020a92e8 | ||
|
|
a1bbac3c6c | ||
|
|
22c98beb24 | ||
|
|
8fd710d5e7 | ||
|
|
91a735ae47 | ||
|
|
39c662eece | ||
|
|
5e63002cf8 | ||
|
|
c46bcf7e1b | ||
|
|
3c65bdbb66 | ||
|
|
5630bce286 | ||
|
|
75524da21e | ||
|
|
ede7e683fd | ||
|
|
eb14d1347e | ||
|
|
c220b5d886 | ||
|
|
5e8e21b257 | ||
|
|
0635a4ac96 | ||
|
|
c71b753c78 | ||
|
|
2f8cffc699 | ||
|
|
9c1aa6da3d | ||
|
|
da67ea2300 | ||
|
|
72dd2736f5 | ||
|
|
24695bba09 | ||
|
|
c7c43062c5 | ||
|
|
97e1ea0633 | ||
|
|
04b9737a61 | ||
|
|
b0fb246693 | ||
|
|
41140ac78c | ||
|
|
1281904572 | ||
|
|
c8a8ff7cad | ||
|
|
df2799fff1 | ||
|
|
fd8ee0b851 | ||
|
|
6ecc5e7b73 | ||
|
|
52963c2ebf |
13
.codecov.yml
13
.codecov.yml
@@ -1,13 +0,0 @@
|
|||||||
coverage:
|
|
||||||
status:
|
|
||||||
patch: true
|
|
||||||
project: false # disabled because project coverage is not stable
|
|
||||||
comment:
|
|
||||||
layout: "flags, files"
|
|
||||||
behavior: once
|
|
||||||
require_changes: true
|
|
||||||
ignore:
|
|
||||||
- "tools"
|
|
||||||
- "**/mock"
|
|
||||||
- "**/*_mock.go"
|
|
||||||
- "**/*test"
|
|
||||||
@@ -1 +1,7 @@
|
|||||||
**/.git
|
**/.git
|
||||||
|
.dockerignore
|
||||||
|
Dockerfile
|
||||||
|
goctl
|
||||||
|
Makefile
|
||||||
|
readme.md
|
||||||
|
readme-cn.md
|
||||||
|
|||||||
12
.github/FUNDING.yml
vendored
12
.github/FUNDING.yml
vendored
@@ -1,13 +1,3 @@
|
|||||||
# These are supported funding model platforms
|
# These are supported funding model platforms
|
||||||
|
|
||||||
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
github: [zeromicro]
|
||||||
patreon: # Replace with a single Patreon username
|
|
||||||
open_collective: # Replace with a single Open Collective username
|
|
||||||
ko_fi: # Replace with a single Ko-fi username
|
|
||||||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
|
||||||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
|
||||||
liberapay: # Replace with a single Liberapay username
|
|
||||||
issuehunt: # Replace with a single IssueHunt username
|
|
||||||
otechie: # Replace with a single Otechie username
|
|
||||||
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
|
|
||||||
ethereum: # 0x5052b7f6B937B02563996D23feb69b38D06Ca150 | kevwan
|
|
||||||
|
|||||||
197
.github/copilot-instructions.md
vendored
Normal file
197
.github/copilot-instructions.md
vendored
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
# GitHub Copilot Instructions for go-zero
|
||||||
|
|
||||||
|
This document provides guidelines for GitHub Copilot when assisting with development in the go-zero project.
|
||||||
|
|
||||||
|
## Project Overview
|
||||||
|
|
||||||
|
go-zero is a web and RPC framework with lots of built-in engineering practices designed to ensure the stability of busy services with resilience design. It has been serving sites with tens of millions of users for years.
|
||||||
|
|
||||||
|
### Key Architecture Components
|
||||||
|
|
||||||
|
- **REST API framework** (`rest/`) - HTTP service framework with middleware support
|
||||||
|
- **RPC framework** (`zrpc/`) - gRPC-based RPC framework with service discovery
|
||||||
|
- **Core utilities** (`core/`) - Foundational components including:
|
||||||
|
- Circuit breakers, rate limiters, load shedding
|
||||||
|
- Caching, stores (Redis, MongoDB, SQL)
|
||||||
|
- Concurrency control, metrics, tracing
|
||||||
|
- Configuration management
|
||||||
|
- **Code generation tool** (`tools/goctl/`) - CLI tool for generating code from API files
|
||||||
|
|
||||||
|
## Coding Standards and Conventions
|
||||||
|
|
||||||
|
### Code Style
|
||||||
|
|
||||||
|
1. **Follow Go conventions**: Use `gofmt` for formatting, follow effective Go practices
|
||||||
|
2. **Package naming**: Use lowercase, single-word package names when possible
|
||||||
|
3. **Error handling**: Always handle errors explicitly, use `errorx.BatchError` for multiple errors
|
||||||
|
4. **Context propagation**: Always pass `context.Context` as the first parameter for functions that may block
|
||||||
|
5. **Configuration structures**: Use struct tags with JSON annotations and default values
|
||||||
|
|
||||||
|
Example configuration pattern:
|
||||||
|
```go
|
||||||
|
type Config struct {
|
||||||
|
Host string `json:",default=0.0.0.0"`
|
||||||
|
Port int `json:",default=8080"`
|
||||||
|
Timeout int `json:",default=3000"`
|
||||||
|
Optional string `json:",optional"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Interface Design
|
||||||
|
|
||||||
|
1. **Small interfaces**: Follow Go's preference for small, focused interfaces
|
||||||
|
2. **Context methods**: Provide both context and non-context versions of methods
|
||||||
|
3. **Options pattern**: Use functional options for complex configuration
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```go
|
||||||
|
func (c *Client) Get(key string, val any) error {
|
||||||
|
return c.GetCtx(context.Background(), key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetCtx(ctx context.Context, key string, val any) error {
|
||||||
|
// implementation
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing Patterns
|
||||||
|
|
||||||
|
1. **Test file naming**: Use `*_test.go` suffix
|
||||||
|
2. **Test function naming**: Use `TestFunctionName` pattern
|
||||||
|
3. **Use testify/assert**: Prefer `assert` package for assertions
|
||||||
|
4. **Table-driven tests**: Use table-driven tests for multiple scenarios
|
||||||
|
5. **Mock interfaces**: Use `go.uber.org/mock` for mocking
|
||||||
|
6. **Test helpers**: Use `redistest`, `mongtest` helpers for database testing
|
||||||
|
|
||||||
|
Example test pattern:
|
||||||
|
```go
|
||||||
|
func TestSomething(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input string
|
||||||
|
expected string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{"valid case", "input", "output", false},
|
||||||
|
{"error case", "bad", "", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result, err := SomeFunction(tt.input)
|
||||||
|
if tt.wantErr {
|
||||||
|
assert.Error(t, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Framework-Specific Guidelines
|
||||||
|
|
||||||
|
### REST API Development
|
||||||
|
|
||||||
|
1. **API Definition**: Use `.api` files to define REST APIs
|
||||||
|
2. **Handler pattern**: Separate business logic into logic packages
|
||||||
|
3. **Middleware**: Use built-in middlewares (tracing, logging, metrics, recovery)
|
||||||
|
4. **Response handling**: Use `httpx.WriteJson` for JSON responses
|
||||||
|
5. **Error handling**: Use `httpx.Error` for HTTP error responses
|
||||||
|
|
||||||
|
### RPC Development
|
||||||
|
|
||||||
|
1. **Protocol Buffers**: Use protobuf for service definitions
|
||||||
|
2. **Service discovery**: Integrate with etcd for service registration
|
||||||
|
3. **Load balancing**: Use built-in load balancing strategies
|
||||||
|
4. **Interceptors**: Implement interceptors for cross-cutting concerns
|
||||||
|
|
||||||
|
### Database Operations
|
||||||
|
|
||||||
|
1. **SQL operations**: Use `sqlx` package for database operations
|
||||||
|
2. **Caching**: Implement caching patterns with `cache` package
|
||||||
|
3. **Transactions**: Use proper transaction handling
|
||||||
|
4. **Connection pooling**: Configure appropriate connection pools
|
||||||
|
|
||||||
|
Example cache pattern:
|
||||||
|
```go
|
||||||
|
err := c.QueryRowCtx(ctx, &dest, key, func(ctx context.Context, conn sqlx.SqlConn) error {
|
||||||
|
return conn.QueryRowCtx(ctx, &dest, query, args...)
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Management
|
||||||
|
|
||||||
|
1. **YAML configuration**: Use YAML for configuration files
|
||||||
|
2. **Environment variables**: Support environment variable overrides
|
||||||
|
3. **Validation**: Include proper validation for configuration parameters
|
||||||
|
4. **Sensible defaults**: Provide reasonable default values
|
||||||
|
|
||||||
|
## Error Handling Best Practices
|
||||||
|
|
||||||
|
1. **Wrap errors**: Use `fmt.Errorf` with `%w` verb to wrap errors
|
||||||
|
2. **Custom errors**: Define custom error types when needed
|
||||||
|
3. **Error logging**: Log errors appropriately with context
|
||||||
|
4. **Graceful degradation**: Implement fallback mechanisms
|
||||||
|
|
||||||
|
## Performance Considerations
|
||||||
|
|
||||||
|
1. **Resource pools**: Use connection pools and worker pools
|
||||||
|
2. **Circuit breakers**: Implement circuit breaker patterns for external calls
|
||||||
|
3. **Rate limiting**: Apply rate limiting to protect services
|
||||||
|
4. **Load shedding**: Implement adaptive load shedding
|
||||||
|
5. **Metrics**: Add appropriate metrics and monitoring
|
||||||
|
|
||||||
|
## Security Guidelines
|
||||||
|
|
||||||
|
1. **Input validation**: Validate all input parameters
|
||||||
|
2. **SQL injection prevention**: Use parameterized queries
|
||||||
|
3. **Authentication**: Implement proper JWT token handling
|
||||||
|
4. **HTTPS**: Support TLS/HTTPS configurations
|
||||||
|
5. **CORS**: Configure CORS appropriately for web APIs
|
||||||
|
|
||||||
|
## Documentation Standards
|
||||||
|
|
||||||
|
1. **Package documentation**: Include package-level documentation
|
||||||
|
2. **Function documentation**: Document exported functions with examples
|
||||||
|
3. **API documentation**: Maintain API documentation in sync
|
||||||
|
4. **README updates**: Update README for significant changes
|
||||||
|
|
||||||
|
## Common Patterns to Follow
|
||||||
|
|
||||||
|
### Service Configuration
|
||||||
|
```go
|
||||||
|
type ServiceConf struct {
|
||||||
|
Name string
|
||||||
|
Log logx.LogConf
|
||||||
|
Mode string `json:",default=pro,options=[dev,test,pre,pro]"`
|
||||||
|
// ... other common fields
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Middleware Implementation
|
||||||
|
```go
|
||||||
|
func SomeMiddleware() rest.Middleware {
|
||||||
|
return func(next http.HandlerFunc) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Pre-processing
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
// Post-processing
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Resource Management
|
||||||
|
Always implement proper resource cleanup using defer and context cancellation.
|
||||||
|
|
||||||
|
## Build and Test Commands
|
||||||
|
|
||||||
|
- Build: `go build ./...`
|
||||||
|
- Test: `go test ./...`
|
||||||
|
- Test with race detection: `go test -race ./...`
|
||||||
|
- Format: `gofmt -w .`
|
||||||
|
- Generate code: `goctl api go -api *.api -dir .`
|
||||||
|
|
||||||
|
Remember to run tests and ensure all checks pass before submitting changes. The project emphasizes high quality, performance, and reliability, so these should be primary considerations in all development work.
|
||||||
8
.github/dependabot.yml
vendored
8
.github/dependabot.yml
vendored
@@ -5,6 +5,14 @@
|
|||||||
|
|
||||||
version: 2
|
version: 2
|
||||||
updates:
|
updates:
|
||||||
|
- package-ecosystem: "docker" # Update image tags in Dockerfile
|
||||||
|
directory: "/"
|
||||||
|
schedule:
|
||||||
|
interval: "weekly"
|
||||||
|
- package-ecosystem: "github-actions" # Update GitHub Actions
|
||||||
|
directory: "/"
|
||||||
|
schedule:
|
||||||
|
interval: "weekly"
|
||||||
- package-ecosystem: "gomod" # See documentation for possible values
|
- package-ecosystem: "gomod" # See documentation for possible values
|
||||||
directory: "/" # Location of package manifests
|
directory: "/" # Location of package manifests
|
||||||
schedule:
|
schedule:
|
||||||
|
|||||||
8
.github/workflows/codeql-analysis.yml
vendored
8
.github/workflows/codeql-analysis.yml
vendored
@@ -35,11 +35,11 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v5
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
# Initializes the CodeQL tools for scanning.
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v2
|
uses: github/codeql-action/init@v4
|
||||||
with:
|
with:
|
||||||
languages: ${{ matrix.language }}
|
languages: ${{ matrix.language }}
|
||||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||||
@@ -50,7 +50,7 @@ jobs:
|
|||||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||||
# If this step fails, then you should remove it and run the build manually (see below)
|
# If this step fails, then you should remove it and run the build manually (see below)
|
||||||
- name: Autobuild
|
- name: Autobuild
|
||||||
uses: github/codeql-action/autobuild@v2
|
uses: github/codeql-action/autobuild@v4
|
||||||
|
|
||||||
# ℹ️ Command-line programs to run using the OS shell.
|
# ℹ️ Command-line programs to run using the OS shell.
|
||||||
# 📚 https://git.io/JvXDl
|
# 📚 https://git.io/JvXDl
|
||||||
@@ -64,4 +64,4 @@ jobs:
|
|||||||
# make release
|
# make release
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
- name: Perform CodeQL Analysis
|
||||||
uses: github/codeql-action/analyze@v2
|
uses: github/codeql-action/analyze@v4
|
||||||
|
|||||||
21
.github/workflows/go.yml
vendored
21
.github/workflows/go.yml
vendored
@@ -12,12 +12,12 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v5
|
||||||
|
|
||||||
- name: Set up Go 1.x
|
- name: Set up Go 1.x
|
||||||
uses: actions/setup-go@v4
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: 1.19
|
go-version-file: go.mod
|
||||||
check-latest: true
|
check-latest: true
|
||||||
cache: true
|
cache: true
|
||||||
id: go
|
id: go
|
||||||
@@ -40,20 +40,25 @@ jobs:
|
|||||||
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||||
|
|
||||||
- name: Codecov
|
- name: Codecov
|
||||||
uses: codecov/codecov-action@v3
|
uses: codecov/codecov-action@v5
|
||||||
|
with:
|
||||||
|
files: ./coverage.txt
|
||||||
|
flags: unittests
|
||||||
|
name: codecov-umbrella
|
||||||
|
fail_ci_if_error: false
|
||||||
|
|
||||||
test-win:
|
test-win:
|
||||||
name: Windows
|
name: Windows
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout codebase
|
- name: Checkout codebase
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v5
|
||||||
|
|
||||||
- name: Set up Go 1.x
|
- name: Set up Go 1.x
|
||||||
uses: actions/setup-go@v4
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
# use 1.19 to guarantee Go 1.19 compatibility
|
# make sure Go version compatible with go-zero
|
||||||
go-version: 1.19
|
go-version-file: go.mod
|
||||||
check-latest: true
|
check-latest: true
|
||||||
cache: true
|
cache: true
|
||||||
|
|
||||||
|
|||||||
18
.github/workflows/issue-translator.yml
vendored
18
.github/workflows/issue-translator.yml
vendored
@@ -1,18 +0,0 @@
|
|||||||
name: 'issue-translator'
|
|
||||||
on:
|
|
||||||
issue_comment:
|
|
||||||
types: [created]
|
|
||||||
issues:
|
|
||||||
types: [opened]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: usthe/issues-translate-action@v2.7
|
|
||||||
with:
|
|
||||||
IS_MODIFY_TITLE: true
|
|
||||||
# not require, default false, . Decide whether to modify the issue title
|
|
||||||
# if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot.
|
|
||||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. 👯👭🏻🧑🤝🧑👫🧑🏿🤝🧑🏻👩🏾🤝👨🏿👬🏿
|
|
||||||
# not require. Customize the translation robot prefix message.
|
|
||||||
2
.github/workflows/issues.yml
vendored
2
.github/workflows/issues.yml
vendored
@@ -7,7 +7,7 @@ jobs:
|
|||||||
close-issues:
|
close-issues:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/stale@v8
|
- uses: actions/stale@v10
|
||||||
with:
|
with:
|
||||||
days-before-issue-stale: 365
|
days-before-issue-stale: 365
|
||||||
days-before-issue-close: 90
|
days-before-issue-close: 90
|
||||||
|
|||||||
6
.github/workflows/release.yaml
vendored
6
.github/workflows/release.yaml
vendored
@@ -16,13 +16,13 @@ jobs:
|
|||||||
- goarch: "386"
|
- goarch: "386"
|
||||||
goos: darwin
|
goos: darwin
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v5
|
||||||
- uses: zeromicro/go-zero-release-action@master
|
- uses: zeromicro/go-zero-release-action@master
|
||||||
with:
|
with:
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
goos: ${{ matrix.goos }}
|
goos: ${{ matrix.goos }}
|
||||||
goarch: ${{ matrix.goarch }}
|
goarch: ${{ matrix.goarch }}
|
||||||
goversion: "https://dl.google.com/go/go1.19.13.linux-amd64.tar.gz"
|
goversion: "https://dl.google.com/go/go1.21.13.linux-amd64.tar.gz"
|
||||||
project_path: "tools/goctl"
|
project_path: "tools/goctl"
|
||||||
binary_name: "goctl"
|
binary_name: "goctl"
|
||||||
extra_files: tools/goctl/readme.md tools/goctl/readme-cn.md
|
extra_files: tools/goctl/readme.md tools/goctl/readme-cn.md
|
||||||
|
|||||||
4
.github/workflows/reviewdog.yml
vendored
4
.github/workflows/reviewdog.yml
vendored
@@ -5,7 +5,7 @@ jobs:
|
|||||||
name: runner / staticcheck
|
name: runner / staticcheck
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v5
|
||||||
- uses: reviewdog/action-staticcheck@v1
|
- uses: reviewdog/action-staticcheck@v1
|
||||||
with:
|
with:
|
||||||
github_token: ${{ secrets.github_token }}
|
github_token: ${{ secrets.github_token }}
|
||||||
@@ -14,6 +14,6 @@ jobs:
|
|||||||
# Report all results.
|
# Report all results.
|
||||||
filter_mode: nofilter
|
filter_mode: nofilter
|
||||||
# Exit with 1 when it find at least one finding.
|
# Exit with 1 when it find at least one finding.
|
||||||
fail_on_error: true
|
fail_level: any
|
||||||
# Set staticcheck flags
|
# Set staticcheck flags
|
||||||
staticcheck_flags: -checks=inherit,-SA1019,-SA1029,-SA5008
|
staticcheck_flags: -checks=inherit,-SA1019,-SA1029,-SA5008
|
||||||
|
|||||||
42
.github/workflows/version-check.yml
vendored
Normal file
42
.github/workflows/version-check.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
name: Release Version Check
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- 'tools/goctl/v*'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
version-check:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v6
|
||||||
|
with:
|
||||||
|
go-version: '1.21'
|
||||||
|
|
||||||
|
- name: Extract tag version
|
||||||
|
id: get_version
|
||||||
|
run: |
|
||||||
|
# Extract version from tools/goctl/v* format
|
||||||
|
VERSION="${GITHUB_REF#refs/tags/tools/goctl/v}"
|
||||||
|
echo "VERSION=$VERSION" >> $GITHUB_ENV
|
||||||
|
echo "Extracted version: $VERSION"
|
||||||
|
|
||||||
|
- name: Check version in goctl source code
|
||||||
|
run: |
|
||||||
|
# Change to goctl directory
|
||||||
|
cd tools/goctl
|
||||||
|
|
||||||
|
# Check version in BuildVersion constant
|
||||||
|
VERSION_IN_CODE=$(grep -r "const BuildVersion =" . | grep -o '".*"' | tr -d '"')
|
||||||
|
echo "Version in code: $VERSION_IN_CODE"
|
||||||
|
echo "Expected version: $VERSION"
|
||||||
|
|
||||||
|
if [ "$VERSION_IN_CODE" != "$VERSION" ]; then
|
||||||
|
echo "Version mismatch: Version in code ($VERSION_IN_CODE) doesn't match tag version ($VERSION)"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "✅ Version check passed!"
|
||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -17,6 +17,7 @@
|
|||||||
**/logs
|
**/logs
|
||||||
**/adhoc
|
**/adhoc
|
||||||
**/coverage.txt
|
**/coverage.txt
|
||||||
|
**/WARP.md
|
||||||
|
|
||||||
# for test purpose
|
# for test purpose
|
||||||
go.work
|
go.work
|
||||||
|
|||||||
16
SECURITY.md
Normal file
16
SECURITY.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# Security Policy
|
||||||
|
|
||||||
|
## Supported Versions
|
||||||
|
|
||||||
|
We publish releases monthly.
|
||||||
|
|
||||||
|
| Version | Supported |
|
||||||
|
| ------- | ------------------ |
|
||||||
|
| >= 1.4.4 | :white_check_mark: |
|
||||||
|
| < 1.4.4 | :x: |
|
||||||
|
|
||||||
|
## Reporting a Vulnerability
|
||||||
|
|
||||||
|
https://github.com/zeromicro/go-zero/security/advisories
|
||||||
|
|
||||||
|
Accepted vulnerabilities are expected to be fixed within a month.
|
||||||
@@ -1,76 +1,127 @@
|
|||||||
|
|
||||||
# Contributor Covenant Code of Conduct
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
## Our Pledge
|
## Our Pledge
|
||||||
|
|
||||||
In the interest of fostering an open and welcoming environment, we as
|
We as members, contributors, and leaders pledge to make participation in our
|
||||||
contributors and maintainers pledge to make participation in our project and
|
community a harassment-free experience for everyone, regardless of age, body
|
||||||
our community a harassment-free experience for everyone, regardless of age, body
|
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
identity and expression, level of experience, education, socio-economic status,
|
||||||
level of experience, education, socio-economic status, nationality, personal
|
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||||
appearance, race, religion, or sexual identity and orientation.
|
identity and orientation.
|
||||||
|
|
||||||
|
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||||
|
diverse, inclusive, and healthy community.
|
||||||
|
|
||||||
## Our Standards
|
## Our Standards
|
||||||
|
|
||||||
Examples of behavior that contributes to creating a positive environment
|
Examples of behavior that contributes to a positive environment for our
|
||||||
include:
|
community include:
|
||||||
|
|
||||||
* Using welcoming and inclusive language
|
* Demonstrating empathy and kindness toward other people
|
||||||
* Being respectful of differing viewpoints and experiences
|
* Being respectful of differing opinions, viewpoints, and experiences
|
||||||
* Gracefully accepting constructive criticism
|
* Giving and gracefully accepting constructive feedback
|
||||||
* Focusing on what is best for the community
|
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||||
* Showing empathy towards other community members
|
and learning from the experience
|
||||||
|
* Focusing on what is best not just for us as individuals, but for the overall
|
||||||
|
community
|
||||||
|
|
||||||
Examples of unacceptable behavior by participants include:
|
Examples of unacceptable behavior include:
|
||||||
|
|
||||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||||
advances
|
any kind
|
||||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||||
* Public or private harassment
|
* Public or private harassment
|
||||||
* Publishing others' private information, such as a physical or electronic
|
* Publishing others' private information, such as a physical or email address,
|
||||||
address, without explicit permission
|
without their explicit permission
|
||||||
* Other conduct which could reasonably be considered inappropriate in a
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
professional setting
|
professional setting
|
||||||
|
|
||||||
## Our Responsibilities
|
## Enforcement Responsibilities
|
||||||
|
|
||||||
Project maintainers are responsible for clarifying the standards of acceptable
|
Community leaders are responsible for clarifying and enforcing our standards of
|
||||||
behavior and are expected to take appropriate and fair corrective action in
|
acceptable behavior and will take appropriate and fair corrective action in
|
||||||
response to any instances of unacceptable behavior.
|
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||||
|
or harmful.
|
||||||
|
|
||||||
Project maintainers have the right and responsibility to remove, edit, or
|
Community leaders have the right and responsibility to remove, edit, or reject
|
||||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||||
permanently any contributor for other behaviors that they deem inappropriate,
|
decisions when appropriate.
|
||||||
threatening, offensive, or harmful.
|
|
||||||
|
|
||||||
## Scope
|
## Scope
|
||||||
|
|
||||||
This Code of Conduct applies within all project spaces, and it also applies when
|
This Code of Conduct applies within all community spaces, and also applies when
|
||||||
an individual is representing the project or its community in public spaces.
|
an individual is officially representing the community in public spaces.
|
||||||
Examples of representing a project or community include using an official
|
Examples of representing our community include using an official e-mail address,
|
||||||
project e-mail address, posting via an official social media account, or acting
|
posting via an official social media account, or acting as an appointed
|
||||||
as an appointed representative at an online or offline event. Representation of
|
representative at an online or offline event.
|
||||||
a project may be further defined and clarified by project maintainers.
|
|
||||||
|
|
||||||
## Enforcement
|
## Enforcement
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
reported by contacting the project team at [INSERT EMAIL ADDRESS]. All
|
reported to the community leaders responsible for enforcement at
|
||||||
complaints will be reviewed and investigated and will result in a response that
|
[INSERT CONTACT METHOD].
|
||||||
is deemed necessary and appropriate to the circumstances. The project team is
|
All complaints will be reviewed and investigated promptly and fairly.
|
||||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
|
||||||
Further details of specific enforcement policies may be posted separately.
|
|
||||||
|
|
||||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
All community leaders are obligated to respect the privacy and security of the
|
||||||
faith may face temporary or permanent repercussions as determined by other
|
reporter of any incident.
|
||||||
members of the project's leadership.
|
|
||||||
|
## Enforcement Guidelines
|
||||||
|
|
||||||
|
Community leaders will follow these Community Impact Guidelines in determining
|
||||||
|
the consequences for any action they deem in violation of this Code of Conduct:
|
||||||
|
|
||||||
|
### 1. Correction
|
||||||
|
|
||||||
|
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||||
|
unprofessional or unwelcome in the community.
|
||||||
|
|
||||||
|
**Consequence**: A private, written warning from community leaders, providing
|
||||||
|
clarity around the nature of the violation and an explanation of why the
|
||||||
|
behavior was inappropriate. A public apology may be requested.
|
||||||
|
|
||||||
|
### 2. Warning
|
||||||
|
|
||||||
|
**Community Impact**: A violation through a single incident or series of
|
||||||
|
actions.
|
||||||
|
|
||||||
|
**Consequence**: A warning with consequences for continued behavior. No
|
||||||
|
interaction with the people involved, including unsolicited interaction with
|
||||||
|
those enforcing the Code of Conduct, for a specified period of time. This
|
||||||
|
includes avoiding interactions in community spaces as well as external channels
|
||||||
|
like social media. Violating these terms may lead to a temporary or permanent
|
||||||
|
ban.
|
||||||
|
|
||||||
|
### 3. Temporary Ban
|
||||||
|
|
||||||
|
**Community Impact**: A serious violation of community standards, including
|
||||||
|
sustained inappropriate behavior.
|
||||||
|
|
||||||
|
**Consequence**: A temporary ban from any sort of interaction or public
|
||||||
|
communication with the community for a specified period of time. No public or
|
||||||
|
private interaction with the people involved, including unsolicited interaction
|
||||||
|
with those enforcing the Code of Conduct, is allowed during this period.
|
||||||
|
Violating these terms may lead to a permanent ban.
|
||||||
|
|
||||||
|
### 4. Permanent Ban
|
||||||
|
|
||||||
|
**Community Impact**: Demonstrating a pattern of violation of community
|
||||||
|
standards, including sustained inappropriate behavior, harassment of an
|
||||||
|
individual, or aggression toward or disparagement of classes of individuals.
|
||||||
|
|
||||||
|
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||||
|
community.
|
||||||
|
|
||||||
## Attribution
|
## Attribution
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
version 2.1, available at
|
||||||
|
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||||
|
|
||||||
[homepage]: https://www.contributor-covenant.org
|
Community Impact Guidelines were inspired by
|
||||||
|
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||||
|
|
||||||
For answers to common questions about this code of conduct, see
|
For answers to common questions about this code of conduct, see the FAQ at
|
||||||
https://www.contributor-covenant.org/faq
|
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||||
|
[https://www.contributor-covenant.org/translations][translations].
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package bloom
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
_ "embed"
|
||||||
"errors"
|
"errors"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
@@ -17,19 +18,13 @@ var (
|
|||||||
// ErrTooLargeOffset indicates the offset is too large in bitset.
|
// ErrTooLargeOffset indicates the offset is too large in bitset.
|
||||||
ErrTooLargeOffset = errors.New("too large offset")
|
ErrTooLargeOffset = errors.New("too large offset")
|
||||||
|
|
||||||
setScript = redis.NewScript(`
|
//go:embed setscript.lua
|
||||||
for _, offset in ipairs(ARGV) do
|
setLuaScript string
|
||||||
redis.call("setbit", KEYS[1], offset, 1)
|
setScript = redis.NewScript(setLuaScript)
|
||||||
end
|
|
||||||
`)
|
//go:embed testscript.lua
|
||||||
testScript = redis.NewScript(`
|
testLuaScript string
|
||||||
for _, offset in ipairs(ARGV) do
|
testScript = redis.NewScript(testLuaScript)
|
||||||
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
|
|
||||||
return false
|
|
||||||
end
|
|
||||||
end
|
|
||||||
return true
|
|
||||||
`)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@@ -110,7 +105,7 @@ func newRedisBitSet(store *redis.Redis, key string, bits uint) *redisBitSet {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *redisBitSet) buildOffsetArgs(offsets []uint) ([]string, error) {
|
func (r *redisBitSet) buildOffsetArgs(offsets []uint) ([]string, error) {
|
||||||
var args []string
|
args := make([]string, 0, len(offsets))
|
||||||
|
|
||||||
for _, offset := range offsets {
|
for _, offset := range offsets {
|
||||||
if offset >= r.bits {
|
if offset >= r.bits {
|
||||||
@@ -130,7 +125,7 @@ func (r *redisBitSet) check(ctx context.Context, offsets []uint) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
resp, err := r.store.ScriptRunCtx(ctx, testScript, []string{r.key}, args)
|
resp, err := r.store.ScriptRunCtx(ctx, testScript, []string{r.key}, args)
|
||||||
if err == redis.Nil {
|
if errors.Is(err, redis.Nil) {
|
||||||
return false, nil
|
return false, nil
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
@@ -162,7 +157,7 @@ func (r *redisBitSet) set(ctx context.Context, offsets []uint) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_, err = r.store.ScriptRunCtx(ctx, setScript, []string{r.key}, args)
|
_, err = r.store.ScriptRunCtx(ctx, setScript, []string{r.key}, args)
|
||||||
if err == redis.Nil {
|
if errors.Is(err, redis.Nil) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
3
core/bloom/setscript.lua
Normal file
3
core/bloom/setscript.lua
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
for _, offset in ipairs(ARGV) do
|
||||||
|
redis.call("setbit", KEYS[1], offset, 1)
|
||||||
|
end
|
||||||
6
core/bloom/testscript.lua
Normal file
6
core/bloom/testscript.lua
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
for _, offset in ipairs(ARGV) do
|
||||||
|
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
|
||||||
|
return false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
return true
|
||||||
@@ -1,22 +1,19 @@
|
|||||||
package breaker
|
package breaker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/mathx"
|
|
||||||
"github.com/zeromicro/go-zero/core/proc"
|
"github.com/zeromicro/go-zero/core/proc"
|
||||||
"github.com/zeromicro/go-zero/core/stat"
|
"github.com/zeromicro/go-zero/core/stat"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const numHistoryReasons = 5
|
||||||
numHistoryReasons = 5
|
|
||||||
timeFormat = "15:04:05"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrServiceUnavailable is returned when the Breaker state is open.
|
// ErrServiceUnavailable is returned when the Breaker state is open.
|
||||||
var ErrServiceUnavailable = errors.New("circuit breaker is open")
|
var ErrServiceUnavailable = errors.New("circuit breaker is open")
|
||||||
@@ -31,16 +28,21 @@ type (
|
|||||||
Name() string
|
Name() string
|
||||||
|
|
||||||
// Allow checks if the request is allowed.
|
// Allow checks if the request is allowed.
|
||||||
// If allowed, a promise will be returned, the caller needs to call promise.Accept()
|
// If allowed, a promise will be returned,
|
||||||
// on success, or call promise.Reject() on failure.
|
// otherwise ErrServiceUnavailable will be returned as the error.
|
||||||
// If not allow, ErrServiceUnavailable will be returned.
|
// The caller needs to call promise.Accept() on success,
|
||||||
|
// or call promise.Reject() on failure.
|
||||||
Allow() (Promise, error)
|
Allow() (Promise, error)
|
||||||
|
// AllowCtx checks if the request is allowed when ctx isn't done.
|
||||||
|
AllowCtx(ctx context.Context) (Promise, error)
|
||||||
|
|
||||||
// Do runs the given request if the Breaker accepts it.
|
// Do runs the given request if the Breaker accepts it.
|
||||||
// Do returns an error instantly if the Breaker rejects the request.
|
// Do returns an error instantly if the Breaker rejects the request.
|
||||||
// If a panic occurs in the request, the Breaker handles it as an error
|
// If a panic occurs in the request, the Breaker handles it as an error
|
||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
Do(req func() error) error
|
Do(req func() error) error
|
||||||
|
// DoCtx runs the given request if the Breaker accepts it when ctx isn't done.
|
||||||
|
DoCtx(ctx context.Context, req func() error) error
|
||||||
|
|
||||||
// DoWithAcceptable runs the given request if the Breaker accepts it.
|
// DoWithAcceptable runs the given request if the Breaker accepts it.
|
||||||
// DoWithAcceptable returns an error instantly if the Breaker rejects the request.
|
// DoWithAcceptable returns an error instantly if the Breaker rejects the request.
|
||||||
@@ -48,21 +50,31 @@ type (
|
|||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
// acceptable checks if it's a successful call, even if the error is not nil.
|
// acceptable checks if it's a successful call, even if the error is not nil.
|
||||||
DoWithAcceptable(req func() error, acceptable Acceptable) error
|
DoWithAcceptable(req func() error, acceptable Acceptable) error
|
||||||
|
// DoWithAcceptableCtx runs the given request if the Breaker accepts it when ctx isn't done.
|
||||||
|
DoWithAcceptableCtx(ctx context.Context, req func() error, acceptable Acceptable) error
|
||||||
|
|
||||||
// DoWithFallback runs the given request if the Breaker accepts it.
|
// DoWithFallback runs the given request if the Breaker accepts it.
|
||||||
// DoWithFallback runs the fallback if the Breaker rejects the request.
|
// DoWithFallback runs the fallback if the Breaker rejects the request.
|
||||||
// If a panic occurs in the request, the Breaker handles it as an error
|
// If a panic occurs in the request, the Breaker handles it as an error
|
||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
DoWithFallback(req func() error, fallback func(err error) error) error
|
DoWithFallback(req func() error, fallback Fallback) error
|
||||||
|
// DoWithFallbackCtx runs the given request if the Breaker accepts it when ctx isn't done.
|
||||||
|
DoWithFallbackCtx(ctx context.Context, req func() error, fallback Fallback) error
|
||||||
|
|
||||||
// DoWithFallbackAcceptable runs the given request if the Breaker accepts it.
|
// DoWithFallbackAcceptable runs the given request if the Breaker accepts it.
|
||||||
// DoWithFallbackAcceptable runs the fallback if the Breaker rejects the request.
|
// DoWithFallbackAcceptable runs the fallback if the Breaker rejects the request.
|
||||||
// If a panic occurs in the request, the Breaker handles it as an error
|
// If a panic occurs in the request, the Breaker handles it as an error
|
||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
// acceptable checks if it's a successful call, even if the error is not nil.
|
// acceptable checks if it's a successful call, even if the error is not nil.
|
||||||
DoWithFallbackAcceptable(req func() error, fallback func(err error) error, acceptable Acceptable) error
|
DoWithFallbackAcceptable(req func() error, fallback Fallback, acceptable Acceptable) error
|
||||||
|
// DoWithFallbackAcceptableCtx runs the given request if the Breaker accepts it when ctx isn't done.
|
||||||
|
DoWithFallbackAcceptableCtx(ctx context.Context, req func() error, fallback Fallback,
|
||||||
|
acceptable Acceptable) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Fallback is the func to be called if the request is rejected.
|
||||||
|
Fallback func(err error) error
|
||||||
|
|
||||||
// Option defines the method to customize a Breaker.
|
// Option defines the method to customize a Breaker.
|
||||||
Option func(breaker *circuitBreaker)
|
Option func(breaker *circuitBreaker)
|
||||||
|
|
||||||
@@ -86,12 +98,12 @@ type (
|
|||||||
|
|
||||||
internalThrottle interface {
|
internalThrottle interface {
|
||||||
allow() (internalPromise, error)
|
allow() (internalPromise, error)
|
||||||
doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error
|
doReq(req func() error, fallback Fallback, acceptable Acceptable) error
|
||||||
}
|
}
|
||||||
|
|
||||||
throttle interface {
|
throttle interface {
|
||||||
allow() (Promise, error)
|
allow() (Promise, error)
|
||||||
doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error
|
doReq(req func() error, fallback Fallback, acceptable Acceptable) error
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -114,23 +126,71 @@ func (cb *circuitBreaker) Allow() (Promise, error) {
|
|||||||
return cb.throttle.allow()
|
return cb.throttle.allow()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cb *circuitBreaker) AllowCtx(ctx context.Context) (Promise, error) {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, ctx.Err()
|
||||||
|
default:
|
||||||
|
return cb.Allow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (cb *circuitBreaker) Do(req func() error) error {
|
func (cb *circuitBreaker) Do(req func() error) error {
|
||||||
return cb.throttle.doReq(req, nil, defaultAcceptable)
|
return cb.throttle.doReq(req, nil, defaultAcceptable)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cb *circuitBreaker) DoCtx(ctx context.Context, req func() error) error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
return cb.Do(req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (cb *circuitBreaker) DoWithAcceptable(req func() error, acceptable Acceptable) error {
|
func (cb *circuitBreaker) DoWithAcceptable(req func() error, acceptable Acceptable) error {
|
||||||
return cb.throttle.doReq(req, nil, acceptable)
|
return cb.throttle.doReq(req, nil, acceptable)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cb *circuitBreaker) DoWithFallback(req func() error, fallback func(err error) error) error {
|
func (cb *circuitBreaker) DoWithAcceptableCtx(ctx context.Context, req func() error,
|
||||||
|
acceptable Acceptable) error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
return cb.DoWithAcceptable(req, acceptable)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cb *circuitBreaker) DoWithFallback(req func() error, fallback Fallback) error {
|
||||||
return cb.throttle.doReq(req, fallback, defaultAcceptable)
|
return cb.throttle.doReq(req, fallback, defaultAcceptable)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cb *circuitBreaker) DoWithFallbackAcceptable(req func() error, fallback func(err error) error,
|
func (cb *circuitBreaker) DoWithFallbackCtx(ctx context.Context, req func() error,
|
||||||
|
fallback Fallback) error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
return cb.DoWithFallback(req, fallback)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cb *circuitBreaker) DoWithFallbackAcceptable(req func() error, fallback Fallback,
|
||||||
acceptable Acceptable) error {
|
acceptable Acceptable) error {
|
||||||
return cb.throttle.doReq(req, fallback, acceptable)
|
return cb.throttle.doReq(req, fallback, acceptable)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cb *circuitBreaker) DoWithFallbackAcceptableCtx(ctx context.Context, req func() error,
|
||||||
|
fallback Fallback, acceptable Acceptable) error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
return cb.DoWithFallbackAcceptable(req, fallback, acceptable)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (cb *circuitBreaker) Name() string {
|
func (cb *circuitBreaker) Name() string {
|
||||||
return cb.name
|
return cb.name
|
||||||
}
|
}
|
||||||
@@ -168,7 +228,7 @@ func (lt loggedThrottle) allow() (Promise, error) {
|
|||||||
}, lt.logError(err)
|
}, lt.logError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lt loggedThrottle) doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error {
|
func (lt loggedThrottle) doReq(req func() error, fallback Fallback, acceptable Acceptable) error {
|
||||||
return lt.logError(lt.internalThrottle.doReq(req, fallback, func(err error) bool {
|
return lt.logError(lt.internalThrottle.doReq(req, fallback, func(err error) bool {
|
||||||
accept := acceptable(err)
|
accept := acceptable(err)
|
||||||
if !accept && err != nil {
|
if !accept && err != nil {
|
||||||
@@ -198,14 +258,14 @@ type errorWindow struct {
|
|||||||
|
|
||||||
func (ew *errorWindow) add(reason string) {
|
func (ew *errorWindow) add(reason string) {
|
||||||
ew.lock.Lock()
|
ew.lock.Lock()
|
||||||
ew.reasons[ew.index] = fmt.Sprintf("%s %s", time.Now().Format(timeFormat), reason)
|
ew.reasons[ew.index] = fmt.Sprintf("%s %s", time.Now().Format(time.TimeOnly), reason)
|
||||||
ew.index = (ew.index + 1) % numHistoryReasons
|
ew.index = (ew.index + 1) % numHistoryReasons
|
||||||
ew.count = mathx.MinInt(ew.count+1, numHistoryReasons)
|
ew.count = min(ew.count+1, numHistoryReasons)
|
||||||
ew.lock.Unlock()
|
ew.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ew *errorWindow) String() string {
|
func (ew *errorWindow) String() string {
|
||||||
var reasons []string
|
reasons := make([]string, 0, ew.count)
|
||||||
|
|
||||||
ew.lock.Lock()
|
ew.lock.Lock()
|
||||||
// reverse order
|
// reverse order
|
||||||
|
|||||||
@@ -1,11 +1,13 @@
|
|||||||
package breaker
|
package breaker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/zeromicro/go-zero/core/stat"
|
"github.com/zeromicro/go-zero/core/stat"
|
||||||
@@ -16,10 +18,274 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCircuitBreaker_Allow(t *testing.T) {
|
func TestCircuitBreaker_Allow(t *testing.T) {
|
||||||
b := NewBreaker()
|
t.Run("allow", func(t *testing.T) {
|
||||||
assert.True(t, len(b.Name()) > 0)
|
b := NewBreaker()
|
||||||
_, err := b.Allow()
|
assert.True(t, len(b.Name()) > 0)
|
||||||
assert.Nil(t, err)
|
_, err := b.Allow()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("allow with ctx", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
_, err := b.AllowCtx(context.Background())
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("allow with ctx timeout", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||||
|
defer cancel()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
_, err := b.AllowCtx(ctx)
|
||||||
|
assert.ErrorIs(t, err, context.DeadlineExceeded)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("allow with ctx cancel", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
cancel()
|
||||||
|
_, err := b.AllowCtx(ctx)
|
||||||
|
assert.ErrorIs(t, err, context.Canceled)
|
||||||
|
}
|
||||||
|
_, err := b.AllowCtx(context.Background())
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCircuitBreaker_Do(t *testing.T) {
|
||||||
|
t.Run("do", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.Do(func() error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("do with ctx", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.DoCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("do with ctx timeout", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||||
|
defer cancel()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
err := b.DoCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.DeadlineExceeded)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("do with ctx cancel", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
cancel()
|
||||||
|
err := b.DoCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.Canceled)
|
||||||
|
}
|
||||||
|
assert.NoError(t, b.DoCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCircuitBreaker_DoWithAcceptable(t *testing.T) {
|
||||||
|
t.Run("doWithAcceptable", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.DoWithAcceptable(func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithAcceptable with ctx", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.DoWithAcceptableCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithAcceptable with ctx timeout", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||||
|
defer cancel()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
err := b.DoWithAcceptableCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.DeadlineExceeded)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithAcceptable with ctx cancel", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
cancel()
|
||||||
|
err := b.DoWithAcceptableCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.Canceled)
|
||||||
|
}
|
||||||
|
assert.NoError(t, b.DoWithAcceptableCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCircuitBreaker_DoWithFallback(t *testing.T) {
|
||||||
|
t.Run("doWithFallback", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.DoWithFallback(func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithFallback with ctx", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.DoWithFallbackCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithFallback with ctx timeout", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||||
|
defer cancel()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
err := b.DoWithFallbackCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.DeadlineExceeded)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithFallback with ctx cancel", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
cancel()
|
||||||
|
err := b.DoWithFallbackCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.Canceled)
|
||||||
|
}
|
||||||
|
assert.NoError(t, b.DoWithFallbackCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCircuitBreaker_DoWithFallbackAcceptable(t *testing.T) {
|
||||||
|
t.Run("doWithFallbackAcceptable", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.DoWithFallbackAcceptable(func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithFallbackAcceptable with ctx", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
err := b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.Nil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithFallbackAcceptable with ctx timeout", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||||
|
defer cancel()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
err := b.DoWithFallbackAcceptableCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.DeadlineExceeded)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("doWithFallbackAcceptable with ctx cancel", func(t *testing.T) {
|
||||||
|
b := NewBreaker()
|
||||||
|
assert.True(t, len(b.Name()) > 0)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
cancel()
|
||||||
|
err := b.DoWithFallbackAcceptableCtx(ctx, func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.ErrorIs(t, err, context.Canceled)
|
||||||
|
}
|
||||||
|
assert.NoError(t, b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
}))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLogReason(t *testing.T) {
|
func TestLogReason(t *testing.T) {
|
||||||
|
|||||||
@@ -1,6 +1,9 @@
|
|||||||
package breaker
|
package breaker
|
||||||
|
|
||||||
import "sync"
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
@@ -14,6 +17,13 @@ func Do(name string, req func() error) error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoCtx calls Breaker.DoCtx on the Breaker with given name.
|
||||||
|
func DoCtx(ctx context.Context, name string, req func() error) error {
|
||||||
|
return do(name, func(b Breaker) error {
|
||||||
|
return b.DoCtx(ctx, req)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// DoWithAcceptable calls Breaker.DoWithAcceptable on the Breaker with given name.
|
// DoWithAcceptable calls Breaker.DoWithAcceptable on the Breaker with given name.
|
||||||
func DoWithAcceptable(name string, req func() error, acceptable Acceptable) error {
|
func DoWithAcceptable(name string, req func() error, acceptable Acceptable) error {
|
||||||
return do(name, func(b Breaker) error {
|
return do(name, func(b Breaker) error {
|
||||||
@@ -21,21 +31,44 @@ func DoWithAcceptable(name string, req func() error, acceptable Acceptable) erro
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoWithAcceptableCtx calls Breaker.DoWithAcceptableCtx on the Breaker with given name.
|
||||||
|
func DoWithAcceptableCtx(ctx context.Context, name string, req func() error,
|
||||||
|
acceptable Acceptable) error {
|
||||||
|
return do(name, func(b Breaker) error {
|
||||||
|
return b.DoWithAcceptableCtx(ctx, req, acceptable)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// DoWithFallback calls Breaker.DoWithFallback on the Breaker with given name.
|
// DoWithFallback calls Breaker.DoWithFallback on the Breaker with given name.
|
||||||
func DoWithFallback(name string, req func() error, fallback func(err error) error) error {
|
func DoWithFallback(name string, req func() error, fallback Fallback) error {
|
||||||
return do(name, func(b Breaker) error {
|
return do(name, func(b Breaker) error {
|
||||||
return b.DoWithFallback(req, fallback)
|
return b.DoWithFallback(req, fallback)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoWithFallbackCtx calls Breaker.DoWithFallbackCtx on the Breaker with given name.
|
||||||
|
func DoWithFallbackCtx(ctx context.Context, name string, req func() error, fallback Fallback) error {
|
||||||
|
return do(name, func(b Breaker) error {
|
||||||
|
return b.DoWithFallbackCtx(ctx, req, fallback)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// DoWithFallbackAcceptable calls Breaker.DoWithFallbackAcceptable on the Breaker with given name.
|
// DoWithFallbackAcceptable calls Breaker.DoWithFallbackAcceptable on the Breaker with given name.
|
||||||
func DoWithFallbackAcceptable(name string, req func() error, fallback func(err error) error,
|
func DoWithFallbackAcceptable(name string, req func() error, fallback Fallback,
|
||||||
acceptable Acceptable) error {
|
acceptable Acceptable) error {
|
||||||
return do(name, func(b Breaker) error {
|
return do(name, func(b Breaker) error {
|
||||||
return b.DoWithFallbackAcceptable(req, fallback, acceptable)
|
return b.DoWithFallbackAcceptable(req, fallback, acceptable)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoWithFallbackAcceptableCtx calls Breaker.DoWithFallbackAcceptableCtx on the Breaker with given name.
|
||||||
|
func DoWithFallbackAcceptableCtx(ctx context.Context, name string, req func() error,
|
||||||
|
fallback Fallback, acceptable Acceptable) error {
|
||||||
|
return do(name, func(b Breaker) error {
|
||||||
|
return b.DoWithFallbackAcceptableCtx(ctx, req, fallback, acceptable)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// GetBreaker returns the Breaker with the given name.
|
// GetBreaker returns the Breaker with the given name.
|
||||||
func GetBreaker(name string) Breaker {
|
func GetBreaker(name string) Breaker {
|
||||||
lock.RLock()
|
lock.RLock()
|
||||||
@@ -59,7 +92,7 @@ func GetBreaker(name string) Breaker {
|
|||||||
// NoBreakerFor disables the circuit breaker for the given name.
|
// NoBreakerFor disables the circuit breaker for the given name.
|
||||||
func NoBreakerFor(name string) {
|
func NoBreakerFor(name string) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
breakers[name] = newNopBreaker()
|
breakers[name] = NopBreaker()
|
||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package breaker
|
package breaker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -22,6 +23,9 @@ func TestBreakersDo(t *testing.T) {
|
|||||||
assert.Equal(t, errDummy, Do("any", func() error {
|
assert.Equal(t, errDummy, Do("any", func() error {
|
||||||
return errDummy
|
return errDummy
|
||||||
}))
|
}))
|
||||||
|
assert.Equal(t, errDummy, DoCtx(context.Background(), "any", func() error {
|
||||||
|
return errDummy
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBreakersDoWithAcceptable(t *testing.T) {
|
func TestBreakersDoWithAcceptable(t *testing.T) {
|
||||||
@@ -38,6 +42,13 @@ func TestBreakersDoWithAcceptable(t *testing.T) {
|
|||||||
return nil
|
return nil
|
||||||
}) == nil
|
}) == nil
|
||||||
})
|
})
|
||||||
|
verify(t, func() bool {
|
||||||
|
return DoWithAcceptableCtx(context.Background(), "anyone", func() error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) bool {
|
||||||
|
return true
|
||||||
|
}) == nil
|
||||||
|
})
|
||||||
|
|
||||||
for i := 0; i < 10000; i++ {
|
for i := 0; i < 10000; i++ {
|
||||||
err := DoWithAcceptable("another", func() error {
|
err := DoWithAcceptable("another", func() error {
|
||||||
@@ -76,6 +87,12 @@ func TestBreakersFallback(t *testing.T) {
|
|||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
assert.True(t, err == nil || errors.Is(err, errDummy))
|
assert.True(t, err == nil || errors.Is(err, errDummy))
|
||||||
|
err = DoWithFallbackCtx(context.Background(), "fallback", func() error {
|
||||||
|
return errDummy
|
||||||
|
}, func(err error) error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
assert.True(t, err == nil || errors.Is(err, errDummy))
|
||||||
}
|
}
|
||||||
verify(t, func() bool {
|
verify(t, func() bool {
|
||||||
return errors.Is(Do("fallback", func() error {
|
return errors.Is(Do("fallback", func() error {
|
||||||
@@ -86,7 +103,7 @@ func TestBreakersFallback(t *testing.T) {
|
|||||||
|
|
||||||
func TestBreakersAcceptableFallback(t *testing.T) {
|
func TestBreakersAcceptableFallback(t *testing.T) {
|
||||||
errDummy := errors.New("any")
|
errDummy := errors.New("any")
|
||||||
for i := 0; i < 10000; i++ {
|
for i := 0; i < 5000; i++ {
|
||||||
err := DoWithFallbackAcceptable("acceptablefallback", func() error {
|
err := DoWithFallbackAcceptable("acceptablefallback", func() error {
|
||||||
return errDummy
|
return errDummy
|
||||||
}, func(err error) error {
|
}, func(err error) error {
|
||||||
@@ -95,6 +112,14 @@ func TestBreakersAcceptableFallback(t *testing.T) {
|
|||||||
return err == nil
|
return err == nil
|
||||||
})
|
})
|
||||||
assert.True(t, err == nil || errors.Is(err, errDummy))
|
assert.True(t, err == nil || errors.Is(err, errDummy))
|
||||||
|
err = DoWithFallbackAcceptableCtx(context.Background(), "acceptablefallback", func() error {
|
||||||
|
return errDummy
|
||||||
|
}, func(err error) error {
|
||||||
|
return nil
|
||||||
|
}, func(err error) bool {
|
||||||
|
return err == nil
|
||||||
|
})
|
||||||
|
assert.True(t, err == nil || errors.Is(err, errDummy))
|
||||||
}
|
}
|
||||||
verify(t, func() bool {
|
verify(t, func() bool {
|
||||||
return errors.Is(Do("acceptablefallback", func() error {
|
return errors.Is(Do("acceptablefallback", func() error {
|
||||||
@@ -110,5 +135,5 @@ func verify(t *testing.T, fn func() bool) {
|
|||||||
count++
|
count++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
assert.True(t, count >= 80, fmt.Sprintf("should be greater than 80, actual %d", count))
|
assert.True(t, count >= 75, fmt.Sprintf("should be greater than 75, actual %d", count))
|
||||||
}
|
}
|
||||||
|
|||||||
48
core/breaker/bucket.go
Normal file
48
core/breaker/bucket.go
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
package breaker
|
||||||
|
|
||||||
|
const (
|
||||||
|
success = iota
|
||||||
|
fail
|
||||||
|
drop
|
||||||
|
)
|
||||||
|
|
||||||
|
// bucket defines the bucket that holds sum and num of additions.
|
||||||
|
type bucket struct {
|
||||||
|
Sum int64
|
||||||
|
Success int64
|
||||||
|
Failure int64
|
||||||
|
Drop int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bucket) Add(v int64) {
|
||||||
|
switch v {
|
||||||
|
case fail:
|
||||||
|
b.fail()
|
||||||
|
case drop:
|
||||||
|
b.drop()
|
||||||
|
default:
|
||||||
|
b.succeed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bucket) Reset() {
|
||||||
|
b.Sum = 0
|
||||||
|
b.Success = 0
|
||||||
|
b.Failure = 0
|
||||||
|
b.Drop = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bucket) drop() {
|
||||||
|
b.Sum++
|
||||||
|
b.Drop++
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bucket) fail() {
|
||||||
|
b.Sum++
|
||||||
|
b.Failure++
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bucket) succeed() {
|
||||||
|
b.Sum++
|
||||||
|
b.Success++
|
||||||
|
}
|
||||||
43
core/breaker/bucket_test.go
Normal file
43
core/breaker/bucket_test.go
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
package breaker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBucketAdd(t *testing.T) {
|
||||||
|
b := &bucket{}
|
||||||
|
|
||||||
|
// Test succeed
|
||||||
|
b.Add(0) // Using 0 for success
|
||||||
|
assert.Equal(t, int64(1), b.Sum, "Sum should be incremented")
|
||||||
|
assert.Equal(t, int64(1), b.Success, "Success should be incremented")
|
||||||
|
assert.Equal(t, int64(0), b.Failure, "Failure should not be incremented")
|
||||||
|
assert.Equal(t, int64(0), b.Drop, "Drop should not be incremented")
|
||||||
|
|
||||||
|
// Test failure
|
||||||
|
b.Add(fail)
|
||||||
|
assert.Equal(t, int64(2), b.Sum, "Sum should be incremented")
|
||||||
|
assert.Equal(t, int64(1), b.Failure, "Failure should be incremented")
|
||||||
|
assert.Equal(t, int64(0), b.Drop, "Drop should not be incremented")
|
||||||
|
|
||||||
|
// Test drop
|
||||||
|
b.Add(drop)
|
||||||
|
assert.Equal(t, int64(3), b.Sum, "Sum should be incremented")
|
||||||
|
assert.Equal(t, int64(1), b.Drop, "Drop should be incremented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBucketReset(t *testing.T) {
|
||||||
|
b := &bucket{
|
||||||
|
Sum: 3,
|
||||||
|
Success: 1,
|
||||||
|
Failure: 1,
|
||||||
|
Drop: 1,
|
||||||
|
}
|
||||||
|
b.Reset()
|
||||||
|
assert.Equal(t, int64(0), b.Sum, "Sum should be reset to 0")
|
||||||
|
assert.Equal(t, int64(0), b.Success, "Success should be reset to 0")
|
||||||
|
assert.Equal(t, int64(0), b.Failure, "Failure should be reset to 0")
|
||||||
|
assert.Equal(t, int64(0), b.Drop, "Drop should be reset to 0")
|
||||||
|
}
|
||||||
@@ -1,57 +1,87 @@
|
|||||||
package breaker
|
package breaker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/collection"
|
"github.com/zeromicro/go-zero/core/collection"
|
||||||
"github.com/zeromicro/go-zero/core/mathx"
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// 250ms for bucket duration
|
// 250ms for bucket duration
|
||||||
window = time.Second * 10
|
window = time.Second * 10
|
||||||
buckets = 40
|
buckets = 40
|
||||||
k = 1.5
|
forcePassDuration = time.Second
|
||||||
protection = 5
|
k = 1.5
|
||||||
|
minK = 1.1
|
||||||
|
protection = 5
|
||||||
)
|
)
|
||||||
|
|
||||||
// googleBreaker is a netflixBreaker pattern from google.
|
// googleBreaker is a netflixBreaker pattern from google.
|
||||||
// see Client-Side Throttling section in https://landing.google.com/sre/sre-book/chapters/handling-overload/
|
// see Client-Side Throttling section in https://landing.google.com/sre/sre-book/chapters/handling-overload/
|
||||||
type googleBreaker struct {
|
type (
|
||||||
k float64
|
googleBreaker struct {
|
||||||
stat *collection.RollingWindow
|
k float64
|
||||||
proba *mathx.Proba
|
stat *collection.RollingWindow[int64, *bucket]
|
||||||
}
|
proba *mathx.Proba
|
||||||
|
lastPass *syncx.AtomicDuration
|
||||||
|
}
|
||||||
|
|
||||||
|
windowResult struct {
|
||||||
|
accepts int64
|
||||||
|
total int64
|
||||||
|
failingBuckets int64
|
||||||
|
workingBuckets int64
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
func newGoogleBreaker() *googleBreaker {
|
func newGoogleBreaker() *googleBreaker {
|
||||||
bucketDuration := time.Duration(int64(window) / int64(buckets))
|
bucketDuration := time.Duration(int64(window) / int64(buckets))
|
||||||
st := collection.NewRollingWindow(buckets, bucketDuration)
|
st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
|
||||||
|
return new(bucket)
|
||||||
|
}, buckets, bucketDuration)
|
||||||
return &googleBreaker{
|
return &googleBreaker{
|
||||||
stat: st,
|
stat: st,
|
||||||
k: k,
|
k: k,
|
||||||
proba: mathx.NewProba(),
|
proba: mathx.NewProba(),
|
||||||
|
lastPass: syncx.NewAtomicDuration(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *googleBreaker) accept() error {
|
func (b *googleBreaker) accept() error {
|
||||||
accepts, total := b.history()
|
var w float64
|
||||||
weightedAccepts := b.k * float64(accepts)
|
history := b.history()
|
||||||
|
w = b.k - (b.k-minK)*float64(history.failingBuckets)/buckets
|
||||||
|
weightedAccepts := mathx.AtLeast(w, minK) * float64(history.accepts)
|
||||||
// https://landing.google.com/sre/sre-book/chapters/handling-overload/#eq2101
|
// https://landing.google.com/sre/sre-book/chapters/handling-overload/#eq2101
|
||||||
dropRatio := math.Max(0, (float64(total-protection)-weightedAccepts)/float64(total+1))
|
// for better performance, no need to care about the negative ratio
|
||||||
|
dropRatio := (float64(history.total-protection) - weightedAccepts) / float64(history.total+1)
|
||||||
if dropRatio <= 0 {
|
if dropRatio <= 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
lastPass := b.lastPass.Load()
|
||||||
|
if lastPass > 0 && timex.Since(lastPass) > forcePassDuration {
|
||||||
|
b.lastPass.Set(timex.Now())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dropRatio *= float64(buckets-history.workingBuckets) / buckets
|
||||||
|
|
||||||
if b.proba.TrueOnProba(dropRatio) {
|
if b.proba.TrueOnProba(dropRatio) {
|
||||||
return ErrServiceUnavailable
|
return ErrServiceUnavailable
|
||||||
}
|
}
|
||||||
|
|
||||||
|
b.lastPass.Set(timex.Now())
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *googleBreaker) allow() (internalPromise, error) {
|
func (b *googleBreaker) allow() (internalPromise, error) {
|
||||||
if err := b.accept(); err != nil {
|
if err := b.accept(); err != nil {
|
||||||
|
b.markDrop()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -60,8 +90,9 @@ func (b *googleBreaker) allow() (internalPromise, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *googleBreaker) doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error {
|
func (b *googleBreaker) doReq(req func() error, fallback Fallback, acceptable Acceptable) error {
|
||||||
if err := b.accept(); err != nil {
|
if err := b.accept(); err != nil {
|
||||||
|
b.markDrop()
|
||||||
if fallback != nil {
|
if fallback != nil {
|
||||||
return fallback(err)
|
return fallback(err)
|
||||||
}
|
}
|
||||||
@@ -69,38 +100,55 @@ func (b *googleBreaker) doReq(req func() error, fallback func(err error) error,
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var succ bool
|
||||||
defer func() {
|
defer func() {
|
||||||
if e := recover(); e != nil {
|
// if req() panic, success is false, mark as failure
|
||||||
|
if succ {
|
||||||
|
b.markSuccess()
|
||||||
|
} else {
|
||||||
b.markFailure()
|
b.markFailure()
|
||||||
panic(e)
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
err := req()
|
err := req()
|
||||||
if acceptable(err) {
|
if acceptable(err) {
|
||||||
b.markSuccess()
|
succ = true
|
||||||
} else {
|
|
||||||
b.markFailure()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *googleBreaker) markSuccess() {
|
func (b *googleBreaker) markDrop() {
|
||||||
b.stat.Add(1)
|
b.stat.Add(drop)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *googleBreaker) markFailure() {
|
func (b *googleBreaker) markFailure() {
|
||||||
b.stat.Add(0)
|
b.stat.Add(fail)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *googleBreaker) history() (accepts, total int64) {
|
func (b *googleBreaker) markSuccess() {
|
||||||
b.stat.Reduce(func(b *collection.Bucket) {
|
b.stat.Add(success)
|
||||||
accepts += int64(b.Sum)
|
}
|
||||||
total += b.Count
|
|
||||||
|
func (b *googleBreaker) history() windowResult {
|
||||||
|
var result windowResult
|
||||||
|
|
||||||
|
b.stat.Reduce(func(b *bucket) {
|
||||||
|
result.accepts += b.Success
|
||||||
|
result.total += b.Sum
|
||||||
|
if b.Failure > 0 {
|
||||||
|
result.workingBuckets = 0
|
||||||
|
} else if b.Success > 0 {
|
||||||
|
result.workingBuckets++
|
||||||
|
}
|
||||||
|
if b.Success > 0 {
|
||||||
|
result.failingBuckets = 0
|
||||||
|
} else if b.Failure > 0 {
|
||||||
|
result.failingBuckets++
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
return
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
type googlePromise struct {
|
type googlePromise struct {
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/collection"
|
"github.com/zeromicro/go-zero/core/collection"
|
||||||
"github.com/zeromicro/go-zero/core/mathx"
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
"github.com/zeromicro/go-zero/core/stat"
|
"github.com/zeromicro/go-zero/core/stat"
|
||||||
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -22,11 +23,14 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getGoogleBreaker() *googleBreaker {
|
func getGoogleBreaker() *googleBreaker {
|
||||||
st := collection.NewRollingWindow(testBuckets, testInterval)
|
st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
|
||||||
|
return new(bucket)
|
||||||
|
}, testBuckets, testInterval)
|
||||||
return &googleBreaker{
|
return &googleBreaker{
|
||||||
stat: st,
|
stat: st,
|
||||||
k: 5,
|
k: 5,
|
||||||
proba: mathx.NewProba(),
|
proba: mathx.NewProba(),
|
||||||
|
lastPass: syncx.NewAtomicDuration(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,6 +67,33 @@ func TestGoogleBreakerOpen(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGoogleBreakerRecover(t *testing.T) {
|
||||||
|
st := collection.NewRollingWindow[int64, *bucket](func() *bucket {
|
||||||
|
return new(bucket)
|
||||||
|
}, testBuckets*2, testInterval)
|
||||||
|
b := &googleBreaker{
|
||||||
|
stat: st,
|
||||||
|
k: k,
|
||||||
|
proba: mathx.NewProba(),
|
||||||
|
lastPass: syncx.NewAtomicDuration(),
|
||||||
|
}
|
||||||
|
for i := 0; i < testBuckets; i++ {
|
||||||
|
for j := 0; j < 100; j++ {
|
||||||
|
b.stat.Add(1)
|
||||||
|
}
|
||||||
|
time.Sleep(testInterval)
|
||||||
|
}
|
||||||
|
for i := 0; i < testBuckets; i++ {
|
||||||
|
for j := 0; j < 100; j++ {
|
||||||
|
b.stat.Add(0)
|
||||||
|
}
|
||||||
|
time.Sleep(testInterval)
|
||||||
|
}
|
||||||
|
verify(t, func() bool {
|
||||||
|
return b.accept() == nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestGoogleBreakerFallback(t *testing.T) {
|
func TestGoogleBreakerFallback(t *testing.T) {
|
||||||
b := getGoogleBreaker()
|
b := getGoogleBreaker()
|
||||||
markSuccess(b, 1)
|
markSuccess(b, 1)
|
||||||
@@ -89,6 +120,43 @@ func TestGoogleBreakerReject(t *testing.T) {
|
|||||||
}, nil, defaultAcceptable))
|
}, nil, defaultAcceptable))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGoogleBreakerMoreFallingBuckets(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
t.Run("more falling buckets", func(t *testing.T) {
|
||||||
|
b := getGoogleBreaker()
|
||||||
|
|
||||||
|
func() {
|
||||||
|
stopChan := time.After(testInterval * 6)
|
||||||
|
for {
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
select {
|
||||||
|
case <-stopChan:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
assert.Error(t, b.doReq(func() error {
|
||||||
|
return errors.New("foo")
|
||||||
|
}, func(err error) error {
|
||||||
|
return err
|
||||||
|
}, func(err error) bool {
|
||||||
|
return err == nil
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
var count int
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
if errors.Is(b.doReq(func() error {
|
||||||
|
return ErrServiceUnavailable
|
||||||
|
}, nil, defaultAcceptable), ErrServiceUnavailable) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.True(t, count > 90)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestGoogleBreakerAcceptable(t *testing.T) {
|
func TestGoogleBreakerAcceptable(t *testing.T) {
|
||||||
b := getGoogleBreaker()
|
b := getGoogleBreaker()
|
||||||
errAcceptable := errors.New("any")
|
errAcceptable := errors.New("any")
|
||||||
@@ -164,41 +232,38 @@ func TestGoogleBreakerSelfProtection(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGoogleBreakerHistory(t *testing.T) {
|
func TestGoogleBreakerHistory(t *testing.T) {
|
||||||
var b *googleBreaker
|
|
||||||
var accepts, total int64
|
|
||||||
|
|
||||||
sleep := testInterval
|
sleep := testInterval
|
||||||
t.Run("accepts == total", func(t *testing.T) {
|
t.Run("accepts == total", func(t *testing.T) {
|
||||||
b = getGoogleBreaker()
|
b := getGoogleBreaker()
|
||||||
markSuccessWithDuration(b, 10, sleep/2)
|
markSuccessWithDuration(b, 10, sleep/2)
|
||||||
accepts, total = b.history()
|
result := b.history()
|
||||||
assert.Equal(t, int64(10), accepts)
|
assert.Equal(t, int64(10), result.accepts)
|
||||||
assert.Equal(t, int64(10), total)
|
assert.Equal(t, int64(10), result.total)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("fail == total", func(t *testing.T) {
|
t.Run("fail == total", func(t *testing.T) {
|
||||||
b = getGoogleBreaker()
|
b := getGoogleBreaker()
|
||||||
markFailedWithDuration(b, 10, sleep/2)
|
markFailedWithDuration(b, 10, sleep/2)
|
||||||
accepts, total = b.history()
|
result := b.history()
|
||||||
assert.Equal(t, int64(0), accepts)
|
assert.Equal(t, int64(0), result.accepts)
|
||||||
assert.Equal(t, int64(10), total)
|
assert.Equal(t, int64(10), result.total)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("accepts = 1/2 * total, fail = 1/2 * total", func(t *testing.T) {
|
t.Run("accepts = 1/2 * total, fail = 1/2 * total", func(t *testing.T) {
|
||||||
b = getGoogleBreaker()
|
b := getGoogleBreaker()
|
||||||
markFailedWithDuration(b, 5, sleep/2)
|
markFailedWithDuration(b, 5, sleep/2)
|
||||||
markSuccessWithDuration(b, 5, sleep/2)
|
markSuccessWithDuration(b, 5, sleep/2)
|
||||||
accepts, total = b.history()
|
result := b.history()
|
||||||
assert.Equal(t, int64(5), accepts)
|
assert.Equal(t, int64(5), result.accepts)
|
||||||
assert.Equal(t, int64(10), total)
|
assert.Equal(t, int64(10), result.total)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("auto reset rolling counter", func(t *testing.T) {
|
t.Run("auto reset rolling counter", func(t *testing.T) {
|
||||||
b = getGoogleBreaker()
|
b := getGoogleBreaker()
|
||||||
time.Sleep(testInterval * testBuckets)
|
time.Sleep(testInterval * testBuckets)
|
||||||
accepts, total = b.history()
|
result := b.history()
|
||||||
assert.Equal(t, int64(0), accepts)
|
assert.Equal(t, int64(0), result.accepts)
|
||||||
assert.Equal(t, int64(0), total)
|
assert.Equal(t, int64(0), result.total)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -206,7 +271,7 @@ func BenchmarkGoogleBreakerAllow(b *testing.B) {
|
|||||||
breaker := getGoogleBreaker()
|
breaker := getGoogleBreaker()
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
for i := 0; i <= b.N; i++ {
|
for i := 0; i <= b.N; i++ {
|
||||||
breaker.accept()
|
_ = breaker.accept()
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
breaker.markSuccess()
|
breaker.markSuccess()
|
||||||
} else {
|
} else {
|
||||||
@@ -215,6 +280,16 @@ func BenchmarkGoogleBreakerAllow(b *testing.B) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func BenchmarkGoogleBreakerDoReq(b *testing.B) {
|
||||||
|
breaker := getGoogleBreaker()
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i <= b.N; i++ {
|
||||||
|
_ = breaker.doReq(func() error {
|
||||||
|
return nil
|
||||||
|
}, nil, defaultAcceptable)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func markSuccess(b *googleBreaker, count int) {
|
func markSuccess(b *googleBreaker, count int) {
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
p, err := b.allow()
|
p, err := b.allow()
|
||||||
|
|||||||
@@ -1,10 +1,13 @@
|
|||||||
package breaker
|
package breaker
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
const nopBreakerName = "nopBreaker"
|
const nopBreakerName = "nopBreaker"
|
||||||
|
|
||||||
type nopBreaker struct{}
|
type nopBreaker struct{}
|
||||||
|
|
||||||
func newNopBreaker() Breaker {
|
// NopBreaker returns a breaker that never trigger breaker circuit.
|
||||||
|
func NopBreaker() Breaker {
|
||||||
return nopBreaker{}
|
return nopBreaker{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -16,20 +19,40 @@ func (b nopBreaker) Allow() (Promise, error) {
|
|||||||
return nopPromise{}, nil
|
return nopPromise{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b nopBreaker) AllowCtx(_ context.Context) (Promise, error) {
|
||||||
|
return nopPromise{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (b nopBreaker) Do(req func() error) error {
|
func (b nopBreaker) Do(req func() error) error {
|
||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b nopBreaker) DoCtx(_ context.Context, req func() error) error {
|
||||||
|
return req()
|
||||||
|
}
|
||||||
|
|
||||||
func (b nopBreaker) DoWithAcceptable(req func() error, _ Acceptable) error {
|
func (b nopBreaker) DoWithAcceptable(req func() error, _ Acceptable) error {
|
||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b nopBreaker) DoWithFallback(req func() error, _ func(err error) error) error {
|
func (b nopBreaker) DoWithAcceptableCtx(_ context.Context, req func() error, _ Acceptable) error {
|
||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b nopBreaker) DoWithFallbackAcceptable(req func() error, _ func(err error) error,
|
func (b nopBreaker) DoWithFallback(req func() error, _ Fallback) error {
|
||||||
_ Acceptable) error {
|
return req()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b nopBreaker) DoWithFallbackCtx(_ context.Context, req func() error, _ Fallback) error {
|
||||||
|
return req()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b nopBreaker) DoWithFallbackAcceptable(req func() error, _ Fallback, _ Acceptable) error {
|
||||||
|
return req()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b nopBreaker) DoWithFallbackAcceptableCtx(_ context.Context, req func() error,
|
||||||
|
_ Fallback, _ Acceptable) error {
|
||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package breaker
|
package breaker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -8,9 +9,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestNopBreaker(t *testing.T) {
|
func TestNopBreaker(t *testing.T) {
|
||||||
b := newNopBreaker()
|
b := NopBreaker()
|
||||||
assert.Equal(t, nopBreakerName, b.Name())
|
assert.Equal(t, nopBreakerName, b.Name())
|
||||||
p, err := b.Allow()
|
_, err := b.Allow()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
p, err := b.AllowCtx(context.Background())
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
p.Accept()
|
p.Accept()
|
||||||
for i := 0; i < 1000; i++ {
|
for i := 0; i < 1000; i++ {
|
||||||
@@ -21,18 +24,34 @@ func TestNopBreaker(t *testing.T) {
|
|||||||
assert.Nil(t, b.Do(func() error {
|
assert.Nil(t, b.Do(func() error {
|
||||||
return nil
|
return nil
|
||||||
}))
|
}))
|
||||||
|
assert.Nil(t, b.DoCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}))
|
||||||
assert.Nil(t, b.DoWithAcceptable(func() error {
|
assert.Nil(t, b.DoWithAcceptable(func() error {
|
||||||
return nil
|
return nil
|
||||||
}, defaultAcceptable))
|
}, defaultAcceptable))
|
||||||
|
assert.Nil(t, b.DoWithAcceptableCtx(context.Background(), func() error {
|
||||||
|
return nil
|
||||||
|
}, defaultAcceptable))
|
||||||
errDummy := errors.New("any")
|
errDummy := errors.New("any")
|
||||||
assert.Equal(t, errDummy, b.DoWithFallback(func() error {
|
assert.Equal(t, errDummy, b.DoWithFallback(func() error {
|
||||||
return errDummy
|
return errDummy
|
||||||
}, func(err error) error {
|
}, func(err error) error {
|
||||||
return nil
|
return nil
|
||||||
}))
|
}))
|
||||||
|
assert.Equal(t, errDummy, b.DoWithFallbackCtx(context.Background(), func() error {
|
||||||
|
return errDummy
|
||||||
|
}, func(err error) error {
|
||||||
|
return nil
|
||||||
|
}))
|
||||||
assert.Equal(t, errDummy, b.DoWithFallbackAcceptable(func() error {
|
assert.Equal(t, errDummy, b.DoWithFallbackAcceptable(func() error {
|
||||||
return errDummy
|
return errDummy
|
||||||
}, func(err error) error {
|
}, func(err error) error {
|
||||||
return nil
|
return nil
|
||||||
}, defaultAcceptable))
|
}, defaultAcceptable))
|
||||||
|
assert.Equal(t, errDummy, b.DoWithFallbackAcceptableCtx(context.Background(), func() error {
|
||||||
|
return errDummy
|
||||||
|
}, func(err error) error {
|
||||||
|
return nil
|
||||||
|
}, defaultAcceptable))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ var (
|
|||||||
zero = big.NewInt(0)
|
zero = big.NewInt(0)
|
||||||
)
|
)
|
||||||
|
|
||||||
// DhKey defines the Diffie Hellman key.
|
// DhKey defines the Diffie-Hellman key.
|
||||||
type DhKey struct {
|
type DhKey struct {
|
||||||
PriKey *big.Int
|
PriKey *big.Int
|
||||||
PubKey *big.Int
|
PubKey *big.Int
|
||||||
@@ -46,7 +46,7 @@ func ComputeKey(pubKey, priKey *big.Int) (*big.Int, error) {
|
|||||||
return new(big.Int).Exp(pubKey, priKey, p), nil
|
return new(big.Int).Exp(pubKey, priKey, p), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GenerateKey returns a Diffie Hellman key.
|
// GenerateKey returns a Diffie-Hellman key.
|
||||||
func GenerateKey() (*DhKey, error) {
|
func GenerateKey() (*DhKey, error) {
|
||||||
var err error
|
var err error
|
||||||
var x *big.Int
|
var x *big.Int
|
||||||
|
|||||||
@@ -128,8 +128,8 @@ func (c *Cache) Take(key string, fetch func() (any, error)) (any, error) {
|
|||||||
|
|
||||||
var fresh bool
|
var fresh bool
|
||||||
val, err := c.barrier.Do(key, func() (any, error) {
|
val, err := c.barrier.Do(key, func() (any, error) {
|
||||||
// because O(1) on map search in memory, and fetch is an IO query
|
// because O(1) on map search in memory, and fetch is an IO query,
|
||||||
// so we do double check, cache might be taken by another call
|
// so we do double-check, cache might be taken by another call
|
||||||
if val, ok := c.doGet(key); ok {
|
if val, ok := c.doGet(key); ok {
|
||||||
return val, nil
|
return val, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,8 +25,14 @@ func (r *Ring) Add(v any) {
|
|||||||
r.lock.Lock()
|
r.lock.Lock()
|
||||||
defer r.lock.Unlock()
|
defer r.lock.Unlock()
|
||||||
|
|
||||||
r.elements[r.index%len(r.elements)] = v
|
rlen := len(r.elements)
|
||||||
|
r.elements[r.index%rlen] = v
|
||||||
r.index++
|
r.index++
|
||||||
|
|
||||||
|
// prevent ring index overflow
|
||||||
|
if r.index >= rlen<<1 {
|
||||||
|
r.index -= rlen
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Take takes all items from r.
|
// Take takes all items from r.
|
||||||
@@ -36,16 +42,18 @@ func (r *Ring) Take() []any {
|
|||||||
|
|
||||||
var size int
|
var size int
|
||||||
var start int
|
var start int
|
||||||
if r.index > len(r.elements) {
|
rlen := len(r.elements)
|
||||||
size = len(r.elements)
|
|
||||||
start = r.index % len(r.elements)
|
if r.index > rlen {
|
||||||
|
size = rlen
|
||||||
|
start = r.index % rlen
|
||||||
} else {
|
} else {
|
||||||
size = r.index
|
size = r.index
|
||||||
}
|
}
|
||||||
|
|
||||||
elements := make([]any, size)
|
elements := make([]any, size)
|
||||||
for i := 0; i < size; i++ {
|
for i := 0; i < size; i++ {
|
||||||
elements[i] = r.elements[(start+i)%len(r.elements)]
|
elements[i] = r.elements[(start+i)%rlen]
|
||||||
}
|
}
|
||||||
|
|
||||||
return elements
|
return elements
|
||||||
|
|||||||
@@ -4,18 +4,28 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// RollingWindowOption let callers customize the RollingWindow.
|
// BucketInterface is the interface that defines the buckets.
|
||||||
RollingWindowOption func(rollingWindow *RollingWindow)
|
BucketInterface[T Numerical] interface {
|
||||||
|
Add(v T)
|
||||||
|
Reset()
|
||||||
|
}
|
||||||
|
|
||||||
// RollingWindow defines a rolling window to calculate the events in buckets with time interval.
|
// Numerical is the interface that restricts the numerical type.
|
||||||
RollingWindow struct {
|
Numerical = mathx.Numerical
|
||||||
|
|
||||||
|
// RollingWindowOption let callers customize the RollingWindow.
|
||||||
|
RollingWindowOption[T Numerical, B BucketInterface[T]] func(rollingWindow *RollingWindow[T, B])
|
||||||
|
|
||||||
|
// RollingWindow defines a rolling window to calculate the events in buckets with the time interval.
|
||||||
|
RollingWindow[T Numerical, B BucketInterface[T]] struct {
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
size int
|
size int
|
||||||
win *window
|
win *window[T, B]
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
offset int
|
offset int
|
||||||
ignoreCurrent bool
|
ignoreCurrent bool
|
||||||
@@ -25,14 +35,15 @@ type (
|
|||||||
|
|
||||||
// NewRollingWindow returns a RollingWindow that with size buckets and time interval,
|
// NewRollingWindow returns a RollingWindow that with size buckets and time interval,
|
||||||
// use opts to customize the RollingWindow.
|
// use opts to customize the RollingWindow.
|
||||||
func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOption) *RollingWindow {
|
func NewRollingWindow[T Numerical, B BucketInterface[T]](newBucket func() B, size int,
|
||||||
|
interval time.Duration, opts ...RollingWindowOption[T, B]) *RollingWindow[T, B] {
|
||||||
if size < 1 {
|
if size < 1 {
|
||||||
panic("size must be greater than 0")
|
panic("size must be greater than 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
w := &RollingWindow{
|
w := &RollingWindow[T, B]{
|
||||||
size: size,
|
size: size,
|
||||||
win: newWindow(size),
|
win: newWindow[T, B](newBucket, size),
|
||||||
interval: interval,
|
interval: interval,
|
||||||
lastTime: timex.Now(),
|
lastTime: timex.Now(),
|
||||||
}
|
}
|
||||||
@@ -43,7 +54,7 @@ func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOpt
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add adds value to current bucket.
|
// Add adds value to current bucket.
|
||||||
func (rw *RollingWindow) Add(v float64) {
|
func (rw *RollingWindow[T, B]) Add(v T) {
|
||||||
rw.lock.Lock()
|
rw.lock.Lock()
|
||||||
defer rw.lock.Unlock()
|
defer rw.lock.Unlock()
|
||||||
rw.updateOffset()
|
rw.updateOffset()
|
||||||
@@ -51,13 +62,13 @@ func (rw *RollingWindow) Add(v float64) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Reduce runs fn on all buckets, ignore current bucket if ignoreCurrent was set.
|
// Reduce runs fn on all buckets, ignore current bucket if ignoreCurrent was set.
|
||||||
func (rw *RollingWindow) Reduce(fn func(b *Bucket)) {
|
func (rw *RollingWindow[T, B]) Reduce(fn func(b B)) {
|
||||||
rw.lock.RLock()
|
rw.lock.RLock()
|
||||||
defer rw.lock.RUnlock()
|
defer rw.lock.RUnlock()
|
||||||
|
|
||||||
var diff int
|
var diff int
|
||||||
span := rw.span()
|
span := rw.span()
|
||||||
// ignore current bucket, because of partial data
|
// ignore the current bucket, because of partial data
|
||||||
if span == 0 && rw.ignoreCurrent {
|
if span == 0 && rw.ignoreCurrent {
|
||||||
diff = rw.size - 1
|
diff = rw.size - 1
|
||||||
} else {
|
} else {
|
||||||
@@ -69,7 +80,7 @@ func (rw *RollingWindow) Reduce(fn func(b *Bucket)) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rw *RollingWindow) span() int {
|
func (rw *RollingWindow[T, B]) span() int {
|
||||||
offset := int(timex.Since(rw.lastTime) / rw.interval)
|
offset := int(timex.Since(rw.lastTime) / rw.interval)
|
||||||
if 0 <= offset && offset < rw.size {
|
if 0 <= offset && offset < rw.size {
|
||||||
return offset
|
return offset
|
||||||
@@ -78,7 +89,7 @@ func (rw *RollingWindow) span() int {
|
|||||||
return rw.size
|
return rw.size
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rw *RollingWindow) updateOffset() {
|
func (rw *RollingWindow[T, B]) updateOffset() {
|
||||||
span := rw.span()
|
span := rw.span()
|
||||||
if span <= 0 {
|
if span <= 0 {
|
||||||
return
|
return
|
||||||
@@ -97,54 +108,54 @@ func (rw *RollingWindow) updateOffset() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Bucket defines the bucket that holds sum and num of additions.
|
// Bucket defines the bucket that holds sum and num of additions.
|
||||||
type Bucket struct {
|
type Bucket[T Numerical] struct {
|
||||||
Sum float64
|
Sum T
|
||||||
Count int64
|
Count int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Bucket) add(v float64) {
|
func (b *Bucket[T]) Add(v T) {
|
||||||
b.Sum += v
|
b.Sum += v
|
||||||
b.Count++
|
b.Count++
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Bucket) reset() {
|
func (b *Bucket[T]) Reset() {
|
||||||
b.Sum = 0
|
b.Sum = 0
|
||||||
b.Count = 0
|
b.Count = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
type window struct {
|
type window[T Numerical, B BucketInterface[T]] struct {
|
||||||
buckets []*Bucket
|
buckets []B
|
||||||
size int
|
size int
|
||||||
}
|
}
|
||||||
|
|
||||||
func newWindow(size int) *window {
|
func newWindow[T Numerical, B BucketInterface[T]](newBucket func() B, size int) *window[T, B] {
|
||||||
buckets := make([]*Bucket, size)
|
buckets := make([]B, size)
|
||||||
for i := 0; i < size; i++ {
|
for i := 0; i < size; i++ {
|
||||||
buckets[i] = new(Bucket)
|
buckets[i] = newBucket()
|
||||||
}
|
}
|
||||||
return &window{
|
return &window[T, B]{
|
||||||
buckets: buckets,
|
buckets: buckets,
|
||||||
size: size,
|
size: size,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *window) add(offset int, v float64) {
|
func (w *window[T, B]) add(offset int, v T) {
|
||||||
w.buckets[offset%w.size].add(v)
|
w.buckets[offset%w.size].Add(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *window) reduce(start, count int, fn func(b *Bucket)) {
|
func (w *window[T, B]) reduce(start, count int, fn func(b B)) {
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
fn(w.buckets[(start+i)%w.size])
|
fn(w.buckets[(start+i)%w.size])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *window) resetBucket(offset int) {
|
func (w *window[T, B]) resetBucket(offset int) {
|
||||||
w.buckets[offset%w.size].reset()
|
w.buckets[offset%w.size].Reset()
|
||||||
}
|
}
|
||||||
|
|
||||||
// IgnoreCurrentBucket lets the Reduce call ignore current bucket.
|
// IgnoreCurrentBucket lets the Reduce call ignore current bucket.
|
||||||
func IgnoreCurrentBucket() RollingWindowOption {
|
func IgnoreCurrentBucket[T Numerical, B BucketInterface[T]]() RollingWindowOption[T, B] {
|
||||||
return func(w *RollingWindow) {
|
return func(w *RollingWindow[T, B]) {
|
||||||
w.ignoreCurrent = true
|
w.ignoreCurrent = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,18 +12,24 @@ import (
|
|||||||
const duration = time.Millisecond * 50
|
const duration = time.Millisecond * 50
|
||||||
|
|
||||||
func TestNewRollingWindow(t *testing.T) {
|
func TestNewRollingWindow(t *testing.T) {
|
||||||
assert.NotNil(t, NewRollingWindow(10, time.Second))
|
assert.NotNil(t, NewRollingWindow[int64, *Bucket[int64]](func() *Bucket[int64] {
|
||||||
|
return new(Bucket[int64])
|
||||||
|
}, 10, time.Second))
|
||||||
assert.Panics(t, func() {
|
assert.Panics(t, func() {
|
||||||
NewRollingWindow(0, time.Second)
|
NewRollingWindow[int64, *Bucket[int64]](func() *Bucket[int64] {
|
||||||
|
return new(Bucket[int64])
|
||||||
|
}, 0, time.Second)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRollingWindowAdd(t *testing.T) {
|
func TestRollingWindowAdd(t *testing.T) {
|
||||||
const size = 3
|
const size = 3
|
||||||
r := NewRollingWindow(size, duration)
|
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
|
||||||
|
return new(Bucket[float64])
|
||||||
|
}, size, duration)
|
||||||
listBuckets := func() []float64 {
|
listBuckets := func() []float64 {
|
||||||
var buckets []float64
|
var buckets []float64
|
||||||
r.Reduce(func(b *Bucket) {
|
r.Reduce(func(b *Bucket[float64]) {
|
||||||
buckets = append(buckets, b.Sum)
|
buckets = append(buckets, b.Sum)
|
||||||
})
|
})
|
||||||
return buckets
|
return buckets
|
||||||
@@ -47,10 +53,12 @@ func TestRollingWindowAdd(t *testing.T) {
|
|||||||
|
|
||||||
func TestRollingWindowReset(t *testing.T) {
|
func TestRollingWindowReset(t *testing.T) {
|
||||||
const size = 3
|
const size = 3
|
||||||
r := NewRollingWindow(size, duration, IgnoreCurrentBucket())
|
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
|
||||||
|
return new(Bucket[float64])
|
||||||
|
}, size, duration, IgnoreCurrentBucket[float64, *Bucket[float64]]())
|
||||||
listBuckets := func() []float64 {
|
listBuckets := func() []float64 {
|
||||||
var buckets []float64
|
var buckets []float64
|
||||||
r.Reduce(func(b *Bucket) {
|
r.Reduce(func(b *Bucket[float64]) {
|
||||||
buckets = append(buckets, b.Sum)
|
buckets = append(buckets, b.Sum)
|
||||||
})
|
})
|
||||||
return buckets
|
return buckets
|
||||||
@@ -72,15 +80,19 @@ func TestRollingWindowReset(t *testing.T) {
|
|||||||
func TestRollingWindowReduce(t *testing.T) {
|
func TestRollingWindowReduce(t *testing.T) {
|
||||||
const size = 4
|
const size = 4
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
win *RollingWindow
|
win *RollingWindow[float64, *Bucket[float64]]
|
||||||
expect float64
|
expect float64
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
win: NewRollingWindow(size, duration),
|
win: NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
|
||||||
|
return new(Bucket[float64])
|
||||||
|
}, size, duration),
|
||||||
expect: 10,
|
expect: 10,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
win: NewRollingWindow(size, duration, IgnoreCurrentBucket()),
|
win: NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
|
||||||
|
return new(Bucket[float64])
|
||||||
|
}, size, duration, IgnoreCurrentBucket[float64, *Bucket[float64]]()),
|
||||||
expect: 4,
|
expect: 4,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -97,7 +109,7 @@ func TestRollingWindowReduce(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
var result float64
|
var result float64
|
||||||
r.Reduce(func(b *Bucket) {
|
r.Reduce(func(b *Bucket[float64]) {
|
||||||
result += b.Sum
|
result += b.Sum
|
||||||
})
|
})
|
||||||
assert.Equal(t, test.expect, result)
|
assert.Equal(t, test.expect, result)
|
||||||
@@ -108,10 +120,12 @@ func TestRollingWindowReduce(t *testing.T) {
|
|||||||
func TestRollingWindowBucketTimeBoundary(t *testing.T) {
|
func TestRollingWindowBucketTimeBoundary(t *testing.T) {
|
||||||
const size = 3
|
const size = 3
|
||||||
interval := time.Millisecond * 30
|
interval := time.Millisecond * 30
|
||||||
r := NewRollingWindow(size, interval)
|
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
|
||||||
|
return new(Bucket[float64])
|
||||||
|
}, size, interval)
|
||||||
listBuckets := func() []float64 {
|
listBuckets := func() []float64 {
|
||||||
var buckets []float64
|
var buckets []float64
|
||||||
r.Reduce(func(b *Bucket) {
|
r.Reduce(func(b *Bucket[float64]) {
|
||||||
buckets = append(buckets, b.Sum)
|
buckets = append(buckets, b.Sum)
|
||||||
})
|
})
|
||||||
return buckets
|
return buckets
|
||||||
@@ -138,7 +152,9 @@ func TestRollingWindowBucketTimeBoundary(t *testing.T) {
|
|||||||
|
|
||||||
func TestRollingWindowDataRace(t *testing.T) {
|
func TestRollingWindowDataRace(t *testing.T) {
|
||||||
const size = 3
|
const size = 3
|
||||||
r := NewRollingWindow(size, duration)
|
r := NewRollingWindow[float64, *Bucket[float64]](func() *Bucket[float64] {
|
||||||
|
return new(Bucket[float64])
|
||||||
|
}, size, duration)
|
||||||
stop := make(chan bool)
|
stop := make(chan bool)
|
||||||
go func() {
|
go func() {
|
||||||
for {
|
for {
|
||||||
@@ -157,7 +173,7 @@ func TestRollingWindowDataRace(t *testing.T) {
|
|||||||
case <-stop:
|
case <-stop:
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
r.Reduce(func(b *Bucket) {})
|
r.Reduce(func(b *Bucket[float64]) {})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|||||||
@@ -1,235 +1,53 @@
|
|||||||
package collection
|
package collection
|
||||||
|
|
||||||
import (
|
import "github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
// Set is a type-safe generic set collection.
|
||||||
unmanaged = iota
|
// It's not thread-safe, use with synchronization for concurrent access.
|
||||||
untyped
|
type Set[T comparable] struct {
|
||||||
intType
|
data map[T]lang.PlaceholderType
|
||||||
int64Type
|
|
||||||
uintType
|
|
||||||
uint64Type
|
|
||||||
stringType
|
|
||||||
)
|
|
||||||
|
|
||||||
// Set is not thread-safe, for concurrent use, make sure to use it with synchronization.
|
|
||||||
type Set struct {
|
|
||||||
data map[any]lang.PlaceholderType
|
|
||||||
tp int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSet returns a managed Set, can only put the values with the same type.
|
// NewSet returns a new type-safe set.
|
||||||
func NewSet() *Set {
|
func NewSet[T comparable]() *Set[T] {
|
||||||
return &Set{
|
return &Set[T]{
|
||||||
data: make(map[any]lang.PlaceholderType),
|
data: make(map[T]lang.PlaceholderType),
|
||||||
tp: untyped,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewUnmanagedSet returns an unmanaged Set, which can put values with different types.
|
// Add adds items to the set. Duplicates are automatically ignored.
|
||||||
func NewUnmanagedSet() *Set {
|
func (s *Set[T]) Add(items ...T) {
|
||||||
return &Set{
|
for _, item := range items {
|
||||||
data: make(map[any]lang.PlaceholderType),
|
s.data[item] = lang.Placeholder
|
||||||
tp: unmanaged,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add adds i into s.
|
// Clear removes all items from the set.
|
||||||
func (s *Set) Add(i ...any) {
|
func (s *Set[T]) Clear() {
|
||||||
for _, each := range i {
|
clear(s.data)
|
||||||
s.add(each)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddInt adds int values ii into s.
|
// Contains checks if an item exists in the set.
|
||||||
func (s *Set) AddInt(ii ...int) {
|
func (s *Set[T]) Contains(item T) bool {
|
||||||
for _, each := range ii {
|
_, ok := s.data[item]
|
||||||
s.add(each)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddInt64 adds int64 values ii into s.
|
|
||||||
func (s *Set) AddInt64(ii ...int64) {
|
|
||||||
for _, each := range ii {
|
|
||||||
s.add(each)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddUint adds uint values ii into s.
|
|
||||||
func (s *Set) AddUint(ii ...uint) {
|
|
||||||
for _, each := range ii {
|
|
||||||
s.add(each)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddUint64 adds uint64 values ii into s.
|
|
||||||
func (s *Set) AddUint64(ii ...uint64) {
|
|
||||||
for _, each := range ii {
|
|
||||||
s.add(each)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddStr adds string values ss into s.
|
|
||||||
func (s *Set) AddStr(ss ...string) {
|
|
||||||
for _, each := range ss {
|
|
||||||
s.add(each)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains checks if i is in s.
|
|
||||||
func (s *Set) Contains(i any) bool {
|
|
||||||
if len(s.data) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
s.validate(i)
|
|
||||||
_, ok := s.data[i]
|
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
// Keys returns the keys in s.
|
// Count returns the number of items in the set.
|
||||||
func (s *Set) Keys() []any {
|
func (s *Set[T]) Count() int {
|
||||||
var keys []any
|
|
||||||
|
|
||||||
for key := range s.data {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeysInt returns the int keys in s.
|
|
||||||
func (s *Set) KeysInt() []int {
|
|
||||||
var keys []int
|
|
||||||
|
|
||||||
for key := range s.data {
|
|
||||||
if intKey, ok := key.(int); ok {
|
|
||||||
keys = append(keys, intKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeysInt64 returns int64 keys in s.
|
|
||||||
func (s *Set) KeysInt64() []int64 {
|
|
||||||
var keys []int64
|
|
||||||
|
|
||||||
for key := range s.data {
|
|
||||||
if intKey, ok := key.(int64); ok {
|
|
||||||
keys = append(keys, intKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeysUint returns uint keys in s.
|
|
||||||
func (s *Set) KeysUint() []uint {
|
|
||||||
var keys []uint
|
|
||||||
|
|
||||||
for key := range s.data {
|
|
||||||
if intKey, ok := key.(uint); ok {
|
|
||||||
keys = append(keys, intKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeysUint64 returns uint64 keys in s.
|
|
||||||
func (s *Set) KeysUint64() []uint64 {
|
|
||||||
var keys []uint64
|
|
||||||
|
|
||||||
for key := range s.data {
|
|
||||||
if intKey, ok := key.(uint64); ok {
|
|
||||||
keys = append(keys, intKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeysStr returns string keys in s.
|
|
||||||
func (s *Set) KeysStr() []string {
|
|
||||||
var keys []string
|
|
||||||
|
|
||||||
for key := range s.data {
|
|
||||||
if strKey, ok := key.(string); ok {
|
|
||||||
keys = append(keys, strKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes i from s.
|
|
||||||
func (s *Set) Remove(i any) {
|
|
||||||
s.validate(i)
|
|
||||||
delete(s.data, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the number of items in s.
|
|
||||||
func (s *Set) Count() int {
|
|
||||||
return len(s.data)
|
return len(s.data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Set) add(i any) {
|
// Keys returns all elements in the set as a slice.
|
||||||
switch s.tp {
|
func (s *Set[T]) Keys() []T {
|
||||||
case unmanaged:
|
keys := make([]T, 0, len(s.data))
|
||||||
// do nothing
|
for key := range s.data {
|
||||||
case untyped:
|
keys = append(keys, key)
|
||||||
s.setType(i)
|
|
||||||
default:
|
|
||||||
s.validate(i)
|
|
||||||
}
|
}
|
||||||
s.data[i] = lang.Placeholder
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Set) setType(i any) {
|
// Remove removes an item from the set.
|
||||||
// s.tp can only be untyped here
|
func (s *Set[T]) Remove(item T) {
|
||||||
switch i.(type) {
|
delete(s.data, item)
|
||||||
case int:
|
|
||||||
s.tp = intType
|
|
||||||
case int64:
|
|
||||||
s.tp = int64Type
|
|
||||||
case uint:
|
|
||||||
s.tp = uintType
|
|
||||||
case uint64:
|
|
||||||
s.tp = uint64Type
|
|
||||||
case string:
|
|
||||||
s.tp = stringType
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Set) validate(i any) {
|
|
||||||
if s.tp == unmanaged {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch i.(type) {
|
|
||||||
case int:
|
|
||||||
if s.tp != intType {
|
|
||||||
logx.Errorf("element is int, but set contains elements with type %d", s.tp)
|
|
||||||
}
|
|
||||||
case int64:
|
|
||||||
if s.tp != int64Type {
|
|
||||||
logx.Errorf("element is int64, but set contains elements with type %d", s.tp)
|
|
||||||
}
|
|
||||||
case uint:
|
|
||||||
if s.tp != uintType {
|
|
||||||
logx.Errorf("element is uint, but set contains elements with type %d", s.tp)
|
|
||||||
}
|
|
||||||
case uint64:
|
|
||||||
if s.tp != uint64Type {
|
|
||||||
logx.Errorf("element is uint64, but set contains elements with type %d", s.tp)
|
|
||||||
}
|
|
||||||
case string:
|
|
||||||
if s.tp != stringType {
|
|
||||||
logx.Errorf("element is string, but set contains elements with type %d", s.tp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,6 +12,105 @@ func init() {
|
|||||||
logx.Disable()
|
logx.Disable()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set functionality tests
|
||||||
|
func TestTypedSetInt(t *testing.T) {
|
||||||
|
set := NewSet[int]()
|
||||||
|
values := []int{1, 2, 3, 2, 1} // Contains duplicates
|
||||||
|
|
||||||
|
// Test adding
|
||||||
|
set.Add(values...)
|
||||||
|
assert.Equal(t, 3, set.Count()) // Should only have 3 elements after deduplication
|
||||||
|
|
||||||
|
// Test contains
|
||||||
|
assert.True(t, set.Contains(1))
|
||||||
|
assert.True(t, set.Contains(2))
|
||||||
|
assert.True(t, set.Contains(3))
|
||||||
|
assert.False(t, set.Contains(4))
|
||||||
|
|
||||||
|
// Test getting all keys
|
||||||
|
keys := set.Keys()
|
||||||
|
sort.Ints(keys)
|
||||||
|
assert.EqualValues(t, []int{1, 2, 3}, keys)
|
||||||
|
|
||||||
|
// Test removal
|
||||||
|
set.Remove(2)
|
||||||
|
assert.False(t, set.Contains(2))
|
||||||
|
assert.Equal(t, 2, set.Count())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTypedSetStringOps(t *testing.T) {
|
||||||
|
set := NewSet[string]()
|
||||||
|
values := []string{"a", "b", "c", "b", "a"}
|
||||||
|
|
||||||
|
set.Add(values...)
|
||||||
|
assert.Equal(t, 3, set.Count())
|
||||||
|
|
||||||
|
assert.True(t, set.Contains("a"))
|
||||||
|
assert.True(t, set.Contains("b"))
|
||||||
|
assert.True(t, set.Contains("c"))
|
||||||
|
assert.False(t, set.Contains("d"))
|
||||||
|
|
||||||
|
keys := set.Keys()
|
||||||
|
sort.Strings(keys)
|
||||||
|
assert.EqualValues(t, []string{"a", "b", "c"}, keys)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTypedSetClear(t *testing.T) {
|
||||||
|
set := NewSet[int]()
|
||||||
|
set.Add(1, 2, 3)
|
||||||
|
assert.Equal(t, 3, set.Count())
|
||||||
|
|
||||||
|
set.Clear()
|
||||||
|
assert.Equal(t, 0, set.Count())
|
||||||
|
assert.False(t, set.Contains(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTypedSetEmpty(t *testing.T) {
|
||||||
|
set := NewSet[int]()
|
||||||
|
assert.Equal(t, 0, set.Count())
|
||||||
|
assert.False(t, set.Contains(1))
|
||||||
|
assert.Empty(t, set.Keys())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTypedSetMultipleTypes(t *testing.T) {
|
||||||
|
// Test different typed generic sets
|
||||||
|
intSet := NewSet[int]()
|
||||||
|
int64Set := NewSet[int64]()
|
||||||
|
uintSet := NewSet[uint]()
|
||||||
|
uint64Set := NewSet[uint64]()
|
||||||
|
stringSet := NewSet[string]()
|
||||||
|
|
||||||
|
intSet.Add(1, 2, 3)
|
||||||
|
int64Set.Add(1, 2, 3)
|
||||||
|
uintSet.Add(1, 2, 3)
|
||||||
|
uint64Set.Add(1, 2, 3)
|
||||||
|
stringSet.Add("1", "2", "3")
|
||||||
|
|
||||||
|
assert.Equal(t, 3, intSet.Count())
|
||||||
|
assert.Equal(t, 3, int64Set.Count())
|
||||||
|
assert.Equal(t, 3, uintSet.Count())
|
||||||
|
assert.Equal(t, 3, uint64Set.Count())
|
||||||
|
assert.Equal(t, 3, stringSet.Count())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set benchmarks
|
||||||
|
func BenchmarkTypedIntSet(b *testing.B) {
|
||||||
|
s := NewSet[int]()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
s.Add(i)
|
||||||
|
_ = s.Contains(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkTypedStringSet(b *testing.B) {
|
||||||
|
s := NewSet[string]()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
s.Add(string(rune(i)))
|
||||||
|
_ = s.Contains(string(rune(i)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Legacy tests remain unchanged for backward compatibility
|
||||||
func BenchmarkRawSet(b *testing.B) {
|
func BenchmarkRawSet(b *testing.B) {
|
||||||
m := make(map[any]struct{})
|
m := make(map[any]struct{})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
@@ -20,26 +119,10 @@ func BenchmarkRawSet(b *testing.B) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkUnmanagedSet(b *testing.B) {
|
|
||||||
s := NewUnmanagedSet()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
s.Add(i)
|
|
||||||
_ = s.Contains(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSet(b *testing.B) {
|
|
||||||
s := NewSet()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
s.AddInt(i)
|
|
||||||
_ = s.Contains(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAdd(t *testing.T) {
|
func TestAdd(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewUnmanagedSet()
|
set := NewSet[int]()
|
||||||
values := []any{1, 2, 3}
|
values := []int{1, 2, 3}
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.Add(values...)
|
set.Add(values...)
|
||||||
@@ -51,82 +134,74 @@ func TestAdd(t *testing.T) {
|
|||||||
|
|
||||||
func TestAddInt(t *testing.T) {
|
func TestAddInt(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[int]()
|
||||||
values := []int{1, 2, 3}
|
values := []int{1, 2, 3}
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.AddInt(values...)
|
set.Add(values...)
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.True(t, set.Contains(1) && set.Contains(2) && set.Contains(3))
|
assert.True(t, set.Contains(1) && set.Contains(2) && set.Contains(3))
|
||||||
keys := set.KeysInt()
|
keys := set.Keys()
|
||||||
sort.Ints(keys)
|
sort.Ints(keys)
|
||||||
assert.EqualValues(t, values, keys)
|
assert.EqualValues(t, values, keys)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAddInt64(t *testing.T) {
|
func TestAddInt64(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[int64]()
|
||||||
values := []int64{1, 2, 3}
|
values := []int64{1, 2, 3}
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.AddInt64(values...)
|
set.Add(values...)
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.True(t, set.Contains(int64(1)) && set.Contains(int64(2)) && set.Contains(int64(3)))
|
assert.True(t, set.Contains(1) && set.Contains(2) && set.Contains(3))
|
||||||
assert.Equal(t, len(values), len(set.KeysInt64()))
|
assert.Equal(t, len(values), len(set.Keys()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAddUint(t *testing.T) {
|
func TestAddUint(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[uint]()
|
||||||
values := []uint{1, 2, 3}
|
values := []uint{1, 2, 3}
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.AddUint(values...)
|
set.Add(values...)
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.True(t, set.Contains(uint(1)) && set.Contains(uint(2)) && set.Contains(uint(3)))
|
assert.True(t, set.Contains(1) && set.Contains(2) && set.Contains(3))
|
||||||
assert.Equal(t, len(values), len(set.KeysUint()))
|
assert.Equal(t, len(values), len(set.Keys()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAddUint64(t *testing.T) {
|
func TestAddUint64(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[uint64]()
|
||||||
values := []uint64{1, 2, 3}
|
values := []uint64{1, 2, 3}
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.AddUint64(values...)
|
set.Add(values...)
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.True(t, set.Contains(uint64(1)) && set.Contains(uint64(2)) && set.Contains(uint64(3)))
|
assert.True(t, set.Contains(1) && set.Contains(2) && set.Contains(3))
|
||||||
assert.Equal(t, len(values), len(set.KeysUint64()))
|
assert.Equal(t, len(values), len(set.Keys()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAddStr(t *testing.T) {
|
func TestAddStr(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[string]()
|
||||||
values := []string{"1", "2", "3"}
|
values := []string{"1", "2", "3"}
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.AddStr(values...)
|
set.Add(values...)
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.True(t, set.Contains("1") && set.Contains("2") && set.Contains("3"))
|
assert.True(t, set.Contains("1") && set.Contains("2") && set.Contains("3"))
|
||||||
assert.Equal(t, len(values), len(set.KeysStr()))
|
assert.Equal(t, len(values), len(set.Keys()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestContainsWithoutElements(t *testing.T) {
|
func TestContainsWithoutElements(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[int]()
|
||||||
|
|
||||||
// then
|
|
||||||
assert.False(t, set.Contains(1))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestContainsUnmanagedWithoutElements(t *testing.T) {
|
|
||||||
// given
|
|
||||||
set := NewUnmanagedSet()
|
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.False(t, set.Contains(1))
|
assert.False(t, set.Contains(1))
|
||||||
@@ -134,8 +209,8 @@ func TestContainsUnmanagedWithoutElements(t *testing.T) {
|
|||||||
|
|
||||||
func TestRemove(t *testing.T) {
|
func TestRemove(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[int]()
|
||||||
set.Add([]any{1, 2, 3}...)
|
set.Add([]int{1, 2, 3}...)
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.Remove(2)
|
set.Remove(2)
|
||||||
@@ -146,57 +221,9 @@ func TestRemove(t *testing.T) {
|
|||||||
|
|
||||||
func TestCount(t *testing.T) {
|
func TestCount(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet[int]()
|
||||||
set.Add([]any{1, 2, 3}...)
|
set.Add([]int{1, 2, 3}...)
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.Equal(t, set.Count(), 3)
|
assert.Equal(t, set.Count(), 3)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKeysIntMismatch(t *testing.T) {
|
|
||||||
set := NewSet()
|
|
||||||
set.add(int64(1))
|
|
||||||
set.add(2)
|
|
||||||
vals := set.KeysInt()
|
|
||||||
assert.EqualValues(t, []int{2}, vals)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestKeysInt64Mismatch(t *testing.T) {
|
|
||||||
set := NewSet()
|
|
||||||
set.add(1)
|
|
||||||
set.add(int64(2))
|
|
||||||
vals := set.KeysInt64()
|
|
||||||
assert.EqualValues(t, []int64{2}, vals)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestKeysUintMismatch(t *testing.T) {
|
|
||||||
set := NewSet()
|
|
||||||
set.add(1)
|
|
||||||
set.add(uint(2))
|
|
||||||
vals := set.KeysUint()
|
|
||||||
assert.EqualValues(t, []uint{2}, vals)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestKeysUint64Mismatch(t *testing.T) {
|
|
||||||
set := NewSet()
|
|
||||||
set.add(1)
|
|
||||||
set.add(uint64(2))
|
|
||||||
vals := set.KeysUint64()
|
|
||||||
assert.EqualValues(t, []uint64{2}, vals)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestKeysStrMismatch(t *testing.T) {
|
|
||||||
set := NewSet()
|
|
||||||
set.add(1)
|
|
||||||
set.add("2")
|
|
||||||
vals := set.KeysStr()
|
|
||||||
assert.EqualValues(t, []string{"2"}, vals)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSetType(t *testing.T) {
|
|
||||||
set := NewUnmanagedSet()
|
|
||||||
set.add(1)
|
|
||||||
set.add("2")
|
|
||||||
vals := set.Keys()
|
|
||||||
assert.ElementsMatch(t, []any{1, "2"}, vals)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -62,7 +62,11 @@ func Load(file string, v any, opts ...Option) error {
|
|||||||
return loader([]byte(os.ExpandEnv(string(content))), v)
|
return loader([]byte(os.ExpandEnv(string(content))), v)
|
||||||
}
|
}
|
||||||
|
|
||||||
return loader(content, v)
|
if err = loader(content, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return validate(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
|
// LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
|
||||||
@@ -85,7 +89,12 @@ func LoadFromJsonBytes(content []byte, v any) error {
|
|||||||
|
|
||||||
lowerCaseKeyMap := toLowerCaseKeyMap(m, info)
|
lowerCaseKeyMap := toLowerCaseKeyMap(m, info)
|
||||||
|
|
||||||
return mapping.UnmarshalJsonMap(lowerCaseKeyMap, v, mapping.WithCanonicalKeyFunc(toLowerCase))
|
if err = mapping.UnmarshalJsonMap(lowerCaseKeyMap, v,
|
||||||
|
mapping.WithCanonicalKeyFunc(toLowerCase)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return validate(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadConfigFromJsonBytes loads config into v from content json bytes.
|
// LoadConfigFromJsonBytes loads config into v from content json bytes.
|
||||||
@@ -133,7 +142,7 @@ func addOrMergeFields(info *fieldInfo, key string, child *fieldInfo, fullName st
|
|||||||
return newConflictKeyError(fullName)
|
return newConflictKeyError(fullName)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mergeFields(prev, key, child.children, fullName); err != nil {
|
if err := mergeFields(prev, child.children, fullName); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -189,10 +198,10 @@ func buildFieldsInfo(tp reflect.Type, fullName string) (*fieldInfo, error) {
|
|||||||
switch tp.Kind() {
|
switch tp.Kind() {
|
||||||
case reflect.Struct:
|
case reflect.Struct:
|
||||||
return buildStructFieldsInfo(tp, fullName)
|
return buildStructFieldsInfo(tp, fullName)
|
||||||
case reflect.Array, reflect.Slice:
|
case reflect.Array, reflect.Slice, reflect.Map:
|
||||||
return buildFieldsInfo(mapping.Deref(tp.Elem()), fullName)
|
return buildFieldsInfo(mapping.Deref(tp.Elem()), fullName)
|
||||||
case reflect.Chan, reflect.Func:
|
case reflect.Chan, reflect.Func:
|
||||||
return nil, fmt.Errorf("unsupported type: %s", tp.Kind())
|
return nil, fmt.Errorf("unsupported type: %s, fullName: %s", tp.Kind(), fullName)
|
||||||
default:
|
default:
|
||||||
return &fieldInfo{
|
return &fieldInfo{
|
||||||
children: make(map[string]*fieldInfo),
|
children: make(map[string]*fieldInfo),
|
||||||
@@ -281,7 +290,7 @@ func getTagName(field reflect.StructField) string {
|
|||||||
return field.Name
|
return field.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
func mergeFields(prev *fieldInfo, key string, children map[string]*fieldInfo, fullName string) error {
|
func mergeFields(prev *fieldInfo, children map[string]*fieldInfo, fullName string) error {
|
||||||
if len(prev.children) == 0 || len(children) == 0 {
|
if len(prev.children) == 0 || len(children) == 0 {
|
||||||
return newConflictKeyError(fullName)
|
return newConflictKeyError(fullName)
|
||||||
}
|
}
|
||||||
@@ -307,7 +316,7 @@ func toLowerCaseInterface(v any, info *fieldInfo) any {
|
|||||||
case map[string]any:
|
case map[string]any:
|
||||||
return toLowerCaseKeyMap(vv, info)
|
return toLowerCaseKeyMap(vv, info)
|
||||||
case []any:
|
case []any:
|
||||||
var arr []any
|
arr := make([]any, 0, len(vv))
|
||||||
for _, vvv := range vv {
|
for _, vvv := range vv {
|
||||||
arr = append(arr, toLowerCaseInterface(vvv, info))
|
arr = append(arr, toLowerCaseInterface(vvv, info))
|
||||||
}
|
}
|
||||||
@@ -332,6 +341,8 @@ func toLowerCaseKeyMap(m map[string]any, info *fieldInfo) map[string]any {
|
|||||||
res[lk] = toLowerCaseInterface(v, ti)
|
res[lk] = toLowerCaseInterface(v, ti)
|
||||||
} else if info.mapField != nil {
|
} else if info.mapField != nil {
|
||||||
res[k] = toLowerCaseInterface(v, info.mapField)
|
res[k] = toLowerCaseInterface(v, info.mapField)
|
||||||
|
} else if vv, ok := v.(map[string]any); ok {
|
||||||
|
res[k] = toLowerCaseKeyMap(vv, info)
|
||||||
} else {
|
} else {
|
||||||
res[k] = v
|
res[k] = v
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package conf
|
package conf
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -40,9 +41,8 @@ func TestConfigJson(t *testing.T) {
|
|||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
test := test
|
test := test
|
||||||
t.Run(test, func(t *testing.T) {
|
t.Run(test, func(t *testing.T) {
|
||||||
tmpfile, err := createTempFile(test, text)
|
tmpfile, err := createTempFile(t, test, text)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer os.Remove(tmpfile)
|
|
||||||
|
|
||||||
var val struct {
|
var val struct {
|
||||||
A string `json:"a"`
|
A string `json:"a"`
|
||||||
@@ -82,9 +82,8 @@ c = "${FOO}"
|
|||||||
d = "abcd!@#$112"
|
d = "abcd!@#$112"
|
||||||
`
|
`
|
||||||
t.Setenv("FOO", "2")
|
t.Setenv("FOO", "2")
|
||||||
tmpfile, err := createTempFile(".toml", text)
|
tmpfile, err := createTempFile(t, ".toml", text)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer os.Remove(tmpfile)
|
|
||||||
|
|
||||||
var val struct {
|
var val struct {
|
||||||
A string `json:"a"`
|
A string `json:"a"`
|
||||||
@@ -105,9 +104,8 @@ b = 1
|
|||||||
c = "FOO"
|
c = "FOO"
|
||||||
d = "abcd"
|
d = "abcd"
|
||||||
`
|
`
|
||||||
tmpfile, err := createTempFile(".toml", text)
|
tmpfile, err := createTempFile(t, ".toml", text)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer os.Remove(tmpfile)
|
|
||||||
|
|
||||||
var val struct {
|
var val struct {
|
||||||
A string `json:"a"`
|
A string `json:"a"`
|
||||||
@@ -127,9 +125,8 @@ func TestConfigWithLower(t *testing.T) {
|
|||||||
text := `a = "foo"
|
text := `a = "foo"
|
||||||
b = 1
|
b = 1
|
||||||
`
|
`
|
||||||
tmpfile, err := createTempFile(".toml", text)
|
tmpfile, err := createTempFile(t, ".toml", text)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer os.Remove(tmpfile)
|
|
||||||
|
|
||||||
var val struct {
|
var val struct {
|
||||||
A string `json:"a"`
|
A string `json:"a"`
|
||||||
@@ -207,9 +204,8 @@ c = "${FOO}"
|
|||||||
d = "abcd!@#112"
|
d = "abcd!@#112"
|
||||||
`
|
`
|
||||||
t.Setenv("FOO", "2")
|
t.Setenv("FOO", "2")
|
||||||
tmpfile, err := createTempFile(".toml", text)
|
tmpfile, err := createTempFile(t, ".toml", text)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer os.Remove(tmpfile)
|
|
||||||
|
|
||||||
var val struct {
|
var val struct {
|
||||||
A string `json:"a"`
|
A string `json:"a"`
|
||||||
@@ -241,9 +237,8 @@ func TestConfigJsonEnv(t *testing.T) {
|
|||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
test := test
|
test := test
|
||||||
t.Run(test, func(t *testing.T) {
|
t.Run(test, func(t *testing.T) {
|
||||||
tmpfile, err := createTempFile(test, text)
|
tmpfile, err := createTempFile(t, test, text)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
defer os.Remove(tmpfile)
|
|
||||||
|
|
||||||
var val struct {
|
var val struct {
|
||||||
A string `json:"a"`
|
A string `json:"a"`
|
||||||
@@ -1192,6 +1187,42 @@ Email = "bar"`)
|
|||||||
assert.Len(t, c.Value, 2)
|
assert.Len(t, c.Value, 2)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("multi layer map", func(t *testing.T) {
|
||||||
|
type Value struct {
|
||||||
|
User struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
Value map[string]map[string]Value
|
||||||
|
}
|
||||||
|
|
||||||
|
var input = []byte(`
|
||||||
|
[Value.first.User1.User]
|
||||||
|
Name = "foo"
|
||||||
|
[Value.second.User2.User]
|
||||||
|
Name = "bar"
|
||||||
|
`)
|
||||||
|
var c Config
|
||||||
|
if assert.NoError(t, LoadFromTomlBytes(input, &c)) {
|
||||||
|
assert.Len(t, c.Value, 2)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_LoadBadConfig(t *testing.T) {
|
||||||
|
type Config struct {
|
||||||
|
Name string `json:"name,options=foo|bar"`
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := createTempFile(t, ".json", `{"name": "baz"}`)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
var c Config
|
||||||
|
err = Load(file, &c)
|
||||||
|
assert.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_getFullName(t *testing.T) {
|
func Test_getFullName(t *testing.T) {
|
||||||
@@ -1199,6 +1230,26 @@ func Test_getFullName(t *testing.T) {
|
|||||||
assert.Equal(t, "a", getFullName("", "a"))
|
assert.Equal(t, "a", getFullName("", "a"))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestValidate(t *testing.T) {
|
||||||
|
t.Run("normal config", func(t *testing.T) {
|
||||||
|
var c mockConfig
|
||||||
|
err := LoadFromJsonBytes([]byte(`{"val": "hello", "number": 8}`), &c)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("error no int", func(t *testing.T) {
|
||||||
|
var c mockConfig
|
||||||
|
err := LoadFromJsonBytes([]byte(`{"val": "hello"}`), &c)
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("error no string", func(t *testing.T) {
|
||||||
|
var c mockConfig
|
||||||
|
err := LoadFromJsonBytes([]byte(`{"number": 8}`), &c)
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func Test_buildFieldsInfo(t *testing.T) {
|
func Test_buildFieldsInfo(t *testing.T) {
|
||||||
type ParentSt struct {
|
type ParentSt struct {
|
||||||
Name string
|
Name string
|
||||||
@@ -1288,13 +1339,13 @@ func Test_buildFieldsInfo(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func createTempFile(ext, text string) (string, error) {
|
func createTempFile(t *testing.T, ext, text string) (string, error) {
|
||||||
tmpFile, err := os.CreateTemp(os.TempDir(), hash.Md5Hex([]byte(text))+"*"+ext)
|
tmpFile, err := os.CreateTemp(os.TempDir(), hash.Md5Hex([]byte(text))+"*"+ext)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.WriteFile(tmpFile.Name(), []byte(text), os.ModeTemporary); err != nil {
|
if err = os.WriteFile(tmpFile.Name(), []byte(text), os.ModeTemporary); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1303,5 +1354,26 @@ func createTempFile(ext, text string) (string, error) {
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
_ = os.Remove(filename)
|
||||||
|
})
|
||||||
|
|
||||||
return filename, nil
|
return filename, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type mockConfig struct {
|
||||||
|
Val string
|
||||||
|
Number int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m mockConfig) Validate() error {
|
||||||
|
if len(m.Val) == 0 {
|
||||||
|
return errors.New("val is empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Number == 0 {
|
||||||
|
return errors.New("number is zero")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ type RestfulConf struct {
|
|||||||
MaxConns int `json:",default=10000"`
|
MaxConns int `json:",default=10000"`
|
||||||
MaxBytes int64 `json:",default=1048576"`
|
MaxBytes int64 `json:",default=1048576"`
|
||||||
Timeout time.Duration `json:",default=3s"`
|
Timeout time.Duration `json:",default=3s"`
|
||||||
CpuThreshold int64 `json:",default=900,range=[0:1000]"`
|
CpuThreshold int64 `json:",default=900,range=[0:1000)"`
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
12
core/conf/validate.go
Normal file
12
core/conf/validate.go
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
package conf
|
||||||
|
|
||||||
|
import "github.com/zeromicro/go-zero/core/validation"
|
||||||
|
|
||||||
|
// validate validates the value if it implements the Validator interface.
|
||||||
|
func validate(v any) error {
|
||||||
|
if val, ok := v.(validation.Validator); ok {
|
||||||
|
return val.Validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
81
core/conf/validate_test.go
Normal file
81
core/conf/validate_test.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package conf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mockType int
|
||||||
|
|
||||||
|
func (m mockType) Validate() error {
|
||||||
|
if m < 10 {
|
||||||
|
return errors.New("invalid value")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type anotherMockType int
|
||||||
|
|
||||||
|
func Test_validate(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
v any
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "invalid",
|
||||||
|
v: mockType(5),
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid",
|
||||||
|
v: mockType(10),
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "not validator",
|
||||||
|
v: anotherMockType(5),
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
err := validate(tt.v)
|
||||||
|
assert.Equal(t, tt.wantErr, err != nil)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockVal struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m mockVal) Validate() error {
|
||||||
|
return errors.New("invalid value")
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_validateValPtr(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
v any
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "invalid",
|
||||||
|
v: mockVal{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid value",
|
||||||
|
v: &mockVal{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
assert.Error(t, validate(tt.v))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
200
core/configcenter/configurator.go
Normal file
200
core/configcenter/configurator.go
Normal file
@@ -0,0 +1,200 @@
|
|||||||
|
package configurator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/configcenter/subscriber"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
"github.com/zeromicro/go-zero/core/mapping"
|
||||||
|
"github.com/zeromicro/go-zero/core/threading"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errEmptyConfig = errors.New("empty config value")
|
||||||
|
errMissingUnmarshalerType = errors.New("missing unmarshaler type")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Configurator is the interface for configuration center.
|
||||||
|
type Configurator[T any] interface {
|
||||||
|
// GetConfig returns the subscription value.
|
||||||
|
GetConfig() (T, error)
|
||||||
|
// AddListener adds a listener to the subscriber.
|
||||||
|
AddListener(listener func())
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Config is the configuration for Configurator.
|
||||||
|
Config struct {
|
||||||
|
// Type is the value type, yaml, json or toml.
|
||||||
|
Type string `json:",default=yaml,options=[yaml,json,toml]"`
|
||||||
|
// Log is the flag to control logging.
|
||||||
|
Log bool `json:",default=true"`
|
||||||
|
}
|
||||||
|
|
||||||
|
configCenter[T any] struct {
|
||||||
|
conf Config
|
||||||
|
unmarshaler LoaderFn
|
||||||
|
subscriber subscriber.Subscriber
|
||||||
|
listeners []func()
|
||||||
|
lock sync.Mutex
|
||||||
|
snapshot atomic.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
value[T any] struct {
|
||||||
|
data string
|
||||||
|
marshalData T
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Configurator is the interface for configuration center.
|
||||||
|
var _ Configurator[any] = (*configCenter[any])(nil)
|
||||||
|
|
||||||
|
// MustNewConfigCenter returns a Configurator, exits on errors.
|
||||||
|
func MustNewConfigCenter[T any](c Config, subscriber subscriber.Subscriber) Configurator[T] {
|
||||||
|
cc, err := NewConfigCenter[T](c, subscriber)
|
||||||
|
logx.Must(err)
|
||||||
|
return cc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConfigCenter returns a Configurator.
|
||||||
|
func NewConfigCenter[T any](c Config, subscriber subscriber.Subscriber) (Configurator[T], error) {
|
||||||
|
unmarshaler, ok := Unmarshaler(strings.ToLower(c.Type))
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unknown format: %s", c.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
cc := &configCenter[T]{
|
||||||
|
conf: c,
|
||||||
|
unmarshaler: unmarshaler,
|
||||||
|
subscriber: subscriber,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cc.loadConfig(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cc.subscriber.AddListener(cc.onChange); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := cc.GetConfig(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return cc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddListener adds listener to s.
|
||||||
|
func (c *configCenter[T]) AddListener(listener func()) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
c.listeners = append(c.listeners, listener)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConfig return structured config.
|
||||||
|
func (c *configCenter[T]) GetConfig() (T, error) {
|
||||||
|
v := c.value()
|
||||||
|
if v == nil || len(v.data) == 0 {
|
||||||
|
var empty T
|
||||||
|
return empty, errEmptyConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
return v.marshalData, v.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the subscription value.
|
||||||
|
func (c *configCenter[T]) Value() string {
|
||||||
|
v := c.value()
|
||||||
|
if v == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return v.data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *configCenter[T]) loadConfig() error {
|
||||||
|
v, err := c.subscriber.Value()
|
||||||
|
if err != nil {
|
||||||
|
if c.conf.Log {
|
||||||
|
logx.Errorf("ConfigCenter loads changed configuration, error: %v", err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.conf.Log {
|
||||||
|
logx.Infof("ConfigCenter loads changed configuration, content [%s]", v)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.snapshot.Store(c.genValue(v))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *configCenter[T]) onChange() {
|
||||||
|
if err := c.loadConfig(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.lock.Lock()
|
||||||
|
listeners := make([]func(), len(c.listeners))
|
||||||
|
copy(listeners, c.listeners)
|
||||||
|
c.lock.Unlock()
|
||||||
|
|
||||||
|
for _, l := range listeners {
|
||||||
|
threading.GoSafe(l)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *configCenter[T]) value() *value[T] {
|
||||||
|
content := c.snapshot.Load()
|
||||||
|
if content == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return content.(*value[T])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *configCenter[T]) genValue(data string) *value[T] {
|
||||||
|
v := &value[T]{
|
||||||
|
data: data,
|
||||||
|
}
|
||||||
|
if len(data) == 0 {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
t := reflect.TypeOf(v.marshalData)
|
||||||
|
// if the type is nil, it means that the user has not set the type of the configuration.
|
||||||
|
if t == nil {
|
||||||
|
v.err = errMissingUnmarshalerType
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
t = mapping.Deref(t)
|
||||||
|
switch t.Kind() {
|
||||||
|
case reflect.Struct, reflect.Array, reflect.Slice:
|
||||||
|
if err := c.unmarshaler([]byte(data), &v.marshalData); err != nil {
|
||||||
|
v.err = err
|
||||||
|
if c.conf.Log {
|
||||||
|
logx.Errorf("ConfigCenter unmarshal configuration failed, err: %+v, content [%s]",
|
||||||
|
err.Error(), data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if str, ok := any(data).(T); ok {
|
||||||
|
v.marshalData = str
|
||||||
|
} else {
|
||||||
|
v.err = errMissingUnmarshalerType
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if c.conf.Log {
|
||||||
|
logx.Errorf("ConfigCenter unmarshal configuration missing unmarshaler for type: %s, content [%s]",
|
||||||
|
t.Kind(), data)
|
||||||
|
}
|
||||||
|
v.err = errMissingUnmarshalerType
|
||||||
|
}
|
||||||
|
|
||||||
|
return v
|
||||||
|
}
|
||||||
233
core/configcenter/configurator_test.go
Normal file
233
core/configcenter/configurator_test.go
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
package configurator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewConfigCenter(t *testing.T) {
|
||||||
|
_, err := NewConfigCenter[any](Config{
|
||||||
|
Log: true,
|
||||||
|
}, &mockSubscriber{})
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
_, err = NewConfigCenter[any](Config{
|
||||||
|
Type: "json",
|
||||||
|
Log: true,
|
||||||
|
}, &mockSubscriber{})
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigCenter_GetConfig(t *testing.T) {
|
||||||
|
mock := &mockSubscriber{}
|
||||||
|
type Data struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.v = `{"name": "go-zero"}`
|
||||||
|
c1, err := NewConfigCenter[Data](Config{
|
||||||
|
Type: "json",
|
||||||
|
Log: true,
|
||||||
|
}, mock)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
data, err := c1.GetConfig()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "go-zero", data.Name)
|
||||||
|
|
||||||
|
mock.v = `{"name": "111"}`
|
||||||
|
c2, err := NewConfigCenter[Data](Config{Type: "json"}, mock)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
mock.v = `{}`
|
||||||
|
c3, err := NewConfigCenter[string](Config{
|
||||||
|
Type: "json",
|
||||||
|
Log: true,
|
||||||
|
}, mock)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = c3.GetConfig()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
data, err = c2.GetConfig()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
mock.lisErr = errors.New("mock error")
|
||||||
|
_, err = NewConfigCenter[Data](Config{
|
||||||
|
Type: "json",
|
||||||
|
Log: true,
|
||||||
|
}, mock)
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigCenter_onChange(t *testing.T) {
|
||||||
|
mock := &mockSubscriber{}
|
||||||
|
type Data struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.v = `{"name": "go-zero"}`
|
||||||
|
c1, err := NewConfigCenter[Data](Config{Type: "json", Log: true}, mock)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
data, err := c1.GetConfig()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "go-zero", data.Name)
|
||||||
|
|
||||||
|
mock.v = `{"name": "go-zero2"}`
|
||||||
|
mock.change()
|
||||||
|
|
||||||
|
data, err = c1.GetConfig()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "go-zero2", data.Name)
|
||||||
|
|
||||||
|
mock.valErr = errors.New("mock error")
|
||||||
|
_, err = NewConfigCenter[Data](Config{Type: "json", Log: false}, mock)
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigCenter_Value(t *testing.T) {
|
||||||
|
mock := &mockSubscriber{}
|
||||||
|
mock.v = "1234"
|
||||||
|
|
||||||
|
c, err := NewConfigCenter[string](Config{
|
||||||
|
Type: "json",
|
||||||
|
Log: true,
|
||||||
|
}, mock)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
cc := c.(*configCenter[string])
|
||||||
|
|
||||||
|
assert.Equal(t, cc.Value(), "1234")
|
||||||
|
|
||||||
|
mock.valErr = errors.New("mock error")
|
||||||
|
|
||||||
|
_, err = NewConfigCenter[any](Config{
|
||||||
|
Type: "json",
|
||||||
|
Log: true,
|
||||||
|
}, mock)
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigCenter_AddListener(t *testing.T) {
|
||||||
|
mock := &mockSubscriber{}
|
||||||
|
mock.v = "1234"
|
||||||
|
c, err := NewConfigCenter[string](Config{
|
||||||
|
Type: "json",
|
||||||
|
Log: true,
|
||||||
|
}, mock)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
cc := c.(*configCenter[string])
|
||||||
|
var a, b int
|
||||||
|
var mutex sync.Mutex
|
||||||
|
cc.AddListener(func() {
|
||||||
|
mutex.Lock()
|
||||||
|
a = 1
|
||||||
|
mutex.Unlock()
|
||||||
|
})
|
||||||
|
cc.AddListener(func() {
|
||||||
|
mutex.Lock()
|
||||||
|
b = 2
|
||||||
|
mutex.Unlock()
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, 2, len(cc.listeners))
|
||||||
|
|
||||||
|
mock.change()
|
||||||
|
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
|
||||||
|
mutex.Lock()
|
||||||
|
assert.Equal(t, 1, a)
|
||||||
|
assert.Equal(t, 2, b)
|
||||||
|
mutex.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigCenter_genValue(t *testing.T) {
|
||||||
|
t.Run("data is empty", func(t *testing.T) {
|
||||||
|
c := &configCenter[string]{
|
||||||
|
unmarshaler: registry.unmarshalers["json"],
|
||||||
|
conf: Config{Log: true},
|
||||||
|
}
|
||||||
|
v := c.genValue("")
|
||||||
|
assert.Equal(t, "", v.data)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("invalid template type", func(t *testing.T) {
|
||||||
|
c := &configCenter[any]{
|
||||||
|
unmarshaler: registry.unmarshalers["json"],
|
||||||
|
conf: Config{Log: true},
|
||||||
|
}
|
||||||
|
v := c.genValue("xxxx")
|
||||||
|
assert.Equal(t, errMissingUnmarshalerType, v.err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("unsupported template type", func(t *testing.T) {
|
||||||
|
c := &configCenter[int]{
|
||||||
|
unmarshaler: registry.unmarshalers["json"],
|
||||||
|
conf: Config{Log: true},
|
||||||
|
}
|
||||||
|
v := c.genValue("1")
|
||||||
|
assert.Equal(t, errMissingUnmarshalerType, v.err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("supported template string type", func(t *testing.T) {
|
||||||
|
c := &configCenter[string]{
|
||||||
|
unmarshaler: registry.unmarshalers["json"],
|
||||||
|
conf: Config{Log: true},
|
||||||
|
}
|
||||||
|
v := c.genValue("12345")
|
||||||
|
assert.NoError(t, v.err)
|
||||||
|
assert.Equal(t, "12345", v.data)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("unmarshal fail", func(t *testing.T) {
|
||||||
|
c := &configCenter[struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
}]{
|
||||||
|
unmarshaler: registry.unmarshalers["json"],
|
||||||
|
conf: Config{Log: true},
|
||||||
|
}
|
||||||
|
v := c.genValue(`{"name":"new name}`)
|
||||||
|
assert.Equal(t, `{"name":"new name}`, v.data)
|
||||||
|
assert.Error(t, v.err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("success", func(t *testing.T) {
|
||||||
|
c := &configCenter[struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
}]{
|
||||||
|
unmarshaler: registry.unmarshalers["json"],
|
||||||
|
conf: Config{Log: true},
|
||||||
|
}
|
||||||
|
v := c.genValue(`{"name":"new name"}`)
|
||||||
|
assert.Equal(t, `{"name":"new name"}`, v.data)
|
||||||
|
assert.Equal(t, "new name", v.marshalData.Name)
|
||||||
|
assert.NoError(t, v.err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockSubscriber struct {
|
||||||
|
v string
|
||||||
|
lisErr, valErr error
|
||||||
|
listener func()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockSubscriber) AddListener(listener func()) error {
|
||||||
|
m.listener = listener
|
||||||
|
return m.lisErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockSubscriber) Value() (string, error) {
|
||||||
|
return m.v, m.valErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockSubscriber) change() {
|
||||||
|
if m.listener != nil {
|
||||||
|
m.listener()
|
||||||
|
}
|
||||||
|
}
|
||||||
67
core/configcenter/subscriber/etcd.go
Normal file
67
core/configcenter/subscriber/etcd.go
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
package subscriber
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/zeromicro/go-zero/core/discov"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// etcdSubscriber is a subscriber that subscribes to etcd.
|
||||||
|
etcdSubscriber struct {
|
||||||
|
*discov.Subscriber
|
||||||
|
}
|
||||||
|
|
||||||
|
// EtcdConf is the configuration for etcd.
|
||||||
|
EtcdConf = discov.EtcdConf
|
||||||
|
)
|
||||||
|
|
||||||
|
// MustNewEtcdSubscriber returns an etcd Subscriber, exits on errors.
|
||||||
|
func MustNewEtcdSubscriber(conf EtcdConf) Subscriber {
|
||||||
|
s, err := NewEtcdSubscriber(conf)
|
||||||
|
logx.Must(err)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEtcdSubscriber returns an etcd Subscriber.
|
||||||
|
func NewEtcdSubscriber(conf EtcdConf) (Subscriber, error) {
|
||||||
|
opts := buildSubOptions(conf)
|
||||||
|
s, err := discov.NewSubscriber(conf.Hosts, conf.Key, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &etcdSubscriber{Subscriber: s}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildSubOptions constructs the options for creating a new etcd subscriber.
|
||||||
|
func buildSubOptions(conf EtcdConf) []discov.SubOption {
|
||||||
|
opts := []discov.SubOption{
|
||||||
|
discov.WithExactMatch(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(conf.User) > 0 {
|
||||||
|
opts = append(opts, discov.WithSubEtcdAccount(conf.User, conf.Pass))
|
||||||
|
}
|
||||||
|
if len(conf.CertFile) > 0 || len(conf.CertKeyFile) > 0 || len(conf.CACertFile) > 0 {
|
||||||
|
opts = append(opts, discov.WithSubEtcdTLS(conf.CertFile, conf.CertKeyFile,
|
||||||
|
conf.CACertFile, conf.InsecureSkipVerify))
|
||||||
|
}
|
||||||
|
|
||||||
|
return opts
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddListener adds a listener to the subscriber.
|
||||||
|
func (s *etcdSubscriber) AddListener(listener func()) error {
|
||||||
|
s.Subscriber.AddListener(listener)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the value of the subscriber.
|
||||||
|
func (s *etcdSubscriber) Value() (string, error) {
|
||||||
|
vs := s.Subscriber.Values()
|
||||||
|
if len(vs) > 0 {
|
||||||
|
return vs[len(vs)-1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
9
core/configcenter/subscriber/subscriber.go
Normal file
9
core/configcenter/subscriber/subscriber.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
package subscriber
|
||||||
|
|
||||||
|
// Subscriber is the interface for configcenter subscribers.
type Subscriber interface {
	// AddListener adds a listener that is invoked when the value changes.
	AddListener(listener func()) error
	// Value returns the current value held by the subscriber.
	Value() (string, error)
}
|
||||||
41
core/configcenter/unmarshaler.go
Normal file
41
core/configcenter/unmarshaler.go
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
package configurator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/conf"
|
||||||
|
)
|
||||||
|
|
||||||
|
var registry = &unmarshalerRegistry{
|
||||||
|
unmarshalers: map[string]LoaderFn{
|
||||||
|
"json": conf.LoadFromJsonBytes,
|
||||||
|
"toml": conf.LoadFromTomlBytes,
|
||||||
|
"yaml": conf.LoadFromYamlBytes,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// LoaderFn is the function type for loading configuration.
|
||||||
|
LoaderFn func([]byte, any) error
|
||||||
|
|
||||||
|
// unmarshalerRegistry is the registry for unmarshalers.
|
||||||
|
unmarshalerRegistry struct {
|
||||||
|
unmarshalers map[string]LoaderFn
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// RegisterUnmarshaler registers an unmarshaler.
|
||||||
|
func RegisterUnmarshaler(name string, fn LoaderFn) {
|
||||||
|
registry.mu.Lock()
|
||||||
|
defer registry.mu.Unlock()
|
||||||
|
registry.unmarshalers[name] = fn
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshaler returns the unmarshaler by name.
|
||||||
|
func Unmarshaler(name string) (LoaderFn, bool) {
|
||||||
|
registry.mu.RLock()
|
||||||
|
defer registry.mu.RUnlock()
|
||||||
|
fn, ok := registry.unmarshalers[name]
|
||||||
|
return fn, ok
|
||||||
|
}
|
||||||
28
core/configcenter/unmarshaler_test.go
Normal file
28
core/configcenter/unmarshaler_test.go
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
package configurator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRegisterUnmarshaler(t *testing.T) {
|
||||||
|
RegisterUnmarshaler("test", func(data []byte, v interface{}) error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
_, ok := Unmarshaler("test")
|
||||||
|
assert.True(t, ok)
|
||||||
|
|
||||||
|
_, ok = Unmarshaler("test2")
|
||||||
|
assert.False(t, ok)
|
||||||
|
|
||||||
|
_, ok = Unmarshaler("json")
|
||||||
|
assert.True(t, ok)
|
||||||
|
|
||||||
|
_, ok = Unmarshaler("toml")
|
||||||
|
assert.True(t, ok)
|
||||||
|
|
||||||
|
_, ok = Unmarshaler("yaml")
|
||||||
|
assert.True(t, ok)
|
||||||
|
}
|
||||||
@@ -1,5 +1,10 @@
|
|||||||
// Code generated by MockGen. DO NOT EDIT.
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
// Source: etcdclient.go
|
// Source: etcdclient.go
|
||||||
|
//
|
||||||
|
// Generated by this command:
|
||||||
|
//
|
||||||
|
// mockgen -package internal -destination etcdclient_mock.go -source etcdclient.go EtcdClient
|
||||||
|
//
|
||||||
|
|
||||||
// Package internal is a generated GoMock package.
|
// Package internal is a generated GoMock package.
|
||||||
package internal
|
package internal
|
||||||
@@ -8,35 +13,36 @@ import (
|
|||||||
context "context"
|
context "context"
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
|
gomock "go.uber.org/mock/gomock"
|
||||||
grpc "google.golang.org/grpc"
|
grpc "google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockEtcdClient is a mock of EtcdClient interface
|
// MockEtcdClient is a mock of EtcdClient interface.
|
||||||
type MockEtcdClient struct {
|
type MockEtcdClient struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockEtcdClientMockRecorder
|
recorder *MockEtcdClientMockRecorder
|
||||||
|
isgomock struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockEtcdClientMockRecorder is the mock recorder for MockEtcdClient
|
// MockEtcdClientMockRecorder is the mock recorder for MockEtcdClient.
|
||||||
type MockEtcdClientMockRecorder struct {
|
type MockEtcdClientMockRecorder struct {
|
||||||
mock *MockEtcdClient
|
mock *MockEtcdClient
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockEtcdClient creates a new mock instance
|
// NewMockEtcdClient creates a new mock instance.
|
||||||
func NewMockEtcdClient(ctrl *gomock.Controller) *MockEtcdClient {
|
func NewMockEtcdClient(ctrl *gomock.Controller) *MockEtcdClient {
|
||||||
mock := &MockEtcdClient{ctrl: ctrl}
|
mock := &MockEtcdClient{ctrl: ctrl}
|
||||||
mock.recorder = &MockEtcdClientMockRecorder{mock}
|
mock.recorder = &MockEtcdClientMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockEtcdClient) EXPECT() *MockEtcdClientMockRecorder {
|
func (m *MockEtcdClient) EXPECT() *MockEtcdClientMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// ActiveConnection mocks base method
|
// ActiveConnection mocks base method.
|
||||||
func (m *MockEtcdClient) ActiveConnection() *grpc.ClientConn {
|
func (m *MockEtcdClient) ActiveConnection() *grpc.ClientConn {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "ActiveConnection")
|
ret := m.ctrl.Call(m, "ActiveConnection")
|
||||||
@@ -44,13 +50,13 @@ func (m *MockEtcdClient) ActiveConnection() *grpc.ClientConn {
|
|||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// ActiveConnection indicates an expected call of ActiveConnection
|
// ActiveConnection indicates an expected call of ActiveConnection.
|
||||||
func (mr *MockEtcdClientMockRecorder) ActiveConnection() *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) ActiveConnection() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveConnection", reflect.TypeOf((*MockEtcdClient)(nil).ActiveConnection))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveConnection", reflect.TypeOf((*MockEtcdClient)(nil).ActiveConnection))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close mocks base method
|
// Close mocks base method.
|
||||||
func (m *MockEtcdClient) Close() error {
|
func (m *MockEtcdClient) Close() error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Close")
|
ret := m.ctrl.Call(m, "Close")
|
||||||
@@ -58,13 +64,13 @@ func (m *MockEtcdClient) Close() error {
|
|||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close indicates an expected call of Close
|
// Close indicates an expected call of Close.
|
||||||
func (mr *MockEtcdClientMockRecorder) Close() *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Close() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockEtcdClient)(nil).Close))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockEtcdClient)(nil).Close))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ctx mocks base method
|
// Ctx mocks base method.
|
||||||
func (m *MockEtcdClient) Ctx() context.Context {
|
func (m *MockEtcdClient) Ctx() context.Context {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Ctx")
|
ret := m.ctrl.Call(m, "Ctx")
|
||||||
@@ -72,13 +78,13 @@ func (m *MockEtcdClient) Ctx() context.Context {
|
|||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ctx indicates an expected call of Ctx
|
// Ctx indicates an expected call of Ctx.
|
||||||
func (mr *MockEtcdClientMockRecorder) Ctx() *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Ctx() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ctx", reflect.TypeOf((*MockEtcdClient)(nil).Ctx))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ctx", reflect.TypeOf((*MockEtcdClient)(nil).Ctx))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get mocks base method
|
// Get mocks base method.
|
||||||
func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
|
func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []any{ctx, key}
|
varargs := []any{ctx, key}
|
||||||
@@ -91,14 +97,14 @@ func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.O
|
|||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get indicates an expected call of Get
|
// Get indicates an expected call of Get.
|
||||||
func (mr *MockEtcdClientMockRecorder) Get(ctx, key any, opts ...any) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Get(ctx, key any, opts ...any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]any{ctx, key}, opts...)
|
varargs := append([]any{ctx, key}, opts...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockEtcdClient)(nil).Get), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockEtcdClient)(nil).Get), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Grant mocks base method
|
// Grant mocks base method.
|
||||||
func (m *MockEtcdClient) Grant(ctx context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error) {
|
func (m *MockEtcdClient) Grant(ctx context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Grant", ctx, ttl)
|
ret := m.ctrl.Call(m, "Grant", ctx, ttl)
|
||||||
@@ -107,13 +113,13 @@ func (m *MockEtcdClient) Grant(ctx context.Context, ttl int64) (*clientv3.LeaseG
|
|||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Grant indicates an expected call of Grant
|
// Grant indicates an expected call of Grant.
|
||||||
func (mr *MockEtcdClientMockRecorder) Grant(ctx, ttl any) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Grant(ctx, ttl any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Grant", reflect.TypeOf((*MockEtcdClient)(nil).Grant), ctx, ttl)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Grant", reflect.TypeOf((*MockEtcdClient)(nil).Grant), ctx, ttl)
|
||||||
}
|
}
|
||||||
|
|
||||||
// KeepAlive mocks base method
|
// KeepAlive mocks base method.
|
||||||
func (m *MockEtcdClient) KeepAlive(ctx context.Context, id clientv3.LeaseID) (<-chan *clientv3.LeaseKeepAliveResponse, error) {
|
func (m *MockEtcdClient) KeepAlive(ctx context.Context, id clientv3.LeaseID) (<-chan *clientv3.LeaseKeepAliveResponse, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "KeepAlive", ctx, id)
|
ret := m.ctrl.Call(m, "KeepAlive", ctx, id)
|
||||||
@@ -122,13 +128,13 @@ func (m *MockEtcdClient) KeepAlive(ctx context.Context, id clientv3.LeaseID) (<-
|
|||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// KeepAlive indicates an expected call of KeepAlive
|
// KeepAlive indicates an expected call of KeepAlive.
|
||||||
func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id any) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockEtcdClient)(nil).KeepAlive), ctx, id)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockEtcdClient)(nil).KeepAlive), ctx, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put mocks base method
|
// Put mocks base method.
|
||||||
func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
|
func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []any{ctx, key, val}
|
varargs := []any{ctx, key, val}
|
||||||
@@ -141,14 +147,14 @@ func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clien
|
|||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put indicates an expected call of Put
|
// Put indicates an expected call of Put.
|
||||||
func (mr *MockEtcdClientMockRecorder) Put(ctx, key, val any, opts ...any) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Put(ctx, key, val any, opts ...any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]any{ctx, key, val}, opts...)
|
varargs := append([]any{ctx, key, val}, opts...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockEtcdClient)(nil).Put), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockEtcdClient)(nil).Put), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Revoke mocks base method
|
// Revoke mocks base method.
|
||||||
func (m *MockEtcdClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) {
|
func (m *MockEtcdClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Revoke", ctx, id)
|
ret := m.ctrl.Call(m, "Revoke", ctx, id)
|
||||||
@@ -157,13 +163,13 @@ func (m *MockEtcdClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clie
|
|||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Revoke indicates an expected call of Revoke
|
// Revoke indicates an expected call of Revoke.
|
||||||
func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id any) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revoke", reflect.TypeOf((*MockEtcdClient)(nil).Revoke), ctx, id)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revoke", reflect.TypeOf((*MockEtcdClient)(nil).Revoke), ctx, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Watch mocks base method
|
// Watch mocks base method.
|
||||||
func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
|
func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []any{ctx, key}
|
varargs := []any{ctx, key}
|
||||||
@@ -175,7 +181,7 @@ func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3
|
|||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Watch indicates an expected call of Watch
|
// Watch indicates an expected call of Watch.
|
||||||
func (mr *MockEtcdClientMockRecorder) Watch(ctx, key any, opts ...any) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Watch(ctx, key any, opts ...any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]any{ctx, key}, opts...)
|
varargs := append([]any{ctx, key}, opts...)
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"sort"
|
"sort"
|
||||||
@@ -9,25 +10,30 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/contextx"
|
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logc"
|
||||||
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
"github.com/zeromicro/go-zero/core/syncx"
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
"github.com/zeromicro/go-zero/core/threading"
|
"github.com/zeromicro/go-zero/core/threading"
|
||||||
|
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const coolDownDeviation = 0.05
|
||||||
|
|
||||||
var (
|
var (
|
||||||
registry = Registry{
|
registry = Registry{
|
||||||
clusters: make(map[string]*cluster),
|
clusters: make(map[string]*cluster),
|
||||||
}
|
}
|
||||||
connManager = syncx.NewResourceManager()
|
connManager = syncx.NewResourceManager()
|
||||||
|
coolDownUnstable = mathx.NewUnstable(coolDownDeviation)
|
||||||
|
errClosed = errors.New("etcd monitor chan has been closed")
|
||||||
)
|
)
|
||||||
|
|
||||||
// A Registry is a registry that manages the etcd client connections.
|
// A Registry is a registry that manages the etcd client connections.
|
||||||
type Registry struct {
|
type Registry struct {
|
||||||
clusters map[string]*cluster
|
clusters map[string]*cluster
|
||||||
lock sync.Mutex
|
lock sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRegistry returns a global Registry.
|
// GetRegistry returns a global Registry.
|
||||||
@@ -37,60 +43,148 @@ func GetRegistry() *Registry {
|
|||||||
|
|
||||||
// GetConn returns an etcd client connection associated with given endpoints.
|
// GetConn returns an etcd client connection associated with given endpoints.
|
||||||
func (r *Registry) GetConn(endpoints []string) (EtcdClient, error) {
|
func (r *Registry) GetConn(endpoints []string) (EtcdClient, error) {
|
||||||
c, _ := r.getCluster(endpoints)
|
c, _ := r.getOrCreateCluster(endpoints)
|
||||||
return c.getClient()
|
return c.getClient()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Monitor monitors the key on given etcd endpoints, notify with the given UpdateListener.
|
// Monitor monitors the key on given etcd endpoints, notify with the given UpdateListener.
|
||||||
func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) error {
|
func (r *Registry) Monitor(endpoints []string, key string, exactMatch bool, l UpdateListener) error {
|
||||||
c, exists := r.getCluster(endpoints)
|
wkey := watchKey{
|
||||||
|
key: key,
|
||||||
|
exactMatch: exactMatch,
|
||||||
|
}
|
||||||
|
|
||||||
|
c, exists := r.getOrCreateCluster(endpoints)
|
||||||
// if exists, the existing values should be updated to the listener.
|
// if exists, the existing values should be updated to the listener.
|
||||||
if exists {
|
if exists {
|
||||||
kvs := c.getCurrent(key)
|
c.lock.Lock()
|
||||||
for _, kv := range kvs {
|
watcher, ok := c.watchers[wkey]
|
||||||
l.OnAdd(kv)
|
if ok {
|
||||||
|
watcher.listeners = append(watcher.listeners, l)
|
||||||
|
}
|
||||||
|
c.lock.Unlock()
|
||||||
|
|
||||||
|
if ok {
|
||||||
|
kvs := c.getCurrent(wkey)
|
||||||
|
for _, kv := range kvs {
|
||||||
|
l.OnAdd(kv)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.monitor(key, l)
|
return c.monitor(wkey, l)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Registry) getCluster(endpoints []string) (c *cluster, exists bool) {
|
func (r *Registry) Unmonitor(endpoints []string, key string, exactMatch bool, l UpdateListener) {
|
||||||
clusterKey := getClusterKey(endpoints)
|
c, exists := r.getCluster(endpoints)
|
||||||
r.lock.Lock()
|
|
||||||
defer r.lock.Unlock()
|
|
||||||
c, exists = r.clusters[clusterKey]
|
|
||||||
if !exists {
|
if !exists {
|
||||||
c = newCluster(endpoints)
|
return
|
||||||
r.clusters[clusterKey] = c
|
}
|
||||||
|
|
||||||
|
wkey := watchKey{
|
||||||
|
key: key,
|
||||||
|
exactMatch: exactMatch,
|
||||||
|
}
|
||||||
|
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
watcher, ok := c.watchers[wkey]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, listener := range watcher.listeners {
|
||||||
|
if listener == l {
|
||||||
|
watcher.listeners = append(watcher.listeners[:i], watcher.listeners[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(watcher.listeners) == 0 {
|
||||||
|
if watcher.cancel != nil {
|
||||||
|
watcher.cancel()
|
||||||
|
}
|
||||||
|
delete(c.watchers, wkey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Registry) getCluster(endpoints []string) (*cluster, bool) {
|
||||||
|
clusterKey := getClusterKey(endpoints)
|
||||||
|
|
||||||
|
r.lock.RLock()
|
||||||
|
c, ok := r.clusters[clusterKey]
|
||||||
|
r.lock.RUnlock()
|
||||||
|
|
||||||
|
return c, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Registry) getOrCreateCluster(endpoints []string) (c *cluster, exists bool) {
|
||||||
|
c, exists = r.getCluster(endpoints)
|
||||||
|
if !exists {
|
||||||
|
clusterKey := getClusterKey(endpoints)
|
||||||
|
|
||||||
|
r.lock.Lock()
|
||||||
|
defer r.lock.Unlock()
|
||||||
|
|
||||||
|
// double-check locking
|
||||||
|
c, exists = r.clusters[clusterKey]
|
||||||
|
if !exists {
|
||||||
|
c = newCluster(endpoints)
|
||||||
|
r.clusters[clusterKey] = c
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
type cluster struct {
|
type (
|
||||||
endpoints []string
|
watchKey struct {
|
||||||
key string
|
key string
|
||||||
values map[string]map[string]string
|
exactMatch bool
|
||||||
listeners map[string][]UpdateListener
|
}
|
||||||
watchGroup *threading.RoutineGroup
|
|
||||||
done chan lang.PlaceholderType
|
watchValue struct {
|
||||||
lock sync.Mutex
|
listeners []UpdateListener
|
||||||
}
|
values map[string]string
|
||||||
|
cancel context.CancelFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
cluster struct {
|
||||||
|
endpoints []string
|
||||||
|
key string
|
||||||
|
watchers map[watchKey]*watchValue
|
||||||
|
watchGroup *threading.RoutineGroup
|
||||||
|
done chan lang.PlaceholderType
|
||||||
|
lock sync.RWMutex
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
func newCluster(endpoints []string) *cluster {
|
func newCluster(endpoints []string) *cluster {
|
||||||
return &cluster{
|
return &cluster{
|
||||||
endpoints: endpoints,
|
endpoints: endpoints,
|
||||||
key: getClusterKey(endpoints),
|
key: getClusterKey(endpoints),
|
||||||
values: make(map[string]map[string]string),
|
watchers: make(map[watchKey]*watchValue),
|
||||||
listeners: make(map[string][]UpdateListener),
|
|
||||||
watchGroup: threading.NewRoutineGroup(),
|
watchGroup: threading.NewRoutineGroup(),
|
||||||
done: make(chan lang.PlaceholderType),
|
done: make(chan lang.PlaceholderType),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) context(cli EtcdClient) context.Context {
|
func (c *cluster) addListener(key watchKey, l UpdateListener) {
|
||||||
return contextx.ValueOnlyFrom(cli.Ctx())
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
watcher, ok := c.watchers[key]
|
||||||
|
if ok {
|
||||||
|
watcher.listeners = append(watcher.listeners, l)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
val := newWatchValue()
|
||||||
|
val.listeners = []UpdateListener{l}
|
||||||
|
c.watchers[key] = val
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) getClient() (EtcdClient, error) {
|
func (c *cluster) getClient() (EtcdClient, error) {
|
||||||
@@ -104,12 +198,17 @@ func (c *cluster) getClient() (EtcdClient, error) {
|
|||||||
return val.(EtcdClient), nil
|
return val.(EtcdClient), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) getCurrent(key string) []KV {
|
func (c *cluster) getCurrent(key watchKey) []KV {
|
||||||
c.lock.Lock()
|
c.lock.RLock()
|
||||||
defer c.lock.Unlock()
|
defer c.lock.RUnlock()
|
||||||
|
|
||||||
var kvs []KV
|
watcher, ok := c.watchers[key]
|
||||||
for k, v := range c.values[key] {
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
kvs := make([]KV, 0, len(watcher.values))
|
||||||
|
for k, v := range watcher.values {
|
||||||
kvs = append(kvs, KV{
|
kvs = append(kvs, KV{
|
||||||
Key: k,
|
Key: k,
|
||||||
Val: v,
|
Val: v,
|
||||||
@@ -119,42 +218,23 @@ func (c *cluster) getCurrent(key string) []KV {
|
|||||||
return kvs
|
return kvs
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) handleChanges(key string, kvs []KV) {
|
func (c *cluster) handleChanges(key watchKey, kvs []KV) {
|
||||||
var add []KV
|
|
||||||
var remove []KV
|
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
listeners := append([]UpdateListener(nil), c.listeners[key]...)
|
watcher, ok := c.watchers[key]
|
||||||
vals, ok := c.values[key]
|
|
||||||
if !ok {
|
if !ok {
|
||||||
add = kvs
|
c.lock.Unlock()
|
||||||
vals = make(map[string]string)
|
return
|
||||||
for _, kv := range kvs {
|
|
||||||
vals[kv.Key] = kv.Val
|
|
||||||
}
|
|
||||||
c.values[key] = vals
|
|
||||||
} else {
|
|
||||||
m := make(map[string]string)
|
|
||||||
for _, kv := range kvs {
|
|
||||||
m[kv.Key] = kv.Val
|
|
||||||
}
|
|
||||||
for k, v := range vals {
|
|
||||||
if val, ok := m[k]; !ok || v != val {
|
|
||||||
remove = append(remove, KV{
|
|
||||||
Key: k,
|
|
||||||
Val: v,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for k, v := range m {
|
|
||||||
if val, ok := vals[k]; !ok || v != val {
|
|
||||||
add = append(add, KV{
|
|
||||||
Key: k,
|
|
||||||
Val: v,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.values[key] = m
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
listeners := append([]UpdateListener(nil), watcher.listeners...)
|
||||||
|
// watcher.values cannot be nil
|
||||||
|
vals := watcher.values
|
||||||
|
newVals := make(map[string]string, len(kvs)+len(vals))
|
||||||
|
for _, kv := range kvs {
|
||||||
|
newVals[kv.Key] = kv.Val
|
||||||
|
}
|
||||||
|
add, remove := calculateChanges(vals, newVals)
|
||||||
|
watcher.values = newVals
|
||||||
c.lock.Unlock()
|
c.lock.Unlock()
|
||||||
|
|
||||||
for _, kv := range add {
|
for _, kv := range add {
|
||||||
@@ -169,20 +249,22 @@ func (c *cluster) handleChanges(key string, kvs []KV) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) {
|
func (c *cluster) handleWatchEvents(ctx context.Context, key watchKey, events []*clientv3.Event) {
|
||||||
c.lock.Lock()
|
c.lock.RLock()
|
||||||
listeners := append([]UpdateListener(nil), c.listeners[key]...)
|
watcher, ok := c.watchers[key]
|
||||||
c.lock.Unlock()
|
if !ok {
|
||||||
|
c.lock.RUnlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
listeners := append([]UpdateListener(nil), watcher.listeners...)
|
||||||
|
c.lock.RUnlock()
|
||||||
|
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
switch ev.Type {
|
switch ev.Type {
|
||||||
case clientv3.EventTypePut:
|
case clientv3.EventTypePut:
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
if vals, ok := c.values[key]; ok {
|
watcher.values[string(ev.Kv.Key)] = string(ev.Kv.Value)
|
||||||
vals[string(ev.Kv.Key)] = string(ev.Kv.Value)
|
|
||||||
} else {
|
|
||||||
c.values[key] = map[string]string{string(ev.Kv.Key): string(ev.Kv.Value)}
|
|
||||||
}
|
|
||||||
c.lock.Unlock()
|
c.lock.Unlock()
|
||||||
for _, l := range listeners {
|
for _, l := range listeners {
|
||||||
l.OnAdd(KV{
|
l.OnAdd(KV{
|
||||||
@@ -192,9 +274,7 @@ func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) {
|
|||||||
}
|
}
|
||||||
case clientv3.EventTypeDelete:
|
case clientv3.EventTypeDelete:
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
if vals, ok := c.values[key]; ok {
|
delete(watcher.values, string(ev.Kv.Key))
|
||||||
delete(vals, string(ev.Kv.Key))
|
|
||||||
}
|
|
||||||
c.lock.Unlock()
|
c.lock.Unlock()
|
||||||
for _, l := range listeners {
|
for _, l := range listeners {
|
||||||
l.OnDelete(KV{
|
l.OnDelete(KV{
|
||||||
@@ -203,27 +283,32 @@ func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
logx.Errorf("Unknown event type: %v", ev.Type)
|
logc.Errorf(ctx, "Unknown event type: %v", ev.Type)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) load(cli EtcdClient, key string) int64 {
|
func (c *cluster) load(cli EtcdClient, key watchKey) int64 {
|
||||||
var resp *clientv3.GetResponse
|
var resp *clientv3.GetResponse
|
||||||
for {
|
for {
|
||||||
var err error
|
var err error
|
||||||
ctx, cancel := context.WithTimeout(c.context(cli), RequestTimeout)
|
ctx, cancel := context.WithTimeout(cli.Ctx(), RequestTimeout)
|
||||||
resp, err = cli.Get(ctx, makeKeyPrefix(key), clientv3.WithPrefix())
|
if key.exactMatch {
|
||||||
|
resp, err = cli.Get(ctx, key.key)
|
||||||
|
} else {
|
||||||
|
resp, err = cli.Get(ctx, makeKeyPrefix(key.key), clientv3.WithPrefix())
|
||||||
|
}
|
||||||
|
|
||||||
cancel()
|
cancel()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
logx.Error(err)
|
logc.Errorf(cli.Ctx(), "%s, key: %s, exactMatch: %t", err.Error(), key.key, key.exactMatch)
|
||||||
time.Sleep(coolDownInterval)
|
time.Sleep(coolDownUnstable.AroundDuration(coolDownInterval))
|
||||||
}
|
}
|
||||||
|
|
||||||
var kvs []KV
|
kvs := make([]KV, 0, len(resp.Kvs))
|
||||||
for _, ev := range resp.Kvs {
|
for _, ev := range resp.Kvs {
|
||||||
kvs = append(kvs, KV{
|
kvs = append(kvs, KV{
|
||||||
Key: string(ev.Key),
|
Key: string(ev.Key),
|
||||||
@@ -236,16 +321,13 @@ func (c *cluster) load(cli EtcdClient, key string) int64 {
|
|||||||
return resp.Header.Revision
|
return resp.Header.Revision
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) monitor(key string, l UpdateListener) error {
|
func (c *cluster) monitor(key watchKey, l UpdateListener) error {
|
||||||
c.lock.Lock()
|
|
||||||
c.listeners[key] = append(c.listeners[key], l)
|
|
||||||
c.lock.Unlock()
|
|
||||||
|
|
||||||
cli, err := c.getClient()
|
cli, err := c.getClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.addListener(key, l)
|
||||||
rev := c.load(cli, key)
|
rev := c.load(cli, key)
|
||||||
c.watchGroup.Run(func() {
|
c.watchGroup.Run(func() {
|
||||||
c.watch(cli, key, rev)
|
c.watch(cli, key, rev)
|
||||||
@@ -267,16 +349,22 @@ func (c *cluster) newClient() (EtcdClient, error) {
|
|||||||
|
|
||||||
func (c *cluster) reload(cli EtcdClient) {
|
func (c *cluster) reload(cli EtcdClient) {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
|
// cancel the previous watches
|
||||||
close(c.done)
|
close(c.done)
|
||||||
c.watchGroup.Wait()
|
c.watchGroup.Wait()
|
||||||
|
keys := make([]watchKey, 0, len(c.watchers))
|
||||||
|
for wk, wval := range c.watchers {
|
||||||
|
keys = append(keys, wk)
|
||||||
|
if wval.cancel != nil {
|
||||||
|
wval.cancel()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
c.done = make(chan lang.PlaceholderType)
|
c.done = make(chan lang.PlaceholderType)
|
||||||
c.watchGroup = threading.NewRoutineGroup()
|
c.watchGroup = threading.NewRoutineGroup()
|
||||||
var keys []string
|
|
||||||
for k := range c.listeners {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
c.lock.Unlock()
|
c.lock.Unlock()
|
||||||
|
|
||||||
|
// start new watches
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
k := key
|
k := key
|
||||||
c.watchGroup.Run(func() {
|
c.watchGroup.Run(func() {
|
||||||
@@ -286,46 +374,80 @@ func (c *cluster) reload(cli EtcdClient) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) watch(cli EtcdClient, key string, rev int64) {
|
func (c *cluster) watch(cli EtcdClient, key watchKey, rev int64) {
|
||||||
for {
|
for {
|
||||||
if c.watchStream(cli, key, rev) {
|
err := c.watchStream(cli, key, rev)
|
||||||
|
if err == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if rev != 0 && errors.Is(err, rpctypes.ErrCompacted) {
|
||||||
|
logc.Errorf(cli.Ctx(), "etcd watch stream has been compacted, try to reload, rev %d", rev)
|
||||||
|
rev = c.load(cli, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// log the error and retry
|
||||||
|
logc.Error(cli.Ctx(), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) watchStream(cli EtcdClient, key string, rev int64) bool {
|
func (c *cluster) watchStream(cli EtcdClient, key watchKey, rev int64) error {
|
||||||
var rch clientv3.WatchChan
|
ctx, rch := c.setupWatch(cli, key, rev)
|
||||||
if rev != 0 {
|
|
||||||
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix(),
|
|
||||||
clientv3.WithRev(rev+1))
|
|
||||||
} else {
|
|
||||||
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix())
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case wresp, ok := <-rch:
|
case wresp, ok := <-rch:
|
||||||
if !ok {
|
if !ok {
|
||||||
logx.Error("etcd monitor chan has been closed")
|
return errClosed
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
if wresp.Canceled {
|
if wresp.Canceled {
|
||||||
logx.Errorf("etcd monitor chan has been canceled, error: %v", wresp.Err())
|
return fmt.Errorf("etcd monitor chan has been canceled, error: %w", wresp.Err())
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
if wresp.Err() != nil {
|
if wresp.Err() != nil {
|
||||||
logx.Error(fmt.Sprintf("etcd monitor chan error: %v", wresp.Err()))
|
return fmt.Errorf("etcd monitor chan error: %w", wresp.Err())
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
c.handleWatchEvents(key, wresp.Events)
|
c.handleWatchEvents(ctx, key, wresp.Events)
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil
|
||||||
case <-c.done:
|
case <-c.done:
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *cluster) setupWatch(cli EtcdClient, key watchKey, rev int64) (context.Context, clientv3.WatchChan) {
|
||||||
|
var (
|
||||||
|
rch clientv3.WatchChan
|
||||||
|
ops []clientv3.OpOption
|
||||||
|
wkey = key.key
|
||||||
|
)
|
||||||
|
|
||||||
|
if !key.exactMatch {
|
||||||
|
wkey = makeKeyPrefix(key.key)
|
||||||
|
ops = append(ops, clientv3.WithPrefix())
|
||||||
|
}
|
||||||
|
if rev != 0 {
|
||||||
|
ops = append(ops, clientv3.WithRev(rev+1))
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(cli.Ctx())
|
||||||
|
if watcher, ok := c.watchers[key]; ok {
|
||||||
|
watcher.cancel = cancel
|
||||||
|
} else {
|
||||||
|
val := newWatchValue()
|
||||||
|
val.cancel = cancel
|
||||||
|
|
||||||
|
c.lock.Lock()
|
||||||
|
c.watchers[key] = val
|
||||||
|
c.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
rch = cli.Watch(clientv3.WithRequireLeader(ctx), wkey, ops...)
|
||||||
|
|
||||||
|
return ctx, rch
|
||||||
|
}
|
||||||
|
|
||||||
func (c *cluster) watchConnState(cli EtcdClient) {
|
func (c *cluster) watchConnState(cli EtcdClient) {
|
||||||
watcher := newStateWatcher()
|
watcher := newStateWatcher()
|
||||||
watcher.addListener(func() {
|
watcher.addListener(func() {
|
||||||
@@ -354,6 +476,28 @@ func DialClient(endpoints []string) (EtcdClient, error) {
|
|||||||
return clientv3.New(cfg)
|
return clientv3.New(cfg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func calculateChanges(oldVals, newVals map[string]string) (add, remove []KV) {
|
||||||
|
for k, v := range newVals {
|
||||||
|
if val, ok := oldVals[k]; !ok || v != val {
|
||||||
|
add = append(add, KV{
|
||||||
|
Key: k,
|
||||||
|
Val: v,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range oldVals {
|
||||||
|
if val, ok := newVals[k]; !ok || v != val {
|
||||||
|
remove = append(remove, KV{
|
||||||
|
Key: k,
|
||||||
|
Val: v,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return add, remove
|
||||||
|
}
|
||||||
|
|
||||||
func getClusterKey(endpoints []string) string {
|
func getClusterKey(endpoints []string) string {
|
||||||
sort.Strings(endpoints)
|
sort.Strings(endpoints)
|
||||||
return strings.Join(endpoints, endpointsSeparator)
|
return strings.Join(endpoints, endpointsSeparator)
|
||||||
@@ -362,3 +506,10 @@ func getClusterKey(endpoints []string) string {
|
|||||||
func makeKeyPrefix(key string) string {
|
func makeKeyPrefix(key string) string {
|
||||||
return fmt.Sprintf("%s%c", key, Delimiter)
|
return fmt.Sprintf("%s%c", key, Delimiter)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewClient returns a watchValue that make sure values are not nil.
|
||||||
|
func newWatchValue() *watchValue {
|
||||||
|
return &watchValue{
|
||||||
|
values: make(map[string]string),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,16 +7,17 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/zeromicro/go-zero/core/contextx"
|
"github.com/zeromicro/go-zero/core/contextx"
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
|
"github.com/zeromicro/go-zero/core/threading"
|
||||||
"go.etcd.io/etcd/api/v3/etcdserverpb"
|
"go.etcd.io/etcd/api/v3/etcdserverpb"
|
||||||
"go.etcd.io/etcd/api/v3/mvccpb"
|
"go.etcd.io/etcd/api/v3/mvccpb"
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
"go.etcd.io/etcd/client/v3/mock/mockserver"
|
"go.etcd.io/etcd/client/v3/mock/mockserver"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
var mockLock sync.Mutex
|
var mockLock sync.Mutex
|
||||||
@@ -38,9 +39,9 @@ func setMockClient(cli EtcdClient) func() {
|
|||||||
|
|
||||||
func TestGetCluster(t *testing.T) {
|
func TestGetCluster(t *testing.T) {
|
||||||
AddAccount([]string{"first"}, "foo", "bar")
|
AddAccount([]string{"first"}, "foo", "bar")
|
||||||
c1, _ := GetRegistry().getCluster([]string{"first"})
|
c1, _ := GetRegistry().getOrCreateCluster([]string{"first"})
|
||||||
c2, _ := GetRegistry().getCluster([]string{"second"})
|
c2, _ := GetRegistry().getOrCreateCluster([]string{"second"})
|
||||||
c3, _ := GetRegistry().getCluster([]string{"first"})
|
c3, _ := GetRegistry().getOrCreateCluster([]string{"first"})
|
||||||
assert.Equal(t, c1, c3)
|
assert.Equal(t, c1, c3)
|
||||||
assert.NotEqual(t, c1, c2)
|
assert.NotEqual(t, c1, c2)
|
||||||
}
|
}
|
||||||
@@ -50,6 +51,36 @@ func TestGetClusterKey(t *testing.T) {
|
|||||||
getClusterKey([]string{"remotehost:5678", "localhost:1234"}))
|
getClusterKey([]string{"remotehost:5678", "localhost:1234"}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUnmonitor(t *testing.T) {
|
||||||
|
t.Run("no listener", func(t *testing.T) {
|
||||||
|
reg := &Registry{
|
||||||
|
clusters: map[string]*cluster{},
|
||||||
|
}
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
reg.Unmonitor([]string{"any"}, "any", false, nil)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no value", func(t *testing.T) {
|
||||||
|
reg := &Registry{
|
||||||
|
clusters: map[string]*cluster{
|
||||||
|
"any": {
|
||||||
|
watchers: map[watchKey]*watchValue{
|
||||||
|
{
|
||||||
|
key: "any",
|
||||||
|
}: {
|
||||||
|
values: map[string]string{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
reg.Unmonitor([]string{"any"}, "another", false, nil)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestCluster_HandleChanges(t *testing.T) {
|
func TestCluster_HandleChanges(t *testing.T) {
|
||||||
ctrl := gomock.NewController(t)
|
ctrl := gomock.NewController(t)
|
||||||
l := NewMockUpdateListener(ctrl)
|
l := NewMockUpdateListener(ctrl)
|
||||||
@@ -78,8 +109,14 @@ func TestCluster_HandleChanges(t *testing.T) {
|
|||||||
Val: "4",
|
Val: "4",
|
||||||
})
|
})
|
||||||
c := newCluster([]string{"any"})
|
c := newCluster([]string{"any"})
|
||||||
c.listeners["any"] = []UpdateListener{l}
|
key := watchKey{
|
||||||
c.handleChanges("any", []KV{
|
key: "any",
|
||||||
|
exactMatch: false,
|
||||||
|
}
|
||||||
|
c.watchers[key] = &watchValue{
|
||||||
|
listeners: []UpdateListener{l},
|
||||||
|
}
|
||||||
|
c.handleChanges(key, []KV{
|
||||||
{
|
{
|
||||||
Key: "first",
|
Key: "first",
|
||||||
Val: "1",
|
Val: "1",
|
||||||
@@ -92,8 +129,8 @@ func TestCluster_HandleChanges(t *testing.T) {
|
|||||||
assert.EqualValues(t, map[string]string{
|
assert.EqualValues(t, map[string]string{
|
||||||
"first": "1",
|
"first": "1",
|
||||||
"second": "2",
|
"second": "2",
|
||||||
}, c.values["any"])
|
}, c.watchers[key].values)
|
||||||
c.handleChanges("any", []KV{
|
c.handleChanges(key, []KV{
|
||||||
{
|
{
|
||||||
Key: "third",
|
Key: "third",
|
||||||
Val: "3",
|
Val: "3",
|
||||||
@@ -106,7 +143,7 @@ func TestCluster_HandleChanges(t *testing.T) {
|
|||||||
assert.EqualValues(t, map[string]string{
|
assert.EqualValues(t, map[string]string{
|
||||||
"third": "3",
|
"third": "3",
|
||||||
"fourth": "4",
|
"fourth": "4",
|
||||||
}, c.values["any"])
|
}, c.watchers[key].values)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCluster_Load(t *testing.T) {
|
func TestCluster_Load(t *testing.T) {
|
||||||
@@ -126,9 +163,11 @@ func TestCluster_Load(t *testing.T) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
cli.EXPECT().Ctx().Return(context.Background())
|
cli.EXPECT().Ctx().Return(context.Background())
|
||||||
c := &cluster{
|
c := &cluster{
|
||||||
values: make(map[string]map[string]string),
|
watchers: make(map[watchKey]*watchValue),
|
||||||
}
|
}
|
||||||
c.load(cli, "any")
|
c.load(cli, watchKey{
|
||||||
|
key: "any",
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCluster_Watch(t *testing.T) {
|
func TestCluster_Watch(t *testing.T) {
|
||||||
@@ -160,11 +199,16 @@ func TestCluster_Watch(t *testing.T) {
|
|||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
c := &cluster{
|
c := &cluster{
|
||||||
listeners: make(map[string][]UpdateListener),
|
watchers: make(map[watchKey]*watchValue),
|
||||||
values: make(map[string]map[string]string),
|
}
|
||||||
|
key := watchKey{
|
||||||
|
key: "any",
|
||||||
}
|
}
|
||||||
listener := NewMockUpdateListener(ctrl)
|
listener := NewMockUpdateListener(ctrl)
|
||||||
c.listeners["any"] = []UpdateListener{listener}
|
c.watchers[key] = &watchValue{
|
||||||
|
listeners: []UpdateListener{listener},
|
||||||
|
values: make(map[string]string),
|
||||||
|
}
|
||||||
listener.EXPECT().OnAdd(gomock.Any()).Do(func(kv KV) {
|
listener.EXPECT().OnAdd(gomock.Any()).Do(func(kv KV) {
|
||||||
assert.Equal(t, "hello", kv.Key)
|
assert.Equal(t, "hello", kv.Key)
|
||||||
assert.Equal(t, "world", kv.Val)
|
assert.Equal(t, "world", kv.Val)
|
||||||
@@ -173,7 +217,7 @@ func TestCluster_Watch(t *testing.T) {
|
|||||||
listener.EXPECT().OnDelete(gomock.Any()).Do(func(_ any) {
|
listener.EXPECT().OnDelete(gomock.Any()).Do(func(_ any) {
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}).MaxTimes(1)
|
}).MaxTimes(1)
|
||||||
go c.watch(cli, "any", 0)
|
go c.watch(cli, key, 0)
|
||||||
ch <- clientv3.WatchResponse{
|
ch <- clientv3.WatchResponse{
|
||||||
Events: []*clientv3.Event{
|
Events: []*clientv3.Event{
|
||||||
{
|
{
|
||||||
@@ -211,17 +255,111 @@ func TestClusterWatch_RespFailures(t *testing.T) {
|
|||||||
ch := make(chan clientv3.WatchResponse)
|
ch := make(chan clientv3.WatchResponse)
|
||||||
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch).AnyTimes()
|
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch).AnyTimes()
|
||||||
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
||||||
c := new(cluster)
|
c := &cluster{
|
||||||
|
watchers: make(map[watchKey]*watchValue),
|
||||||
|
}
|
||||||
c.done = make(chan lang.PlaceholderType)
|
c.done = make(chan lang.PlaceholderType)
|
||||||
go func() {
|
go func() {
|
||||||
ch <- resp
|
ch <- resp
|
||||||
close(c.done)
|
close(c.done)
|
||||||
}()
|
}()
|
||||||
c.watch(cli, "any", 0)
|
key := watchKey{
|
||||||
|
key: "any",
|
||||||
|
}
|
||||||
|
c.watch(cli, key, 0)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCluster_getCurrent(t *testing.T) {
|
||||||
|
t.Run("no value", func(t *testing.T) {
|
||||||
|
c := &cluster{
|
||||||
|
watchers: map[watchKey]*watchValue{
|
||||||
|
{
|
||||||
|
key: "any",
|
||||||
|
}: {
|
||||||
|
values: map[string]string{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.Nil(t, c.getCurrent(watchKey{
|
||||||
|
key: "another",
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCluster_handleWatchEvents(t *testing.T) {
|
||||||
|
t.Run("no value", func(t *testing.T) {
|
||||||
|
c := &cluster{
|
||||||
|
watchers: map[watchKey]*watchValue{
|
||||||
|
{
|
||||||
|
key: "any",
|
||||||
|
}: {
|
||||||
|
values: map[string]string{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
c.handleWatchEvents(context.Background(), watchKey{
|
||||||
|
key: "another",
|
||||||
|
}, nil)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCluster_addListener(t *testing.T) {
|
||||||
|
t.Run("has listener", func(t *testing.T) {
|
||||||
|
c := &cluster{
|
||||||
|
watchers: map[watchKey]*watchValue{
|
||||||
|
{
|
||||||
|
key: "any",
|
||||||
|
}: {
|
||||||
|
listeners: make([]UpdateListener, 0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
c.addListener(watchKey{
|
||||||
|
key: "any",
|
||||||
|
}, nil)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no listener", func(t *testing.T) {
|
||||||
|
c := &cluster{
|
||||||
|
watchers: map[watchKey]*watchValue{
|
||||||
|
{
|
||||||
|
key: "any",
|
||||||
|
}: {
|
||||||
|
listeners: make([]UpdateListener, 0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
c.addListener(watchKey{
|
||||||
|
key: "another",
|
||||||
|
}, nil)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCluster_reload(t *testing.T) {
|
||||||
|
c := &cluster{
|
||||||
|
watchers: map[watchKey]*watchValue{},
|
||||||
|
watchGroup: threading.NewRoutineGroup(),
|
||||||
|
done: make(chan lang.PlaceholderType),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
defer ctrl.Finish()
|
||||||
|
cli := NewMockEtcdClient(ctrl)
|
||||||
|
restore := setMockClient(cli)
|
||||||
|
defer restore()
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
c.reload(cli)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestClusterWatch_CloseChan(t *testing.T) {
|
func TestClusterWatch_CloseChan(t *testing.T) {
|
||||||
ctrl := gomock.NewController(t)
|
ctrl := gomock.NewController(t)
|
||||||
defer ctrl.Finish()
|
defer ctrl.Finish()
|
||||||
@@ -231,13 +369,17 @@ func TestClusterWatch_CloseChan(t *testing.T) {
|
|||||||
ch := make(chan clientv3.WatchResponse)
|
ch := make(chan clientv3.WatchResponse)
|
||||||
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch).AnyTimes()
|
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch).AnyTimes()
|
||||||
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
||||||
c := new(cluster)
|
c := &cluster{
|
||||||
|
watchers: make(map[watchKey]*watchValue),
|
||||||
|
}
|
||||||
c.done = make(chan lang.PlaceholderType)
|
c.done = make(chan lang.PlaceholderType)
|
||||||
go func() {
|
go func() {
|
||||||
close(ch)
|
close(ch)
|
||||||
close(c.done)
|
close(c.done)
|
||||||
}()
|
}()
|
||||||
c.watch(cli, "any", 0)
|
c.watch(cli, watchKey{
|
||||||
|
key: "any",
|
||||||
|
}, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValueOnlyContext(t *testing.T) {
|
func TestValueOnlyContext(t *testing.T) {
|
||||||
@@ -280,16 +422,59 @@ func TestRegistry_Monitor(t *testing.T) {
|
|||||||
GetRegistry().lock.Lock()
|
GetRegistry().lock.Lock()
|
||||||
GetRegistry().clusters = map[string]*cluster{
|
GetRegistry().clusters = map[string]*cluster{
|
||||||
getClusterKey(endpoints): {
|
getClusterKey(endpoints): {
|
||||||
listeners: map[string][]UpdateListener{},
|
watchers: map[watchKey]*watchValue{
|
||||||
values: map[string]map[string]string{
|
{
|
||||||
"foo": {
|
key: "foo",
|
||||||
"bar": "baz",
|
exactMatch: true,
|
||||||
|
}: {
|
||||||
|
values: map[string]string{
|
||||||
|
"bar": "baz",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
GetRegistry().lock.Unlock()
|
GetRegistry().lock.Unlock()
|
||||||
assert.Error(t, GetRegistry().Monitor(endpoints, "foo", new(mockListener)))
|
assert.Error(t, GetRegistry().Monitor(endpoints, "foo", false, new(mockListener)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRegistry_Unmonitor(t *testing.T) {
|
||||||
|
svr, err := mockserver.StartMockServers(1)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
svr.StartAt(0)
|
||||||
|
|
||||||
|
_, cancel := context.WithCancel(context.Background())
|
||||||
|
endpoints := []string{svr.Servers[0].Address}
|
||||||
|
GetRegistry().lock.Lock()
|
||||||
|
GetRegistry().clusters = map[string]*cluster{
|
||||||
|
getClusterKey(endpoints): {
|
||||||
|
watchers: map[watchKey]*watchValue{
|
||||||
|
{
|
||||||
|
key: "foo",
|
||||||
|
exactMatch: true,
|
||||||
|
}: {
|
||||||
|
values: map[string]string{
|
||||||
|
"bar": "baz",
|
||||||
|
},
|
||||||
|
cancel: cancel,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
GetRegistry().lock.Unlock()
|
||||||
|
l := new(mockListener)
|
||||||
|
assert.NoError(t, GetRegistry().Monitor(endpoints, "foo", true, l))
|
||||||
|
watchVals := GetRegistry().clusters[getClusterKey(endpoints)].watchers[watchKey{
|
||||||
|
key: "foo",
|
||||||
|
exactMatch: true,
|
||||||
|
}]
|
||||||
|
assert.Equal(t, 1, len(watchVals.listeners))
|
||||||
|
GetRegistry().Unmonitor(endpoints, "foo", true, l)
|
||||||
|
watchVals = GetRegistry().clusters[getClusterKey(endpoints)].watchers[watchKey{
|
||||||
|
key: "foo",
|
||||||
|
exactMatch: true,
|
||||||
|
}]
|
||||||
|
assert.Nil(t, watchVals)
|
||||||
}
|
}
|
||||||
|
|
||||||
type mockListener struct {
|
type mockListener struct {
|
||||||
|
|||||||
@@ -1,5 +1,10 @@
|
|||||||
// Code generated by MockGen. DO NOT EDIT.
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
// Source: statewatcher.go
|
// Source: statewatcher.go
|
||||||
|
//
|
||||||
|
// Generated by this command:
|
||||||
|
//
|
||||||
|
// mockgen -package internal -destination statewatcher_mock.go -source statewatcher.go etcdConn
|
||||||
|
//
|
||||||
|
|
||||||
// Package internal is a generated GoMock package.
|
// Package internal is a generated GoMock package.
|
||||||
package internal
|
package internal
|
||||||
@@ -8,34 +13,35 @@ import (
|
|||||||
context "context"
|
context "context"
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "go.uber.org/mock/gomock"
|
||||||
connectivity "google.golang.org/grpc/connectivity"
|
connectivity "google.golang.org/grpc/connectivity"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MocketcdConn is a mock of etcdConn interface
|
// MocketcdConn is a mock of etcdConn interface.
|
||||||
type MocketcdConn struct {
|
type MocketcdConn struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MocketcdConnMockRecorder
|
recorder *MocketcdConnMockRecorder
|
||||||
|
isgomock struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MocketcdConnMockRecorder is the mock recorder for MocketcdConn
|
// MocketcdConnMockRecorder is the mock recorder for MocketcdConn.
|
||||||
type MocketcdConnMockRecorder struct {
|
type MocketcdConnMockRecorder struct {
|
||||||
mock *MocketcdConn
|
mock *MocketcdConn
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMocketcdConn creates a new mock instance
|
// NewMocketcdConn creates a new mock instance.
|
||||||
func NewMocketcdConn(ctrl *gomock.Controller) *MocketcdConn {
|
func NewMocketcdConn(ctrl *gomock.Controller) *MocketcdConn {
|
||||||
mock := &MocketcdConn{ctrl: ctrl}
|
mock := &MocketcdConn{ctrl: ctrl}
|
||||||
mock.recorder = &MocketcdConnMockRecorder{mock}
|
mock.recorder = &MocketcdConnMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MocketcdConn) EXPECT() *MocketcdConnMockRecorder {
|
func (m *MocketcdConn) EXPECT() *MocketcdConnMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetState mocks base method
|
// GetState mocks base method.
|
||||||
func (m *MocketcdConn) GetState() connectivity.State {
|
func (m *MocketcdConn) GetState() connectivity.State {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetState")
|
ret := m.ctrl.Call(m, "GetState")
|
||||||
@@ -43,13 +49,13 @@ func (m *MocketcdConn) GetState() connectivity.State {
|
|||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetState indicates an expected call of GetState
|
// GetState indicates an expected call of GetState.
|
||||||
func (mr *MocketcdConnMockRecorder) GetState() *gomock.Call {
|
func (mr *MocketcdConnMockRecorder) GetState() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MocketcdConn)(nil).GetState))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MocketcdConn)(nil).GetState))
|
||||||
}
|
}
|
||||||
|
|
||||||
// WaitForStateChange mocks base method
|
// WaitForStateChange mocks base method.
|
||||||
func (m *MocketcdConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
|
func (m *MocketcdConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "WaitForStateChange", ctx, sourceState)
|
ret := m.ctrl.Call(m, "WaitForStateChange", ctx, sourceState)
|
||||||
@@ -57,7 +63,7 @@ func (m *MocketcdConn) WaitForStateChange(ctx context.Context, sourceState conne
|
|||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// WaitForStateChange indicates an expected call of WaitForStateChange
|
// WaitForStateChange indicates an expected call of WaitForStateChange.
|
||||||
func (mr *MocketcdConnMockRecorder) WaitForStateChange(ctx, sourceState any) *gomock.Call {
|
func (mr *MocketcdConnMockRecorder) WaitForStateChange(ctx, sourceState any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForStateChange", reflect.TypeOf((*MocketcdConn)(nil).WaitForStateChange), ctx, sourceState)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForStateChange", reflect.TypeOf((*MocketcdConn)(nil).WaitForStateChange), ctx, sourceState)
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"go.uber.org/mock/gomock"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ type (
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UpdateListener wraps the OnAdd and OnDelete methods.
|
// UpdateListener wraps the OnAdd and OnDelete methods.
|
||||||
|
// The implementation should be thread-safe and idempotent.
|
||||||
UpdateListener interface {
|
UpdateListener interface {
|
||||||
OnAdd(kv KV)
|
OnAdd(kv KV)
|
||||||
OnDelete(kv KV)
|
OnDelete(kv KV)
|
||||||
|
|||||||
@@ -1,5 +1,10 @@
|
|||||||
// Code generated by MockGen. DO NOT EDIT.
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
// Source: updatelistener.go
|
// Source: updatelistener.go
|
||||||
|
//
|
||||||
|
// Generated by this command:
|
||||||
|
//
|
||||||
|
// mockgen -package internal -destination updatelistener_mock.go -source updatelistener.go UpdateListener
|
||||||
|
//
|
||||||
|
|
||||||
// Package internal is a generated GoMock package.
|
// Package internal is a generated GoMock package.
|
||||||
package internal
|
package internal
|
||||||
@@ -7,51 +12,52 @@ package internal
|
|||||||
import (
|
import (
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockUpdateListener is a mock of UpdateListener interface
|
// MockUpdateListener is a mock of UpdateListener interface.
|
||||||
type MockUpdateListener struct {
|
type MockUpdateListener struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockUpdateListenerMockRecorder
|
recorder *MockUpdateListenerMockRecorder
|
||||||
|
isgomock struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockUpdateListenerMockRecorder is the mock recorder for MockUpdateListener
|
// MockUpdateListenerMockRecorder is the mock recorder for MockUpdateListener.
|
||||||
type MockUpdateListenerMockRecorder struct {
|
type MockUpdateListenerMockRecorder struct {
|
||||||
mock *MockUpdateListener
|
mock *MockUpdateListener
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockUpdateListener creates a new mock instance
|
// NewMockUpdateListener creates a new mock instance.
|
||||||
func NewMockUpdateListener(ctrl *gomock.Controller) *MockUpdateListener {
|
func NewMockUpdateListener(ctrl *gomock.Controller) *MockUpdateListener {
|
||||||
mock := &MockUpdateListener{ctrl: ctrl}
|
mock := &MockUpdateListener{ctrl: ctrl}
|
||||||
mock.recorder = &MockUpdateListenerMockRecorder{mock}
|
mock.recorder = &MockUpdateListenerMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockUpdateListener) EXPECT() *MockUpdateListenerMockRecorder {
|
func (m *MockUpdateListener) EXPECT() *MockUpdateListenerMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnAdd mocks base method
|
// OnAdd mocks base method.
|
||||||
func (m *MockUpdateListener) OnAdd(kv KV) {
|
func (m *MockUpdateListener) OnAdd(kv KV) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
m.ctrl.Call(m, "OnAdd", kv)
|
m.ctrl.Call(m, "OnAdd", kv)
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnAdd indicates an expected call of OnAdd
|
// OnAdd indicates an expected call of OnAdd.
|
||||||
func (mr *MockUpdateListenerMockRecorder) OnAdd(kv any) *gomock.Call {
|
func (mr *MockUpdateListenerMockRecorder) OnAdd(kv any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnAdd", reflect.TypeOf((*MockUpdateListener)(nil).OnAdd), kv)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnAdd", reflect.TypeOf((*MockUpdateListener)(nil).OnAdd), kv)
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnDelete mocks base method
|
// OnDelete mocks base method.
|
||||||
func (m *MockUpdateListener) OnDelete(kv KV) {
|
func (m *MockUpdateListener) OnDelete(kv KV) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
m.ctrl.Call(m, "OnDelete", kv)
|
m.ctrl.Call(m, "OnDelete", kv)
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnDelete indicates an expected call of OnDelete
|
// OnDelete indicates an expected call of OnDelete.
|
||||||
func (mr *MockUpdateListenerMockRecorder) OnDelete(kv any) *gomock.Call {
|
func (mr *MockUpdateListenerMockRecorder) OnDelete(kv any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnDelete", reflect.TypeOf((*MockUpdateListener)(nil).OnDelete), kv)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnDelete", reflect.TypeOf((*MockUpdateListener)(nil).OnDelete), kv)
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/discov/internal"
|
"github.com/zeromicro/go-zero/core/discov/internal"
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
|
"github.com/zeromicro/go-zero/core/logc"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/proc"
|
"github.com/zeromicro/go-zero/core/proc"
|
||||||
"github.com/zeromicro/go-zero/core/syncx"
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
@@ -91,12 +92,12 @@ func (p *Publisher) doKeepAlive() error {
|
|||||||
default:
|
default:
|
||||||
cli, err := p.doRegister()
|
cli, err := p.doRegister()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logx.Errorf("etcd publisher doRegister: %s", err.Error())
|
logc.Errorf(cli.Ctx(), "etcd publisher doRegister: %v", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := p.keepAliveAsync(cli); err != nil {
|
if err := p.keepAliveAsync(cli); err != nil {
|
||||||
logx.Errorf("etcd publisher keepAliveAsync: %s", err.Error())
|
logc.Errorf(cli.Ctx(), "etcd publisher keepAliveAsync: %v", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -124,23 +125,48 @@ func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
threading.GoSafe(func() {
|
threading.GoSafe(func() {
|
||||||
|
wch := cli.Watch(cli.Ctx(), p.fullKey, clientv3.WithFilterPut())
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case _, ok := <-ch:
|
case _, ok := <-ch:
|
||||||
if !ok {
|
if !ok {
|
||||||
p.revoke(cli)
|
p.revoke(cli)
|
||||||
if err := p.doKeepAlive(); err != nil {
|
if err := p.doKeepAlive(); err != nil {
|
||||||
logx.Errorf("etcd publisher KeepAlive: %s", err.Error())
|
logc.Errorf(cli.Ctx(), "etcd publisher KeepAlive: %v", err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
case c := <-wch:
|
||||||
|
if c.Err() != nil {
|
||||||
|
logc.Errorf(cli.Ctx(), "etcd publisher watch: %v", c.Err())
|
||||||
|
if err := p.doKeepAlive(); err != nil {
|
||||||
|
logc.Errorf(cli.Ctx(), "etcd publisher KeepAlive: %v", err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, evt := range c.Events {
|
||||||
|
if evt.Type == clientv3.EventTypeDelete {
|
||||||
|
logc.Infof(cli.Ctx(), "etcd publisher watch: %s, event: %v",
|
||||||
|
evt.Kv.Key, evt.Type)
|
||||||
|
_, err := cli.Put(cli.Ctx(), p.fullKey, p.value, clientv3.WithLease(p.lease))
|
||||||
|
if err != nil {
|
||||||
|
logc.Errorf(cli.Ctx(), "etcd publisher re-put key: %v", err)
|
||||||
|
} else {
|
||||||
|
logc.Infof(cli.Ctx(), "etcd publisher re-put key: %s, value: %s",
|
||||||
|
p.fullKey, p.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
case <-p.pauseChan:
|
case <-p.pauseChan:
|
||||||
logx.Infof("paused etcd renew, key: %s, value: %s", p.key, p.value)
|
logc.Infof(cli.Ctx(), "paused etcd renew, key: %s, value: %s", p.key, p.value)
|
||||||
p.revoke(cli)
|
p.revoke(cli)
|
||||||
select {
|
select {
|
||||||
case <-p.resumeChan:
|
case <-p.resumeChan:
|
||||||
if err := p.doKeepAlive(); err != nil {
|
if err := p.doKeepAlive(); err != nil {
|
||||||
logx.Errorf("etcd publisher KeepAlive: %s", err.Error())
|
logc.Errorf(cli.Ctx(), "etcd publisher KeepAlive: %v", err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
case <-p.quit.Done():
|
case <-p.quit.Done():
|
||||||
@@ -175,7 +201,7 @@ func (p *Publisher) register(client internal.EtcdClient) (clientv3.LeaseID, erro
|
|||||||
|
|
||||||
func (p *Publisher) revoke(cli internal.EtcdClient) {
|
func (p *Publisher) revoke(cli internal.EtcdClient) {
|
||||||
if _, err := cli.Revoke(cli.Ctx(), p.lease); err != nil {
|
if _, err := cli.Revoke(cli.Ctx(), p.lease); err != nil {
|
||||||
logx.Errorf("etcd publisher revoke: %s", err.Error())
|
logc.Errorf(cli.Ctx(), "etcd publisher revoke: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -9,13 +9,14 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/zeromicro/go-zero/core/discov/internal"
|
"github.com/zeromicro/go-zero/core/discov/internal"
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
|
"go.etcd.io/etcd/api/v3/mvccpb"
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/credentials/insecure"
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
@@ -211,6 +212,9 @@ func TestPublisher_keepAliveAsyncQuit(t *testing.T) {
|
|||||||
defer restore()
|
defer restore()
|
||||||
cli.EXPECT().Ctx().AnyTimes()
|
cli.EXPECT().Ctx().AnyTimes()
|
||||||
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
||||||
|
// Add Watch expectation for the new watch mechanism
|
||||||
|
watchChan := make(<-chan clientv3.WatchResponse)
|
||||||
|
cli.EXPECT().Watch(gomock.Any(), gomock.Any(), gomock.Any()).Return(watchChan)
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
|
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
|
||||||
@@ -232,6 +236,9 @@ func TestPublisher_keepAliveAsyncPause(t *testing.T) {
|
|||||||
defer restore()
|
defer restore()
|
||||||
cli.EXPECT().Ctx().AnyTimes()
|
cli.EXPECT().Ctx().AnyTimes()
|
||||||
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
||||||
|
// Add Watch expectation for the new watch mechanism
|
||||||
|
watchChan := make(<-chan clientv3.WatchResponse)
|
||||||
|
cli.EXPECT().Watch(gomock.Any(), gomock.Any(), gomock.Any()).Return(watchChan)
|
||||||
pub := NewPublisher(nil, "thekey", "thevalue")
|
pub := NewPublisher(nil, "thekey", "thevalue")
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
@@ -245,6 +252,112 @@ func TestPublisher_keepAliveAsyncPause(t *testing.T) {
|
|||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Test case for key deletion and re-registration (covers lines 148-155)
|
||||||
|
func TestPublisher_keepAliveAsyncKeyDeletion(t *testing.T) {
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
defer ctrl.Finish()
|
||||||
|
const id clientv3.LeaseID = 1
|
||||||
|
cli := internal.NewMockEtcdClient(ctrl)
|
||||||
|
restore := setMockClient(cli)
|
||||||
|
defer restore()
|
||||||
|
cli.EXPECT().Ctx().AnyTimes()
|
||||||
|
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
||||||
|
|
||||||
|
// Create a watch channel that will send a delete event
|
||||||
|
watchChan := make(chan clientv3.WatchResponse, 1)
|
||||||
|
watchResp := clientv3.WatchResponse{
|
||||||
|
Events: []*clientv3.Event{{
|
||||||
|
Type: clientv3.EventTypeDelete,
|
||||||
|
Kv: &mvccpb.KeyValue{
|
||||||
|
Key: []byte("thekey"),
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
watchChan <- watchResp
|
||||||
|
|
||||||
|
cli.EXPECT().Watch(gomock.Any(), gomock.Any(), gomock.Any()).Return((<-chan clientv3.WatchResponse)(watchChan))
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1) // Only wait for Revoke call
|
||||||
|
|
||||||
|
// Use a channel to signal when Put has been called
|
||||||
|
putCalled := make(chan struct{})
|
||||||
|
|
||||||
|
// Expect the re-put operation when key is deleted
|
||||||
|
cli.EXPECT().Put(gomock.Any(), "thekey", "thevalue", gomock.Any()).Do(func(_, _, _, _ any) {
|
||||||
|
close(putCalled) // Signal that Put has been called
|
||||||
|
}).Return(nil, nil)
|
||||||
|
|
||||||
|
// Expect revoke when Stop is called
|
||||||
|
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
|
||||||
|
wg.Done()
|
||||||
|
})
|
||||||
|
|
||||||
|
pub := NewPublisher(nil, "thekey", "thevalue")
|
||||||
|
pub.lease = id
|
||||||
|
pub.fullKey = "thekey"
|
||||||
|
|
||||||
|
assert.Nil(t, pub.keepAliveAsync(cli))
|
||||||
|
|
||||||
|
// Wait for Put to be called, then stop
|
||||||
|
<-putCalled
|
||||||
|
pub.Stop()
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test case for key deletion with re-put error (covers error branch in lines 151-152)
|
||||||
|
func TestPublisher_keepAliveAsyncKeyDeletionPutError(t *testing.T) {
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
defer ctrl.Finish()
|
||||||
|
const id clientv3.LeaseID = 1
|
||||||
|
cli := internal.NewMockEtcdClient(ctrl)
|
||||||
|
restore := setMockClient(cli)
|
||||||
|
defer restore()
|
||||||
|
cli.EXPECT().Ctx().AnyTimes()
|
||||||
|
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
||||||
|
|
||||||
|
// Create a watch channel that will send a delete event
|
||||||
|
watchChan := make(chan clientv3.WatchResponse, 1)
|
||||||
|
watchResp := clientv3.WatchResponse{
|
||||||
|
Events: []*clientv3.Event{{
|
||||||
|
Type: clientv3.EventTypeDelete,
|
||||||
|
Kv: &mvccpb.KeyValue{
|
||||||
|
Key: []byte("thekey"),
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
watchChan <- watchResp
|
||||||
|
|
||||||
|
cli.EXPECT().Watch(gomock.Any(), gomock.Any(), gomock.Any()).Return((<-chan clientv3.WatchResponse)(watchChan))
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1) // Only wait for Revoke call
|
||||||
|
|
||||||
|
// Use a channel to signal when Put has been called
|
||||||
|
putCalled := make(chan struct{})
|
||||||
|
|
||||||
|
// Expect the re-put operation to fail
|
||||||
|
cli.EXPECT().Put(gomock.Any(), "thekey", "thevalue", gomock.Any()).Do(func(_, _, _, _ any) {
|
||||||
|
close(putCalled) // Signal that Put has been called
|
||||||
|
}).Return(nil, errors.New("put error"))
|
||||||
|
|
||||||
|
// Expect revoke when Stop is called
|
||||||
|
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
|
||||||
|
wg.Done()
|
||||||
|
})
|
||||||
|
|
||||||
|
pub := NewPublisher(nil, "thekey", "thevalue")
|
||||||
|
pub.lease = id
|
||||||
|
pub.fullKey = "thekey"
|
||||||
|
|
||||||
|
assert.Nil(t, pub.keepAliveAsync(cli))
|
||||||
|
|
||||||
|
// Wait for Put to be called, then stop
|
||||||
|
<-putCalled
|
||||||
|
pub.Stop()
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
func TestPublisher_Resume(t *testing.T) {
|
func TestPublisher_Resume(t *testing.T) {
|
||||||
publisher := new(Publisher)
|
publisher := new(Publisher)
|
||||||
publisher.resumeChan = make(chan lang.PlaceholderType)
|
publisher.resumeChan = make(chan lang.PlaceholderType)
|
||||||
@@ -273,6 +386,9 @@ func TestPublisher_keepAliveAsync(t *testing.T) {
|
|||||||
defer restore()
|
defer restore()
|
||||||
cli.EXPECT().Ctx().AnyTimes()
|
cli.EXPECT().Ctx().AnyTimes()
|
||||||
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
||||||
|
// Add Watch expectation for the new watch mechanism
|
||||||
|
watchChan := make(<-chan clientv3.WatchResponse)
|
||||||
|
cli.EXPECT().Watch(gomock.Any(), gomock.Any(), gomock.Any()).Return(watchChan)
|
||||||
cli.EXPECT().Grant(gomock.Any(), timeToLive).Return(&clientv3.LeaseGrantResponse{
|
cli.EXPECT().Grant(gomock.Any(), timeToLive).Return(&clientv3.LeaseGrantResponse{
|
||||||
ID: 1,
|
ID: 1,
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|||||||
@@ -15,9 +15,11 @@ type (
|
|||||||
|
|
||||||
// A Subscriber is used to subscribe the given key on an etcd cluster.
|
// A Subscriber is used to subscribe the given key on an etcd cluster.
|
||||||
Subscriber struct {
|
Subscriber struct {
|
||||||
endpoints []string
|
endpoints []string
|
||||||
exclusive bool
|
exclusive bool
|
||||||
items *container
|
key string
|
||||||
|
exactMatch bool
|
||||||
|
items *container
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -28,13 +30,14 @@ type (
|
|||||||
func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscriber, error) {
|
func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscriber, error) {
|
||||||
sub := &Subscriber{
|
sub := &Subscriber{
|
||||||
endpoints: endpoints,
|
endpoints: endpoints,
|
||||||
|
key: key,
|
||||||
}
|
}
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
opt(sub)
|
opt(sub)
|
||||||
}
|
}
|
||||||
sub.items = newContainer(sub.exclusive)
|
sub.items = newContainer(sub.exclusive)
|
||||||
|
|
||||||
if err := internal.GetRegistry().Monitor(endpoints, key, sub.items); err != nil {
|
if err := internal.GetRegistry().Monitor(endpoints, key, sub.exactMatch, sub.items); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,6 +49,11 @@ func (s *Subscriber) AddListener(listener func()) {
|
|||||||
s.items.addListener(listener)
|
s.items.addListener(listener)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close closes the subscriber.
|
||||||
|
func (s *Subscriber) Close() {
|
||||||
|
internal.GetRegistry().Unmonitor(s.endpoints, s.key, s.exactMatch, s.items)
|
||||||
|
}
|
||||||
|
|
||||||
// Values returns all the subscription values.
|
// Values returns all the subscription values.
|
||||||
func (s *Subscriber) Values() []string {
|
func (s *Subscriber) Values() []string {
|
||||||
return s.items.getValues()
|
return s.items.getValues()
|
||||||
@@ -59,6 +67,13 @@ func Exclusive() SubOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithExactMatch turn off querying using key prefixes.
|
||||||
|
func WithExactMatch() SubOption {
|
||||||
|
return func(sub *Subscriber) {
|
||||||
|
sub.exactMatch = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// WithSubEtcdAccount provides the etcd username/password.
|
// WithSubEtcdAccount provides the etcd username/password.
|
||||||
func WithSubEtcdAccount(user, pass string) SubOption {
|
func WithSubEtcdAccount(user, pass string) SubOption {
|
||||||
return func(sub *Subscriber) {
|
return func(sub *Subscriber) {
|
||||||
|
|||||||
@@ -225,3 +225,28 @@ func TestWithSubEtcdAccount(t *testing.T) {
|
|||||||
assert.Equal(t, user, account.User)
|
assert.Equal(t, user, account.User)
|
||||||
assert.Equal(t, "bar", account.Pass)
|
assert.Equal(t, "bar", account.Pass)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestWithExactMatch(t *testing.T) {
|
||||||
|
sub := new(Subscriber)
|
||||||
|
WithExactMatch()(sub)
|
||||||
|
sub.items = newContainer(sub.exclusive)
|
||||||
|
var count int32
|
||||||
|
sub.AddListener(func() {
|
||||||
|
atomic.AddInt32(&count, 1)
|
||||||
|
})
|
||||||
|
sub.items.notifyChange()
|
||||||
|
assert.Empty(t, sub.Values())
|
||||||
|
assert.Equal(t, int32(1), atomic.LoadInt32(&count))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSubscriberClose(t *testing.T) {
|
||||||
|
l := newContainer(false)
|
||||||
|
sub := &Subscriber{
|
||||||
|
endpoints: []string{"localhost:12379"},
|
||||||
|
key: "foo",
|
||||||
|
items: l,
|
||||||
|
}
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
sub.Close()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,18 +1,21 @@
|
|||||||
package errorx
|
package errorx
|
||||||
|
|
||||||
import "bytes"
|
import (
|
||||||
|
"errors"
|
||||||
type (
|
"sync"
|
||||||
// A BatchError is an error that can hold multiple errors.
|
|
||||||
BatchError struct {
|
|
||||||
errs errorArray
|
|
||||||
}
|
|
||||||
|
|
||||||
errorArray []error
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Add adds errs to be, nil errors are ignored.
|
// BatchError is an error that can hold multiple errors.
|
||||||
|
type BatchError struct {
|
||||||
|
errs []error
|
||||||
|
lock sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds one or more non-nil errors to the BatchError instance.
|
||||||
func (be *BatchError) Add(errs ...error) {
|
func (be *BatchError) Add(errs ...error) {
|
||||||
|
be.lock.Lock()
|
||||||
|
defer be.lock.Unlock()
|
||||||
|
|
||||||
for _, err := range errs {
|
for _, err := range errs {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
be.errs = append(be.errs, err)
|
be.errs = append(be.errs, err)
|
||||||
@@ -20,33 +23,20 @@ func (be *BatchError) Add(errs ...error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Err returns an error that represents all errors.
|
// Err returns an error that represents all accumulated errors.
|
||||||
|
// It returns nil if there are no errors.
|
||||||
func (be *BatchError) Err() error {
|
func (be *BatchError) Err() error {
|
||||||
switch len(be.errs) {
|
be.lock.RLock()
|
||||||
case 0:
|
defer be.lock.RUnlock()
|
||||||
return nil
|
|
||||||
case 1:
|
// If there are no non-nil errors, errors.Join(...) returns nil.
|
||||||
return be.errs[0]
|
return errors.Join(be.errs...)
|
||||||
default:
|
|
||||||
return be.errs
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NotNil checks if any error inside.
|
// NotNil checks if there is at least one error inside the BatchError.
|
||||||
func (be *BatchError) NotNil() bool {
|
func (be *BatchError) NotNil() bool {
|
||||||
|
be.lock.RLock()
|
||||||
|
defer be.lock.RUnlock()
|
||||||
|
|
||||||
return len(be.errs) > 0
|
return len(be.errs) > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error returns a string that represents inside errors.
|
|
||||||
func (ea errorArray) Error() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
for i := range ea {
|
|
||||||
if i > 0 {
|
|
||||||
buf.WriteByte('\n')
|
|
||||||
}
|
|
||||||
buf.WriteString(ea[i].Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package errorx
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -33,7 +34,7 @@ func TestBatchErrorNilFromFunc(t *testing.T) {
|
|||||||
func TestBatchErrorOneError(t *testing.T) {
|
func TestBatchErrorOneError(t *testing.T) {
|
||||||
var batch BatchError
|
var batch BatchError
|
||||||
batch.Add(errors.New(err1))
|
batch.Add(errors.New(err1))
|
||||||
assert.NotNil(t, batch)
|
assert.NotNil(t, batch.Err())
|
||||||
assert.Equal(t, err1, batch.Err().Error())
|
assert.Equal(t, err1, batch.Err().Error())
|
||||||
assert.True(t, batch.NotNil())
|
assert.True(t, batch.NotNil())
|
||||||
}
|
}
|
||||||
@@ -42,7 +43,105 @@ func TestBatchErrorWithErrors(t *testing.T) {
|
|||||||
var batch BatchError
|
var batch BatchError
|
||||||
batch.Add(errors.New(err1))
|
batch.Add(errors.New(err1))
|
||||||
batch.Add(errors.New(err2))
|
batch.Add(errors.New(err2))
|
||||||
assert.NotNil(t, batch)
|
assert.NotNil(t, batch.Err())
|
||||||
assert.Equal(t, fmt.Sprintf("%s\n%s", err1, err2), batch.Err().Error())
|
assert.Equal(t, fmt.Sprintf("%s\n%s", err1, err2), batch.Err().Error())
|
||||||
assert.True(t, batch.NotNil())
|
assert.True(t, batch.NotNil())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestBatchErrorConcurrentAdd(t *testing.T) {
|
||||||
|
const count = 10000
|
||||||
|
var batch BatchError
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
wg.Add(count)
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
batch.Add(errors.New(err1))
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
assert.NotNil(t, batch.Err())
|
||||||
|
assert.Equal(t, count, len(batch.errs))
|
||||||
|
assert.True(t, batch.NotNil())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBatchError_Unwrap(t *testing.T) {
|
||||||
|
t.Run("nil", func(t *testing.T) {
|
||||||
|
var be BatchError
|
||||||
|
assert.Nil(t, be.Err())
|
||||||
|
assert.True(t, errors.Is(be.Err(), nil))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("one error", func(t *testing.T) {
|
||||||
|
var errFoo = errors.New("foo")
|
||||||
|
var errBar = errors.New("bar")
|
||||||
|
var be BatchError
|
||||||
|
be.Add(errFoo)
|
||||||
|
assert.True(t, errors.Is(be.Err(), errFoo))
|
||||||
|
assert.False(t, errors.Is(be.Err(), errBar))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("two errors", func(t *testing.T) {
|
||||||
|
var errFoo = errors.New("foo")
|
||||||
|
var errBar = errors.New("bar")
|
||||||
|
var errBaz = errors.New("baz")
|
||||||
|
var be BatchError
|
||||||
|
be.Add(errFoo)
|
||||||
|
be.Add(errBar)
|
||||||
|
assert.True(t, errors.Is(be.Err(), errFoo))
|
||||||
|
assert.True(t, errors.Is(be.Err(), errBar))
|
||||||
|
assert.False(t, errors.Is(be.Err(), errBaz))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBatchError_Add(t *testing.T) {
|
||||||
|
var be BatchError
|
||||||
|
|
||||||
|
// Test adding nil errors
|
||||||
|
be.Add(nil, nil)
|
||||||
|
assert.False(t, be.NotNil(), "Expected BatchError to be empty after adding nil errors")
|
||||||
|
|
||||||
|
// Test adding non-nil errors
|
||||||
|
err1 := errors.New("error 1")
|
||||||
|
err2 := errors.New("error 2")
|
||||||
|
be.Add(err1, err2)
|
||||||
|
assert.True(t, be.NotNil(), "Expected BatchError to be non-empty after adding errors")
|
||||||
|
|
||||||
|
// Test adding a mix of nil and non-nil errors
|
||||||
|
err3 := errors.New("error 3")
|
||||||
|
be.Add(nil, err3, nil)
|
||||||
|
assert.True(t, be.NotNil(), "Expected BatchError to be non-empty after adding a mix of nil and non-nil errors")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBatchError_Err(t *testing.T) {
|
||||||
|
var be BatchError
|
||||||
|
|
||||||
|
// Test Err() on empty BatchError
|
||||||
|
assert.Nil(t, be.Err(), "Expected nil error for empty BatchError")
|
||||||
|
|
||||||
|
// Test Err() with multiple errors
|
||||||
|
err1 := errors.New("error 1")
|
||||||
|
err2 := errors.New("error 2")
|
||||||
|
be.Add(err1, err2)
|
||||||
|
|
||||||
|
combinedErr := be.Err()
|
||||||
|
assert.NotNil(t, combinedErr, "Expected nil error for BatchError with multiple errors")
|
||||||
|
|
||||||
|
// Check if the combined error contains both error messages
|
||||||
|
errString := combinedErr.Error()
|
||||||
|
assert.Truef(t, errors.Is(combinedErr, err1), "Combined error doesn't contain first error: %s", errString)
|
||||||
|
assert.Truef(t, errors.Is(combinedErr, err2), "Combined error doesn't contain second error: %s", errString)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBatchError_NotNil(t *testing.T) {
|
||||||
|
var be BatchError
|
||||||
|
|
||||||
|
// Test NotNil() on empty BatchError
|
||||||
|
assert.Nil(t, be.Err(), "Expected nil error for empty BatchError")
|
||||||
|
|
||||||
|
// Test NotNil() after adding an error
|
||||||
|
be.Add(errors.New("test error"))
|
||||||
|
assert.NotNil(t, be.Err(), "Expected non-nil error after adding an error")
|
||||||
|
}
|
||||||
|
|||||||
14
core/errorx/check.go
Normal file
14
core/errorx/check.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package errorx
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
// In checks if the given err is one of errs.
|
||||||
|
func In(err error, errs ...error) bool {
|
||||||
|
for _, each := range errs {
|
||||||
|
if errors.Is(err, each) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
70
core/errorx/check_test.go
Normal file
70
core/errorx/check_test.go
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
package errorx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIn(t *testing.T) {
|
||||||
|
err1 := errors.New("error 1")
|
||||||
|
err2 := errors.New("error 2")
|
||||||
|
err3 := errors.New("error 3")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
err error
|
||||||
|
errs []error
|
||||||
|
want bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Error matches one of the errors in the list",
|
||||||
|
err: err1,
|
||||||
|
errs: []error{err1, err2},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Error does not match any errors in the list",
|
||||||
|
err: err3,
|
||||||
|
errs: []error{err1, err2},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty error list",
|
||||||
|
err: err1,
|
||||||
|
errs: []error{},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Nil error with non-nil list",
|
||||||
|
err: nil,
|
||||||
|
errs: []error{err1, err2},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Non-nil error with nil in list",
|
||||||
|
err: err1,
|
||||||
|
errs: []error{nil, err2},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Error matches nil error in the list",
|
||||||
|
err: nil,
|
||||||
|
errs: []error{nil, err2},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Nil error with empty list",
|
||||||
|
err: nil,
|
||||||
|
errs: []error{},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := In(tt.err, tt.errs...); got != tt.want {
|
||||||
|
t.Errorf("In() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -35,6 +35,7 @@ func firstLine(file *os.File) (string, error) {
|
|||||||
for {
|
for {
|
||||||
buf := make([]byte, bufSize)
|
buf := make([]byte, bufSize)
|
||||||
n, err := file.ReadAt(buf, offset)
|
n, err := file.ReadAt(buf, offset)
|
||||||
|
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@@ -45,6 +46,10 @@ func firstLine(file *os.File) (string, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
return string(append(first, buf[:n]...)), nil
|
||||||
|
}
|
||||||
|
|
||||||
first = append(first, buf[:n]...)
|
first = append(first, buf[:n]...)
|
||||||
offset += bufSize
|
offset += bufSize
|
||||||
}
|
}
|
||||||
@@ -57,30 +62,42 @@ func lastLine(filename string, file *os.File) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var last []byte
|
var last []byte
|
||||||
|
bufLen := int64(bufSize)
|
||||||
offset := info.Size()
|
offset := info.Size()
|
||||||
for {
|
|
||||||
offset -= bufSize
|
for offset > 0 {
|
||||||
if offset < 0 {
|
if offset < bufLen {
|
||||||
|
bufLen = offset
|
||||||
offset = 0
|
offset = 0
|
||||||
|
} else {
|
||||||
|
offset -= bufLen
|
||||||
}
|
}
|
||||||
buf := make([]byte, bufSize)
|
|
||||||
|
buf := make([]byte, bufLen)
|
||||||
n, err := file.ReadAt(buf, offset)
|
n, err := file.ReadAt(buf, offset)
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if n == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
if buf[n-1] == '\n' {
|
if buf[n-1] == '\n' {
|
||||||
buf = buf[:n-1]
|
buf = buf[:n-1]
|
||||||
n--
|
n--
|
||||||
} else {
|
} else {
|
||||||
buf = buf[:n]
|
buf = buf[:n]
|
||||||
}
|
}
|
||||||
for n--; n >= 0; n-- {
|
|
||||||
if buf[n] == '\n' {
|
for i := n - 1; i >= 0; i-- {
|
||||||
return string(append(buf[n+1:], last...)), nil
|
if buf[i] == '\n' {
|
||||||
|
return string(append(buf[i+1:], last...)), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
last = append(buf, last...)
|
last = append(buf, last...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return string(last), nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -52,6 +52,7 @@ last line`
|
|||||||
second line
|
second line
|
||||||
last line
|
last line
|
||||||
`
|
`
|
||||||
|
emptyContent = ``
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFirstLine(t *testing.T) {
|
func TestFirstLine(t *testing.T) {
|
||||||
@@ -79,6 +80,26 @@ func TestFirstLineError(t *testing.T) {
|
|||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFirstLineEmptyFile(t *testing.T) {
|
||||||
|
filename, err := fs.TempFilenameWithText(emptyContent)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := FirstLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "", val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFirstLineWithoutNewline(t *testing.T) {
|
||||||
|
filename, err := fs.TempFilenameWithText(longLine)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := FirstLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, longLine, val)
|
||||||
|
}
|
||||||
|
|
||||||
func TestLastLine(t *testing.T) {
|
func TestLastLine(t *testing.T) {
|
||||||
filename, err := fs.TempFilenameWithText(text)
|
filename, err := fs.TempFilenameWithText(text)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -99,6 +120,16 @@ func TestLastLineWithLastNewline(t *testing.T) {
|
|||||||
assert.Equal(t, longLine, val)
|
assert.Equal(t, longLine, val)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLastLineWithoutLastNewline(t *testing.T) {
|
||||||
|
filename, err := fs.TempFilenameWithText(longLine)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := LastLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, longLine, val)
|
||||||
|
}
|
||||||
|
|
||||||
func TestLastLineShort(t *testing.T) {
|
func TestLastLineShort(t *testing.T) {
|
||||||
filename, err := fs.TempFilenameWithText(shortText)
|
filename, err := fs.TempFilenameWithText(shortText)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -123,3 +154,67 @@ func TestLastLineError(t *testing.T) {
|
|||||||
_, err := LastLine("/tmp/does-not-exist")
|
_, err := LastLine("/tmp/does-not-exist")
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLastLineEmptyFile(t *testing.T) {
|
||||||
|
filename, err := fs.TempFilenameWithText(emptyContent)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := LastLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "", val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFirstLineExactlyBufSize(t *testing.T) {
|
||||||
|
content := make([]byte, bufSize)
|
||||||
|
for i := range content {
|
||||||
|
content[i] = 'a'
|
||||||
|
}
|
||||||
|
content[bufSize-1] = '\n' // Ensure there is a newline at the edge
|
||||||
|
|
||||||
|
filename, err := fs.TempFilenameWithText(string(content))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := FirstLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, string(content[:bufSize-1]), val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLastLineExactlyBufSize(t *testing.T) {
|
||||||
|
content := make([]byte, bufSize)
|
||||||
|
for i := range content {
|
||||||
|
content[i] = 'a'
|
||||||
|
}
|
||||||
|
content[bufSize-1] = '\n' // Ensure there is a newline at the edge
|
||||||
|
|
||||||
|
filename, err := fs.TempFilenameWithText(string(content))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := LastLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, string(content[:bufSize-1]), val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFirstLineLargeFile(t *testing.T) {
|
||||||
|
content := text + text + text + "\n" + "extra"
|
||||||
|
filename, err := fs.TempFilenameWithText(content)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := FirstLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "first line", val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLastLineLargeFile(t *testing.T) {
|
||||||
|
content := text + text + text + "\n" + "extra"
|
||||||
|
filename, err := fs.TempFilenameWithText(content)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
|
||||||
|
val, err := LastLine(filename)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "extra", val)
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import "gopkg.in/cheggaaa/pb.v1"
|
|||||||
type (
|
type (
|
||||||
// A Scanner is used to read lines.
|
// A Scanner is used to read lines.
|
||||||
Scanner interface {
|
Scanner interface {
|
||||||
// Scan checks if has remaining to read.
|
// Scan checks if it has remaining to read.
|
||||||
Scan() bool
|
Scan() bool
|
||||||
// Text returns next line.
|
// Text returns next line.
|
||||||
Text() string
|
Text() string
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build linux || darwin
|
//go:build linux || darwin || freebsd
|
||||||
|
|
||||||
package fs
|
package fs
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,9 @@
|
|||||||
package fx
|
package fx
|
||||||
|
|
||||||
import "github.com/zeromicro/go-zero/core/threading"
|
import (
|
||||||
|
"github.com/zeromicro/go-zero/core/errorx"
|
||||||
|
"github.com/zeromicro/go-zero/core/threading"
|
||||||
|
)
|
||||||
|
|
||||||
// Parallel runs fns parallelly and waits for done.
|
// Parallel runs fns parallelly and waits for done.
|
||||||
func Parallel(fns ...func()) {
|
func Parallel(fns ...func()) {
|
||||||
@@ -10,3 +13,20 @@ func Parallel(fns ...func()) {
|
|||||||
}
|
}
|
||||||
group.Wait()
|
group.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ParallelErr(fns ...func() error) error {
|
||||||
|
var be errorx.BatchError
|
||||||
|
|
||||||
|
group := threading.NewRoutineGroup()
|
||||||
|
for _, fn := range fns {
|
||||||
|
f := fn
|
||||||
|
group.RunSafe(func() {
|
||||||
|
if err := f(); err != nil {
|
||||||
|
be.Add(err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
group.Wait()
|
||||||
|
|
||||||
|
return be.Err()
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package fx
|
package fx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -22,3 +23,54 @@ func TestParallel(t *testing.T) {
|
|||||||
})
|
})
|
||||||
assert.Equal(t, int32(6), count)
|
assert.Equal(t, int32(6), count)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParallelErr(t *testing.T) {
|
||||||
|
var count int32
|
||||||
|
err := ParallelErr(
|
||||||
|
func() error {
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
atomic.AddInt32(&count, 1)
|
||||||
|
return errors.New("failed to exec #1")
|
||||||
|
},
|
||||||
|
func() error {
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
atomic.AddInt32(&count, 2)
|
||||||
|
return errors.New("failed to exec #2")
|
||||||
|
|
||||||
|
},
|
||||||
|
func() error {
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
atomic.AddInt32(&count, 3)
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.Equal(t, int32(6), count)
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.ErrorContains(t, err, "failed to exec #1", "failed to exec #2")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParallelErrErrorNil(t *testing.T) {
|
||||||
|
var count int32
|
||||||
|
err := ParallelErr(
|
||||||
|
func() error {
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
atomic.AddInt32(&count, 1)
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
func() error {
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
atomic.AddInt32(&count, 2)
|
||||||
|
return nil
|
||||||
|
|
||||||
|
},
|
||||||
|
func() error {
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
atomic.AddInt32(&count, 3)
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.Equal(t, int32(6), count)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package fx
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/errorx"
|
"github.com/zeromicro/go-zero/core/errorx"
|
||||||
@@ -14,9 +15,10 @@ type (
|
|||||||
RetryOption func(*retryOptions)
|
RetryOption func(*retryOptions)
|
||||||
|
|
||||||
retryOptions struct {
|
retryOptions struct {
|
||||||
times int
|
times int
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
timeout time.Duration
|
timeout time.Duration
|
||||||
|
ignoreErrors []error
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -62,6 +64,11 @@ func retry(ctx context.Context, fn func(errChan chan error, retryCount int), opt
|
|||||||
select {
|
select {
|
||||||
case err := <-errChan:
|
case err := <-errChan:
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
for _, ignoreErr := range options.ignoreErrors {
|
||||||
|
if errors.Is(err, ignoreErr) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
berr.Add(err)
|
berr.Add(err)
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil
|
||||||
@@ -84,19 +91,28 @@ func retry(ctx context.Context, fn func(errChan chan error, retryCount int), opt
|
|||||||
return berr.Err()
|
return berr.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithRetry customize a DoWithRetry call with given retry times.
|
// WithIgnoreErrors Ignore the specified errors
|
||||||
func WithRetry(times int) RetryOption {
|
func WithIgnoreErrors(ignoreErrors []error) RetryOption {
|
||||||
return func(options *retryOptions) {
|
return func(options *retryOptions) {
|
||||||
options.times = times
|
options.ignoreErrors = ignoreErrors
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithInterval customizes a DoWithRetry call with given interval.
|
||||||
func WithInterval(interval time.Duration) RetryOption {
|
func WithInterval(interval time.Duration) RetryOption {
|
||||||
return func(options *retryOptions) {
|
return func(options *retryOptions) {
|
||||||
options.interval = interval
|
options.interval = interval
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithRetry customizes a DoWithRetry call with given retry times.
|
||||||
|
func WithRetry(times int) RetryOption {
|
||||||
|
return func(options *retryOptions) {
|
||||||
|
options.times = times
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTimeout customizes a DoWithRetry call with given timeout.
|
||||||
func WithTimeout(timeout time.Duration) RetryOption {
|
func WithTimeout(timeout time.Duration) RetryOption {
|
||||||
return func(options *retryOptions) {
|
return func(options *retryOptions) {
|
||||||
options.timeout = timeout
|
options.timeout = timeout
|
||||||
|
|||||||
@@ -97,6 +97,24 @@ func TestRetryWithInterval(t *testing.T) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRetryWithWithIgnoreErrors(t *testing.T) {
|
||||||
|
ignoreErr1 := errors.New("ignore error1")
|
||||||
|
ignoreErr2 := errors.New("ignore error2")
|
||||||
|
ignoreErrs := []error{ignoreErr1, ignoreErr2}
|
||||||
|
|
||||||
|
assert.Nil(t, DoWithRetry(func() error {
|
||||||
|
return ignoreErr1
|
||||||
|
}, WithIgnoreErrors(ignoreErrs)))
|
||||||
|
|
||||||
|
assert.Nil(t, DoWithRetry(func() error {
|
||||||
|
return ignoreErr2
|
||||||
|
}, WithIgnoreErrors(ignoreErrs)))
|
||||||
|
|
||||||
|
assert.NotNil(t, DoWithRetry(func() error {
|
||||||
|
return errors.New("any")
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
func TestRetryCtx(t *testing.T) {
|
func TestRetryCtx(t *testing.T) {
|
||||||
t.Run("with timeout", func(t *testing.T) {
|
t.Run("with timeout", func(t *testing.T) {
|
||||||
assert.NotNil(t, DoWithRetryCtx(context.Background(), func(ctx context.Context, retryCount int) error {
|
assert.NotNil(t, DoWithRetryCtx(context.Background(), func(ctx context.Context, retryCount int) error {
|
||||||
|
|||||||
@@ -84,10 +84,10 @@ func Range(source <-chan any) Stream {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllMach returns whether all elements of this stream match the provided predicate.
|
// AllMatch returns whether all elements of this stream match the provided predicate.
|
||||||
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
||||||
// If the stream is empty then true is returned and the predicate is not evaluated.
|
// If the stream is empty then true is returned and the predicate is not evaluated.
|
||||||
func (s Stream) AllMach(predicate func(item any) bool) bool {
|
func (s Stream) AllMatch(predicate func(item any) bool) bool {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
if !predicate(item) {
|
if !predicate(item) {
|
||||||
// make sure the former goroutine not block, and current func returns fast.
|
// make sure the former goroutine not block, and current func returns fast.
|
||||||
@@ -99,10 +99,10 @@ func (s Stream) AllMach(predicate func(item any) bool) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// AnyMach returns whether any elements of this stream match the provided predicate.
|
// AnyMatch returns whether any elements of this stream match the provided predicate.
|
||||||
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
||||||
// If the stream is empty then false is returned and the predicate is not evaluated.
|
// If the stream is empty then false is returned and the predicate is not evaluated.
|
||||||
func (s Stream) AnyMach(predicate func(item any) bool) bool {
|
func (s Stream) AnyMatch(predicate func(item any) bool) bool {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
if predicate(item) {
|
if predicate(item) {
|
||||||
// make sure the former goroutine not block, and current func returns fast.
|
// make sure the former goroutine not block, and current func returns fast.
|
||||||
@@ -352,7 +352,7 @@ func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
|
|||||||
}, opts...).Done()
|
}, opts...).Done()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reduce is an utility method to let the caller deal with the underlying channel.
|
// Reduce is a utility method to let the caller deal with the underlying channel.
|
||||||
func (s Stream) Reduce(fn ReduceFunc) (any, error) {
|
func (s Stream) Reduce(fn ReduceFunc) (any, error) {
|
||||||
return fn(s.source)
|
return fn(s.source)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -398,16 +398,16 @@ func TestWalk(t *testing.T) {
|
|||||||
|
|
||||||
func TestStream_AnyMach(t *testing.T) {
|
func TestStream_AnyMach(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
|
assetEqual(t, false, Just(1, 2, 3).AnyMatch(func(item any) bool {
|
||||||
return item.(int) == 4
|
return item.(int) == 4
|
||||||
}))
|
}))
|
||||||
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
|
assetEqual(t, false, Just(1, 2, 3).AnyMatch(func(item any) bool {
|
||||||
return item.(int) == 0
|
return item.(int) == 0
|
||||||
}))
|
}))
|
||||||
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
|
assetEqual(t, true, Just(1, 2, 3).AnyMatch(func(item any) bool {
|
||||||
return item.(int) == 2
|
return item.(int) == 2
|
||||||
}))
|
}))
|
||||||
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
|
assetEqual(t, true, Just(1, 2, 3).AnyMatch(func(item any) bool {
|
||||||
return item.(int) == 2
|
return item.(int) == 2
|
||||||
}))
|
}))
|
||||||
})
|
})
|
||||||
@@ -416,17 +416,17 @@ func TestStream_AnyMach(t *testing.T) {
|
|||||||
func TestStream_AllMach(t *testing.T) {
|
func TestStream_AllMach(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, true, Just(1, 2, 3).AllMach(func(item any) bool {
|
t, true, Just(1, 2, 3).AllMatch(func(item any) bool {
|
||||||
return true
|
return true
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, false, Just(1, 2, 3).AllMach(func(item any) bool {
|
t, false, Just(1, 2, 3).AllMatch(func(item any) bool {
|
||||||
return false
|
return false
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, false, Just(1, 2, 3).AllMach(func(item any) bool {
|
t, false, Just(1, 2, 3).AllMatch(func(item any) bool {
|
||||||
return item.(int) == 1
|
return item.(int) == 1
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -86,21 +86,16 @@ func TestConsistentHashIncrementalTransfer(t *testing.T) {
|
|||||||
|
|
||||||
func TestConsistentHashTransferOnFailure(t *testing.T) {
|
func TestConsistentHashTransferOnFailure(t *testing.T) {
|
||||||
index := 41
|
index := 41
|
||||||
keys, newKeys := getKeysBeforeAndAfterFailure(t, "localhost:", index)
|
ratioNotExists := getTransferRatioOnFailure(t, index)
|
||||||
var transferred int
|
assert.True(t, ratioNotExists == 0, fmt.Sprintf("%d: %f", index, ratioNotExists))
|
||||||
for k, v := range newKeys {
|
index = 13
|
||||||
if v != keys[k] {
|
ratio := getTransferRatioOnFailure(t, index)
|
||||||
transferred++
|
assert.True(t, ratio < 2.5/keySize, fmt.Sprintf("%d: %f", index, ratio))
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ratio := float32(transferred) / float32(requestSize)
|
|
||||||
assert.True(t, ratio < 2.5/float32(keySize), fmt.Sprintf("%d: %f", index, ratio))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConsistentHashLeastTransferOnFailure(t *testing.T) {
|
func TestConsistentHashLeastTransferOnFailure(t *testing.T) {
|
||||||
prefix := "localhost:"
|
prefix := "localhost:"
|
||||||
index := 41
|
index := 13
|
||||||
keys, newKeys := getKeysBeforeAndAfterFailure(t, prefix, index)
|
keys, newKeys := getKeysBeforeAndAfterFailure(t, prefix, index)
|
||||||
for k, v := range keys {
|
for k, v := range keys {
|
||||||
newV := newKeys[k]
|
newV := newKeys[k]
|
||||||
@@ -164,6 +159,17 @@ func getKeysBeforeAndAfterFailure(t *testing.T, prefix string, index int) (map[i
|
|||||||
return keys, newKeys
|
return keys, newKeys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getTransferRatioOnFailure(t *testing.T, index int) float32 {
|
||||||
|
keys, newKeys := getKeysBeforeAndAfterFailure(t, "localhost:", index)
|
||||||
|
var transferred int
|
||||||
|
for k, v := range newKeys {
|
||||||
|
if v != keys[k] {
|
||||||
|
transferred++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return float32(transferred) / float32(requestSize)
|
||||||
|
}
|
||||||
|
|
||||||
type mockNode struct {
|
type mockNode struct {
|
||||||
addr string
|
addr string
|
||||||
id int
|
id int
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package hash
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"fmt"
|
"encoding/hex"
|
||||||
|
|
||||||
"github.com/spaolacci/murmur3"
|
"github.com/spaolacci/murmur3"
|
||||||
)
|
)
|
||||||
@@ -20,6 +20,7 @@ func Md5(data []byte) []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Md5Hex returns the md5 hex string of data.
|
// Md5Hex returns the md5 hex string of data.
|
||||||
|
// This function is optimized for better performance than fmt.Sprintf.
|
||||||
func Md5Hex(data []byte) string {
|
func Md5Hex(data []byte) string {
|
||||||
return fmt.Sprintf("%x", Md5(data))
|
return hex.EncodeToString(Md5(data))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package iox
|
|||||||
|
|
||||||
import "os"
|
import "os"
|
||||||
|
|
||||||
// RedirectInOut redirects stdin to r, stdout to w, and callers need to call restore afterwards.
|
// RedirectInOut redirects stdin to r, stdout to w, and callers need to call restore afterward.
|
||||||
func RedirectInOut() (restore func(), err error) {
|
func RedirectInOut() (restore func(), err error) {
|
||||||
var r, w *os.File
|
var r, w *os.File
|
||||||
r, w, err = os.Pipe()
|
r, w, err = os.Pipe()
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
const bufSize = 32 * 1024
|
const bufSize = 32 * 1024
|
||||||
|
|
||||||
// CountLines returns the number of lines in file.
|
// CountLines returns the number of lines in the file.
|
||||||
func CountLines(file string) (int, error) {
|
func CountLines(file string) (int, error) {
|
||||||
f, err := os.Open(file)
|
f, err := os.Open(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -2,11 +2,12 @@ package iox
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// A TextLineScanner is a scanner that can scan lines from given reader.
|
// A TextLineScanner is a scanner that can scan lines from the given reader.
|
||||||
type TextLineScanner struct {
|
type TextLineScanner struct {
|
||||||
reader *bufio.Reader
|
reader *bufio.Reader
|
||||||
hasNext bool
|
hasNext bool
|
||||||
@@ -14,7 +15,7 @@ type TextLineScanner struct {
|
|||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTextLineScanner returns a TextLineScanner with given reader.
|
// NewTextLineScanner returns a TextLineScanner with the given reader.
|
||||||
func NewTextLineScanner(reader io.Reader) *TextLineScanner {
|
func NewTextLineScanner(reader io.Reader) *TextLineScanner {
|
||||||
return &TextLineScanner{
|
return &TextLineScanner{
|
||||||
reader: bufio.NewReader(reader),
|
reader: bufio.NewReader(reader),
|
||||||
@@ -30,7 +31,7 @@ func (scanner *TextLineScanner) Scan() bool {
|
|||||||
|
|
||||||
line, err := scanner.reader.ReadString('\n')
|
line, err := scanner.reader.ReadString('\n')
|
||||||
scanner.line = strings.TrimRight(line, "\n")
|
scanner.line = strings.TrimRight(line, "\n")
|
||||||
if err == io.EOF {
|
if errors.Is(err, io.EOF) {
|
||||||
scanner.hasNext = false
|
scanner.hasNext = false
|
||||||
return true
|
return true
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
|
|||||||
@@ -8,9 +8,25 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Marshal marshals v into json bytes.
|
// Marshal marshals v into json bytes, without escaping HTML and removes the trailing newline.
|
||||||
func Marshal(v any) ([]byte, error) {
|
func Marshal(v any) ([]byte, error) {
|
||||||
return json.Marshal(v)
|
// why not use json.Marshal? https://github.com/golang/go/issues/28453
|
||||||
|
// it changes the behavior of json.Marshal, like & -> \u0026, < -> \u003c, > -> \u003e
|
||||||
|
// which is not what we want in API responses
|
||||||
|
var buf bytes.Buffer
|
||||||
|
enc := json.NewEncoder(&buf)
|
||||||
|
enc.SetEscapeHTML(false)
|
||||||
|
if err := enc.Encode(v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bs := buf.Bytes()
|
||||||
|
// Remove trailing newline added by json.Encoder.Encode
|
||||||
|
if len(bs) > 0 && bs[len(bs)-1] == '\n' {
|
||||||
|
bs = bs[:len(bs)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return bs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalToString marshals v into a string.
|
// MarshalToString marshals v into a string.
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package jsonx
|
package jsonx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -101,3 +102,105 @@ func TestUnmarshalFromReaderError(t *testing.T) {
|
|||||||
err := UnmarshalFromReader(strings.NewReader(s), &v)
|
err := UnmarshalFromReader(strings.NewReader(s), &v)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func Test_doMarshalJson(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
v any
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want []byte
|
||||||
|
wantErr assert.ErrorAssertionFunc
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "nil",
|
||||||
|
args: args{nil},
|
||||||
|
want: []byte("null"),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "string",
|
||||||
|
args: args{"hello"},
|
||||||
|
want: []byte(`"hello"`),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "int",
|
||||||
|
args: args{42},
|
||||||
|
want: []byte("42"),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "bool",
|
||||||
|
args: args{true},
|
||||||
|
want: []byte("true"),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "struct",
|
||||||
|
args: args{
|
||||||
|
struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
}{Name: "test"},
|
||||||
|
},
|
||||||
|
want: []byte(`{"name":"test"}`),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "slice",
|
||||||
|
args: args{[]int{1, 2, 3}},
|
||||||
|
want: []byte("[1,2,3]"),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "map",
|
||||||
|
args: args{map[string]int{"a": 1, "b": 2}},
|
||||||
|
want: []byte(`{"a":1,"b":2}`),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unmarshalable type",
|
||||||
|
args: args{complex(1, 2)},
|
||||||
|
want: nil,
|
||||||
|
wantErr: assert.Error,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "channel type",
|
||||||
|
args: args{make(chan int)},
|
||||||
|
want: nil,
|
||||||
|
wantErr: assert.Error,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url with query params",
|
||||||
|
args: args{"https://example.com/api?name=test&age=25"},
|
||||||
|
want: []byte(`"https://example.com/api?name=test&age=25"`),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url with encoded query params",
|
||||||
|
args: args{"https://example.com/api?data=hello%20world&special=%26%3D"},
|
||||||
|
want: []byte(`"https://example.com/api?data=hello%20world&special=%26%3D"`),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url with multiple query params",
|
||||||
|
args: args{"http://localhost:8080/users?page=1&limit=10&sort=name&order=asc"},
|
||||||
|
want: []byte(`"http://localhost:8080/users?page=1&limit=10&sort=name&order=asc"`),
|
||||||
|
wantErr: assert.NoError,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := Marshal(tt.args.v)
|
||||||
|
if !tt.wantErr(t, err, fmt.Sprintf("Marshal(%v)", tt.args.v)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equalf(t, string(tt.want), string(got), "Marshal(%v)", tt.args.v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package limit
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
_ "embed"
|
||||||
"errors"
|
"errors"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
@@ -28,20 +29,9 @@ var (
|
|||||||
// ErrUnknownCode is an error that represents unknown status code.
|
// ErrUnknownCode is an error that represents unknown status code.
|
||||||
ErrUnknownCode = errors.New("unknown status code")
|
ErrUnknownCode = errors.New("unknown status code")
|
||||||
|
|
||||||
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
//go:embed periodscript.lua
|
||||||
periodScript = redis.NewScript(`local limit = tonumber(ARGV[1])
|
periodLuaScript string
|
||||||
local window = tonumber(ARGV[2])
|
periodScript = redis.NewScript(periodLuaScript)
|
||||||
local current = redis.call("INCRBY", KEYS[1], 1)
|
|
||||||
if current == 1 then
|
|
||||||
redis.call("expire", KEYS[1], window)
|
|
||||||
end
|
|
||||||
if current < limit then
|
|
||||||
return 1
|
|
||||||
elseif current == limit then
|
|
||||||
return 2
|
|
||||||
else
|
|
||||||
return 0
|
|
||||||
end`)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
|||||||
14
core/limit/periodscript.lua
Normal file
14
core/limit/periodscript.lua
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
-- to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
||||||
|
local limit = tonumber(ARGV[1])
|
||||||
|
local window = tonumber(ARGV[2])
|
||||||
|
local current = redis.call("INCRBY", KEYS[1], 1)
|
||||||
|
if current == 1 then
|
||||||
|
redis.call("expire", KEYS[1], window)
|
||||||
|
end
|
||||||
|
if current < limit then
|
||||||
|
return 1
|
||||||
|
elseif current == limit then
|
||||||
|
return 2
|
||||||
|
else
|
||||||
|
return 0
|
||||||
|
end
|
||||||
@@ -2,6 +2,7 @@ package limit
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
_ "embed"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -9,6 +10,7 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/errorx"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/stores/redis"
|
"github.com/zeromicro/go-zero/core/stores/redis"
|
||||||
xrate "golang.org/x/time/rate"
|
xrate "golang.org/x/time/rate"
|
||||||
@@ -20,37 +22,11 @@ const (
|
|||||||
pingInterval = time.Millisecond * 100
|
pingInterval = time.Millisecond * 100
|
||||||
)
|
)
|
||||||
|
|
||||||
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
var (
|
||||||
// KEYS[1] as tokens_key
|
//go:embed tokenscript.lua
|
||||||
// KEYS[2] as timestamp_key
|
tokenLuaScript string
|
||||||
var script = redis.NewScript(`local rate = tonumber(ARGV[1])
|
tokenScript = redis.NewScript(tokenLuaScript)
|
||||||
local capacity = tonumber(ARGV[2])
|
)
|
||||||
local now = tonumber(ARGV[3])
|
|
||||||
local requested = tonumber(ARGV[4])
|
|
||||||
local fill_time = capacity/rate
|
|
||||||
local ttl = math.floor(fill_time*2)
|
|
||||||
local last_tokens = tonumber(redis.call("get", KEYS[1]))
|
|
||||||
if last_tokens == nil then
|
|
||||||
last_tokens = capacity
|
|
||||||
end
|
|
||||||
|
|
||||||
local last_refreshed = tonumber(redis.call("get", KEYS[2]))
|
|
||||||
if last_refreshed == nil then
|
|
||||||
last_refreshed = 0
|
|
||||||
end
|
|
||||||
|
|
||||||
local delta = math.max(0, now-last_refreshed)
|
|
||||||
local filled_tokens = math.min(capacity, last_tokens+(delta*rate))
|
|
||||||
local allowed = filled_tokens >= requested
|
|
||||||
local new_tokens = filled_tokens
|
|
||||||
if allowed then
|
|
||||||
new_tokens = filled_tokens - requested
|
|
||||||
end
|
|
||||||
|
|
||||||
redis.call("setex", KEYS[1], ttl, new_tokens)
|
|
||||||
redis.call("setex", KEYS[2], ttl, now)
|
|
||||||
|
|
||||||
return allowed`)
|
|
||||||
|
|
||||||
// A TokenLimiter controls how frequently events are allowed to happen with in one second.
|
// A TokenLimiter controls how frequently events are allowed to happen with in one second.
|
||||||
type TokenLimiter struct {
|
type TokenLimiter struct {
|
||||||
@@ -112,7 +88,7 @@ func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) boo
|
|||||||
}
|
}
|
||||||
|
|
||||||
resp, err := lim.store.ScriptRunCtx(ctx,
|
resp, err := lim.store.ScriptRunCtx(ctx,
|
||||||
script,
|
tokenScript,
|
||||||
[]string{
|
[]string{
|
||||||
lim.tokenKey,
|
lim.tokenKey,
|
||||||
lim.timestampKey,
|
lim.timestampKey,
|
||||||
@@ -125,10 +101,10 @@ func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) boo
|
|||||||
})
|
})
|
||||||
// redis allowed == false
|
// redis allowed == false
|
||||||
// Lua boolean false -> r Nil bulk reply
|
// Lua boolean false -> r Nil bulk reply
|
||||||
if err == redis.Nil {
|
if errors.Is(err, redis.Nil) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
|
if errorx.In(err, context.DeadlineExceeded, context.Canceled) {
|
||||||
logx.Errorf("fail to use rate limiter: %s", err)
|
logx.Errorf("fail to use rate limiter: %s", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
31
core/limit/tokenscript.lua
Normal file
31
core/limit/tokenscript.lua
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
-- to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
||||||
|
-- KEYS[1] as tokens_key
|
||||||
|
-- KEYS[2] as timestamp_key
|
||||||
|
local rate = tonumber(ARGV[1])
|
||||||
|
local capacity = tonumber(ARGV[2])
|
||||||
|
local now = tonumber(ARGV[3])
|
||||||
|
local requested = tonumber(ARGV[4])
|
||||||
|
local fill_time = capacity/rate
|
||||||
|
local ttl = math.floor(fill_time*2)
|
||||||
|
local last_tokens = tonumber(redis.call("get", KEYS[1]))
|
||||||
|
if last_tokens == nil then
|
||||||
|
last_tokens = capacity
|
||||||
|
end
|
||||||
|
|
||||||
|
local last_refreshed = tonumber(redis.call("get", KEYS[2]))
|
||||||
|
if last_refreshed == nil then
|
||||||
|
last_refreshed = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
local delta = math.max(0, now-last_refreshed)
|
||||||
|
local filled_tokens = math.min(capacity, last_tokens+(delta*rate))
|
||||||
|
local allowed = filled_tokens >= requested
|
||||||
|
local new_tokens = filled_tokens
|
||||||
|
if allowed then
|
||||||
|
new_tokens = filled_tokens - requested
|
||||||
|
end
|
||||||
|
|
||||||
|
redis.call("setex", KEYS[1], ttl, new_tokens)
|
||||||
|
redis.call("setex", KEYS[2], ttl, now)
|
||||||
|
|
||||||
|
return allowed
|
||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/collection"
|
"github.com/zeromicro/go-zero/core/collection"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
"github.com/zeromicro/go-zero/core/stat"
|
"github.com/zeromicro/go-zero/core/stat"
|
||||||
"github.com/zeromicro/go-zero/core/syncx"
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
@@ -21,8 +22,11 @@ const (
|
|||||||
defaultCpuThreshold = 900
|
defaultCpuThreshold = 900
|
||||||
defaultMinRt = float64(time.Second / time.Millisecond)
|
defaultMinRt = float64(time.Second / time.Millisecond)
|
||||||
// moving average hyperparameter beta for calculating requests on the fly
|
// moving average hyperparameter beta for calculating requests on the fly
|
||||||
flyingBeta = 0.9
|
flyingBeta = 0.9
|
||||||
coolOffDuration = time.Second
|
coolOffDuration = time.Second
|
||||||
|
cpuMax = 1000 // millicpu
|
||||||
|
millisecondsPerSecond = 1000
|
||||||
|
overloadFactorLowerBound = 0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -66,14 +70,14 @@ type (
|
|||||||
|
|
||||||
adaptiveShedder struct {
|
adaptiveShedder struct {
|
||||||
cpuThreshold int64
|
cpuThreshold int64
|
||||||
windows int64
|
windowScale float64
|
||||||
flying int64
|
flying int64
|
||||||
avgFlying float64
|
avgFlying float64
|
||||||
avgFlyingLock syncx.SpinLock
|
avgFlyingLock syncx.SpinLock
|
||||||
overloadTime *syncx.AtomicDuration
|
overloadTime *syncx.AtomicDuration
|
||||||
droppedRecently *syncx.AtomicBool
|
droppedRecently *syncx.AtomicBool
|
||||||
passCounter *collection.RollingWindow
|
passCounter *collection.RollingWindow[int64, *collection.Bucket[int64]]
|
||||||
rtCounter *collection.RollingWindow
|
rtCounter *collection.RollingWindow[int64, *collection.Bucket[int64]]
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -103,15 +107,16 @@ func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
|
|||||||
opt(&options)
|
opt(&options)
|
||||||
}
|
}
|
||||||
bucketDuration := options.window / time.Duration(options.buckets)
|
bucketDuration := options.window / time.Duration(options.buckets)
|
||||||
|
newBucket := func() *collection.Bucket[int64] {
|
||||||
|
return new(collection.Bucket[int64])
|
||||||
|
}
|
||||||
return &adaptiveShedder{
|
return &adaptiveShedder{
|
||||||
cpuThreshold: options.cpuThreshold,
|
cpuThreshold: options.cpuThreshold,
|
||||||
windows: int64(time.Second / bucketDuration),
|
windowScale: float64(time.Second) / float64(bucketDuration) / millisecondsPerSecond,
|
||||||
overloadTime: syncx.NewAtomicDuration(),
|
overloadTime: syncx.NewAtomicDuration(),
|
||||||
droppedRecently: syncx.NewAtomicBool(),
|
droppedRecently: syncx.NewAtomicBool(),
|
||||||
passCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
|
passCounter: collection.NewRollingWindow[int64, *collection.Bucket[int64]](newBucket, options.buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]()),
|
||||||
collection.IgnoreCurrentBucket()),
|
rtCounter: collection.NewRollingWindow[int64, *collection.Bucket[int64]](newBucket, options.buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]()),
|
||||||
rtCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
|
|
||||||
collection.IgnoreCurrentBucket()),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -134,10 +139,10 @@ func (as *adaptiveShedder) Allow() (Promise, error) {
|
|||||||
func (as *adaptiveShedder) addFlying(delta int64) {
|
func (as *adaptiveShedder) addFlying(delta int64) {
|
||||||
flying := atomic.AddInt64(&as.flying, delta)
|
flying := atomic.AddInt64(&as.flying, delta)
|
||||||
// update avgFlying when the request is finished.
|
// update avgFlying when the request is finished.
|
||||||
// this strategy makes avgFlying have a little bit lag against flying, and smoother.
|
// this strategy makes avgFlying have a little bit of lag against flying, and smoother.
|
||||||
// when the flying requests increase rapidly, avgFlying increase slower, accept more requests.
|
// when the flying requests increase rapidly, avgFlying increase slower, accept more requests.
|
||||||
// when the flying requests drop rapidly, avgFlying drop slower, accept less requests.
|
// when the flying requests drop rapidly, avgFlying drop slower, accept fewer requests.
|
||||||
// it makes the service to serve as more requests as possible.
|
// it makes the service to serve as many requests as possible.
|
||||||
if delta < 0 {
|
if delta < 0 {
|
||||||
as.avgFlyingLock.Lock()
|
as.avgFlyingLock.Lock()
|
||||||
as.avgFlying = as.avgFlying*flyingBeta + float64(flying)*(1-flyingBeta)
|
as.avgFlying = as.avgFlying*flyingBeta + float64(flying)*(1-flyingBeta)
|
||||||
@@ -149,39 +154,42 @@ func (as *adaptiveShedder) highThru() bool {
|
|||||||
as.avgFlyingLock.Lock()
|
as.avgFlyingLock.Lock()
|
||||||
avgFlying := as.avgFlying
|
avgFlying := as.avgFlying
|
||||||
as.avgFlyingLock.Unlock()
|
as.avgFlyingLock.Unlock()
|
||||||
maxFlight := as.maxFlight()
|
maxFlight := as.maxFlight() * as.overloadFactor()
|
||||||
return int64(avgFlying) > maxFlight && atomic.LoadInt64(&as.flying) > maxFlight
|
return avgFlying > maxFlight && float64(atomic.LoadInt64(&as.flying)) > maxFlight
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *adaptiveShedder) maxFlight() int64 {
|
func (as *adaptiveShedder) maxFlight() float64 {
|
||||||
// windows = buckets per second
|
// windows = buckets per second
|
||||||
// maxQPS = maxPASS * windows
|
// maxQPS = maxPASS * windows
|
||||||
// minRT = min average response time in milliseconds
|
// minRT = min average response time in milliseconds
|
||||||
// maxQPS * minRT / milliseconds_per_second
|
// allowedFlying = maxQPS * minRT / milliseconds_per_second
|
||||||
return int64(math.Max(1, float64(as.maxPass()*as.windows)*(as.minRt()/1e3)))
|
maxFlight := float64(as.maxPass()) * as.minRt() * as.windowScale
|
||||||
|
return mathx.AtLeast(maxFlight, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *adaptiveShedder) maxPass() int64 {
|
func (as *adaptiveShedder) maxPass() int64 {
|
||||||
var result float64 = 1
|
var result int64 = 1
|
||||||
|
|
||||||
as.passCounter.Reduce(func(b *collection.Bucket) {
|
as.passCounter.Reduce(func(b *collection.Bucket[int64]) {
|
||||||
if b.Sum > result {
|
if b.Sum > result {
|
||||||
result = b.Sum
|
result = b.Sum
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
return int64(result)
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *adaptiveShedder) minRt() float64 {
|
func (as *adaptiveShedder) minRt() float64 {
|
||||||
|
// if no requests in previous windows, return defaultMinRt,
|
||||||
|
// its a reasonable large value to avoid dropping requests.
|
||||||
result := defaultMinRt
|
result := defaultMinRt
|
||||||
|
|
||||||
as.rtCounter.Reduce(func(b *collection.Bucket) {
|
as.rtCounter.Reduce(func(b *collection.Bucket[int64]) {
|
||||||
if b.Count <= 0 {
|
if b.Count <= 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
avg := math.Round(b.Sum / float64(b.Count))
|
avg := math.Round(float64(b.Sum) / float64(b.Count))
|
||||||
if avg < result {
|
if avg < result {
|
||||||
result = avg
|
result = avg
|
||||||
}
|
}
|
||||||
@@ -190,6 +198,13 @@ func (as *adaptiveShedder) minRt() float64 {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (as *adaptiveShedder) overloadFactor() float64 {
|
||||||
|
// as.cpuThreshold must be less than cpuMax
|
||||||
|
factor := (cpuMax - float64(stat.CpuUsage())) / (cpuMax - float64(as.cpuThreshold))
|
||||||
|
// at least accept 10% of acceptable requests, even cpu is highly overloaded.
|
||||||
|
return mathx.Between(factor, overloadFactorLowerBound, 1)
|
||||||
|
}
|
||||||
|
|
||||||
func (as *adaptiveShedder) shouldDrop() bool {
|
func (as *adaptiveShedder) shouldDrop() bool {
|
||||||
if as.systemOverloaded() || as.stillHot() {
|
if as.systemOverloaded() || as.stillHot() {
|
||||||
if as.highThru() {
|
if as.highThru() {
|
||||||
@@ -236,14 +251,14 @@ func (as *adaptiveShedder) systemOverloaded() bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithBuckets customizes the Shedder with given number of buckets.
|
// WithBuckets customizes the Shedder with the given number of buckets.
|
||||||
func WithBuckets(buckets int) ShedderOption {
|
func WithBuckets(buckets int) ShedderOption {
|
||||||
return func(opts *shedderOptions) {
|
return func(opts *shedderOptions) {
|
||||||
opts.buckets = buckets
|
opts.buckets = buckets
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithCpuThreshold customizes the Shedder with given cpu threshold.
|
// WithCpuThreshold customizes the Shedder with the given cpu threshold.
|
||||||
func WithCpuThreshold(threshold int64) ShedderOption {
|
func WithCpuThreshold(threshold int64) ShedderOption {
|
||||||
return func(opts *shedderOptions) {
|
return func(opts *shedderOptions) {
|
||||||
opts.cpuThreshold = threshold
|
opts.cpuThreshold = threshold
|
||||||
@@ -269,6 +284,6 @@ func (p *promise) Fail() {
|
|||||||
func (p *promise) Pass() {
|
func (p *promise) Pass() {
|
||||||
rt := float64(timex.Since(p.start)) / float64(time.Millisecond)
|
rt := float64(timex.Since(p.start)) / float64(time.Millisecond)
|
||||||
p.shedder.addFlying(-1)
|
p.shedder.addFlying(-1)
|
||||||
p.shedder.rtCounter.Add(math.Ceil(rt))
|
p.shedder.rtCounter.Add(int64(math.Ceil(rt)))
|
||||||
p.shedder.passCounter.Add(1)
|
p.shedder.passCounter.Add(1)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ import (
|
|||||||
const (
|
const (
|
||||||
buckets = 10
|
buckets = 10
|
||||||
bucketDuration = time.Millisecond * 50
|
bucketDuration = time.Millisecond * 50
|
||||||
|
windowFactor = 0.01
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -57,7 +58,7 @@ func TestAdaptiveShedder(t *testing.T) {
|
|||||||
func TestAdaptiveShedderMaxPass(t *testing.T) {
|
func TestAdaptiveShedderMaxPass(t *testing.T) {
|
||||||
passCounter := newRollingWindow()
|
passCounter := newRollingWindow()
|
||||||
for i := 1; i <= 10; i++ {
|
for i := 1; i <= 10; i++ {
|
||||||
passCounter.Add(float64(i * 100))
|
passCounter.Add(int64(i * 100))
|
||||||
time.Sleep(bucketDuration)
|
time.Sleep(bucketDuration)
|
||||||
}
|
}
|
||||||
shedder := &adaptiveShedder{
|
shedder := &adaptiveShedder{
|
||||||
@@ -82,7 +83,7 @@ func TestAdaptiveShedderMinRt(t *testing.T) {
|
|||||||
time.Sleep(bucketDuration)
|
time.Sleep(bucketDuration)
|
||||||
}
|
}
|
||||||
for j := i*10 + 1; j <= i*10+10; j++ {
|
for j := i*10 + 1; j <= i*10+10; j++ {
|
||||||
rtCounter.Add(float64(j))
|
rtCounter.Add(int64(j))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
shedder := &adaptiveShedder{
|
shedder := &adaptiveShedder{
|
||||||
@@ -106,18 +107,18 @@ func TestAdaptiveShedderMaxFlight(t *testing.T) {
|
|||||||
if i > 0 {
|
if i > 0 {
|
||||||
time.Sleep(bucketDuration)
|
time.Sleep(bucketDuration)
|
||||||
}
|
}
|
||||||
passCounter.Add(float64((i + 1) * 100))
|
passCounter.Add(int64((i + 1) * 100))
|
||||||
for j := i*10 + 1; j <= i*10+10; j++ {
|
for j := i*10 + 1; j <= i*10+10; j++ {
|
||||||
rtCounter.Add(float64(j))
|
rtCounter.Add(int64(j))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
shedder := &adaptiveShedder{
|
shedder := &adaptiveShedder{
|
||||||
passCounter: passCounter,
|
passCounter: passCounter,
|
||||||
rtCounter: rtCounter,
|
rtCounter: rtCounter,
|
||||||
windows: buckets,
|
windowScale: windowFactor,
|
||||||
droppedRecently: syncx.NewAtomicBool(),
|
droppedRecently: syncx.NewAtomicBool(),
|
||||||
}
|
}
|
||||||
assert.Equal(t, int64(54), shedder.maxFlight())
|
assert.Equal(t, float64(54), shedder.maxFlight())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAdaptiveShedderShouldDrop(t *testing.T) {
|
func TestAdaptiveShedderShouldDrop(t *testing.T) {
|
||||||
@@ -128,15 +129,15 @@ func TestAdaptiveShedderShouldDrop(t *testing.T) {
|
|||||||
if i > 0 {
|
if i > 0 {
|
||||||
time.Sleep(bucketDuration)
|
time.Sleep(bucketDuration)
|
||||||
}
|
}
|
||||||
passCounter.Add(float64((i + 1) * 100))
|
passCounter.Add(int64((i + 1) * 100))
|
||||||
for j := i*10 + 1; j <= i*10+10; j++ {
|
for j := i*10 + 1; j <= i*10+10; j++ {
|
||||||
rtCounter.Add(float64(j))
|
rtCounter.Add(int64(j))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
shedder := &adaptiveShedder{
|
shedder := &adaptiveShedder{
|
||||||
passCounter: passCounter,
|
passCounter: passCounter,
|
||||||
rtCounter: rtCounter,
|
rtCounter: rtCounter,
|
||||||
windows: buckets,
|
windowScale: windowFactor,
|
||||||
overloadTime: syncx.NewAtomicDuration(),
|
overloadTime: syncx.NewAtomicDuration(),
|
||||||
droppedRecently: syncx.NewAtomicBool(),
|
droppedRecently: syncx.NewAtomicBool(),
|
||||||
}
|
}
|
||||||
@@ -149,7 +150,8 @@ func TestAdaptiveShedderShouldDrop(t *testing.T) {
|
|||||||
|
|
||||||
// cpu >= 800, inflight > maxPass
|
// cpu >= 800, inflight > maxPass
|
||||||
shedder.avgFlying = 80
|
shedder.avgFlying = 80
|
||||||
shedder.flying = 50
|
// because of the overloadFactor, so we need to make sure maxFlight is greater than flying
|
||||||
|
shedder.flying = int64(shedder.maxFlight()*shedder.overloadFactor()) - 5
|
||||||
assert.False(t, shedder.shouldDrop())
|
assert.False(t, shedder.shouldDrop())
|
||||||
|
|
||||||
// cpu >= 800, inflight > maxPass
|
// cpu >= 800, inflight > maxPass
|
||||||
@@ -182,15 +184,15 @@ func TestAdaptiveShedderStillHot(t *testing.T) {
|
|||||||
if i > 0 {
|
if i > 0 {
|
||||||
time.Sleep(bucketDuration)
|
time.Sleep(bucketDuration)
|
||||||
}
|
}
|
||||||
passCounter.Add(float64((i + 1) * 100))
|
passCounter.Add(int64((i + 1) * 100))
|
||||||
for j := i*10 + 1; j <= i*10+10; j++ {
|
for j := i*10 + 1; j <= i*10+10; j++ {
|
||||||
rtCounter.Add(float64(j))
|
rtCounter.Add(int64(j))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
shedder := &adaptiveShedder{
|
shedder := &adaptiveShedder{
|
||||||
passCounter: passCounter,
|
passCounter: passCounter,
|
||||||
rtCounter: rtCounter,
|
rtCounter: rtCounter,
|
||||||
windows: buckets,
|
windowScale: windowFactor,
|
||||||
overloadTime: syncx.NewAtomicDuration(),
|
overloadTime: syncx.NewAtomicDuration(),
|
||||||
droppedRecently: syncx.ForAtomicBool(true),
|
droppedRecently: syncx.ForAtomicBool(true),
|
||||||
}
|
}
|
||||||
@@ -239,6 +241,32 @@ func BenchmarkAdaptiveShedder_Allow(b *testing.B) {
|
|||||||
b.Run("low load", bench)
|
b.Run("low load", bench)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newRollingWindow() *collection.RollingWindow {
|
func BenchmarkMaxFlight(b *testing.B) {
|
||||||
return collection.NewRollingWindow(buckets, bucketDuration, collection.IgnoreCurrentBucket())
|
passCounter := newRollingWindow()
|
||||||
|
rtCounter := newRollingWindow()
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
time.Sleep(bucketDuration)
|
||||||
|
}
|
||||||
|
passCounter.Add(int64((i + 1) * 100))
|
||||||
|
for j := i*10 + 1; j <= i*10+10; j++ {
|
||||||
|
rtCounter.Add(int64(j))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
shedder := &adaptiveShedder{
|
||||||
|
passCounter: passCounter,
|
||||||
|
rtCounter: rtCounter,
|
||||||
|
windowScale: windowFactor,
|
||||||
|
droppedRecently: syncx.NewAtomicBool(),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
_ = shedder.maxFlight()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRollingWindow() *collection.RollingWindow[int64, *collection.Bucket[int64]] {
|
||||||
|
return collection.NewRollingWindow[int64, *collection.Bucket[int64]](func() *collection.Bucket[int64] {
|
||||||
|
return new(collection.Bucket[int64])
|
||||||
|
}, buckets, bucketDuration, collection.IgnoreCurrentBucket[int64, *collection.Bucket[int64]]())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/syncx"
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
)
|
)
|
||||||
|
|
||||||
// A ShedderGroup is a manager to manage key based shedders.
|
// A ShedderGroup is a manager to manage key-based shedders.
|
||||||
type ShedderGroup struct {
|
type ShedderGroup struct {
|
||||||
options []ShedderOption
|
options []ShedderOption
|
||||||
manager *syncx.ResourceManager
|
manager *syncx.ResourceManager
|
||||||
|
|||||||
@@ -37,12 +37,19 @@ func Debugf(ctx context.Context, format string, v ...interface{}) {
|
|||||||
getLogger(ctx).Debugf(format, v...)
|
getLogger(ctx).Debugf(format, v...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Debugfn writes fn result into access log.
|
||||||
|
// This is useful when the function is expensive to compute,
|
||||||
|
// and we want to log it only when necessary.
|
||||||
|
func Debugfn(ctx context.Context, fn func() any) {
|
||||||
|
getLogger(ctx).Debugfn(fn)
|
||||||
|
}
|
||||||
|
|
||||||
// Debugv writes v into access log with json content.
|
// Debugv writes v into access log with json content.
|
||||||
func Debugv(ctx context.Context, v interface{}) {
|
func Debugv(ctx context.Context, v interface{}) {
|
||||||
getLogger(ctx).Debugv(v)
|
getLogger(ctx).Debugv(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Debugw writes msg along with fields into access log.
|
// Debugw writes msg along with fields into the access log.
|
||||||
func Debugw(ctx context.Context, msg string, fields ...LogField) {
|
func Debugw(ctx context.Context, msg string, fields ...LogField) {
|
||||||
getLogger(ctx).Debugw(msg, fields...)
|
getLogger(ctx).Debugw(msg, fields...)
|
||||||
}
|
}
|
||||||
@@ -57,13 +64,20 @@ func Errorf(ctx context.Context, format string, v ...any) {
|
|||||||
getLogger(ctx).Errorf(fmt.Errorf(format, v...).Error())
|
getLogger(ctx).Errorf(fmt.Errorf(format, v...).Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Errorfn writes fn result into error log.
|
||||||
|
// This is useful when the function is expensive to compute,
|
||||||
|
// and we want to log it only when necessary.
|
||||||
|
func Errorfn(ctx context.Context, fn func() any) {
|
||||||
|
getLogger(ctx).Errorfn(fn)
|
||||||
|
}
|
||||||
|
|
||||||
// Errorv writes v into error log with json content.
|
// Errorv writes v into error log with json content.
|
||||||
// No call stack attached, because not elegant to pack the messages.
|
// No call stack attached, because not elegant to pack the messages.
|
||||||
func Errorv(ctx context.Context, v any) {
|
func Errorv(ctx context.Context, v any) {
|
||||||
getLogger(ctx).Errorv(v)
|
getLogger(ctx).Errorv(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errorw writes msg along with fields into error log.
|
// Errorw writes msg along with fields into the error log.
|
||||||
func Errorw(ctx context.Context, msg string, fields ...LogField) {
|
func Errorw(ctx context.Context, msg string, fields ...LogField) {
|
||||||
getLogger(ctx).Errorw(msg, fields...)
|
getLogger(ctx).Errorw(msg, fields...)
|
||||||
}
|
}
|
||||||
@@ -83,12 +97,19 @@ func Infof(ctx context.Context, format string, v ...any) {
|
|||||||
getLogger(ctx).Infof(format, v...)
|
getLogger(ctx).Infof(format, v...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Infofn writes fn result into access log.
|
||||||
|
// This is useful when the function is expensive to compute,
|
||||||
|
// and we want to log it only when necessary.
|
||||||
|
func Infofn(ctx context.Context, fn func() any) {
|
||||||
|
getLogger(ctx).Infofn(fn)
|
||||||
|
}
|
||||||
|
|
||||||
// Infov writes v into access log with json content.
|
// Infov writes v into access log with json content.
|
||||||
func Infov(ctx context.Context, v any) {
|
func Infov(ctx context.Context, v any) {
|
||||||
getLogger(ctx).Infov(v)
|
getLogger(ctx).Infov(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Infow writes msg along with fields into access log.
|
// Infow writes msg along with fields into the access log.
|
||||||
func Infow(ctx context.Context, msg string, fields ...LogField) {
|
func Infow(ctx context.Context, msg string, fields ...LogField) {
|
||||||
getLogger(ctx).Infow(msg, fields...)
|
getLogger(ctx).Infow(msg, fields...)
|
||||||
}
|
}
|
||||||
@@ -108,10 +129,11 @@ func SetLevel(level uint32) {
|
|||||||
logx.SetLevel(level)
|
logx.SetLevel(level)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetUp sets up the logx. If already set up, just return nil.
|
// SetUp sets up the logx.
|
||||||
// we allow SetUp to be called multiple times, because for example
|
// If already set up, return nil.
|
||||||
|
// We allow SetUp to be called multiple times, because, for example,
|
||||||
// we need to allow different service frameworks to initialize logx respectively.
|
// we need to allow different service frameworks to initialize logx respectively.
|
||||||
// the same logic for SetUp
|
// The same logic for SetUp
|
||||||
func SetUp(c LogConf) error {
|
func SetUp(c LogConf) error {
|
||||||
return logx.SetUp(c)
|
return logx.SetUp(c)
|
||||||
}
|
}
|
||||||
@@ -126,6 +148,13 @@ func Slowf(ctx context.Context, format string, v ...any) {
|
|||||||
getLogger(ctx).Slowf(format, v...)
|
getLogger(ctx).Slowf(format, v...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Slowfn writes fn result into slow log.
|
||||||
|
// This is useful when the function is expensive to compute,
|
||||||
|
// and we want to log it only when necessary.
|
||||||
|
func Slowfn(ctx context.Context, fn func() any) {
|
||||||
|
getLogger(ctx).Slowfn(fn)
|
||||||
|
}
|
||||||
|
|
||||||
// Slowv writes v into slow log with json content.
|
// Slowv writes v into slow log with json content.
|
||||||
func Slowv(ctx context.Context, v any) {
|
func Slowv(ctx context.Context, v any) {
|
||||||
getLogger(ctx).Slowv(v)
|
getLogger(ctx).Slowv(v)
|
||||||
|
|||||||
@@ -49,6 +49,15 @@ func TestErrorf(t *testing.T) {
|
|||||||
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestErrorfn(t *testing.T) {
|
||||||
|
buf := logtest.NewCollector(t)
|
||||||
|
file, line := getFileLine()
|
||||||
|
Errorfn(context.Background(), func() any {
|
||||||
|
return fmt.Sprintf("foo %s", "bar")
|
||||||
|
})
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
func TestErrorv(t *testing.T) {
|
func TestErrorv(t *testing.T) {
|
||||||
buf := logtest.NewCollector(t)
|
buf := logtest.NewCollector(t)
|
||||||
file, line := getFileLine()
|
file, line := getFileLine()
|
||||||
@@ -77,6 +86,15 @@ func TestInfof(t *testing.T) {
|
|||||||
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestInfofn(t *testing.T) {
|
||||||
|
buf := logtest.NewCollector(t)
|
||||||
|
file, line := getFileLine()
|
||||||
|
Infofn(context.Background(), func() any {
|
||||||
|
return fmt.Sprintf("foo %s", "bar")
|
||||||
|
})
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
func TestInfov(t *testing.T) {
|
func TestInfov(t *testing.T) {
|
||||||
buf := logtest.NewCollector(t)
|
buf := logtest.NewCollector(t)
|
||||||
file, line := getFileLine()
|
file, line := getFileLine()
|
||||||
@@ -105,6 +123,15 @@ func TestDebugf(t *testing.T) {
|
|||||||
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDebugfn(t *testing.T) {
|
||||||
|
buf := logtest.NewCollector(t)
|
||||||
|
file, line := getFileLine()
|
||||||
|
Debugfn(context.Background(), func() any {
|
||||||
|
return fmt.Sprintf("foo %s", "bar")
|
||||||
|
})
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
func TestDebugv(t *testing.T) {
|
func TestDebugv(t *testing.T) {
|
||||||
buf := logtest.NewCollector(t)
|
buf := logtest.NewCollector(t)
|
||||||
file, line := getFileLine()
|
file, line := getFileLine()
|
||||||
@@ -148,6 +175,15 @@ func TestSlowf(t *testing.T) {
|
|||||||
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSlowfn(t *testing.T) {
|
||||||
|
buf := logtest.NewCollector(t)
|
||||||
|
file, line := getFileLine()
|
||||||
|
Slowfn(context.Background(), func() any {
|
||||||
|
return fmt.Sprintf("foo %s", "bar")
|
||||||
|
})
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
func TestSlowv(t *testing.T) {
|
func TestSlowv(t *testing.T) {
|
||||||
buf := logtest.NewCollector(t)
|
buf := logtest.NewCollector(t)
|
||||||
file, line := getFileLine()
|
file, line := getFileLine()
|
||||||
|
|||||||
@@ -1,45 +1,70 @@
|
|||||||
package logx
|
package logx
|
||||||
|
|
||||||
// A LogConf is a logging config.
|
type (
|
||||||
type LogConf struct {
|
// A LogConf is a logging config.
|
||||||
// ServiceName represents the service name.
|
LogConf struct {
|
||||||
ServiceName string `json:",optional"`
|
// ServiceName represents the service name.
|
||||||
// Mode represents the logging mode, default is `console`.
|
ServiceName string `json:",optional"`
|
||||||
// console: log to console.
|
// Mode represents the logging mode, default is `console`.
|
||||||
// file: log to file.
|
// console: log to console.
|
||||||
// volume: used in k8s, prepend the hostname to the log file name.
|
// file: log to file.
|
||||||
Mode string `json:",default=console,options=[console,file,volume]"`
|
// volume: used in k8s, prepend the hostname to the log file name.
|
||||||
// Encoding represents the encoding type, default is `json`.
|
Mode string `json:",default=console,options=[console,file,volume]"`
|
||||||
// json: json encoding.
|
// Encoding represents the encoding type, default is `json`.
|
||||||
// plain: plain text encoding, typically used in development.
|
// json: json encoding.
|
||||||
Encoding string `json:",default=json,options=[json,plain]"`
|
// plain: plain text encoding, typically used in development.
|
||||||
// TimeFormat represents the time format, default is `2006-01-02T15:04:05.000Z07:00`.
|
Encoding string `json:",default=json,options=[json,plain]"`
|
||||||
TimeFormat string `json:",optional"`
|
// TimeFormat represents the time format, default is `2006-01-02T15:04:05.000Z07:00`.
|
||||||
// Path represents the log file path, default is `logs`.
|
TimeFormat string `json:",optional"`
|
||||||
Path string `json:",default=logs"`
|
// Path represents the log file path, default is `logs`.
|
||||||
// Level represents the log level, default is `info`.
|
Path string `json:",default=logs"`
|
||||||
Level string `json:",default=info,options=[debug,info,error,severe]"`
|
// Level represents the log level, default is `info`.
|
||||||
// MaxContentLength represents the max content bytes, default is no limit.
|
Level string `json:",default=info,options=[debug,info,error,severe]"`
|
||||||
MaxContentLength uint32 `json:",optional"`
|
// MaxContentLength represents the max content bytes, default is no limit.
|
||||||
// Compress represents whether to compress the log file, default is `false`.
|
MaxContentLength uint32 `json:",optional"`
|
||||||
Compress bool `json:",optional"`
|
// Compress represents whether to compress the log file, default is `false`.
|
||||||
// Stat represents whether to log statistics, default is `true`.
|
Compress bool `json:",optional"`
|
||||||
Stat bool `json:",default=true"`
|
// Stat represents whether to log statistics, default is `true`.
|
||||||
// KeepDays represents how many days the log files will be kept. Default to keep all files.
|
Stat bool `json:",default=true"`
|
||||||
// Only take effect when Mode is `file` or `volume`, both work when Rotation is `daily` or `size`.
|
// KeepDays represents how many days the log files will be kept. Default to keep all files.
|
||||||
KeepDays int `json:",optional"`
|
// Only take effect when Mode is `file` or `volume`, both work when Rotation is `daily` or `size`.
|
||||||
// StackCooldownMillis represents the cooldown time for stack logging, default is 100ms.
|
KeepDays int `json:",optional"`
|
||||||
StackCooldownMillis int `json:",default=100"`
|
// StackCooldownMillis represents the cooldown time for stack logging, default is 100ms.
|
||||||
// MaxBackups represents how many backup log files will be kept. 0 means all files will be kept forever.
|
StackCooldownMillis int `json:",default=100"`
|
||||||
// Only take effect when RotationRuleType is `size`.
|
// MaxBackups represents how many backup log files will be kept. 0 means all files will be kept forever.
|
||||||
// Even though `MaxBackups` sets 0, log files will still be removed
|
// Only take effect when RotationRuleType is `size`.
|
||||||
// if the `KeepDays` limitation is reached.
|
// Even though `MaxBackups` sets 0, log files will still be removed
|
||||||
MaxBackups int `json:",default=0"`
|
// if the `KeepDays` limitation is reached.
|
||||||
// MaxSize represents how much space the writing log file takes up. 0 means no limit. The unit is `MB`.
|
MaxBackups int `json:",default=0"`
|
||||||
// Only take effect when RotationRuleType is `size`
|
// MaxSize represents how much space the writing log file takes up. 0 means no limit. The unit is `MB`.
|
||||||
MaxSize int `json:",default=0"`
|
// Only take effect when RotationRuleType is `size`
|
||||||
// Rotation represents the type of log rotation rule. Default is `daily`.
|
MaxSize int `json:",default=0"`
|
||||||
// daily: daily rotation.
|
// Rotation represents the type of log rotation rule. Default is `daily`.
|
||||||
// size: size limited rotation.
|
// daily: daily rotation.
|
||||||
Rotation string `json:",default=daily,options=[daily,size]"`
|
// size: size limited rotation.
|
||||||
}
|
Rotation string `json:",default=daily,options=[daily,size]"`
|
||||||
|
// FileTimeFormat represents the time format for file name, default is `2006-01-02T15:04:05.000Z07:00`.
|
||||||
|
FileTimeFormat string `json:",optional"`
|
||||||
|
// FieldKeys represents the field keys.
|
||||||
|
FieldKeys fieldKeyConf `json:",optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldKeyConf struct {
|
||||||
|
// CallerKey represents the caller key.
|
||||||
|
CallerKey string `json:",default=caller"`
|
||||||
|
// ContentKey represents the content key.
|
||||||
|
ContentKey string `json:",default=content"`
|
||||||
|
// DurationKey represents the duration key.
|
||||||
|
DurationKey string `json:",default=duration"`
|
||||||
|
// LevelKey represents the level key.
|
||||||
|
LevelKey string `json:",default=level"`
|
||||||
|
// SpanKey represents the span key.
|
||||||
|
SpanKey string `json:",default=span"`
|
||||||
|
// TimestampKey represents the timestamp key.
|
||||||
|
TimestampKey string `json:",default=@timestamp"`
|
||||||
|
// TraceKey represents the trace key.
|
||||||
|
TraceKey string `json:",default=trace"`
|
||||||
|
// TruncatedKey represents the truncated key.
|
||||||
|
TruncatedKey string `json:",default=truncated"`
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|||||||
@@ -7,12 +7,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
fieldsContextKey contextKey
|
|
||||||
globalFields atomic.Value
|
globalFields atomic.Value
|
||||||
globalFieldsLock sync.Mutex
|
globalFieldsLock sync.Mutex
|
||||||
)
|
)
|
||||||
|
|
||||||
type contextKey struct{}
|
type fieldsKey struct{}
|
||||||
|
|
||||||
// AddGlobalFields adds global fields.
|
// AddGlobalFields adds global fields.
|
||||||
func AddGlobalFields(fields ...LogField) {
|
func AddGlobalFields(fields ...LogField) {
|
||||||
@@ -29,16 +28,16 @@ func AddGlobalFields(fields ...LogField) {
|
|||||||
|
|
||||||
// ContextWithFields returns a new context with the given fields.
|
// ContextWithFields returns a new context with the given fields.
|
||||||
func ContextWithFields(ctx context.Context, fields ...LogField) context.Context {
|
func ContextWithFields(ctx context.Context, fields ...LogField) context.Context {
|
||||||
if val := ctx.Value(fieldsContextKey); val != nil {
|
if val := ctx.Value(fieldsKey{}); val != nil {
|
||||||
if arr, ok := val.([]LogField); ok {
|
if arr, ok := val.([]LogField); ok {
|
||||||
allFields := make([]LogField, 0, len(arr)+len(fields))
|
allFields := make([]LogField, 0, len(arr)+len(fields))
|
||||||
allFields = append(allFields, arr...)
|
allFields = append(allFields, arr...)
|
||||||
allFields = append(allFields, fields...)
|
allFields = append(allFields, fields...)
|
||||||
return context.WithValue(ctx, fieldsContextKey, allFields)
|
return context.WithValue(ctx, fieldsKey{}, allFields)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return context.WithValue(ctx, fieldsContextKey, fields)
|
return context.WithValue(ctx, fieldsKey{}, fields)
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithFields returns a new logger with the given fields.
|
// WithFields returns a new logger with the given fields.
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ func TestAddGlobalFields(t *testing.T) {
|
|||||||
|
|
||||||
func TestContextWithFields(t *testing.T) {
|
func TestContextWithFields(t *testing.T) {
|
||||||
ctx := ContextWithFields(context.Background(), Field("a", 1), Field("b", 2))
|
ctx := ContextWithFields(context.Background(), Field("a", 1), Field("b", 2))
|
||||||
vals := ctx.Value(fieldsContextKey)
|
vals := ctx.Value(fieldsKey{})
|
||||||
assert.NotNil(t, vals)
|
assert.NotNil(t, vals)
|
||||||
fields, ok := vals.([]LogField)
|
fields, ok := vals.([]LogField)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
@@ -43,7 +43,7 @@ func TestContextWithFields(t *testing.T) {
|
|||||||
|
|
||||||
func TestWithFields(t *testing.T) {
|
func TestWithFields(t *testing.T) {
|
||||||
ctx := WithFields(context.Background(), Field("a", 1), Field("b", 2))
|
ctx := WithFields(context.Background(), Field("a", 1), Field("b", 2))
|
||||||
vals := ctx.Value(fieldsContextKey)
|
vals := ctx.Value(fieldsKey{})
|
||||||
assert.NotNil(t, vals)
|
assert.NotNil(t, vals)
|
||||||
fields, ok := vals.([]LogField)
|
fields, ok := vals.([]LogField)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
@@ -55,7 +55,7 @@ func TestWithFieldsAppend(t *testing.T) {
|
|||||||
ctx := context.WithValue(context.Background(), dummyKey, "dummy")
|
ctx := context.WithValue(context.Background(), dummyKey, "dummy")
|
||||||
ctx = ContextWithFields(ctx, Field("a", 1), Field("b", 2))
|
ctx = ContextWithFields(ctx, Field("a", 1), Field("b", 2))
|
||||||
ctx = ContextWithFields(ctx, Field("c", 3), Field("d", 4))
|
ctx = ContextWithFields(ctx, Field("c", 3), Field("d", 4))
|
||||||
vals := ctx.Value(fieldsContextKey)
|
vals := ctx.Value(fieldsKey{})
|
||||||
assert.NotNil(t, vals)
|
assert.NotNil(t, vals)
|
||||||
fields, ok := vals.([]LogField)
|
fields, ok := vals.([]LogField)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
@@ -80,8 +80,8 @@ func TestWithFieldsAppendCopy(t *testing.T) {
|
|||||||
ctxa := ContextWithFields(ctx, af)
|
ctxa := ContextWithFields(ctx, af)
|
||||||
ctxb := ContextWithFields(ctx, bf)
|
ctxb := ContextWithFields(ctx, bf)
|
||||||
|
|
||||||
assert.EqualValues(t, af, ctxa.Value(fieldsContextKey).([]LogField)[count])
|
assert.EqualValues(t, af, ctxa.Value(fieldsKey{}).([]LogField)[count])
|
||||||
assert.EqualValues(t, bf, ctxb.Value(fieldsContextKey).([]LogField)[count])
|
assert.EqualValues(t, bf, ctxb.Value(fieldsKey{}).([]LogField)[count])
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkAtomicValue(b *testing.B) {
|
func BenchmarkAtomicValue(b *testing.B) {
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
package logx
|
package logx
|
||||||
|
|
||||||
// A LessLogger is a logger that control to log once during the given duration.
|
// A LessLogger is a logger that controls to log once during the given duration.
|
||||||
type LessLogger struct {
|
type LessLogger struct {
|
||||||
*limitedExecutor
|
*limitedExecutor
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,18 +7,22 @@ import (
|
|||||||
|
|
||||||
// A Logger represents a logger.
|
// A Logger represents a logger.
|
||||||
type Logger interface {
|
type Logger interface {
|
||||||
// Debug logs a message at info level.
|
// Debug logs a message at debug level.
|
||||||
Debug(...any)
|
Debug(...any)
|
||||||
// Debugf logs a message at info level.
|
// Debugf logs a message at debug level.
|
||||||
Debugf(string, ...any)
|
Debugf(string, ...any)
|
||||||
// Debugv logs a message at info level.
|
// Debugfn logs a message at debug level.
|
||||||
|
Debugfn(func() any)
|
||||||
|
// Debugv logs a message at debug level.
|
||||||
Debugv(any)
|
Debugv(any)
|
||||||
// Debugw logs a message at info level.
|
// Debugw logs a message at debug level.
|
||||||
Debugw(string, ...LogField)
|
Debugw(string, ...LogField)
|
||||||
// Error logs a message at error level.
|
// Error logs a message at error level.
|
||||||
Error(...any)
|
Error(...any)
|
||||||
// Errorf logs a message at error level.
|
// Errorf logs a message at error level.
|
||||||
Errorf(string, ...any)
|
Errorf(string, ...any)
|
||||||
|
// Errorfn logs a message at error level.
|
||||||
|
Errorfn(func() any)
|
||||||
// Errorv logs a message at error level.
|
// Errorv logs a message at error level.
|
||||||
Errorv(any)
|
Errorv(any)
|
||||||
// Errorw logs a message at error level.
|
// Errorw logs a message at error level.
|
||||||
@@ -27,6 +31,8 @@ type Logger interface {
|
|||||||
Info(...any)
|
Info(...any)
|
||||||
// Infof logs a message at info level.
|
// Infof logs a message at info level.
|
||||||
Infof(string, ...any)
|
Infof(string, ...any)
|
||||||
|
// Infofn logs a message at info level.
|
||||||
|
Infofn(func() any)
|
||||||
// Infov logs a message at info level.
|
// Infov logs a message at info level.
|
||||||
Infov(any)
|
Infov(any)
|
||||||
// Infow logs a message at info level.
|
// Infow logs a message at info level.
|
||||||
@@ -35,6 +41,8 @@ type Logger interface {
|
|||||||
Slow(...any)
|
Slow(...any)
|
||||||
// Slowf logs a message at slow level.
|
// Slowf logs a message at slow level.
|
||||||
Slowf(string, ...any)
|
Slowf(string, ...any)
|
||||||
|
// Slowfn logs a message at slow level.
|
||||||
|
Slowfn(func() any)
|
||||||
// Slowv logs a message at slow level.
|
// Slowv logs a message at slow level.
|
||||||
Slowv(any)
|
Slowv(any)
|
||||||
// Sloww logs a message at slow level.
|
// Sloww logs a message at slow level.
|
||||||
|
|||||||
@@ -6,10 +6,10 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"reflect"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/sysx"
|
"github.com/zeromicro/go-zero/core/sysx"
|
||||||
)
|
)
|
||||||
@@ -17,14 +17,13 @@ import (
|
|||||||
const callerDepth = 4
|
const callerDepth = 4
|
||||||
|
|
||||||
var (
|
var (
|
||||||
timeFormat = "2006-01-02T15:04:05.000Z07:00"
|
timeFormat = "2006-01-02T15:04:05.000Z07:00"
|
||||||
logLevel uint32
|
|
||||||
encoding uint32 = jsonEncodingType
|
encoding uint32 = jsonEncodingType
|
||||||
// maxContentLength is used to truncate the log content, 0 for not truncating.
|
// maxContentLength is used to truncate the log content, 0 for not truncating.
|
||||||
maxContentLength uint32
|
maxContentLength uint32
|
||||||
// use uint32 for atomic operations
|
// use uint32 for atomic operations
|
||||||
disableLog uint32
|
|
||||||
disableStat uint32
|
disableStat uint32
|
||||||
|
logLevel uint32
|
||||||
options logOptions
|
options logOptions
|
||||||
writer = new(atomicWriter)
|
writer = new(atomicWriter)
|
||||||
setupOnce sync.Once
|
setupOnce sync.Once
|
||||||
@@ -52,6 +51,26 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// AddWriter adds a new writer.
|
||||||
|
// If there is already a writer, the new writer will be added to the writer chain.
|
||||||
|
// For example, to write logs to both file and console, if there is already a file writer,
|
||||||
|
// ```go
|
||||||
|
// logx.AddWriter(logx.NewWriter(os.Stdout))
|
||||||
|
// ```
|
||||||
|
func AddWriter(w Writer) {
|
||||||
|
ow := Reset()
|
||||||
|
if ow == nil {
|
||||||
|
SetWriter(w)
|
||||||
|
} else {
|
||||||
|
// no need to check if the existing writer is a comboWriter,
|
||||||
|
// because it is not common to add more than one writer.
|
||||||
|
// even more than one writer, the behavior is the same.
|
||||||
|
SetWriter(comboWriter{
|
||||||
|
writers: []Writer{ow, w},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Alert alerts v in alert level, and the message is written to error log.
|
// Alert alerts v in alert level, and the message is written to error log.
|
||||||
func Alert(v string) {
|
func Alert(v string) {
|
||||||
getWriter().Alert(v)
|
getWriter().Alert(v)
|
||||||
@@ -80,6 +99,14 @@ func Debugf(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Debugfn writes function result into access log if debug level enabled.
|
||||||
|
// This is useful when the function is expensive to call and debug level disabled.
|
||||||
|
func Debugfn(fn func() any) {
|
||||||
|
if shallLog(DebugLevel) {
|
||||||
|
writeDebug(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Debugv writes v into access log with json content.
|
// Debugv writes v into access log with json content.
|
||||||
func Debugv(v any) {
|
func Debugv(v any) {
|
||||||
if shallLog(DebugLevel) {
|
if shallLog(DebugLevel) {
|
||||||
@@ -87,7 +114,7 @@ func Debugv(v any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Debugw writes msg along with fields into access log.
|
// Debugw writes msg along with fields into the access log.
|
||||||
func Debugw(msg string, fields ...LogField) {
|
func Debugw(msg string, fields ...LogField) {
|
||||||
if shallLog(DebugLevel) {
|
if shallLog(DebugLevel) {
|
||||||
writeDebug(msg, fields...)
|
writeDebug(msg, fields...)
|
||||||
@@ -96,7 +123,7 @@ func Debugw(msg string, fields ...LogField) {
|
|||||||
|
|
||||||
// Disable disables the logging.
|
// Disable disables the logging.
|
||||||
func Disable() {
|
func Disable() {
|
||||||
atomic.StoreUint32(&disableLog, 1)
|
atomic.StoreUint32(&logLevel, disableLevel)
|
||||||
writer.Store(nopWriter{})
|
writer.Store(nopWriter{})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -119,6 +146,13 @@ func Errorf(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Errorfn writes function result into error log.
|
||||||
|
func Errorfn(fn func() any) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
writeError(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ErrorStack writes v along with call stack into error log.
|
// ErrorStack writes v along with call stack into error log.
|
||||||
func ErrorStack(v ...any) {
|
func ErrorStack(v ...any) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
@@ -143,7 +177,7 @@ func Errorv(v any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errorw writes msg along with fields into error log.
|
// Errorw writes msg along with fields into the error log.
|
||||||
func Errorw(msg string, fields ...LogField) {
|
func Errorw(msg string, fields ...LogField) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
writeError(msg, fields...)
|
writeError(msg, fields...)
|
||||||
@@ -152,39 +186,9 @@ func Errorw(msg string, fields ...LogField) {
|
|||||||
|
|
||||||
// Field returns a LogField for the given key and value.
|
// Field returns a LogField for the given key and value.
|
||||||
func Field(key string, value any) LogField {
|
func Field(key string, value any) LogField {
|
||||||
switch val := value.(type) {
|
return LogField{
|
||||||
case error:
|
Key: key,
|
||||||
return LogField{Key: key, Value: val.Error()}
|
Value: value,
|
||||||
case []error:
|
|
||||||
var errs []string
|
|
||||||
for _, err := range val {
|
|
||||||
errs = append(errs, err.Error())
|
|
||||||
}
|
|
||||||
return LogField{Key: key, Value: errs}
|
|
||||||
case time.Duration:
|
|
||||||
return LogField{Key: key, Value: fmt.Sprint(val)}
|
|
||||||
case []time.Duration:
|
|
||||||
var durs []string
|
|
||||||
for _, dur := range val {
|
|
||||||
durs = append(durs, fmt.Sprint(dur))
|
|
||||||
}
|
|
||||||
return LogField{Key: key, Value: durs}
|
|
||||||
case []time.Time:
|
|
||||||
var times []string
|
|
||||||
for _, t := range val {
|
|
||||||
times = append(times, fmt.Sprint(t))
|
|
||||||
}
|
|
||||||
return LogField{Key: key, Value: times}
|
|
||||||
case fmt.Stringer:
|
|
||||||
return LogField{Key: key, Value: val.String()}
|
|
||||||
case []fmt.Stringer:
|
|
||||||
var strs []string
|
|
||||||
for _, str := range val {
|
|
||||||
strs = append(strs, str.String())
|
|
||||||
}
|
|
||||||
return LogField{Key: key, Value: strs}
|
|
||||||
default:
|
|
||||||
return LogField{Key: key, Value: val}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -202,6 +206,14 @@ func Infof(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Infofn writes function result into access log.
|
||||||
|
// This is useful when the function is expensive to call and info level disabled.
|
||||||
|
func Infofn(fn func() any) {
|
||||||
|
if shallLog(InfoLevel) {
|
||||||
|
writeInfo(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Infov writes v into access log with json content.
|
// Infov writes v into access log with json content.
|
||||||
func Infov(v any) {
|
func Infov(v any) {
|
||||||
if shallLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
@@ -209,7 +221,7 @@ func Infov(v any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Infow writes msg along with fields into access log.
|
// Infow writes msg along with fields into the access log.
|
||||||
func Infow(msg string, fields ...LogField) {
|
func Infow(msg string, fields ...LogField) {
|
||||||
if shallLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
writeInfo(msg, fields...)
|
writeInfo(msg, fields...)
|
||||||
@@ -250,20 +262,22 @@ func SetLevel(level uint32) {
|
|||||||
|
|
||||||
// SetWriter sets the logging writer. It can be used to customize the logging.
|
// SetWriter sets the logging writer. It can be used to customize the logging.
|
||||||
func SetWriter(w Writer) {
|
func SetWriter(w Writer) {
|
||||||
if atomic.LoadUint32(&disableLog) == 0 {
|
if atomic.LoadUint32(&logLevel) != disableLevel {
|
||||||
writer.Store(w)
|
writer.Store(w)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetUp sets up the logx. If already set up, just return nil.
|
// SetUp sets up the logx.
|
||||||
// we allow SetUp to be called multiple times, because for example
|
// If already set up, return nil.
|
||||||
|
// We allow SetUp to be called multiple times, because, for example,
|
||||||
// we need to allow different service frameworks to initialize logx respectively.
|
// we need to allow different service frameworks to initialize logx respectively.
|
||||||
func SetUp(c LogConf) (err error) {
|
func SetUp(c LogConf) (err error) {
|
||||||
// Just ignore the subsequent SetUp calls.
|
// Ignore the later SetUp calls.
|
||||||
// Because multiple services in one process might call SetUp respectively.
|
// Because multiple services in one process might call SetUp respectively.
|
||||||
// Need to wait for the first caller to complete the execution.
|
// Need to wait for the first caller to complete the execution.
|
||||||
setupOnce.Do(func() {
|
setupOnce.Do(func() {
|
||||||
setupLogLevel(c)
|
setupLogLevel(c.Level)
|
||||||
|
setupFieldKeys(c.FieldKeys)
|
||||||
|
|
||||||
if !c.Stat {
|
if !c.Stat {
|
||||||
DisableStat()
|
DisableStat()
|
||||||
@@ -273,6 +287,10 @@ func SetUp(c LogConf) (err error) {
|
|||||||
timeFormat = c.TimeFormat
|
timeFormat = c.TimeFormat
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(c.FileTimeFormat) > 0 {
|
||||||
|
fileTimeFormat = c.FileTimeFormat
|
||||||
|
}
|
||||||
|
|
||||||
atomic.StoreUint32(&maxContentLength, c.MaxContentLength)
|
atomic.StoreUint32(&maxContentLength, c.MaxContentLength)
|
||||||
|
|
||||||
switch c.Encoding {
|
switch c.Encoding {
|
||||||
@@ -323,6 +341,14 @@ func Slowf(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Slowfn writes function result into slow log.
|
||||||
|
// This is useful when the function is expensive to call and slow level disabled.
|
||||||
|
func Slowfn(fn func() any) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
writeSlow(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Slowv writes v into slow log with json content.
|
// Slowv writes v into slow log with json content.
|
||||||
func Slowv(v any) {
|
func Slowv(v any) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
@@ -414,6 +440,32 @@ func createOutput(path string) (io.WriteCloser, error) {
|
|||||||
return NewLogger(path, rule, options.gzipEnabled)
|
return NewLogger(path, rule, options.gzipEnabled)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func encodeError(err error) (ret string) {
|
||||||
|
return encodeWithRecover(err, func() string {
|
||||||
|
return err.Error()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeStringer(v fmt.Stringer) (ret string) {
|
||||||
|
return encodeWithRecover(v, func() string {
|
||||||
|
return v.String()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeWithRecover(arg any, fn func() string) (ret string) {
|
||||||
|
defer func() {
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
|
||||||
|
ret = nilAngleString
|
||||||
|
} else {
|
||||||
|
ret = fmt.Sprintf("panic: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return fn()
|
||||||
|
}
|
||||||
|
|
||||||
func getWriter() Writer {
|
func getWriter() Writer {
|
||||||
w := writer.Load()
|
w := writer.Load()
|
||||||
if w == nil {
|
if w == nil {
|
||||||
@@ -429,8 +481,35 @@ func handleOptions(opts []LogOption) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupLogLevel(c LogConf) {
|
func setupFieldKeys(c fieldKeyConf) {
|
||||||
switch c.Level {
|
if len(c.CallerKey) > 0 {
|
||||||
|
callerKey = c.CallerKey
|
||||||
|
}
|
||||||
|
if len(c.ContentKey) > 0 {
|
||||||
|
contentKey = c.ContentKey
|
||||||
|
}
|
||||||
|
if len(c.DurationKey) > 0 {
|
||||||
|
durationKey = c.DurationKey
|
||||||
|
}
|
||||||
|
if len(c.LevelKey) > 0 {
|
||||||
|
levelKey = c.LevelKey
|
||||||
|
}
|
||||||
|
if len(c.SpanKey) > 0 {
|
||||||
|
spanKey = c.SpanKey
|
||||||
|
}
|
||||||
|
if len(c.TimestampKey) > 0 {
|
||||||
|
timestampKey = c.TimestampKey
|
||||||
|
}
|
||||||
|
if len(c.TraceKey) > 0 {
|
||||||
|
traceKey = c.TraceKey
|
||||||
|
}
|
||||||
|
if len(c.TruncatedKey) > 0 {
|
||||||
|
truncatedKey = c.TruncatedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupLogLevel(level string) {
|
||||||
|
switch level {
|
||||||
case levelDebug:
|
case levelDebug:
|
||||||
SetLevel(DebugLevel)
|
SetLevel(DebugLevel)
|
||||||
case levelInfo:
|
case levelInfo:
|
||||||
@@ -478,15 +557,15 @@ func shallLogStat() bool {
|
|||||||
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
||||||
// The caller should check shallLog before calling this function.
|
// The caller should check shallLog before calling this function.
|
||||||
func writeDebug(val any, fields ...LogField) {
|
func writeDebug(val any, fields ...LogField) {
|
||||||
getWriter().Debug(val, addCaller(fields...)...)
|
getWriter().Debug(val, mergeGlobalFields(addCaller(fields...))...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeError writes v into error log.
|
// writeError writes v into the error log.
|
||||||
// Not checking shallLog here is for performance consideration.
|
// Not checking shallLog here is for performance consideration.
|
||||||
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
||||||
// The caller should check shallLog before calling this function.
|
// The caller should check shallLog before calling this function.
|
||||||
func writeError(val any, fields ...LogField) {
|
func writeError(val any, fields ...LogField) {
|
||||||
getWriter().Error(val, addCaller(fields...)...)
|
getWriter().Error(val, mergeGlobalFields(addCaller(fields...))...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeInfo writes v into info log.
|
// writeInfo writes v into info log.
|
||||||
@@ -494,7 +573,7 @@ func writeError(val any, fields ...LogField) {
|
|||||||
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
||||||
// The caller should check shallLog before calling this function.
|
// The caller should check shallLog before calling this function.
|
||||||
func writeInfo(val any, fields ...LogField) {
|
func writeInfo(val any, fields ...LogField) {
|
||||||
getWriter().Info(val, addCaller(fields...)...)
|
getWriter().Info(val, mergeGlobalFields(addCaller(fields...))...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeSevere writes v into severe log.
|
// writeSevere writes v into severe log.
|
||||||
@@ -510,7 +589,7 @@ func writeSevere(msg string) {
|
|||||||
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
||||||
// The caller should check shallLog before calling this function.
|
// The caller should check shallLog before calling this function.
|
||||||
func writeSlow(val any, fields ...LogField) {
|
func writeSlow(val any, fields ...LogField) {
|
||||||
getWriter().Slow(val, addCaller(fields...)...)
|
getWriter().Slow(val, mergeGlobalFields(addCaller(fields...))...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeStack writes v into stack log.
|
// writeStack writes v into stack log.
|
||||||
@@ -521,10 +600,10 @@ func writeStack(msg string) {
|
|||||||
getWriter().Stack(fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
getWriter().Stack(fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeStat writes v into stat log.
|
// writeStat writes v into the stat log.
|
||||||
// Not checking shallLog here is for performance consideration.
|
// Not checking shallLog here is for performance consideration.
|
||||||
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
// If we check shallLog here, the fmt.Sprint might be called even if the log level is not enabled.
|
||||||
// The caller should check shallLog before calling this function.
|
// The caller should check shallLog before calling this function.
|
||||||
func writeStat(msg string) {
|
func writeStat(msg string) {
|
||||||
getWriter().Stat(msg, addCaller()...)
|
getWriter().Stat(msg, mergeGlobalFields(addCaller())...)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package logx
|
package logx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -16,6 +17,8 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/sdk/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -248,6 +251,32 @@ func TestStructedLogDebugf(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogDebugfn(t *testing.T) {
|
||||||
|
t.Run("debugfn with output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelDebug, w, func(v ...any) {
|
||||||
|
Debugfn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("debugfn without output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLogEmpty(t, w, InfoLevel, func(v ...any) {
|
||||||
|
Debugfn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogDebugv(t *testing.T) {
|
func TestStructedLogDebugv(t *testing.T) {
|
||||||
w := new(mockWriter)
|
w := new(mockWriter)
|
||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
@@ -288,6 +317,32 @@ func TestStructedLogErrorf(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogErrorfn(t *testing.T) {
|
||||||
|
t.Run("errorfn with output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelError, w, func(v ...any) {
|
||||||
|
Errorfn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("errorfn without output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLogEmpty(t, w, SevereLevel, func(v ...any) {
|
||||||
|
Errorfn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogErrorv(t *testing.T) {
|
func TestStructedLogErrorv(t *testing.T) {
|
||||||
w := new(mockWriter)
|
w := new(mockWriter)
|
||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
@@ -328,6 +383,32 @@ func TestStructedLogInfof(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedInfofn(t *testing.T) {
|
||||||
|
t.Run("infofn with output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelInfo, w, func(v ...any) {
|
||||||
|
Infofn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("infofn without output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLogEmpty(t, w, ErrorLevel, func(v ...any) {
|
||||||
|
Infofn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogInfov(t *testing.T) {
|
func TestStructedLogInfov(t *testing.T) {
|
||||||
w := new(mockWriter)
|
w := new(mockWriter)
|
||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
@@ -348,6 +429,27 @@ func TestStructedLogInfow(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogFieldNil(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
var s *string
|
||||||
|
Infow("test", Field("bb", s))
|
||||||
|
var d *nilStringer
|
||||||
|
Infow("test", Field("bb", d))
|
||||||
|
var e *nilError
|
||||||
|
Errorw("test", Field("bb", e))
|
||||||
|
})
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
var p panicStringer
|
||||||
|
Infow("test", Field("bb", p))
|
||||||
|
var ps innerPanicStringer
|
||||||
|
Infow("test", Field("bb", ps))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogInfoConsoleAny(t *testing.T) {
|
func TestStructedLogInfoConsoleAny(t *testing.T) {
|
||||||
w := new(mockWriter)
|
w := new(mockWriter)
|
||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
@@ -430,6 +532,17 @@ func TestStructedLogInfoConsoleText(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestInfofnWithErrorLevel(t *testing.T) {
|
||||||
|
called := false
|
||||||
|
SetLevel(ErrorLevel)
|
||||||
|
defer SetLevel(DebugLevel)
|
||||||
|
Infofn(func() any {
|
||||||
|
called = true
|
||||||
|
return "info log"
|
||||||
|
})
|
||||||
|
assert.False(t, called)
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogSlow(t *testing.T) {
|
func TestStructedLogSlow(t *testing.T) {
|
||||||
w := new(mockWriter)
|
w := new(mockWriter)
|
||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
@@ -450,6 +563,32 @@ func TestStructedLogSlowf(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStructedLogSlowfn(t *testing.T) {
|
||||||
|
t.Run("slowfn with output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLog(t, levelSlow, w, func(v ...any) {
|
||||||
|
Slowfn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("slowfn without output", func(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
doTestStructedLogEmpty(t, w, SevereLevel, func(v ...any) {
|
||||||
|
Slowfn(func() any {
|
||||||
|
return fmt.Sprint(v...)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestStructedLogSlowv(t *testing.T) {
|
func TestStructedLogSlowv(t *testing.T) {
|
||||||
w := new(mockWriter)
|
w := new(mockWriter)
|
||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
@@ -570,7 +709,7 @@ func TestErrorfWithWrappedError(t *testing.T) {
|
|||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
defer writer.Store(old)
|
defer writer.Store(old)
|
||||||
|
|
||||||
Errorf("hello %w", errors.New(message))
|
Errorf("hello %s", errors.New(message))
|
||||||
assert.True(t, strings.Contains(w.String(), "hello there"))
|
assert.True(t, strings.Contains(w.String(), "hello there"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -640,15 +779,9 @@ func TestSetup(t *testing.T) {
|
|||||||
MaxBackups: 3,
|
MaxBackups: 3,
|
||||||
MaxSize: 1024 * 1024,
|
MaxSize: 1024 * 1024,
|
||||||
}))
|
}))
|
||||||
setupLogLevel(LogConf{
|
setupLogLevel(levelInfo)
|
||||||
Level: levelInfo,
|
setupLogLevel(levelError)
|
||||||
})
|
setupLogLevel(levelSevere)
|
||||||
setupLogLevel(LogConf{
|
|
||||||
Level: levelError,
|
|
||||||
})
|
|
||||||
setupLogLevel(LogConf{
|
|
||||||
Level: levelSevere,
|
|
||||||
})
|
|
||||||
_, err := createOutput("")
|
_, err := createOutput("")
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
Disable()
|
Disable()
|
||||||
@@ -658,6 +791,10 @@ func TestSetup(t *testing.T) {
|
|||||||
|
|
||||||
func TestDisable(t *testing.T) {
|
func TestDisable(t *testing.T) {
|
||||||
Disable()
|
Disable()
|
||||||
|
defer func() {
|
||||||
|
SetLevel(InfoLevel)
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
|
}()
|
||||||
|
|
||||||
var opt logOptions
|
var opt logOptions
|
||||||
WithKeepDays(1)(&opt)
|
WithKeepDays(1)(&opt)
|
||||||
@@ -666,6 +803,7 @@ func TestDisable(t *testing.T) {
|
|||||||
WithMaxSize(1024)(&opt)
|
WithMaxSize(1024)(&opt)
|
||||||
assert.Nil(t, Close())
|
assert.Nil(t, Close())
|
||||||
assert.Nil(t, Close())
|
assert.Nil(t, Close())
|
||||||
|
assert.Equal(t, uint32(disableLevel), atomic.LoadUint32(&logLevel))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDisableStat(t *testing.T) {
|
func TestDisableStat(t *testing.T) {
|
||||||
@@ -679,8 +817,19 @@ func TestDisableStat(t *testing.T) {
|
|||||||
assert.Equal(t, 0, w.builder.Len())
|
assert.Equal(t, 0, w.builder.Len())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAddWriter(t *testing.T) {
|
||||||
|
const message = "hello there"
|
||||||
|
w := new(mockWriter)
|
||||||
|
AddWriter(w)
|
||||||
|
w1 := new(mockWriter)
|
||||||
|
AddWriter(w1)
|
||||||
|
Error(message)
|
||||||
|
assert.Contains(t, w.String(), message)
|
||||||
|
assert.Contains(t, w1.String(), message)
|
||||||
|
}
|
||||||
|
|
||||||
func TestSetWriter(t *testing.T) {
|
func TestSetWriter(t *testing.T) {
|
||||||
atomic.StoreUint32(&disableLog, 0)
|
atomic.StoreUint32(&logLevel, 0)
|
||||||
Reset()
|
Reset()
|
||||||
SetWriter(nopWriter{})
|
SetWriter(nopWriter{})
|
||||||
assert.NotNil(t, writer.Load())
|
assert.NotNil(t, writer.Load())
|
||||||
@@ -704,6 +853,95 @@ func TestWithKeepDays(t *testing.T) {
|
|||||||
assert.Equal(t, 1, opt.keepDays)
|
assert.Equal(t, 1, opt.keepDays)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestWithField_LogLevel(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
level uint32
|
||||||
|
fn func(string, ...LogField)
|
||||||
|
count int32
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "debug/info",
|
||||||
|
level: DebugLevel,
|
||||||
|
fn: Infow,
|
||||||
|
count: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "info/error",
|
||||||
|
level: InfoLevel,
|
||||||
|
fn: Errorw,
|
||||||
|
count: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "info/info",
|
||||||
|
level: InfoLevel,
|
||||||
|
fn: Infow,
|
||||||
|
count: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "info/severe",
|
||||||
|
level: InfoLevel,
|
||||||
|
fn: Errorw,
|
||||||
|
count: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "error/info",
|
||||||
|
level: ErrorLevel,
|
||||||
|
fn: Infow,
|
||||||
|
count: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "error/debug",
|
||||||
|
level: ErrorLevel,
|
||||||
|
fn: Debugw,
|
||||||
|
count: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
olevel := atomic.LoadUint32(&logLevel)
|
||||||
|
SetLevel(tt.level)
|
||||||
|
defer SetLevel(olevel)
|
||||||
|
|
||||||
|
var val countingStringer
|
||||||
|
tt.fn("hello there", Field("foo", &val))
|
||||||
|
assert.Equal(t, tt.count, val.Count())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithField_LogLevelWithContext(t *testing.T) {
|
||||||
|
t.Run("context more than once with info/info", func(t *testing.T) {
|
||||||
|
olevel := atomic.LoadUint32(&logLevel)
|
||||||
|
SetLevel(InfoLevel)
|
||||||
|
defer SetLevel(olevel)
|
||||||
|
|
||||||
|
var val countingStringer
|
||||||
|
ctx := ContextWithFields(context.Background(), Field("foo", &val))
|
||||||
|
logger := WithContext(ctx)
|
||||||
|
logger.Info("hello there")
|
||||||
|
logger.Info("hello there")
|
||||||
|
logger.Info("hello there")
|
||||||
|
assert.True(t, val.Count() > 0)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("context more than once with error/info", func(t *testing.T) {
|
||||||
|
olevel := atomic.LoadUint32(&logLevel)
|
||||||
|
SetLevel(ErrorLevel)
|
||||||
|
defer SetLevel(olevel)
|
||||||
|
|
||||||
|
var val countingStringer
|
||||||
|
ctx := ContextWithFields(context.Background(), Field("foo", &val))
|
||||||
|
logger := WithContext(ctx)
|
||||||
|
logger.Info("hello there")
|
||||||
|
logger.Info("hello there")
|
||||||
|
logger.Info("hello there")
|
||||||
|
assert.Equal(t, int32(0), val.Count())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkCopyByteSliceAppend(b *testing.B) {
|
func BenchmarkCopyByteSliceAppend(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
var buf []byte
|
var buf []byte
|
||||||
@@ -810,15 +1048,26 @@ func doTestStructedLogConsole(t *testing.T, w *mockWriter, write func(...any)) {
|
|||||||
assert.True(t, strings.Contains(w.String(), message))
|
assert.True(t, strings.Contains(w.String(), message))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func doTestStructedLogEmpty(t *testing.T, w *mockWriter, level uint32, write func(...any)) {
|
||||||
|
olevel := atomic.LoadUint32(&logLevel)
|
||||||
|
SetLevel(level)
|
||||||
|
defer SetLevel(olevel)
|
||||||
|
|
||||||
|
const message = "hello there"
|
||||||
|
write(message)
|
||||||
|
assert.Empty(t, w.String())
|
||||||
|
}
|
||||||
|
|
||||||
func testSetLevelTwiceWithMode(t *testing.T, mode string, w *mockWriter) {
|
func testSetLevelTwiceWithMode(t *testing.T, mode string, w *mockWriter) {
|
||||||
writer.Store(nil)
|
writer.Store(nil)
|
||||||
SetUp(LogConf{
|
SetUp(LogConf{
|
||||||
Mode: mode,
|
Mode: mode,
|
||||||
Level: "debug",
|
Level: "debug",
|
||||||
Path: "/dev/null",
|
Path: "/dev/null",
|
||||||
Encoding: plainEncoding,
|
Encoding: plainEncoding,
|
||||||
Stat: false,
|
Stat: false,
|
||||||
TimeFormat: time.RFC3339,
|
TimeFormat: time.RFC3339,
|
||||||
|
FileTimeFormat: time.DateTime,
|
||||||
})
|
})
|
||||||
SetUp(LogConf{
|
SetUp(LogConf{
|
||||||
Mode: mode,
|
Mode: mode,
|
||||||
@@ -858,3 +1107,112 @@ func validateFields(t *testing.T, content string, fields map[string]any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type nilError struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *nilError) Error() string {
|
||||||
|
return e.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
type nilStringer struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *nilStringer) String() string {
|
||||||
|
return s.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
type innerPanicStringer struct {
|
||||||
|
Inner *struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s innerPanicStringer) String() string {
|
||||||
|
return s.Inner.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
type panicStringer struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s panicStringer) String() string {
|
||||||
|
panic("panic")
|
||||||
|
}
|
||||||
|
|
||||||
|
type countingStringer struct {
|
||||||
|
count int32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *countingStringer) Count() int32 {
|
||||||
|
return atomic.LoadInt32(&s.count)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *countingStringer) String() string {
|
||||||
|
atomic.AddInt32(&s.count, 1)
|
||||||
|
return "countingStringer"
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLogKey(t *testing.T) {
|
||||||
|
setupOnce = sync.Once{}
|
||||||
|
MustSetup(LogConf{
|
||||||
|
ServiceName: "any",
|
||||||
|
Mode: "console",
|
||||||
|
Encoding: "json",
|
||||||
|
TimeFormat: timeFormat,
|
||||||
|
FieldKeys: fieldKeyConf{
|
||||||
|
CallerKey: "_caller",
|
||||||
|
ContentKey: "_content",
|
||||||
|
DurationKey: "_duration",
|
||||||
|
LevelKey: "_level",
|
||||||
|
SpanKey: "_span",
|
||||||
|
TimestampKey: "_timestamp",
|
||||||
|
TraceKey: "_trace",
|
||||||
|
TruncatedKey: "_truncated",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
setupFieldKeys(fieldKeyConf{
|
||||||
|
CallerKey: defaultCallerKey,
|
||||||
|
ContentKey: defaultContentKey,
|
||||||
|
DurationKey: defaultDurationKey,
|
||||||
|
LevelKey: defaultLevelKey,
|
||||||
|
SpanKey: defaultSpanKey,
|
||||||
|
TimestampKey: defaultTimestampKey,
|
||||||
|
TraceKey: defaultTraceKey,
|
||||||
|
TruncatedKey: defaultTruncatedKey,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
const message = "hello there"
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
otp := otel.GetTracerProvider()
|
||||||
|
tp := trace.NewTracerProvider(trace.WithSampler(trace.AlwaysSample()))
|
||||||
|
otel.SetTracerProvider(tp)
|
||||||
|
defer otel.SetTracerProvider(otp)
|
||||||
|
|
||||||
|
ctx, span := tp.Tracer("trace-id").Start(context.Background(), "span-id")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
WithContext(ctx).WithDuration(time.Second).Info(message)
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
var m map[string]string
|
||||||
|
if err := json.Unmarshal([]byte(w.String()), &m); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
assert.Equal(t, "info", m["_level"])
|
||||||
|
assert.Equal(t, message, m["_content"])
|
||||||
|
assert.Equal(t, "1000.0ms", m["_duration"])
|
||||||
|
assert.Regexp(t, `logx/logs_test.go:\d+`, m["_caller"])
|
||||||
|
assert.NotEmpty(t, m["_trace"])
|
||||||
|
assert.NotEmpty(t, m["_span"])
|
||||||
|
parsedTime, err := time.Parse(timeFormat, m["_timestamp"])
|
||||||
|
assert.True(t, err == nil)
|
||||||
|
assert.Equal(t, now.Minute(), parsedTime.Minute())
|
||||||
|
}
|
||||||
|
|||||||
@@ -52,6 +52,12 @@ func (l *richLogger) Debugf(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *richLogger) Debugfn(fn func() any) {
|
||||||
|
if shallLog(DebugLevel) {
|
||||||
|
l.debug(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *richLogger) Debugv(v any) {
|
func (l *richLogger) Debugv(v any) {
|
||||||
if shallLog(DebugLevel) {
|
if shallLog(DebugLevel) {
|
||||||
l.debug(v)
|
l.debug(v)
|
||||||
@@ -76,6 +82,12 @@ func (l *richLogger) Errorf(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *richLogger) Errorfn(fn func() any) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
l.err(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *richLogger) Errorv(v any) {
|
func (l *richLogger) Errorv(v any) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.err(v)
|
l.err(v)
|
||||||
@@ -100,6 +112,12 @@ func (l *richLogger) Infof(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *richLogger) Infofn(fn func() any) {
|
||||||
|
if shallLog(InfoLevel) {
|
||||||
|
l.info(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *richLogger) Infov(v any) {
|
func (l *richLogger) Infov(v any) {
|
||||||
if shallLog(InfoLevel) {
|
if shallLog(InfoLevel) {
|
||||||
l.info(v)
|
l.info(v)
|
||||||
@@ -124,6 +142,12 @@ func (l *richLogger) Slowf(format string, v ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *richLogger) Slowfn(fn func() any) {
|
||||||
|
if shallLog(ErrorLevel) {
|
||||||
|
l.slow(fn())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *richLogger) Slowv(v any) {
|
func (l *richLogger) Slowv(v any) {
|
||||||
if shallLog(ErrorLevel) {
|
if shallLog(ErrorLevel) {
|
||||||
l.slow(v)
|
l.slow(v)
|
||||||
@@ -141,28 +165,50 @@ func (l *richLogger) WithCallerSkip(skip int) Logger {
|
|||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
l.callerSkip = skip
|
return &richLogger{
|
||||||
return l
|
ctx: l.ctx,
|
||||||
|
callerSkip: skip,
|
||||||
|
fields: l.fields,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *richLogger) WithContext(ctx context.Context) Logger {
|
func (l *richLogger) WithContext(ctx context.Context) Logger {
|
||||||
l.ctx = ctx
|
return &richLogger{
|
||||||
return l
|
ctx: ctx,
|
||||||
|
callerSkip: l.callerSkip,
|
||||||
|
fields: l.fields,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *richLogger) WithDuration(duration time.Duration) Logger {
|
func (l *richLogger) WithDuration(duration time.Duration) Logger {
|
||||||
l.fields = append(l.fields, Field(durationKey, timex.ReprOfDuration(duration)))
|
fields := append(l.fields, Field(durationKey, timex.ReprOfDuration(duration)))
|
||||||
return l
|
|
||||||
|
return &richLogger{
|
||||||
|
ctx: l.ctx,
|
||||||
|
callerSkip: l.callerSkip,
|
||||||
|
fields: fields,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *richLogger) WithFields(fields ...LogField) Logger {
|
func (l *richLogger) WithFields(fields ...LogField) Logger {
|
||||||
l.fields = append(l.fields, fields...)
|
if len(fields) == 0 {
|
||||||
return l
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
f := append(l.fields, fields...)
|
||||||
|
|
||||||
|
return &richLogger{
|
||||||
|
ctx: l.ctx,
|
||||||
|
callerSkip: l.callerSkip,
|
||||||
|
fields: f,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *richLogger) buildFields(fields ...LogField) []LogField {
|
func (l *richLogger) buildFields(fields ...LogField) []LogField {
|
||||||
fields = append(l.fields, fields...)
|
fields = append(l.fields, fields...)
|
||||||
|
// caller field should always appear together with global fields
|
||||||
fields = append(fields, Field(callerKey, getCaller(callerDepth+l.callerSkip)))
|
fields = append(fields, Field(callerKey, getCaller(callerDepth+l.callerSkip)))
|
||||||
|
fields = mergeGlobalFields(fields)
|
||||||
|
|
||||||
if l.ctx == nil {
|
if l.ctx == nil {
|
||||||
return fields
|
return fields
|
||||||
@@ -178,7 +224,7 @@ func (l *richLogger) buildFields(fields ...LogField) []LogField {
|
|||||||
fields = append(fields, Field(spanKey, spanID))
|
fields = append(fields, Field(spanKey, spanID))
|
||||||
}
|
}
|
||||||
|
|
||||||
val := l.ctx.Value(fieldsContextKey)
|
val := l.ctx.Value(fieldsKey{})
|
||||||
if val != nil {
|
if val != nil {
|
||||||
if arr, ok := val.([]LogField); ok {
|
if arr, ok := val.([]LogField); ok {
|
||||||
fields = append(fields, arr...)
|
fields = append(fields, arr...)
|
||||||
|
|||||||
@@ -63,6 +63,11 @@ func TestTraceDebug(t *testing.T) {
|
|||||||
l.WithDuration(time.Second).Debugf(testlog)
|
l.WithDuration(time.Second).Debugf(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
|
l.WithDuration(time.Second).Debugfn(func() any {
|
||||||
|
return testlog
|
||||||
|
})
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
|
w.Reset()
|
||||||
l.WithDuration(time.Second).Debugv(testlog)
|
l.WithDuration(time.Second).Debugv(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
@@ -103,6 +108,11 @@ func TestTraceError(t *testing.T) {
|
|||||||
l.WithDuration(time.Second).Errorf(testlog)
|
l.WithDuration(time.Second).Errorf(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
|
l.WithDuration(time.Second).Errorfn(func() any {
|
||||||
|
return testlog
|
||||||
|
})
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
|
w.Reset()
|
||||||
l.WithDuration(time.Second).Errorv(testlog)
|
l.WithDuration(time.Second).Errorv(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
@@ -140,6 +150,11 @@ func TestTraceInfo(t *testing.T) {
|
|||||||
l.WithDuration(time.Second).Infof(testlog)
|
l.WithDuration(time.Second).Infof(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
|
l.WithDuration(time.Second).Infofn(func() any {
|
||||||
|
return testlog
|
||||||
|
})
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
|
w.Reset()
|
||||||
l.WithDuration(time.Second).Infov(testlog)
|
l.WithDuration(time.Second).Infov(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
@@ -213,6 +228,11 @@ func TestTraceSlow(t *testing.T) {
|
|||||||
l.WithDuration(time.Second).Slowf(testlog)
|
l.WithDuration(time.Second).Slowf(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
|
l.WithDuration(time.Second).Slowfn(func() any {
|
||||||
|
return testlog
|
||||||
|
})
|
||||||
|
validate(t, w.String(), true, true)
|
||||||
|
w.Reset()
|
||||||
l.WithDuration(time.Second).Slowv(testlog)
|
l.WithDuration(time.Second).Slowv(testlog)
|
||||||
validate(t, w.String(), true, true)
|
validate(t, w.String(), true, true)
|
||||||
w.Reset()
|
w.Reset()
|
||||||
@@ -287,6 +307,54 @@ func TestLogWithCallerSkip(t *testing.T) {
|
|||||||
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
assert.True(t, w.Contains(fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLogWithCallerSkipCopy(t *testing.T) {
|
||||||
|
log1 := WithCallerSkip(2)
|
||||||
|
log2 := log1.WithCallerSkip(3)
|
||||||
|
log3 := log2.WithCallerSkip(-1)
|
||||||
|
assert.Equal(t, 2, log1.(*richLogger).callerSkip)
|
||||||
|
assert.Equal(t, 3, log2.(*richLogger).callerSkip)
|
||||||
|
assert.Equal(t, 3, log3.(*richLogger).callerSkip)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLogWithContextCopy(t *testing.T) {
|
||||||
|
c1 := context.Background()
|
||||||
|
c2 := context.WithValue(context.Background(), "foo", "bar")
|
||||||
|
log1 := WithContext(c1)
|
||||||
|
log2 := log1.WithContext(c2)
|
||||||
|
assert.Equal(t, c1, log1.(*richLogger).ctx)
|
||||||
|
assert.Equal(t, c2, log2.(*richLogger).ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLogWithDurationCopy(t *testing.T) {
|
||||||
|
log1 := WithContext(context.Background())
|
||||||
|
log2 := log1.WithDuration(time.Second)
|
||||||
|
assert.Empty(t, log1.(*richLogger).fields)
|
||||||
|
assert.Equal(t, 1, len(log2.(*richLogger).fields))
|
||||||
|
|
||||||
|
var w mockWriter
|
||||||
|
old := writer.Swap(&w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
log2.Info("hello")
|
||||||
|
assert.Contains(t, w.String(), `"duration":"1000.0ms"`)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLogWithFieldsCopy(t *testing.T) {
|
||||||
|
log1 := WithContext(context.Background())
|
||||||
|
log2 := log1.WithFields(Field("foo", "bar"))
|
||||||
|
log3 := log1.WithFields()
|
||||||
|
assert.Empty(t, log1.(*richLogger).fields)
|
||||||
|
assert.Equal(t, 1, len(log2.(*richLogger).fields))
|
||||||
|
assert.Equal(t, log1, log3)
|
||||||
|
assert.Empty(t, log3.(*richLogger).fields)
|
||||||
|
|
||||||
|
var w mockWriter
|
||||||
|
old := writer.Swap(&w)
|
||||||
|
defer writer.Store(old)
|
||||||
|
|
||||||
|
log2.Info("hello")
|
||||||
|
assert.Contains(t, w.String(), `"foo":"bar"`)
|
||||||
|
}
|
||||||
|
|
||||||
func TestLoggerWithFields(t *testing.T) {
|
func TestLoggerWithFields(t *testing.T) {
|
||||||
w := new(mockWriter)
|
w := new(mockWriter)
|
||||||
old := writer.Swap(w)
|
old := writer.Swap(w)
|
||||||
@@ -355,3 +423,49 @@ type mockValue struct {
|
|||||||
Foo string `json:"foo"`
|
Foo string `json:"foo"`
|
||||||
Content any `json:"content"`
|
Content any `json:"content"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type testJson struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
Score float64 `json:"score"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t testJson) MarshalJSON() ([]byte, error) {
|
||||||
|
type testJsonImpl testJson
|
||||||
|
return json.Marshal(testJsonImpl(t))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t testJson) String() string {
|
||||||
|
return fmt.Sprintf("%s %d %f", t.Name, t.Age, t.Score)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLogWithJson(t *testing.T) {
|
||||||
|
w := new(mockWriter)
|
||||||
|
old := writer.Swap(w)
|
||||||
|
writer.lock.RLock()
|
||||||
|
defer func() {
|
||||||
|
writer.lock.RUnlock()
|
||||||
|
writer.Store(old)
|
||||||
|
}()
|
||||||
|
|
||||||
|
l := WithContext(context.Background()).WithFields(Field("bar", testJson{
|
||||||
|
Name: "foo",
|
||||||
|
Age: 1,
|
||||||
|
Score: 1.0,
|
||||||
|
}))
|
||||||
|
l.Info(testlog)
|
||||||
|
|
||||||
|
type mockValue2 struct {
|
||||||
|
mockValue
|
||||||
|
Bar testJson `json:"bar"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var val mockValue2
|
||||||
|
err := json.Unmarshal([]byte(w.String()), &val)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, testlog, val.Content)
|
||||||
|
assert.Equal(t, "foo", val.Bar.Name)
|
||||||
|
assert.Equal(t, 1, val.Bar.Age)
|
||||||
|
assert.Equal(t, 1.0, val.Bar.Score)
|
||||||
|
}
|
||||||
|
|||||||
@@ -18,8 +18,6 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
dateFormat = "2006-01-02"
|
|
||||||
fileTimeFormat = time.RFC3339
|
|
||||||
hoursPerDay = 24
|
hoursPerDay = 24
|
||||||
bufferSize = 100
|
bufferSize = 100
|
||||||
defaultDirMode = 0o755
|
defaultDirMode = 0o755
|
||||||
@@ -28,8 +26,12 @@ const (
|
|||||||
megaBytes = 1 << 20
|
megaBytes = 1 << 20
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrLogFileClosed is an error that indicates the log file is already closed.
|
var (
|
||||||
var ErrLogFileClosed = errors.New("error: log file closed")
|
// ErrLogFileClosed is an error that indicates the log file is already closed.
|
||||||
|
ErrLogFileClosed = errors.New("error: log file closed")
|
||||||
|
|
||||||
|
fileTimeFormat = time.RFC3339
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A RotateRule interface is used to define the log rotating rules.
|
// A RotateRule interface is used to define the log rotating rules.
|
||||||
@@ -113,7 +115,7 @@ func (r *DailyRotateRule) OutdatedFiles() []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var buf strings.Builder
|
var buf strings.Builder
|
||||||
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(dateFormat)
|
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(time.DateOnly)
|
||||||
buf.WriteString(r.filename)
|
buf.WriteString(r.filename)
|
||||||
buf.WriteString(r.delimiter)
|
buf.WriteString(r.delimiter)
|
||||||
buf.WriteString(boundary)
|
buf.WriteString(boundary)
|
||||||
@@ -209,7 +211,7 @@ func (r *SizeLimitRotateRule) OutdatedFiles() []string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var result []string
|
result := make([]string, 0, len(outdated))
|
||||||
for k := range outdated {
|
for k := range outdated {
|
||||||
result = append(result, k)
|
result = append(result, k)
|
||||||
}
|
}
|
||||||
@@ -319,7 +321,7 @@ func (l *RotateLogger) maybeCompressFile(file string) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
if _, err := os.Stat(file); err != nil {
|
if _, err := os.Stat(file); err != nil {
|
||||||
// file not exists or other error, ignore compression
|
// file doesn't exist or another error, ignore compression
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -422,7 +424,7 @@ func compressLogFile(file string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getNowDate() string {
|
func getNowDate() string {
|
||||||
return time.Now().Format(dateFormat)
|
return time.Now().Format(time.DateOnly)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getNowDateInRFC3339Format() string {
|
func getNowDateInRFC3339Format() string {
|
||||||
|
|||||||
@@ -52,7 +52,7 @@ func TestDailyRotateRuleOutdatedFiles(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("temp files", func(t *testing.T) {
|
t.Run("temp files", func(t *testing.T) {
|
||||||
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat)
|
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(time.DateOnly)
|
||||||
f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_ = f1.Close()
|
_ = f1.Close()
|
||||||
@@ -73,7 +73,7 @@ func TestDailyRotateRuleOutdatedFiles(t *testing.T) {
|
|||||||
|
|
||||||
func TestDailyRotateRuleShallRotate(t *testing.T) {
|
func TestDailyRotateRuleShallRotate(t *testing.T) {
|
||||||
var rule DailyRotateRule
|
var rule DailyRotateRule
|
||||||
rule.rotatedTime = time.Now().Add(time.Hour * 24).Format(dateFormat)
|
rule.rotatedTime = time.Now().Add(time.Hour * 24).Format(time.DateOnly)
|
||||||
assert.True(t, rule.ShallRotate(0))
|
assert.True(t, rule.ShallRotate(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,12 +117,12 @@ func TestSizeLimitRotateRuleOutdatedFiles(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("temp files", func(t *testing.T) {
|
t.Run("temp files", func(t *testing.T) {
|
||||||
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat)
|
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(time.DateOnly)
|
||||||
f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
f2, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
f2, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
boundary1 := time.Now().Add(time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat)
|
boundary1 := time.Now().Add(time.Hour * time.Duration(hoursPerDay) * 2).Format(time.DateOnly)
|
||||||
f3, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary1)
|
f3, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
@@ -144,12 +144,12 @@ func TestSizeLimitRotateRuleOutdatedFiles(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("no backups", func(t *testing.T) {
|
t.Run("no backups", func(t *testing.T) {
|
||||||
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat)
|
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(time.DateOnly)
|
||||||
f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
f2, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
f2, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
boundary1 := time.Now().Add(time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat)
|
boundary1 := time.Now().Add(time.Hour * time.Duration(hoursPerDay) * 2).Format(time.DateOnly)
|
||||||
f3, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary1)
|
f3, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
@@ -319,7 +319,7 @@ func TestRotateLoggerWrite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// the following write calls cannot be changed to Write, because of DATA RACE.
|
// the following write calls cannot be changed to Write, because of DATA RACE.
|
||||||
logger.write([]byte(`foo`))
|
logger.write([]byte(`foo`))
|
||||||
rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(dateFormat)
|
rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(time.DateOnly)
|
||||||
logger.write([]byte(`bar`))
|
logger.write([]byte(`bar`))
|
||||||
logger.Close()
|
logger.Close()
|
||||||
logger.write([]byte(`baz`))
|
logger.write([]byte(`baz`))
|
||||||
@@ -447,7 +447,7 @@ func TestRotateLoggerWithSizeLimitRotateRuleWrite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// the following write calls cannot be changed to Write, because of DATA RACE.
|
// the following write calls cannot be changed to Write, because of DATA RACE.
|
||||||
logger.write([]byte(`foo`))
|
logger.write([]byte(`foo`))
|
||||||
rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(dateFormat)
|
rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(time.DateOnly)
|
||||||
logger.write([]byte(`bar`))
|
logger.write([]byte(`bar`))
|
||||||
logger.Close()
|
logger.Close()
|
||||||
logger.write([]byte(`baz`))
|
logger.write([]byte(`baz`))
|
||||||
|
|||||||
21
core/logx/sensitive.go
Normal file
21
core/logx/sensitive.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
// Sensitive is an interface that defines a method for masking sensitive information in logs.
|
||||||
|
// It is typically implemented by types that contain sensitive data,
|
||||||
|
// such as passwords or personal information.
|
||||||
|
// Infov, Errorv, Debugv, and Slowv methods will call this method to mask sensitive data.
|
||||||
|
// The values in LogField will also be masked if they implement the Sensitive interface.
|
||||||
|
type Sensitive interface {
|
||||||
|
// MaskSensitive masks sensitive information in the log.
|
||||||
|
MaskSensitive() any
|
||||||
|
}
|
||||||
|
|
||||||
|
// maskSensitive returns the value returned by MaskSensitive method,
|
||||||
|
// if the value implements Sensitive interface.
|
||||||
|
func maskSensitive(v any) any {
|
||||||
|
if s, ok := v.(Sensitive); ok {
|
||||||
|
return s.MaskSensitive()
|
||||||
|
}
|
||||||
|
|
||||||
|
return v
|
||||||
|
}
|
||||||
50
core/logx/sensitive_test.go
Normal file
50
core/logx/sensitive_test.go
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
const maskedContent = "******"
|
||||||
|
|
||||||
|
type User struct {
|
||||||
|
Name string
|
||||||
|
Pass string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u User) MaskSensitive() any {
|
||||||
|
return User{
|
||||||
|
Name: u.Name,
|
||||||
|
Pass: maskedContent,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type NonSensitiveUser struct {
|
||||||
|
Name string
|
||||||
|
Pass string
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMaskSensitive(t *testing.T) {
|
||||||
|
t.Run("sensitive", func(t *testing.T) {
|
||||||
|
user := User{
|
||||||
|
Name: "kevin",
|
||||||
|
Pass: "123",
|
||||||
|
}
|
||||||
|
|
||||||
|
mu := maskSensitive(user)
|
||||||
|
assert.Equal(t, user.Name, mu.(User).Name)
|
||||||
|
assert.Equal(t, maskedContent, mu.(User).Pass)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("non-sensitive", func(t *testing.T) {
|
||||||
|
user := NonSensitiveUser{
|
||||||
|
Name: "kevin",
|
||||||
|
Pass: "123",
|
||||||
|
}
|
||||||
|
|
||||||
|
mu := maskSensitive(user)
|
||||||
|
assert.Equal(t, user.Name, mu.(NonSensitiveUser).Name)
|
||||||
|
assert.Equal(t, user.Pass, mu.(NonSensitiveUser).Pass)
|
||||||
|
})
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user