mirror of
https://github.com/zeromicro/go-zero.git
synced 2026-05-13 01:40:00 +08:00
Compare commits
788 Commits
tools/goct
...
v1.5.2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
49f73265b9 | ||
|
|
7568674b2b | ||
|
|
3da740b7fc | ||
|
|
ce4eb6ed61 | ||
|
|
9970ff55cd | ||
|
|
d10740f871 | ||
|
|
027193dc99 | ||
|
|
de1e0f2410 | ||
|
|
062073ce58 | ||
|
|
e20b02f311 | ||
|
|
02357d2616 | ||
|
|
489d69f779 | ||
|
|
117611a170 | ||
|
|
0a46ad7ac1 | ||
|
|
bf905eaff3 | ||
|
|
88cb35e3d5 | ||
|
|
078825b4eb | ||
|
|
bbfce6abe9 | ||
|
|
0d11ce03a8 | ||
|
|
757ed19dc5 | ||
|
|
c5fd074aac | ||
|
|
8fa0bd1f1c | ||
|
|
ede19a89ec | ||
|
|
73664b92f0 | ||
|
|
8d9c2fa22a | ||
|
|
22fad4bb9c | ||
|
|
189e9bd9da | ||
|
|
98c9b5928a | ||
|
|
e13fd62d38 | ||
|
|
ffacae89eb | ||
|
|
49135fe25e | ||
|
|
2e6402f4b5 | ||
|
|
07f03ebd0c | ||
|
|
92f2676afc | ||
|
|
1807305e6d | ||
|
|
38a97d4531 | ||
|
|
b9f98ecc4a | ||
|
|
1dc222f4b2 | ||
|
|
a79b8de24d | ||
|
|
5da8a93c75 | ||
|
|
b49fc81618 | ||
|
|
6a692453dc | ||
|
|
8d0cceb80c | ||
|
|
e06abf4f6f | ||
|
|
ee555a85da | ||
|
|
1904af2323 | ||
|
|
95b85336d6 | ||
|
|
ca4ce7bce8 | ||
|
|
9065eb90d9 | ||
|
|
50bc361430 | ||
|
|
455a6c8f97 | ||
|
|
04434646eb | ||
|
|
992a56e90b | ||
|
|
ed4d5e5813 | ||
|
|
fe85e7cb42 | ||
|
|
9c6b516bb8 | ||
|
|
2e9063a9a1 | ||
|
|
c3648be533 | ||
|
|
0ab06f62ca | ||
|
|
6170d7b790 | ||
|
|
18d163c4f7 | ||
|
|
a561048d59 | ||
|
|
7a647ca40c | ||
|
|
3f6f14f976 | ||
|
|
a78d57bebd | ||
|
|
74452eb7b5 | ||
|
|
a9e364a01a | ||
|
|
29c2e20b41 | ||
|
|
42c146bcbd | ||
|
|
b61e364458 | ||
|
|
18a4dcb79f | ||
|
|
60a13f1e53 | ||
|
|
3e093bf34e | ||
|
|
211b9498ef | ||
|
|
cca45be3c5 | ||
|
|
e735915d89 | ||
|
|
f77e2c9cfa | ||
|
|
544aa7c432 | ||
|
|
4cef2b412c | ||
|
|
123c61ad12 | ||
|
|
fbf129d535 | ||
|
|
c8a17a97be | ||
|
|
3a493cd6a6 | ||
|
|
7a0c04bc21 | ||
|
|
3c9fe0b381 | ||
|
|
f8b2dc8c9f | ||
|
|
37cb00d789 | ||
|
|
e3e7bc736b | ||
|
|
fafbee24b8 | ||
|
|
8ec29d29ce | ||
|
|
cb7f3e8a17 | ||
|
|
03391b48ca | ||
|
|
d0dedb0624 | ||
|
|
e136deb3a7 | ||
|
|
a2592a17e9 | ||
|
|
05abf4a2ff | ||
|
|
d40000d4b9 | ||
|
|
4620924105 | ||
|
|
a05fe7bf0a | ||
|
|
dd347e96b0 | ||
|
|
a972f400c6 | ||
|
|
fb7664a764 | ||
|
|
7d5d7d9085 | ||
|
|
9911c11e9c | ||
|
|
0d5a68869d | ||
|
|
d9d79e930d | ||
|
|
d953675085 | ||
|
|
dbc8f9faca | ||
|
|
96998ae570 | ||
|
|
7086fb6dda | ||
|
|
1ad7809fde | ||
|
|
142c46228b | ||
|
|
ba771f8ff1 | ||
|
|
f3cf891d4f | ||
|
|
ba71964b16 | ||
|
|
97ada59175 | ||
|
|
b41ccc5992 | ||
|
|
e23f421976 | ||
|
|
dc5b8dd716 | ||
|
|
a40d8b0684 | ||
|
|
cb39b5836f | ||
|
|
4988f2a4da | ||
|
|
7ca89a85ab | ||
|
|
1ac2384750 | ||
|
|
0e040ec5b4 | ||
|
|
4bc1b78a91 | ||
|
|
148afcf1a7 | ||
|
|
1cd1b17f70 | ||
|
|
59c110688d | ||
|
|
6a25323467 | ||
|
|
6aeb3dfb1c | ||
|
|
0cb61b9a9c | ||
|
|
10d263395c | ||
|
|
d65801f258 | ||
|
|
eaac0ba8de | ||
|
|
b449f2f39e | ||
|
|
c57b0b8f90 | ||
|
|
696406b887 | ||
|
|
cc1779936e | ||
|
|
de4924a274 | ||
|
|
2b08e0510c | ||
|
|
afac48a8ea | ||
|
|
a50b604dc9 | ||
|
|
eda44b6ae8 | ||
|
|
284331b7b1 | ||
|
|
66be213346 | ||
|
|
92c8899f47 | ||
|
|
238c830f17 | ||
|
|
ace125f189 | ||
|
|
a5e5f04bcf | ||
|
|
3bc40d9eaf | ||
|
|
133c40ac1c | ||
|
|
eaaf87cdeb | ||
|
|
6dbcfb5e5d | ||
|
|
16a5f30b0c | ||
|
|
4e6d800877 | ||
|
|
af19addf47 | ||
|
|
ebc425b797 | ||
|
|
b6bedcd522 | ||
|
|
12060c9c0c | ||
|
|
e575bf8317 | ||
|
|
0fe84b225c | ||
|
|
33af0745a0 | ||
|
|
1d0265a77e | ||
|
|
03fe036204 | ||
|
|
03d073a884 | ||
|
|
64ab00e8e3 | ||
|
|
d113e1352c | ||
|
|
32e3116ee3 | ||
|
|
1dd18e2329 | ||
|
|
44b2389f9c | ||
|
|
8bc34c58f4 | ||
|
|
5756627904 | ||
|
|
cddf3875cf | ||
|
|
9be17a2d28 | ||
|
|
b8a86e2135 | ||
|
|
072db116c3 | ||
|
|
cacd5dc91a | ||
|
|
3736dacf1e | ||
|
|
434973c206 | ||
|
|
84f9863b63 | ||
|
|
99a7e6600d | ||
|
|
ea7dab3d26 | ||
|
|
d7d6eccce6 | ||
|
|
0a5a26385d | ||
|
|
62e59837c6 | ||
|
|
981d7dab13 | ||
|
|
d9a732a273 | ||
|
|
b6f1bce695 | ||
|
|
0988c4148f | ||
|
|
165133b91b | ||
|
|
cd8081c567 | ||
|
|
e40a089086 | ||
|
|
d9780fb2a6 | ||
|
|
2c8ae994cf | ||
|
|
67a046b554 | ||
|
|
a019a1f59f | ||
|
|
aed312f3c0 | ||
|
|
58138fd56c | ||
|
|
f2588b238f | ||
|
|
cc5ae722a2 | ||
|
|
1ee61709d9 | ||
|
|
dd117ce9cf | ||
|
|
3c0dc8435e | ||
|
|
fde05ccb28 | ||
|
|
464ed51728 | ||
|
|
413ee919e6 | ||
|
|
35b9568657 | ||
|
|
167d76b46d | ||
|
|
ab9eeff500 | ||
|
|
eab904af64 | ||
|
|
ae87114282 | ||
|
|
7e0ac77139 | ||
|
|
696da4efee | ||
|
|
ceab564429 | ||
|
|
4bd8025c5b | ||
|
|
f3369f8e81 | ||
|
|
c9b05ae07e | ||
|
|
32a59dbc27 | ||
|
|
ba0dff2d61 | ||
|
|
10da5e0424 | ||
|
|
4bed34090f | ||
|
|
2bfecf9354 | ||
|
|
6d129e0264 | ||
|
|
a2df1bb164 | ||
|
|
5f02e623f5 | ||
|
|
963b52fb1b | ||
|
|
02265d0bfe | ||
|
|
2e57e91826 | ||
|
|
82c642d3f4 | ||
|
|
b2571883ca | ||
|
|
00ff50c2cc | ||
|
|
4d7fa08b0b | ||
|
|
367afb544c | ||
|
|
43b8c7f641 | ||
|
|
a2dcb0079a | ||
|
|
f9619328f2 | ||
|
|
bae061a67e | ||
|
|
0b176e17ac | ||
|
|
6340e24c17 | ||
|
|
74e0676617 | ||
|
|
0defb7522f | ||
|
|
0c786ca849 | ||
|
|
26c541b9cb | ||
|
|
ade6f9ee46 | ||
|
|
f4502171ea | ||
|
|
8157e2118d | ||
|
|
e52dace416 | ||
|
|
dc260f196a | ||
|
|
559726112c | ||
|
|
a5fcf24c04 | ||
|
|
fc9b3ffdc1 | ||
|
|
e71c505e94 | ||
|
|
21c49009c0 | ||
|
|
69d355eb4b | ||
|
|
83f88d177f | ||
|
|
641ebf1667 | ||
|
|
cf435bfcc1 | ||
|
|
28f1b15b8e | ||
|
|
42413dc294 | ||
|
|
ec7ac43948 | ||
|
|
deefc1a8eb | ||
|
|
036328f1ea | ||
|
|
85057a623d | ||
|
|
1c544a26be | ||
|
|
20a61ce43e | ||
|
|
dd294e8cd6 | ||
|
|
3e9d0161bc | ||
|
|
cf6c349118 | ||
|
|
c7a0ec428c | ||
|
|
ce1c02f4f9 | ||
|
|
c3756a8f1c | ||
|
|
f4fd735aee | ||
|
|
683d793719 | ||
|
|
affbcb5698 | ||
|
|
f0d1722bbd | ||
|
|
c4f8eca459 | ||
|
|
251c071418 | ||
|
|
6652c4e445 | ||
|
|
f73613dff0 | ||
|
|
7a75dce465 | ||
|
|
801f1adf71 | ||
|
|
f76b976262 | ||
|
|
a49f9060c2 | ||
|
|
ebe28882eb | ||
|
|
fdc57d07d7 | ||
|
|
ef22042f4d | ||
|
|
944193ce25 | ||
|
|
dcfc9b79f1 | ||
|
|
b7052854bb | ||
|
|
4729a16142 | ||
|
|
3604659027 | ||
|
|
9f7f94b673 | ||
|
|
0b3629b636 | ||
|
|
a644ec7edd | ||
|
|
9941055eaa | ||
|
|
10fd9131a1 | ||
|
|
90828a0d4a | ||
|
|
b1c3c21c81 | ||
|
|
97a8b3ade5 | ||
|
|
95a5f64493 | ||
|
|
20e659749a | ||
|
|
94708cc78f | ||
|
|
06fafd2153 | ||
|
|
79de932646 | ||
|
|
b562e940e7 | ||
|
|
69068cdaf0 | ||
|
|
f25788ebea | ||
|
|
1293c4321b | ||
|
|
e3e08a7396 | ||
|
|
4b071f4c33 | ||
|
|
81831b60a9 | ||
|
|
1677a4dceb | ||
|
|
dac3600b53 | ||
|
|
3db64c7d47 | ||
|
|
7eb6aae949 | ||
|
|
07128213d6 | ||
|
|
9504d30049 | ||
|
|
ce73b9a85c | ||
|
|
4d2a146733 | ||
|
|
46e236fef7 | ||
|
|
06e4914e41 | ||
|
|
9cadab2684 | ||
|
|
7fe2492009 | ||
|
|
22bdf0bbd5 | ||
|
|
c92a2d1b77 | ||
|
|
b21162d638 | ||
|
|
7c9ef3ca67 | ||
|
|
bbadbe0175 | ||
|
|
f9beab1095 | ||
|
|
de5c59aad3 | ||
|
|
36d3765c5c | ||
|
|
d326e6f813 | ||
|
|
ea52fe2e0d | ||
|
|
05a5de7c6d | ||
|
|
d4c9fd2aff | ||
|
|
776673d57d | ||
|
|
1b87f5e30d | ||
|
|
bc47959384 | ||
|
|
9f6d926455 | ||
|
|
f7a4e3a19e | ||
|
|
a515a3c735 | ||
|
|
6f6f1ae21f | ||
|
|
10f94ffcc2 | ||
|
|
f068062b13 | ||
|
|
799c118d95 | ||
|
|
74cc6b55e8 | ||
|
|
fc59aec2e7 | ||
|
|
7868667b4f | ||
|
|
773b59106b | ||
|
|
97f8667b71 | ||
|
|
b51339b69b | ||
|
|
38a73d7fbe | ||
|
|
e50689beed | ||
|
|
1bc138bd34 | ||
|
|
4b9066eda6 | ||
|
|
0c66e041b5 | ||
|
|
aa2be0163a | ||
|
|
ada2941e87 | ||
|
|
59c0013cd1 | ||
|
|
05737f6519 | ||
|
|
4f6a900fd4 | ||
|
|
63cfe60f1a | ||
|
|
e7acadb15d | ||
|
|
111e626a73 | ||
|
|
1a6d7b3ef6 | ||
|
|
2e1e4f3574 | ||
|
|
22d0a2120a | ||
|
|
68e15360c2 | ||
|
|
1b344a8851 | ||
|
|
d640544a40 | ||
|
|
e6aa6fc361 | ||
|
|
4c927624b0 | ||
|
|
0ea92b7280 | ||
|
|
2cde970c9e | ||
|
|
5061158bd6 | ||
|
|
9138056c01 | ||
|
|
0b1884b6bd | ||
|
|
1f6688e5c1 | ||
|
|
ae7f1aabdd | ||
|
|
b8664be2bb | ||
|
|
6e16a9647e | ||
|
|
bb0e76be47 | ||
|
|
27a20e1ed3 | ||
|
|
cbbbee0ace | ||
|
|
e9650d547b | ||
|
|
60160f56b8 | ||
|
|
05c2f313c7 | ||
|
|
f2a0f78288 | ||
|
|
3e96994b7b | ||
|
|
66c2a28e66 | ||
|
|
9672071b5d | ||
|
|
9581e8445a | ||
|
|
6ec8bc6655 | ||
|
|
d935c83a54 | ||
|
|
590d784800 | ||
|
|
784276b360 | ||
|
|
da80662b0f | ||
|
|
cfda972d50 | ||
|
|
6078bf1a04 | ||
|
|
ce638d26d9 | ||
|
|
422f401153 | ||
|
|
dfeef5e497 | ||
|
|
8c72136631 | ||
|
|
9d6c8f67f5 | ||
|
|
f70805ee60 | ||
|
|
a1466e1707 | ||
|
|
1b477bbef9 | ||
|
|
813625d995 | ||
|
|
15a2802f12 | ||
|
|
5d00dfb962 | ||
|
|
d9620bb072 | ||
|
|
d978563523 | ||
|
|
fb6d7e2fd2 | ||
|
|
2d60f0c65a | ||
|
|
5d4ae201d0 | ||
|
|
05007c86bb | ||
|
|
93584c6ca6 | ||
|
|
22bb7e95fd | ||
|
|
bebf6322ff | ||
|
|
36678f9023 | ||
|
|
90cdd61efc | ||
|
|
28166dedd6 | ||
|
|
0316b6e10e | ||
|
|
4cb68a034a | ||
|
|
847a396f1c | ||
|
|
c1babdf8b2 | ||
|
|
040c9e0954 | ||
|
|
1c85d39add | ||
|
|
4cd065f4f4 | ||
|
|
b9c97678bc | ||
|
|
5208def65a | ||
|
|
3b96dc1598 | ||
|
|
fa3f1bc19c | ||
|
|
8ed22eafdd | ||
|
|
05dd6bd743 | ||
|
|
9af1a42386 | ||
|
|
f3645e420e | ||
|
|
62abac0b7e | ||
|
|
6357e27418 | ||
|
|
1568c3be0e | ||
|
|
27e773fa1f | ||
|
|
d8e17be33e | ||
|
|
da5770ee2b | ||
|
|
731b3ebf6f | ||
|
|
1e0f94ba86 | ||
|
|
a987512c7b | ||
|
|
c1c7584de1 | ||
|
|
98b9a25cc7 | ||
|
|
a8305def3d | ||
|
|
d20d8324e7 | ||
|
|
c638fce31c | ||
|
|
34294702b0 | ||
|
|
4fad067a0e | ||
|
|
3f3c811e08 | ||
|
|
dbdbb68676 | ||
|
|
83772344b0 | ||
|
|
49367f1713 | ||
|
|
91b8effb24 | ||
|
|
4879d4dfcd | ||
|
|
b18479dd43 | ||
|
|
5cd9229986 | ||
|
|
3d38d36605 | ||
|
|
003adae51f | ||
|
|
5348375b99 | ||
|
|
5d7919a9f5 | ||
|
|
9b334b5428 | ||
|
|
685d14e662 | ||
|
|
edbf1a3b63 | ||
|
|
92145b56dc | ||
|
|
34eb3fc12e | ||
|
|
101304be53 | ||
|
|
f630bc735b | ||
|
|
ca3c687f1c | ||
|
|
1b51d0ce82 | ||
|
|
d9218e1551 | ||
|
|
9c448c64ef | ||
|
|
bc85eaa9b1 | ||
|
|
2a6f801978 | ||
|
|
8d567b5508 | ||
|
|
0dd2768d09 | ||
|
|
4324ddc024 | ||
|
|
557383fbbf | ||
|
|
b206dd28a3 | ||
|
|
453fa309b1 | ||
|
|
4d7dae9cea | ||
|
|
d228b9038d | ||
|
|
13477238a3 | ||
|
|
95a574e9e9 | ||
|
|
453100e0e2 | ||
|
|
d70e73ec66 | ||
|
|
300b124e42 | ||
|
|
3bad043413 | ||
|
|
23f34234d0 | ||
|
|
d71b3c841f | ||
|
|
24787a946b | ||
|
|
6e50c87dca | ||
|
|
e672b3f8e1 | ||
|
|
1c09db6d5d | ||
|
|
96acf1f5a6 | ||
|
|
97a171441d | ||
|
|
725e6056e1 | ||
|
|
1410f7dc20 | ||
|
|
8afe68f3f1 | ||
|
|
74c41e8c5e | ||
|
|
48f7e01158 | ||
|
|
f6f6ee5c8c | ||
|
|
b364c54940 | ||
|
|
e0e3f97c7c | ||
|
|
6a2d6786c6 | ||
|
|
18035bd4d4 | ||
|
|
f3b8fef34f | ||
|
|
6a4885ba64 | ||
|
|
f2cef2b963 | ||
|
|
bfd0869ee2 | ||
|
|
4e26e0407e | ||
|
|
d200ba4a7b | ||
|
|
ce7e2a2a9a | ||
|
|
c92400ead2 | ||
|
|
0b109c1954 | ||
|
|
d42979f705 | ||
|
|
29d81381c1 | ||
|
|
89f6c97097 | ||
|
|
ff6f109065 | ||
|
|
7da77302f4 | ||
|
|
76086fc717 | ||
|
|
555c4ecd1a | ||
|
|
630dfa0887 | ||
|
|
38cd7b7df0 | ||
|
|
9148f8df2a | ||
|
|
13f051d0e5 | ||
|
|
93b3f5030f | ||
|
|
b44e8f5c75 | ||
|
|
b9eb03e9a9 | ||
|
|
86b531406b | ||
|
|
47c49de94e | ||
|
|
50f16e2892 | ||
|
|
018ca82048 | ||
|
|
6976ba7e13 | ||
|
|
9b6e4c440c | ||
|
|
9eea311a4d | ||
|
|
86d70317bf | ||
|
|
6518eb10b3 | ||
|
|
0147d7a9d1 | ||
|
|
1b2b7647d6 | ||
|
|
af6d37c33d | ||
|
|
3da5c5f530 | ||
|
|
1694a92db0 | ||
|
|
c27e00b45c | ||
|
|
ed1c937998 | ||
|
|
db9a1f3e27 | ||
|
|
392a390a3f | ||
|
|
2a900e1795 | ||
|
|
0f5d8c6be3 | ||
|
|
f2caf9237a | ||
|
|
2f0e4e3ebf | ||
|
|
2c6b422f6b | ||
|
|
4d34998338 | ||
|
|
8be47b9c99 | ||
|
|
1d95e95cf8 | ||
|
|
3fa8c5940d | ||
|
|
c44edd7cac | ||
|
|
af05219b70 | ||
|
|
f366e1d936 | ||
|
|
6c94e4652e | ||
|
|
edfaa6d906 | ||
|
|
b6b96d9dad | ||
|
|
87800419f5 | ||
|
|
50a5fb7715 | ||
|
|
aa8f07d064 | ||
|
|
7868bdf660 | ||
|
|
46078e716d | ||
|
|
bb33a20bc8 | ||
|
|
5536473a08 | ||
|
|
323b35ed2d | ||
|
|
30958a91f7 | ||
|
|
b94b68a427 | ||
|
|
07145b210e | ||
|
|
321a20add6 | ||
|
|
65098d4737 | ||
|
|
35425f6164 | ||
|
|
a0060ff81b | ||
|
|
289a325757 | ||
|
|
3fbe0f87b7 | ||
|
|
ea98d210fd | ||
|
|
b9bc1fdcf8 | ||
|
|
6dc570bcd7 | ||
|
|
e21997f0d7 | ||
|
|
92c0b7c3c5 | ||
|
|
6d3ed98744 | ||
|
|
fb519fa547 | ||
|
|
e9501c3fb3 | ||
|
|
fd12659729 | ||
|
|
72ebbb9774 | ||
|
|
f1fdd55b38 | ||
|
|
58787746db | ||
|
|
ca88b69d24 | ||
|
|
6b1e15cab1 | ||
|
|
6f86e5bff8 | ||
|
|
3f492df74e | ||
|
|
5e7b1f6bfe | ||
|
|
e80a64fa67 | ||
|
|
95282edb78 | ||
|
|
7b82eda993 | ||
|
|
5d09cd0e7c | ||
|
|
1e717f9f5c | ||
|
|
c6e2b4a43a | ||
|
|
e567a0c718 | ||
|
|
52f060caae | ||
|
|
f486685e99 | ||
|
|
3ae874d75d | ||
|
|
c58eb13328 | ||
|
|
14ca39bc86 | ||
|
|
3ea8a2d4b6 | ||
|
|
6d2b9fd904 | ||
|
|
5451d96a81 | ||
|
|
69c2bad410 | ||
|
|
5383e29ce6 | ||
|
|
51472004a3 | ||
|
|
caf5b7b1f1 | ||
|
|
bef9aa55e6 | ||
|
|
d0a59b13a6 | ||
|
|
469e62067c | ||
|
|
a36d58aac9 | ||
|
|
aa5118c2aa | ||
|
|
974ba5c9aa | ||
|
|
ec1de4f48d | ||
|
|
bab72b7630 | ||
|
|
ac321fc146 | ||
|
|
ae2c76765c | ||
|
|
f21970c117 | ||
|
|
d0a58d1f2d | ||
|
|
3bbc90ec24 | ||
|
|
cef83efd4e | ||
|
|
cc09ab2aba | ||
|
|
f7a60cdc24 | ||
|
|
c3a49ece8d | ||
|
|
1a38eddffe | ||
|
|
5bcee4cf7c | ||
|
|
5c9fae7e62 | ||
|
|
ec3e02624c | ||
|
|
22b157bb6c | ||
|
|
095b603788 | ||
|
|
bc3c9484d1 | ||
|
|
162e9cef86 | ||
|
|
94ddb3380e | ||
|
|
16c61c6657 | ||
|
|
14bf2f33f7 | ||
|
|
305587aa81 | ||
|
|
2cdff97934 | ||
|
|
bbe1249ecb | ||
|
|
e62870e268 | ||
|
|
92b450eb11 | ||
|
|
d58cf7a12a | ||
|
|
036d803fbb | ||
|
|
c6ab11b14f | ||
|
|
9e20b1bbfe | ||
|
|
fadef0ccd9 | ||
|
|
4382ec0e0d | ||
|
|
db99addc64 | ||
|
|
97bf3856c1 | ||
|
|
ff6c6558dd | ||
|
|
5d4e7c84ee | ||
|
|
cb4fcf2c6c | ||
|
|
ee88abce14 | ||
|
|
ecc3653d44 | ||
|
|
ba8ac974aa | ||
|
|
50de01fb49 | ||
|
|
fabea4c448 | ||
|
|
6d9dfc08f9 | ||
|
|
252fabcc4b | ||
|
|
415c4c91fc | ||
|
|
0cc9d4ff8d | ||
|
|
8bc34defc4 | ||
|
|
8dd764679c | ||
|
|
9fe868ade9 | ||
|
|
4e48286838 | ||
|
|
ab01442d46 | ||
|
|
8694e38384 | ||
|
|
d5e550e79b | ||
|
|
affdab660e | ||
|
|
7d5858e83a | ||
|
|
815a6a6485 | ||
|
|
475d17e17d | ||
|
|
8472415472 | ||
|
|
faad6e27e3 | ||
|
|
58a0b17451 | ||
|
|
89eccfdb97 | ||
|
|
78ea0769fd | ||
|
|
e0fa8d820d | ||
|
|
dfd58c213c | ||
|
|
83cacf51b7 | ||
|
|
6dccfa29fd | ||
|
|
7e0b0ab0b1 | ||
|
|
ac18cc470d | ||
|
|
f4471846ff | ||
|
|
9c2d526a11 | ||
|
|
2b9fc26c38 | ||
|
|
321dc2d410 | ||
|
|
500bd87c85 | ||
|
|
e9620c8c05 | ||
|
|
70e51bb352 | ||
|
|
278cd123c8 | ||
|
|
3febb1a5d0 | ||
|
|
d8054d8def | ||
|
|
ec271db7a0 | ||
|
|
bbac994c8a | ||
|
|
c1d9e6a00b | ||
|
|
0aeb49a6b0 | ||
|
|
fe262766b4 | ||
|
|
7181505c8a | ||
|
|
f060a226bc | ||
|
|
93d524b797 | ||
|
|
5c169f4f49 | ||
|
|
d29dfa12e3 | ||
|
|
194f55e08e | ||
|
|
c0f9892fe3 | ||
|
|
227104d7d7 | ||
|
|
448029aa4b | ||
|
|
17e0afeac0 | ||
|
|
18916b5189 | ||
|
|
c11a09be23 | ||
|
|
56e1ecf2f3 | ||
|
|
f9e6013a6c | ||
|
|
b5d1d8b0d1 | ||
|
|
09e6d94f9e | ||
|
|
2a5717d7fb | ||
|
|
85cf662c6f | ||
|
|
3279a7ef0f | ||
|
|
fec908a19b | ||
|
|
f5ed0cda58 | ||
|
|
cc9d16f505 | ||
|
|
c05d74b44c | ||
|
|
32c88b6352 | ||
|
|
7dabec260f | ||
|
|
4feb88f9b5 | ||
|
|
2776caed0e | ||
|
|
c55694d957 | ||
|
|
209ffb934b | ||
|
|
26a33932cd | ||
|
|
d6a692971f | ||
|
|
4624390e54 | ||
|
|
63b7d292c1 | ||
|
|
365c569d7c | ||
|
|
68a81fea8a | ||
|
|
08a8bd7ef7 | ||
|
|
b939ce75ba | ||
|
|
3b7ca86e4f | ||
|
|
60760b52ab | ||
|
|
96c128c58a | ||
|
|
0c35f39a7d | ||
|
|
6a66dde0a1 | ||
|
|
36b9fcba44 | ||
|
|
bf99dda620 | ||
|
|
511dfcb409 | ||
|
|
900bc96420 | ||
|
|
be277a7376 | ||
|
|
f15a4f9188 | ||
|
|
e31128650e | ||
|
|
168740b64d | ||
|
|
cc4c4928e0 | ||
|
|
fba6543b23 | ||
|
|
877eb6ac56 | ||
|
|
259a5a13e7 | ||
|
|
cf7c7cb392 | ||
|
|
86d01e2e99 | ||
|
|
7a28e19a27 | ||
|
|
900ea63d68 | ||
|
|
87ab86cdd0 | ||
|
|
0697494ffd | ||
|
|
ffd69a2f5e | ||
|
|
66f10bb5e6 | ||
|
|
8131a0e777 | ||
|
|
32a557dff6 | ||
|
|
db949e40f1 | ||
|
|
e0454138e0 | ||
|
|
3b07ed1b97 | ||
|
|
daa98f5a27 | ||
|
|
842656aa90 | ||
|
|
aa29036cb3 | ||
|
|
607bae27fa | ||
|
|
7c63676be4 | ||
|
|
9e113909b3 | ||
|
|
bd105474ca | ||
|
|
a078f5d764 | ||
|
|
b215fa3ee6 | ||
|
|
50b1928502 | ||
|
|
493e3bcf4b |
@@ -1,3 +1,8 @@
|
|||||||
comment: false
|
comment:
|
||||||
|
layout: "flags, files"
|
||||||
|
behavior: once
|
||||||
|
require_changes: true
|
||||||
ignore:
|
ignore:
|
||||||
- "tools"
|
- "tools"
|
||||||
|
- "**/mock"
|
||||||
|
- "**/*_mock.go"
|
||||||
|
|||||||
3
.github/FUNDING.yml
vendored
3
.github/FUNDING.yml
vendored
@@ -9,4 +9,5 @@ community_bridge: # Replace with a single Community Bridge project-name e.g., cl
|
|||||||
liberapay: # Replace with a single Liberapay username
|
liberapay: # Replace with a single Liberapay username
|
||||||
issuehunt: # Replace with a single IssueHunt username
|
issuehunt: # Replace with a single IssueHunt username
|
||||||
otechie: # Replace with a single Otechie username
|
otechie: # Replace with a single Otechie username
|
||||||
custom: https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
|
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
|
||||||
|
ethereum: # 0x5052b7f6B937B02563996D23feb69b38D06Ca150 | kevwan
|
||||||
|
|||||||
15
.github/dependabot.yml
vendored
Normal file
15
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# To get started with Dependabot version updates, you'll need to specify which
|
||||||
|
# package ecosystems to update and where the package manifests are located.
|
||||||
|
# Please see the documentation for all configuration options:
|
||||||
|
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
||||||
|
|
||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: "gomod" # See documentation for possible values
|
||||||
|
directory: "/" # Location of package manifests
|
||||||
|
schedule:
|
||||||
|
interval: "daily"
|
||||||
|
- package-ecosystem: "gomod" # See documentation for possible values
|
||||||
|
directory: "/tools/goctl" # Location of package manifests
|
||||||
|
schedule:
|
||||||
|
interval: "daily"
|
||||||
8
.github/workflows/codeql-analysis.yml
vendored
8
.github/workflows/codeql-analysis.yml
vendored
@@ -35,11 +35,11 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
# Initializes the CodeQL tools for scanning.
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v1
|
uses: github/codeql-action/init@v2
|
||||||
with:
|
with:
|
||||||
languages: ${{ matrix.language }}
|
languages: ${{ matrix.language }}
|
||||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||||
@@ -50,7 +50,7 @@ jobs:
|
|||||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||||
# If this step fails, then you should remove it and run the build manually (see below)
|
# If this step fails, then you should remove it and run the build manually (see below)
|
||||||
- name: Autobuild
|
- name: Autobuild
|
||||||
uses: github/codeql-action/autobuild@v1
|
uses: github/codeql-action/autobuild@v2
|
||||||
|
|
||||||
# ℹ️ Command-line programs to run using the OS shell.
|
# ℹ️ Command-line programs to run using the OS shell.
|
||||||
# 📚 https://git.io/JvXDl
|
# 📚 https://git.io/JvXDl
|
||||||
@@ -64,4 +64,4 @@ jobs:
|
|||||||
# make release
|
# make release
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
- name: Perform CodeQL Analysis
|
||||||
uses: github/codeql-action/analyze@v1
|
uses: github/codeql-action/analyze@v2
|
||||||
|
|||||||
41
.github/workflows/go.yml
vendored
41
.github/workflows/go.yml
vendored
@@ -11,14 +11,16 @@ jobs:
|
|||||||
name: Linux
|
name: Linux
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.x
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: ^1.15
|
|
||||||
id: go
|
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Set up Go 1.x
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
go-version: 1.18
|
||||||
|
check-latest: true
|
||||||
|
cache: true
|
||||||
|
id: go
|
||||||
|
|
||||||
- name: Get dependencies
|
- name: Get dependencies
|
||||||
run: |
|
run: |
|
||||||
@@ -27,26 +29,33 @@ jobs:
|
|||||||
- name: Lint
|
- name: Lint
|
||||||
run: |
|
run: |
|
||||||
go vet -stdmethods=false $(go list ./...)
|
go vet -stdmethods=false $(go list ./...)
|
||||||
go install mvdan.cc/gofumpt@latest
|
|
||||||
test -z "$(gofumpt -s -l -extra .)" || echo "Please run 'gofumpt -l -w -extra .'"
|
go mod tidy
|
||||||
|
if ! test -z "$(git status --porcelain)"; then
|
||||||
|
echo "Please run 'go mod tidy'"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||||
|
|
||||||
- name: Codecov
|
- name: Codecov
|
||||||
uses: codecov/codecov-action@v2
|
uses: codecov/codecov-action@v3
|
||||||
|
|
||||||
test-win:
|
test-win:
|
||||||
name: Windows
|
name: Windows
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go 1.x
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: ^1.15
|
|
||||||
|
|
||||||
- name: Checkout codebase
|
- name: Checkout codebase
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Set up Go 1.x
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
# use 1.18 to guarantee Go 1.18 compatibility
|
||||||
|
go-version: 1.18
|
||||||
|
check-latest: true
|
||||||
|
cache: true
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
2
.github/workflows/issue-translator.yml
vendored
2
.github/workflows/issue-translator.yml
vendored
@@ -9,7 +9,7 @@ jobs:
|
|||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: tomsun28/issues-translate-action@v2.6
|
- uses: usthe/issues-translate-action@v2.7
|
||||||
with:
|
with:
|
||||||
IS_MODIFY_TITLE: true
|
IS_MODIFY_TITLE: true
|
||||||
# not require, default false, . Decide whether to modify the issue title
|
# not require, default false, . Decide whether to modify the issue title
|
||||||
|
|||||||
6
.github/workflows/issues.yml
vendored
6
.github/workflows/issues.yml
vendored
@@ -7,10 +7,10 @@ jobs:
|
|||||||
close-issues:
|
close-issues:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/stale@v3
|
- uses: actions/stale@v6
|
||||||
with:
|
with:
|
||||||
days-before-issue-stale: 30
|
days-before-issue-stale: 365
|
||||||
days-before-issue-close: 14
|
days-before-issue-close: 90
|
||||||
stale-issue-label: "stale"
|
stale-issue-label: "stale"
|
||||||
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
|
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
|
||||||
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
||||||
|
|||||||
28
.github/workflows/release.yaml
vendored
Normal file
28
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- "tools/goctl/*"
|
||||||
|
jobs:
|
||||||
|
releases-matrix:
|
||||||
|
name: Release goctl binary
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
# build and publish in parallel: linux/386, linux/amd64, linux/arm64,
|
||||||
|
# windows/386, windows/amd64, windows/arm64, darwin/amd64, darwin/arm64
|
||||||
|
goos: [ linux, windows, darwin ]
|
||||||
|
goarch: [ "386", amd64, arm64 ]
|
||||||
|
exclude:
|
||||||
|
- goarch: "386"
|
||||||
|
goos: darwin
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- uses: zeromicro/go-zero-release-action@master
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
goos: ${{ matrix.goos }}
|
||||||
|
goarch: ${{ matrix.goarch }}
|
||||||
|
goversion: "https://dl.google.com/go/go1.18.10.linux-amd64.tar.gz"
|
||||||
|
project_path: "tools/goctl"
|
||||||
|
binary_name: "goctl"
|
||||||
|
extra_files: tools/goctl/readme.md tools/goctl/readme-cn.md
|
||||||
2
.github/workflows/reviewdog.yml
vendored
2
.github/workflows/reviewdog.yml
vendored
@@ -5,7 +5,7 @@ jobs:
|
|||||||
name: runner / staticcheck
|
name: runner / staticcheck
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
- uses: reviewdog/action-staticcheck@v1
|
- uses: reviewdog/action-staticcheck@v1
|
||||||
with:
|
with:
|
||||||
github_token: ${{ secrets.github_token }}
|
github_token: ${{ secrets.github_token }}
|
||||||
|
|||||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -11,16 +11,18 @@
|
|||||||
!api
|
!api
|
||||||
|
|
||||||
# ignore
|
# ignore
|
||||||
.idea
|
**/.idea
|
||||||
**/.DS_Store
|
**/.DS_Store
|
||||||
**/logs
|
**/logs
|
||||||
|
|
||||||
# for test purpose
|
# for test purpose
|
||||||
**/adhoc
|
**/adhoc
|
||||||
**/testdata
|
go.work
|
||||||
|
go.work.sum
|
||||||
|
|
||||||
# gitlab ci
|
# gitlab ci
|
||||||
.cache
|
.cache
|
||||||
|
.golangci.yml
|
||||||
|
|
||||||
# vim auto backup file
|
# vim auto backup file
|
||||||
*~
|
*~
|
||||||
|
|||||||
2
LICENSE
2
LICENSE
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2020 xiaoheiban_server_go
|
Copyright (c) 2022 zeromicro
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
28
ROADMAP.md
28
ROADMAP.md
@@ -1,28 +0,0 @@
|
|||||||
# go-zero Roadmap
|
|
||||||
|
|
||||||
This document defines a high level roadmap for go-zero development and upcoming releases.
|
|
||||||
Community and contributor involvement is vital for successfully implementing all desired items for each release.
|
|
||||||
We hope that the items listed below will inspire further engagement from the community to keep go-zero progressing and shipping exciting and valuable features.
|
|
||||||
|
|
||||||
## 2021 Q2
|
|
||||||
- [x] Support service discovery through K8S client api
|
|
||||||
- [x] Log full sql statements for easier sql problem solving
|
|
||||||
|
|
||||||
## 2021 Q3
|
|
||||||
- [x] Support `goctl model pg` to support PostgreSQL code generation
|
|
||||||
- [x] Adapt builtin tracing mechanism to opentracing solutions
|
|
||||||
|
|
||||||
## 2021 Q4
|
|
||||||
- [x] Support `username/password` authentication in ETCD
|
|
||||||
- [x] Support `SSL/TLS` in ETCD
|
|
||||||
- [x] Support `SSL/TLS` in `zRPC`
|
|
||||||
- [x] Support `TLS` in redis connections
|
|
||||||
- [x] Support `goctl bug` to report bugs conveniently
|
|
||||||
|
|
||||||
## 2022
|
|
||||||
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file
|
|
||||||
- [ ] Add `httpx.Client` with governance, like circuit breaker etc.
|
|
||||||
- [ ] Support `goctl doctor` command to report potential issues for given service
|
|
||||||
- [ ] Support `context` in redis related methods for timeout and tracing
|
|
||||||
- [ ] Support `context` in sql related methods for timeout and tracing
|
|
||||||
- [ ] Support `context` in mongodb related methods for timeout and tracing
|
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package bloom
|
package bloom
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
@@ -8,28 +9,29 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/stores/redis"
|
"github.com/zeromicro/go-zero/core/stores/redis"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
// for detailed error rate table, see http://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
|
||||||
// for detailed error rate table, see http://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
|
// maps as k in the error rate table
|
||||||
// maps as k in the error rate table
|
const maps = 14
|
||||||
maps = 14
|
|
||||||
setScript = `
|
var (
|
||||||
|
// ErrTooLargeOffset indicates the offset is too large in bitset.
|
||||||
|
ErrTooLargeOffset = errors.New("too large offset")
|
||||||
|
|
||||||
|
setScript = redis.NewScript(`
|
||||||
for _, offset in ipairs(ARGV) do
|
for _, offset in ipairs(ARGV) do
|
||||||
redis.call("setbit", KEYS[1], offset, 1)
|
redis.call("setbit", KEYS[1], offset, 1)
|
||||||
end
|
end
|
||||||
`
|
`)
|
||||||
testScript = `
|
testScript = redis.NewScript(`
|
||||||
for _, offset in ipairs(ARGV) do
|
for _, offset in ipairs(ARGV) do
|
||||||
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
|
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
|
||||||
return false
|
return false
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
return true
|
return true
|
||||||
`
|
`)
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrTooLargeOffset indicates the offset is too large in bitset.
|
|
||||||
var ErrTooLargeOffset = errors.New("too large offset")
|
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A Filter is a bloom filter.
|
// A Filter is a bloom filter.
|
||||||
Filter struct {
|
Filter struct {
|
||||||
@@ -38,8 +40,8 @@ type (
|
|||||||
}
|
}
|
||||||
|
|
||||||
bitSetProvider interface {
|
bitSetProvider interface {
|
||||||
check([]uint) (bool, error)
|
check(ctx context.Context, offsets []uint) (bool, error)
|
||||||
set([]uint) error
|
set(ctx context.Context, offsets []uint) error
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -58,22 +60,29 @@ func New(store *redis.Redis, key string, bits uint) *Filter {
|
|||||||
|
|
||||||
// Add adds data into f.
|
// Add adds data into f.
|
||||||
func (f *Filter) Add(data []byte) error {
|
func (f *Filter) Add(data []byte) error {
|
||||||
|
return f.AddCtx(context.Background(), data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddCtx adds data into f with context.
|
||||||
|
func (f *Filter) AddCtx(ctx context.Context, data []byte) error {
|
||||||
locations := f.getLocations(data)
|
locations := f.getLocations(data)
|
||||||
return f.bitSet.set(locations)
|
return f.bitSet.set(ctx, locations)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exists checks if data is in f.
|
// Exists checks if data is in f.
|
||||||
func (f *Filter) Exists(data []byte) (bool, error) {
|
func (f *Filter) Exists(data []byte) (bool, error) {
|
||||||
|
return f.ExistsCtx(context.Background(), data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistsCtx checks if data is in f with context.
|
||||||
|
func (f *Filter) ExistsCtx(ctx context.Context, data []byte) (bool, error) {
|
||||||
locations := f.getLocations(data)
|
locations := f.getLocations(data)
|
||||||
isSet, err := f.bitSet.check(locations)
|
isSet, err := f.bitSet.check(ctx, locations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
if !isSet {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, nil
|
return isSet, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Filter) getLocations(data []byte) []uint {
|
func (f *Filter) getLocations(data []byte) []uint {
|
||||||
@@ -114,13 +123,13 @@ func (r *redisBitSet) buildOffsetArgs(offsets []uint) ([]string, error) {
|
|||||||
return args, nil
|
return args, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *redisBitSet) check(offsets []uint) (bool, error) {
|
func (r *redisBitSet) check(ctx context.Context, offsets []uint) (bool, error) {
|
||||||
args, err := r.buildOffsetArgs(offsets)
|
args, err := r.buildOffsetArgs(offsets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := r.store.Eval(testScript, []string{r.key}, args)
|
resp, err := r.store.ScriptRunCtx(ctx, testScript, []string{r.key}, args)
|
||||||
if err == redis.Nil {
|
if err == redis.Nil {
|
||||||
return false, nil
|
return false, nil
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
@@ -135,22 +144,24 @@ func (r *redisBitSet) check(offsets []uint) (bool, error) {
|
|||||||
return exists == 1, nil
|
return exists == 1, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// del only use for testing.
|
||||||
func (r *redisBitSet) del() error {
|
func (r *redisBitSet) del() error {
|
||||||
_, err := r.store.Del(r.key)
|
_, err := r.store.Del(r.key)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// expire only use for testing.
|
||||||
func (r *redisBitSet) expire(seconds int) error {
|
func (r *redisBitSet) expire(seconds int) error {
|
||||||
return r.store.Expire(r.key, seconds)
|
return r.store.Expire(r.key, seconds)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *redisBitSet) set(offsets []uint) error {
|
func (r *redisBitSet) set(ctx context.Context, offsets []uint) error {
|
||||||
args, err := r.buildOffsetArgs(offsets)
|
args, err := r.buildOffsetArgs(offsets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = r.store.Eval(setScript, []string{r.key}, args)
|
_, err = r.store.ScriptRunCtx(ctx, setScript, []string{r.key}, args)
|
||||||
if err == redis.Nil {
|
if err == redis.Nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,30 +1,31 @@
|
|||||||
package bloom
|
package bloom
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/stores/redis/redistest"
|
"github.com/zeromicro/go-zero/core/stores/redis/redistest"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRedisBitSet_New_Set_Test(t *testing.T) {
|
func TestRedisBitSet_New_Set_Test(t *testing.T) {
|
||||||
store, clean, err := redistest.CreateRedis()
|
store := redistest.CreateRedis(t)
|
||||||
assert.Nil(t, err)
|
ctx := context.Background()
|
||||||
defer clean()
|
|
||||||
|
|
||||||
bitSet := newRedisBitSet(store, "test_key", 1024)
|
bitSet := newRedisBitSet(store, "test_key", 1024)
|
||||||
isSetBefore, err := bitSet.check([]uint{0})
|
isSetBefore, err := bitSet.check(ctx, []uint{0})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
if isSetBefore {
|
if isSetBefore {
|
||||||
t.Fatal("Bit should not be set")
|
t.Fatal("Bit should not be set")
|
||||||
}
|
}
|
||||||
err = bitSet.set([]uint{512})
|
err = bitSet.set(ctx, []uint{512})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
isSetAfter, err := bitSet.check([]uint{512})
|
isSetAfter, err := bitSet.check(ctx, []uint{512})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -42,9 +43,7 @@ func TestRedisBitSet_New_Set_Test(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRedisBitSet_Add(t *testing.T) {
|
func TestRedisBitSet_Add(t *testing.T) {
|
||||||
store, clean, err := redistest.CreateRedis()
|
store := redistest.CreateRedis(t)
|
||||||
assert.Nil(t, err)
|
|
||||||
defer clean()
|
|
||||||
|
|
||||||
filter := New(store, "test_key", 64)
|
filter := New(store, "test_key", 64)
|
||||||
assert.Nil(t, filter.Add([]byte("hello")))
|
assert.Nil(t, filter.Add([]byte("hello")))
|
||||||
@@ -53,3 +52,51 @@ func TestRedisBitSet_Add(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFilter_Exists(t *testing.T) {
|
||||||
|
store, clean := redistest.CreateRedisWithClean(t)
|
||||||
|
|
||||||
|
rbs := New(store, "test", 64)
|
||||||
|
_, err := rbs.Exists([]byte{0, 1, 2})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
clean()
|
||||||
|
rbs = New(store, "test", 64)
|
||||||
|
_, err = rbs.Exists([]byte{0, 1, 2})
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRedisBitSet_check(t *testing.T) {
|
||||||
|
store, clean := redistest.CreateRedisWithClean(t)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
rbs := newRedisBitSet(store, "test", 0)
|
||||||
|
assert.Error(t, rbs.set(ctx, []uint{0, 1, 2}))
|
||||||
|
_, err := rbs.check(ctx, []uint{0, 1, 2})
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
rbs = newRedisBitSet(store, "test", 64)
|
||||||
|
_, err = rbs.check(ctx, []uint{0, 1, 2})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
clean()
|
||||||
|
rbs = newRedisBitSet(store, "test", 64)
|
||||||
|
_, err = rbs.check(ctx, []uint{0, 1, 2})
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRedisBitSet_set(t *testing.T) {
|
||||||
|
logx.Disable()
|
||||||
|
store, clean := redistest.CreateRedisWithClean(t)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
rbs := newRedisBitSet(store, "test", 0)
|
||||||
|
assert.Error(t, rbs.set(ctx, []uint{0, 1, 2}))
|
||||||
|
|
||||||
|
rbs = newRedisBitSet(store, "test", 64)
|
||||||
|
assert.NoError(t, rbs.set(ctx, []uint{0, 1, 2}))
|
||||||
|
|
||||||
|
clean()
|
||||||
|
rbs = newRedisBitSet(store, "test", 64)
|
||||||
|
assert.Error(t, rbs.set(ctx, []uint{0, 1, 2}))
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,12 +5,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/mathx"
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
"github.com/zeromicro/go-zero/core/proc"
|
"github.com/zeromicro/go-zero/core/proc"
|
||||||
"github.com/zeromicro/go-zero/core/stat"
|
"github.com/zeromicro/go-zero/core/stat"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -171,7 +171,7 @@ func (lt loggedThrottle) allow() (Promise, error) {
|
|||||||
func (lt loggedThrottle) doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error {
|
func (lt loggedThrottle) doReq(req func() error, fallback func(err error) error, acceptable Acceptable) error {
|
||||||
return lt.logError(lt.internalThrottle.doReq(req, fallback, func(err error) bool {
|
return lt.logError(lt.internalThrottle.doReq(req, fallback, func(err error) bool {
|
||||||
accept := acceptable(err)
|
accept := acceptable(err)
|
||||||
if !accept {
|
if !accept && err != nil {
|
||||||
lt.errWin.add(err.Error())
|
lt.errWin.add(err.Error())
|
||||||
}
|
}
|
||||||
return accept
|
return accept
|
||||||
@@ -198,7 +198,7 @@ type errorWindow struct {
|
|||||||
|
|
||||||
func (ew *errorWindow) add(reason string) {
|
func (ew *errorWindow) add(reason string) {
|
||||||
ew.lock.Lock()
|
ew.lock.Lock()
|
||||||
ew.reasons[ew.index] = fmt.Sprintf("%s %s", timex.Time().Format(timeFormat), reason)
|
ew.reasons[ew.index] = fmt.Sprintf("%s %s", time.Now().Format(timeFormat), reason)
|
||||||
ew.index = (ew.index + 1) % numHistoryReasons
|
ew.index = (ew.index + 1) % numHistoryReasons
|
||||||
ew.count = mathx.MinInt(ew.count+1, numHistoryReasons)
|
ew.count = mathx.MinInt(ew.count+1, numHistoryReasons)
|
||||||
ew.lock.Unlock()
|
ew.lock.Unlock()
|
||||||
|
|||||||
@@ -20,16 +20,16 @@ func (b noOpBreaker) Do(req func() error) error {
|
|||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b noOpBreaker) DoWithAcceptable(req func() error, acceptable Acceptable) error {
|
func (b noOpBreaker) DoWithAcceptable(req func() error, _ Acceptable) error {
|
||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b noOpBreaker) DoWithFallback(req func() error, fallback func(err error) error) error {
|
func (b noOpBreaker) DoWithFallback(req func() error, _ func(err error) error) error {
|
||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b noOpBreaker) DoWithFallbackAcceptable(req func() error, fallback func(err error) error,
|
func (b noOpBreaker) DoWithFallbackAcceptable(req func() error, _ func(err error) error,
|
||||||
acceptable Acceptable) error {
|
_ Acceptable) error {
|
||||||
return req()
|
return req()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -38,5 +38,5 @@ type nopPromise struct{}
|
|||||||
func (p nopPromise) Accept() {
|
func (p nopPromise) Accept() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p nopPromise) Reject(reason string) {
|
func (p nopPromise) Reject(_ string) {
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -32,9 +32,11 @@ func NewECBEncrypter(b cipher.Block) cipher.BlockMode {
|
|||||||
return (*ecbEncrypter)(newECB(b))
|
return (*ecbEncrypter)(newECB(b))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BlockSize returns the mode's block size.
|
||||||
func (x *ecbEncrypter) BlockSize() int { return x.blockSize }
|
func (x *ecbEncrypter) BlockSize() int { return x.blockSize }
|
||||||
|
|
||||||
// why we don't return error is because cipher.BlockMode doesn't allow this
|
// CryptBlocks encrypts a number of blocks. The length of src must be a multiple of
|
||||||
|
// the block size. Dst and src must overlap entirely or not at all.
|
||||||
func (x *ecbEncrypter) CryptBlocks(dst, src []byte) {
|
func (x *ecbEncrypter) CryptBlocks(dst, src []byte) {
|
||||||
if len(src)%x.blockSize != 0 {
|
if len(src)%x.blockSize != 0 {
|
||||||
logx.Error("crypto/cipher: input not full blocks")
|
logx.Error("crypto/cipher: input not full blocks")
|
||||||
@@ -59,11 +61,13 @@ func NewECBDecrypter(b cipher.Block) cipher.BlockMode {
|
|||||||
return (*ecbDecrypter)(newECB(b))
|
return (*ecbDecrypter)(newECB(b))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BlockSize returns the mode's block size.
|
||||||
func (x *ecbDecrypter) BlockSize() int {
|
func (x *ecbDecrypter) BlockSize() int {
|
||||||
return x.blockSize
|
return x.blockSize
|
||||||
}
|
}
|
||||||
|
|
||||||
// why we don't return error is because cipher.BlockMode doesn't allow this
|
// CryptBlocks decrypts a number of blocks. The length of src must be a multiple of
|
||||||
|
// the block size. Dst and src must overlap entirely or not at all.
|
||||||
func (x *ecbDecrypter) CryptBlocks(dst, src []byte) {
|
func (x *ecbDecrypter) CryptBlocks(dst, src []byte) {
|
||||||
if len(src)%x.blockSize != 0 {
|
if len(src)%x.blockSize != 0 {
|
||||||
logx.Error("crypto/cipher: input not full blocks")
|
logx.Error("crypto/cipher: input not full blocks")
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package codec
|
package codec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/aes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -10,7 +11,8 @@ import (
|
|||||||
func TestAesEcb(t *testing.T) {
|
func TestAesEcb(t *testing.T) {
|
||||||
var (
|
var (
|
||||||
key = []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
|
key = []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
|
||||||
val = []byte("hello")
|
val = []byte("helloworld")
|
||||||
|
valLong = []byte("helloworldlong..")
|
||||||
badKey1 = []byte("aaaaaaaaa")
|
badKey1 = []byte("aaaaaaaaa")
|
||||||
// more than 32 chars
|
// more than 32 chars
|
||||||
badKey2 = []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
|
badKey2 = []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
|
||||||
@@ -31,6 +33,39 @@ func TestAesEcb(t *testing.T) {
|
|||||||
src, err := EcbDecrypt(key, dst)
|
src, err := EcbDecrypt(key, dst)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, val, src)
|
assert.Equal(t, val, src)
|
||||||
|
block, err := aes.NewCipher(key)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
encrypter := NewECBEncrypter(block)
|
||||||
|
assert.Equal(t, 16, encrypter.BlockSize())
|
||||||
|
decrypter := NewECBDecrypter(block)
|
||||||
|
assert.Equal(t, 16, decrypter.BlockSize())
|
||||||
|
|
||||||
|
dst = make([]byte, 8)
|
||||||
|
encrypter.CryptBlocks(dst, val)
|
||||||
|
for _, b := range dst {
|
||||||
|
assert.Equal(t, byte(0), b)
|
||||||
|
}
|
||||||
|
|
||||||
|
dst = make([]byte, 8)
|
||||||
|
encrypter.CryptBlocks(dst, valLong)
|
||||||
|
for _, b := range dst {
|
||||||
|
assert.Equal(t, byte(0), b)
|
||||||
|
}
|
||||||
|
|
||||||
|
dst = make([]byte, 8)
|
||||||
|
decrypter.CryptBlocks(dst, val)
|
||||||
|
for _, b := range dst {
|
||||||
|
assert.Equal(t, byte(0), b)
|
||||||
|
}
|
||||||
|
|
||||||
|
dst = make([]byte, 8)
|
||||||
|
decrypter.CryptBlocks(dst, valLong)
|
||||||
|
for _, b := range dst {
|
||||||
|
assert.Equal(t, byte(0), b)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = EcbEncryptBase64("cTR0N3dDKkYtSmFOZFJnVWpYbjJyNXU4eC9BP0QK", "aGVsbG93b3JsZGxvbmcuLgo=")
|
||||||
|
assert.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAesEcbBase64(t *testing.T) {
|
func TestAesEcbBase64(t *testing.T) {
|
||||||
|
|||||||
@@ -80,3 +80,17 @@ func TestKeyBytes(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.True(t, len(key.Bytes()) > 0)
|
assert.True(t, len(key.Bytes()) > 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDHOnErrors(t *testing.T) {
|
||||||
|
key, err := GenerateKey()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.NotEmpty(t, key.Bytes())
|
||||||
|
_, err = ComputeKey(key.PubKey, key.PriKey)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = ComputeKey(nil, key.PriKey)
|
||||||
|
assert.Error(t, err)
|
||||||
|
_, err = ComputeKey(key.PubKey, nil)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
assert.NotNil(t, NewPublicKey([]byte("")))
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ package codec
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -21,3 +23,45 @@ func TestGzip(t *testing.T) {
|
|||||||
assert.True(t, len(bs) < buf.Len())
|
assert.True(t, len(bs) < buf.Len())
|
||||||
assert.Equal(t, buf.Bytes(), actual)
|
assert.Equal(t, buf.Bytes(), actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGunzip(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input []byte
|
||||||
|
expected []byte
|
||||||
|
expectedErr error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "valid input",
|
||||||
|
input: func() []byte {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
gz := gzip.NewWriter(&buf)
|
||||||
|
gz.Write([]byte("hello"))
|
||||||
|
gz.Close()
|
||||||
|
return buf.Bytes()
|
||||||
|
}(),
|
||||||
|
expected: []byte("hello"),
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid input",
|
||||||
|
input: []byte("invalid input"),
|
||||||
|
expected: nil,
|
||||||
|
expectedErr: gzip.ErrHeader,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
result, err := Gunzip(test.input)
|
||||||
|
|
||||||
|
if !bytes.Equal(result, test.expected) {
|
||||||
|
t.Errorf("unexpected result: %v", result)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !errors.Is(err, test.expectedErr) {
|
||||||
|
t.Errorf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"errors"
|
"errors"
|
||||||
"io/ioutil"
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -48,7 +48,7 @@ type (
|
|||||||
|
|
||||||
// NewRsaDecrypter returns a RsaDecrypter with the given file.
|
// NewRsaDecrypter returns a RsaDecrypter with the given file.
|
||||||
func NewRsaDecrypter(file string) (RsaDecrypter, error) {
|
func NewRsaDecrypter(file string) (RsaDecrypter, error) {
|
||||||
content, err := ioutil.ReadFile(file)
|
content, err := os.ReadFile(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package codec
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -41,6 +42,7 @@ func TestCryption(t *testing.T) {
|
|||||||
|
|
||||||
file, err := fs.TempFilenameWithText(priKey)
|
file, err := fs.TempFilenameWithText(priKey)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(file)
|
||||||
dec, err := NewRsaDecrypter(file)
|
dec, err := NewRsaDecrypter(file)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
actual, err := dec.Decrypt(ret)
|
actual, err := dec.Decrypt(ret)
|
||||||
|
|||||||
@@ -26,11 +26,11 @@ type (
|
|||||||
// CacheOption defines the method to customize a Cache.
|
// CacheOption defines the method to customize a Cache.
|
||||||
CacheOption func(cache *Cache)
|
CacheOption func(cache *Cache)
|
||||||
|
|
||||||
// A Cache object is a in-memory cache.
|
// A Cache object is an in-memory cache.
|
||||||
Cache struct {
|
Cache struct {
|
||||||
name string
|
name string
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
data map[string]interface{}
|
data map[string]any
|
||||||
expire time.Duration
|
expire time.Duration
|
||||||
timingWheel *TimingWheel
|
timingWheel *TimingWheel
|
||||||
lruCache lru
|
lruCache lru
|
||||||
@@ -43,7 +43,7 @@ type (
|
|||||||
// NewCache returns a Cache with given expire.
|
// NewCache returns a Cache with given expire.
|
||||||
func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
|
func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
|
||||||
cache := &Cache{
|
cache := &Cache{
|
||||||
data: make(map[string]interface{}),
|
data: make(map[string]any),
|
||||||
expire: expire,
|
expire: expire,
|
||||||
lruCache: emptyLruCache,
|
lruCache: emptyLruCache,
|
||||||
barrier: syncx.NewSingleFlight(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
@@ -59,7 +59,7 @@ func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
|
|||||||
}
|
}
|
||||||
cache.stats = newCacheStat(cache.name, cache.size)
|
cache.stats = newCacheStat(cache.name, cache.size)
|
||||||
|
|
||||||
timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v interface{}) {
|
timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v any) {
|
||||||
key, ok := k.(string)
|
key, ok := k.(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
@@ -85,7 +85,7 @@ func (c *Cache) Del(key string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the item with the given key from c.
|
// Get returns the item with the given key from c.
|
||||||
func (c *Cache) Get(key string) (interface{}, bool) {
|
func (c *Cache) Get(key string) (any, bool) {
|
||||||
value, ok := c.doGet(key)
|
value, ok := c.doGet(key)
|
||||||
if ok {
|
if ok {
|
||||||
c.stats.IncrementHit()
|
c.stats.IncrementHit()
|
||||||
@@ -97,14 +97,19 @@ func (c *Cache) Get(key string) (interface{}, bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Set sets value into c with key.
|
// Set sets value into c with key.
|
||||||
func (c *Cache) Set(key string, value interface{}) {
|
func (c *Cache) Set(key string, value any) {
|
||||||
|
c.SetWithExpire(key, value, c.expire)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWithExpire sets value into c with key and expire with the given value.
|
||||||
|
func (c *Cache) SetWithExpire(key string, value any, expire time.Duration) {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
_, ok := c.data[key]
|
_, ok := c.data[key]
|
||||||
c.data[key] = value
|
c.data[key] = value
|
||||||
c.lruCache.add(key)
|
c.lruCache.add(key)
|
||||||
c.lock.Unlock()
|
c.lock.Unlock()
|
||||||
|
|
||||||
expiry := c.unstableExpiry.AroundDuration(c.expire)
|
expiry := c.unstableExpiry.AroundDuration(expire)
|
||||||
if ok {
|
if ok {
|
||||||
c.timingWheel.MoveTimer(key, expiry)
|
c.timingWheel.MoveTimer(key, expiry)
|
||||||
} else {
|
} else {
|
||||||
@@ -115,14 +120,14 @@ func (c *Cache) Set(key string, value interface{}) {
|
|||||||
// Take returns the item with the given key.
|
// Take returns the item with the given key.
|
||||||
// If the item is in c, return it directly.
|
// If the item is in c, return it directly.
|
||||||
// If not, use fetch method to get the item, set into c and return it.
|
// If not, use fetch method to get the item, set into c and return it.
|
||||||
func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
|
func (c *Cache) Take(key string, fetch func() (any, error)) (any, error) {
|
||||||
if val, ok := c.doGet(key); ok {
|
if val, ok := c.doGet(key); ok {
|
||||||
c.stats.IncrementHit()
|
c.stats.IncrementHit()
|
||||||
return val, nil
|
return val, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var fresh bool
|
var fresh bool
|
||||||
val, err := c.barrier.Do(key, func() (interface{}, error) {
|
val, err := c.barrier.Do(key, func() (any, error) {
|
||||||
// because O(1) on map search in memory, and fetch is an IO query
|
// because O(1) on map search in memory, and fetch is an IO query
|
||||||
// so we do double check, cache might be taken by another call
|
// so we do double check, cache might be taken by another call
|
||||||
if val, ok := c.doGet(key); ok {
|
if val, ok := c.doGet(key); ok {
|
||||||
@@ -152,7 +157,7 @@ func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}
|
|||||||
return val, nil
|
return val, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) doGet(key string) (interface{}, bool) {
|
func (c *Cache) doGet(key string) (any, bool) {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
defer c.lock.Unlock()
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ func TestCacheSet(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
|
|
||||||
cache.Set("first", "first element")
|
cache.Set("first", "first element")
|
||||||
cache.Set("second", "second element")
|
cache.SetWithExpire("second", "second element", time.Second*3)
|
||||||
|
|
||||||
value, ok := cache.Get("first")
|
value, ok := cache.Get("first")
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
@@ -52,7 +52,7 @@ func TestCacheTake(t *testing.T) {
|
|||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
cache.Take("first", func() (interface{}, error) {
|
cache.Take("first", func() (any, error) {
|
||||||
atomic.AddInt32(&count, 1)
|
atomic.AddInt32(&count, 1)
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
return "first element", nil
|
return "first element", nil
|
||||||
@@ -76,7 +76,7 @@ func TestCacheTakeExists(t *testing.T) {
|
|||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
cache.Set("first", "first element")
|
cache.Set("first", "first element")
|
||||||
cache.Take("first", func() (interface{}, error) {
|
cache.Take("first", func() (any, error) {
|
||||||
atomic.AddInt32(&count, 1)
|
atomic.AddInt32(&count, 1)
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
return "first element", nil
|
return "first element", nil
|
||||||
@@ -99,7 +99,7 @@ func TestCacheTakeError(t *testing.T) {
|
|||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
_, err := cache.Take("first", func() (interface{}, error) {
|
_, err := cache.Take("first", func() (any, error) {
|
||||||
atomic.AddInt32(&count, 1)
|
atomic.AddInt32(&count, 1)
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
return "", errDummy
|
return "", errDummy
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import "sync"
|
|||||||
// A Queue is a FIFO queue.
|
// A Queue is a FIFO queue.
|
||||||
type Queue struct {
|
type Queue struct {
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
elements []interface{}
|
elements []any
|
||||||
size int
|
size int
|
||||||
head int
|
head int
|
||||||
tail int
|
tail int
|
||||||
@@ -15,7 +15,7 @@ type Queue struct {
|
|||||||
// NewQueue returns a Queue object.
|
// NewQueue returns a Queue object.
|
||||||
func NewQueue(size int) *Queue {
|
func NewQueue(size int) *Queue {
|
||||||
return &Queue{
|
return &Queue{
|
||||||
elements: make([]interface{}, size),
|
elements: make([]any, size),
|
||||||
size: size,
|
size: size,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -30,12 +30,12 @@ func (q *Queue) Empty() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Put puts element into q at the last position.
|
// Put puts element into q at the last position.
|
||||||
func (q *Queue) Put(element interface{}) {
|
func (q *Queue) Put(element any) {
|
||||||
q.lock.Lock()
|
q.lock.Lock()
|
||||||
defer q.lock.Unlock()
|
defer q.lock.Unlock()
|
||||||
|
|
||||||
if q.head == q.tail && q.count > 0 {
|
if q.head == q.tail && q.count > 0 {
|
||||||
nodes := make([]interface{}, len(q.elements)+q.size)
|
nodes := make([]any, len(q.elements)+q.size)
|
||||||
copy(nodes, q.elements[q.head:])
|
copy(nodes, q.elements[q.head:])
|
||||||
copy(nodes[len(q.elements)-q.head:], q.elements[:q.head])
|
copy(nodes[len(q.elements)-q.head:], q.elements[:q.head])
|
||||||
q.head = 0
|
q.head = 0
|
||||||
@@ -49,7 +49,7 @@ func (q *Queue) Put(element interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Take takes the first element out of q if not empty.
|
// Take takes the first element out of q if not empty.
|
||||||
func (q *Queue) Take() (interface{}, bool) {
|
func (q *Queue) Take() (any, bool) {
|
||||||
q.lock.Lock()
|
q.lock.Lock()
|
||||||
defer q.lock.Unlock()
|
defer q.lock.Unlock()
|
||||||
|
|
||||||
|
|||||||
@@ -61,3 +61,41 @@ func TestPutMore(t *testing.T) {
|
|||||||
assert.Equal(t, string(element), string(body.([]byte)))
|
assert.Equal(t, string(element), string(body.([]byte)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPutMoreWithHeaderNotZero(t *testing.T) {
|
||||||
|
elements := [][]byte{
|
||||||
|
[]byte("hello"),
|
||||||
|
[]byte("world"),
|
||||||
|
[]byte("again"),
|
||||||
|
}
|
||||||
|
queue := NewQueue(4)
|
||||||
|
for i := range elements {
|
||||||
|
queue.Put(elements[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
// take 1
|
||||||
|
body, ok := queue.Take()
|
||||||
|
assert.True(t, ok)
|
||||||
|
element, ok := body.([]byte)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, element, []byte("hello"))
|
||||||
|
|
||||||
|
// put more
|
||||||
|
queue.Put([]byte("b4"))
|
||||||
|
queue.Put([]byte("b5")) // will store in elements[0]
|
||||||
|
queue.Put([]byte("b6")) // cause expansion
|
||||||
|
|
||||||
|
results := [][]byte{
|
||||||
|
[]byte("world"),
|
||||||
|
[]byte("again"),
|
||||||
|
[]byte("b4"),
|
||||||
|
[]byte("b5"),
|
||||||
|
[]byte("b6"),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, element := range results {
|
||||||
|
body, ok := queue.Take()
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, string(element), string(body.([]byte)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,9 +4,9 @@ import "sync"
|
|||||||
|
|
||||||
// A Ring can be used as fixed size ring.
|
// A Ring can be used as fixed size ring.
|
||||||
type Ring struct {
|
type Ring struct {
|
||||||
elements []interface{}
|
elements []any
|
||||||
index int
|
index int
|
||||||
lock sync.Mutex
|
lock sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRing returns a Ring object with the given size n.
|
// NewRing returns a Ring object with the given size n.
|
||||||
@@ -16,12 +16,12 @@ func NewRing(n int) *Ring {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &Ring{
|
return &Ring{
|
||||||
elements: make([]interface{}, n),
|
elements: make([]any, n),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add adds v into r.
|
// Add adds v into r.
|
||||||
func (r *Ring) Add(v interface{}) {
|
func (r *Ring) Add(v any) {
|
||||||
r.lock.Lock()
|
r.lock.Lock()
|
||||||
defer r.lock.Unlock()
|
defer r.lock.Unlock()
|
||||||
|
|
||||||
@@ -30,9 +30,9 @@ func (r *Ring) Add(v interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Take takes all items from r.
|
// Take takes all items from r.
|
||||||
func (r *Ring) Take() []interface{} {
|
func (r *Ring) Take() []any {
|
||||||
r.lock.Lock()
|
r.lock.RLock()
|
||||||
defer r.lock.Unlock()
|
defer r.lock.RUnlock()
|
||||||
|
|
||||||
var size int
|
var size int
|
||||||
var start int
|
var start int
|
||||||
@@ -43,7 +43,7 @@ func (r *Ring) Take() []interface{} {
|
|||||||
size = r.index
|
size = r.index
|
||||||
}
|
}
|
||||||
|
|
||||||
elements := make([]interface{}, size)
|
elements := make([]any, size)
|
||||||
for i := 0; i < size; i++ {
|
for i := 0; i < size; i++ {
|
||||||
elements[i] = r.elements[(start+i)%len(r.elements)]
|
elements[i] = r.elements[(start+i)%len(r.elements)]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ func TestRingLess(t *testing.T) {
|
|||||||
ring.Add(i)
|
ring.Add(i)
|
||||||
}
|
}
|
||||||
elements := ring.Take()
|
elements := ring.Take()
|
||||||
assert.ElementsMatch(t, []interface{}{0, 1, 2}, elements)
|
assert.ElementsMatch(t, []any{0, 1, 2}, elements)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRingMore(t *testing.T) {
|
func TestRingMore(t *testing.T) {
|
||||||
@@ -28,7 +28,7 @@ func TestRingMore(t *testing.T) {
|
|||||||
ring.Add(i)
|
ring.Add(i)
|
||||||
}
|
}
|
||||||
elements := ring.Take()
|
elements := ring.Take()
|
||||||
assert.ElementsMatch(t, []interface{}{6, 7, 8, 9, 10}, elements)
|
assert.ElementsMatch(t, []any{6, 7, 8, 9, 10}, elements)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRingAdd(t *testing.T) {
|
func TestRingAdd(t *testing.T) {
|
||||||
|
|||||||
@@ -14,20 +14,20 @@ type SafeMap struct {
|
|||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
deletionOld int
|
deletionOld int
|
||||||
deletionNew int
|
deletionNew int
|
||||||
dirtyOld map[interface{}]interface{}
|
dirtyOld map[any]any
|
||||||
dirtyNew map[interface{}]interface{}
|
dirtyNew map[any]any
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSafeMap returns a SafeMap.
|
// NewSafeMap returns a SafeMap.
|
||||||
func NewSafeMap() *SafeMap {
|
func NewSafeMap() *SafeMap {
|
||||||
return &SafeMap{
|
return &SafeMap{
|
||||||
dirtyOld: make(map[interface{}]interface{}),
|
dirtyOld: make(map[any]any),
|
||||||
dirtyNew: make(map[interface{}]interface{}),
|
dirtyNew: make(map[any]any),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Del deletes the value with the given key from m.
|
// Del deletes the value with the given key from m.
|
||||||
func (m *SafeMap) Del(key interface{}) {
|
func (m *SafeMap) Del(key any) {
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
if _, ok := m.dirtyOld[key]; ok {
|
if _, ok := m.dirtyOld[key]; ok {
|
||||||
delete(m.dirtyOld, key)
|
delete(m.dirtyOld, key)
|
||||||
@@ -42,21 +42,21 @@ func (m *SafeMap) Del(key interface{}) {
|
|||||||
}
|
}
|
||||||
m.dirtyOld = m.dirtyNew
|
m.dirtyOld = m.dirtyNew
|
||||||
m.deletionOld = m.deletionNew
|
m.deletionOld = m.deletionNew
|
||||||
m.dirtyNew = make(map[interface{}]interface{})
|
m.dirtyNew = make(map[any]any)
|
||||||
m.deletionNew = 0
|
m.deletionNew = 0
|
||||||
}
|
}
|
||||||
if m.deletionNew >= maxDeletion && len(m.dirtyNew) < copyThreshold {
|
if m.deletionNew >= maxDeletion && len(m.dirtyNew) < copyThreshold {
|
||||||
for k, v := range m.dirtyNew {
|
for k, v := range m.dirtyNew {
|
||||||
m.dirtyOld[k] = v
|
m.dirtyOld[k] = v
|
||||||
}
|
}
|
||||||
m.dirtyNew = make(map[interface{}]interface{})
|
m.dirtyNew = make(map[any]any)
|
||||||
m.deletionNew = 0
|
m.deletionNew = 0
|
||||||
}
|
}
|
||||||
m.lock.Unlock()
|
m.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get gets the value with the given key from m.
|
// Get gets the value with the given key from m.
|
||||||
func (m *SafeMap) Get(key interface{}) (interface{}, bool) {
|
func (m *SafeMap) Get(key any) (any, bool) {
|
||||||
m.lock.RLock()
|
m.lock.RLock()
|
||||||
defer m.lock.RUnlock()
|
defer m.lock.RUnlock()
|
||||||
|
|
||||||
@@ -68,8 +68,26 @@ func (m *SafeMap) Get(key interface{}) (interface{}, bool) {
|
|||||||
return val, ok
|
return val, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Range calls f sequentially for each key and value present in the map.
|
||||||
|
// If f returns false, range stops the iteration.
|
||||||
|
func (m *SafeMap) Range(f func(key, val any) bool) {
|
||||||
|
m.lock.RLock()
|
||||||
|
defer m.lock.RUnlock()
|
||||||
|
|
||||||
|
for k, v := range m.dirtyOld {
|
||||||
|
if !f(k, v) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range m.dirtyNew {
|
||||||
|
if !f(k, v) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Set sets the value into m with the given key.
|
// Set sets the value into m with the given key.
|
||||||
func (m *SafeMap) Set(key, value interface{}) {
|
func (m *SafeMap) Set(key, value any) {
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
if m.deletionOld <= maxDeletion {
|
if m.deletionOld <= maxDeletion {
|
||||||
if _, ok := m.dirtyNew[key]; ok {
|
if _, ok := m.dirtyNew[key]; ok {
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package collection
|
package collection
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -107,3 +108,42 @@ func testSafeMapWithParameters(t *testing.T, size, exception int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSafeMap_Range(t *testing.T) {
|
||||||
|
const (
|
||||||
|
size = 100000
|
||||||
|
exception1 = 5
|
||||||
|
exception2 = 500
|
||||||
|
)
|
||||||
|
|
||||||
|
m := NewSafeMap()
|
||||||
|
newMap := NewSafeMap()
|
||||||
|
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
m.Set(i, i)
|
||||||
|
}
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
if i%exception1 == 0 {
|
||||||
|
m.Del(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := size; i < size<<1; i++ {
|
||||||
|
m.Set(i, i)
|
||||||
|
}
|
||||||
|
for i := size; i < size<<1; i++ {
|
||||||
|
if i%exception2 != 0 {
|
||||||
|
m.Del(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var count int32
|
||||||
|
m.Range(func(k, v any) bool {
|
||||||
|
atomic.AddInt32(&count, 1)
|
||||||
|
newMap.Set(k, v)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
assert.Equal(t, int(atomic.LoadInt32(&count)), m.Size())
|
||||||
|
assert.Equal(t, m.dirtyNew, newMap.dirtyNew)
|
||||||
|
assert.Equal(t, m.dirtyOld, newMap.dirtyOld)
|
||||||
|
}
|
||||||
|
|||||||
@@ -17,28 +17,28 @@ const (
|
|||||||
|
|
||||||
// Set is not thread-safe, for concurrent use, make sure to use it with synchronization.
|
// Set is not thread-safe, for concurrent use, make sure to use it with synchronization.
|
||||||
type Set struct {
|
type Set struct {
|
||||||
data map[interface{}]lang.PlaceholderType
|
data map[any]lang.PlaceholderType
|
||||||
tp int
|
tp int
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSet returns a managed Set, can only put the values with the same type.
|
// NewSet returns a managed Set, can only put the values with the same type.
|
||||||
func NewSet() *Set {
|
func NewSet() *Set {
|
||||||
return &Set{
|
return &Set{
|
||||||
data: make(map[interface{}]lang.PlaceholderType),
|
data: make(map[any]lang.PlaceholderType),
|
||||||
tp: untyped,
|
tp: untyped,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewUnmanagedSet returns a unmanaged Set, which can put values with different types.
|
// NewUnmanagedSet returns an unmanaged Set, which can put values with different types.
|
||||||
func NewUnmanagedSet() *Set {
|
func NewUnmanagedSet() *Set {
|
||||||
return &Set{
|
return &Set{
|
||||||
data: make(map[interface{}]lang.PlaceholderType),
|
data: make(map[any]lang.PlaceholderType),
|
||||||
tp: unmanaged,
|
tp: unmanaged,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add adds i into s.
|
// Add adds i into s.
|
||||||
func (s *Set) Add(i ...interface{}) {
|
func (s *Set) Add(i ...any) {
|
||||||
for _, each := range i {
|
for _, each := range i {
|
||||||
s.add(each)
|
s.add(each)
|
||||||
}
|
}
|
||||||
@@ -80,7 +80,7 @@ func (s *Set) AddStr(ss ...string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Contains checks if i is in s.
|
// Contains checks if i is in s.
|
||||||
func (s *Set) Contains(i interface{}) bool {
|
func (s *Set) Contains(i any) bool {
|
||||||
if len(s.data) == 0 {
|
if len(s.data) == 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -91,8 +91,8 @@ func (s *Set) Contains(i interface{}) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Keys returns the keys in s.
|
// Keys returns the keys in s.
|
||||||
func (s *Set) Keys() []interface{} {
|
func (s *Set) Keys() []any {
|
||||||
var keys []interface{}
|
var keys []any
|
||||||
|
|
||||||
for key := range s.data {
|
for key := range s.data {
|
||||||
keys = append(keys, key)
|
keys = append(keys, key)
|
||||||
@@ -167,7 +167,7 @@ func (s *Set) KeysStr() []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove removes i from s.
|
// Remove removes i from s.
|
||||||
func (s *Set) Remove(i interface{}) {
|
func (s *Set) Remove(i any) {
|
||||||
s.validate(i)
|
s.validate(i)
|
||||||
delete(s.data, i)
|
delete(s.data, i)
|
||||||
}
|
}
|
||||||
@@ -177,7 +177,7 @@ func (s *Set) Count() int {
|
|||||||
return len(s.data)
|
return len(s.data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Set) add(i interface{}) {
|
func (s *Set) add(i any) {
|
||||||
switch s.tp {
|
switch s.tp {
|
||||||
case unmanaged:
|
case unmanaged:
|
||||||
// do nothing
|
// do nothing
|
||||||
@@ -189,7 +189,7 @@ func (s *Set) add(i interface{}) {
|
|||||||
s.data[i] = lang.Placeholder
|
s.data[i] = lang.Placeholder
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Set) setType(i interface{}) {
|
func (s *Set) setType(i any) {
|
||||||
// s.tp can only be untyped here
|
// s.tp can only be untyped here
|
||||||
switch i.(type) {
|
switch i.(type) {
|
||||||
case int:
|
case int:
|
||||||
@@ -205,7 +205,7 @@ func (s *Set) setType(i interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Set) validate(i interface{}) {
|
func (s *Set) validate(i any) {
|
||||||
if s.tp == unmanaged {
|
if s.tp == unmanaged {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -213,23 +213,23 @@ func (s *Set) validate(i interface{}) {
|
|||||||
switch i.(type) {
|
switch i.(type) {
|
||||||
case int:
|
case int:
|
||||||
if s.tp != intType {
|
if s.tp != intType {
|
||||||
logx.Errorf("Error: element is int, but set contains elements with type %d", s.tp)
|
logx.Errorf("element is int, but set contains elements with type %d", s.tp)
|
||||||
}
|
}
|
||||||
case int64:
|
case int64:
|
||||||
if s.tp != int64Type {
|
if s.tp != int64Type {
|
||||||
logx.Errorf("Error: element is int64, but set contains elements with type %d", s.tp)
|
logx.Errorf("element is int64, but set contains elements with type %d", s.tp)
|
||||||
}
|
}
|
||||||
case uint:
|
case uint:
|
||||||
if s.tp != uintType {
|
if s.tp != uintType {
|
||||||
logx.Errorf("Error: element is uint, but set contains elements with type %d", s.tp)
|
logx.Errorf("element is uint, but set contains elements with type %d", s.tp)
|
||||||
}
|
}
|
||||||
case uint64:
|
case uint64:
|
||||||
if s.tp != uint64Type {
|
if s.tp != uint64Type {
|
||||||
logx.Errorf("Error: element is uint64, but set contains elements with type %d", s.tp)
|
logx.Errorf("element is uint64, but set contains elements with type %d", s.tp)
|
||||||
}
|
}
|
||||||
case string:
|
case string:
|
||||||
if s.tp != stringType {
|
if s.tp != stringType {
|
||||||
logx.Errorf("Error: element is string, but set contains elements with type %d", s.tp)
|
logx.Errorf("element is string, but set contains elements with type %d", s.tp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRawSet(b *testing.B) {
|
func BenchmarkRawSet(b *testing.B) {
|
||||||
m := make(map[interface{}]struct{})
|
m := make(map[any]struct{})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
m[i] = struct{}{}
|
m[i] = struct{}{}
|
||||||
_ = m[i]
|
_ = m[i]
|
||||||
@@ -39,7 +39,7 @@ func BenchmarkSet(b *testing.B) {
|
|||||||
func TestAdd(t *testing.T) {
|
func TestAdd(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewUnmanagedSet()
|
set := NewUnmanagedSet()
|
||||||
values := []interface{}{1, 2, 3}
|
values := []any{1, 2, 3}
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.Add(values...)
|
set.Add(values...)
|
||||||
@@ -135,7 +135,7 @@ func TestContainsUnmanagedWithoutElements(t *testing.T) {
|
|||||||
func TestRemove(t *testing.T) {
|
func TestRemove(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet()
|
||||||
set.Add([]interface{}{1, 2, 3}...)
|
set.Add([]any{1, 2, 3}...)
|
||||||
|
|
||||||
// when
|
// when
|
||||||
set.Remove(2)
|
set.Remove(2)
|
||||||
@@ -147,7 +147,7 @@ func TestRemove(t *testing.T) {
|
|||||||
func TestCount(t *testing.T) {
|
func TestCount(t *testing.T) {
|
||||||
// given
|
// given
|
||||||
set := NewSet()
|
set := NewSet()
|
||||||
set.Add([]interface{}{1, 2, 3}...)
|
set.Add([]any{1, 2, 3}...)
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert.Equal(t, set.Count(), 3)
|
assert.Equal(t, set.Count(), 3)
|
||||||
@@ -198,5 +198,5 @@ func TestSetType(t *testing.T) {
|
|||||||
set.add(1)
|
set.add(1)
|
||||||
set.add("2")
|
set.add("2")
|
||||||
vals := set.Keys()
|
vals := set.Keys()
|
||||||
assert.ElementsMatch(t, []interface{}{1, "2"}, vals)
|
assert.ElementsMatch(t, []any{1, "2"}, vals)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package collection
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"container/list"
|
"container/list"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -12,9 +13,14 @@ import (
|
|||||||
|
|
||||||
const drainWorkers = 8
|
const drainWorkers = 8
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrClosed = errors.New("TimingWheel is closed already")
|
||||||
|
ErrArgument = errors.New("incorrect task argument")
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// Execute defines the method to execute the task.
|
// Execute defines the method to execute the task.
|
||||||
Execute func(key, value interface{})
|
Execute func(key, value any)
|
||||||
|
|
||||||
// A TimingWheel is a timing wheel object to schedule tasks.
|
// A TimingWheel is a timing wheel object to schedule tasks.
|
||||||
TimingWheel struct {
|
TimingWheel struct {
|
||||||
@@ -27,14 +33,14 @@ type (
|
|||||||
execute Execute
|
execute Execute
|
||||||
setChannel chan timingEntry
|
setChannel chan timingEntry
|
||||||
moveChannel chan baseEntry
|
moveChannel chan baseEntry
|
||||||
removeChannel chan interface{}
|
removeChannel chan any
|
||||||
drainChannel chan func(key, value interface{})
|
drainChannel chan func(key, value any)
|
||||||
stopChannel chan lang.PlaceholderType
|
stopChannel chan lang.PlaceholderType
|
||||||
}
|
}
|
||||||
|
|
||||||
timingEntry struct {
|
timingEntry struct {
|
||||||
baseEntry
|
baseEntry
|
||||||
value interface{}
|
value any
|
||||||
circle int
|
circle int
|
||||||
diff int
|
diff int
|
||||||
removed bool
|
removed bool
|
||||||
@@ -42,7 +48,7 @@ type (
|
|||||||
|
|
||||||
baseEntry struct {
|
baseEntry struct {
|
||||||
delay time.Duration
|
delay time.Duration
|
||||||
key interface{}
|
key any
|
||||||
}
|
}
|
||||||
|
|
||||||
positionEntry struct {
|
positionEntry struct {
|
||||||
@@ -51,22 +57,24 @@ type (
|
|||||||
}
|
}
|
||||||
|
|
||||||
timingTask struct {
|
timingTask struct {
|
||||||
key interface{}
|
key any
|
||||||
value interface{}
|
value any
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewTimingWheel returns a TimingWheel.
|
// NewTimingWheel returns a TimingWheel.
|
||||||
func NewTimingWheel(interval time.Duration, numSlots int, execute Execute) (*TimingWheel, error) {
|
func NewTimingWheel(interval time.Duration, numSlots int, execute Execute) (*TimingWheel, error) {
|
||||||
if interval <= 0 || numSlots <= 0 || execute == nil {
|
if interval <= 0 || numSlots <= 0 || execute == nil {
|
||||||
return nil, fmt.Errorf("interval: %v, slots: %d, execute: %p", interval, numSlots, execute)
|
return nil, fmt.Errorf("interval: %v, slots: %d, execute: %p",
|
||||||
|
interval, numSlots, execute)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newTimingWheelWithClock(interval, numSlots, execute, timex.NewTicker(interval))
|
return NewTimingWheelWithTicker(interval, numSlots, execute, timex.NewTicker(interval))
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execute, ticker timex.Ticker) (
|
// NewTimingWheelWithTicker returns a TimingWheel with the given ticker.
|
||||||
*TimingWheel, error) {
|
func NewTimingWheelWithTicker(interval time.Duration, numSlots int, execute Execute,
|
||||||
|
ticker timex.Ticker) (*TimingWheel, error) {
|
||||||
tw := &TimingWheel{
|
tw := &TimingWheel{
|
||||||
interval: interval,
|
interval: interval,
|
||||||
ticker: ticker,
|
ticker: ticker,
|
||||||
@@ -77,8 +85,8 @@ func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execu
|
|||||||
numSlots: numSlots,
|
numSlots: numSlots,
|
||||||
setChannel: make(chan timingEntry),
|
setChannel: make(chan timingEntry),
|
||||||
moveChannel: make(chan baseEntry),
|
moveChannel: make(chan baseEntry),
|
||||||
removeChannel: make(chan interface{}),
|
removeChannel: make(chan any),
|
||||||
drainChannel: make(chan func(key, value interface{})),
|
drainChannel: make(chan func(key, value any)),
|
||||||
stopChannel: make(chan lang.PlaceholderType),
|
stopChannel: make(chan lang.PlaceholderType),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -89,52 +97,72 @@ func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execu
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Drain drains all items and executes them.
|
// Drain drains all items and executes them.
|
||||||
func (tw *TimingWheel) Drain(fn func(key, value interface{})) {
|
func (tw *TimingWheel) Drain(fn func(key, value any)) error {
|
||||||
tw.drainChannel <- fn
|
select {
|
||||||
|
case tw.drainChannel <- fn:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MoveTimer moves the task with the given key to the given delay.
|
// MoveTimer moves the task with the given key to the given delay.
|
||||||
func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) {
|
func (tw *TimingWheel) MoveTimer(key any, delay time.Duration) error {
|
||||||
if delay <= 0 || key == nil {
|
if delay <= 0 || key == nil {
|
||||||
return
|
return ErrArgument
|
||||||
}
|
}
|
||||||
|
|
||||||
tw.moveChannel <- baseEntry{
|
select {
|
||||||
|
case tw.moveChannel <- baseEntry{
|
||||||
delay: delay,
|
delay: delay,
|
||||||
key: key,
|
key: key,
|
||||||
|
}:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveTimer removes the task with the given key.
|
// RemoveTimer removes the task with the given key.
|
||||||
func (tw *TimingWheel) RemoveTimer(key interface{}) {
|
func (tw *TimingWheel) RemoveTimer(key any) error {
|
||||||
if key == nil {
|
if key == nil {
|
||||||
return
|
return ErrArgument
|
||||||
}
|
}
|
||||||
|
|
||||||
tw.removeChannel <- key
|
select {
|
||||||
|
case tw.removeChannel <- key:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetTimer sets the task value with the given key to the delay.
|
// SetTimer sets the task value with the given key to the delay.
|
||||||
func (tw *TimingWheel) SetTimer(key, value interface{}, delay time.Duration) {
|
func (tw *TimingWheel) SetTimer(key, value any, delay time.Duration) error {
|
||||||
if delay <= 0 || key == nil {
|
if delay <= 0 || key == nil {
|
||||||
return
|
return ErrArgument
|
||||||
}
|
}
|
||||||
|
|
||||||
tw.setChannel <- timingEntry{
|
select {
|
||||||
|
case tw.setChannel <- timingEntry{
|
||||||
baseEntry: baseEntry{
|
baseEntry: baseEntry{
|
||||||
delay: delay,
|
delay: delay,
|
||||||
key: key,
|
key: key,
|
||||||
},
|
},
|
||||||
value: value,
|
value: value,
|
||||||
|
}:
|
||||||
|
return nil
|
||||||
|
case <-tw.stopChannel:
|
||||||
|
return ErrClosed
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop stops tw.
|
// Stop stops tw. No more actions after stopping a TimingWheel.
|
||||||
func (tw *TimingWheel) Stop() {
|
func (tw *TimingWheel) Stop() {
|
||||||
close(tw.stopChannel)
|
close(tw.stopChannel)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tw *TimingWheel) drainAll(fn func(key, value interface{})) {
|
func (tw *TimingWheel) drainAll(fn func(key, value any)) {
|
||||||
runner := threading.NewTaskRunner(drainWorkers)
|
runner := threading.NewTaskRunner(drainWorkers)
|
||||||
for _, slot := range tw.slots {
|
for _, slot := range tw.slots {
|
||||||
for e := slot.Front(); e != nil; {
|
for e := slot.Front(); e != nil; {
|
||||||
@@ -204,7 +232,7 @@ func (tw *TimingWheel) onTick() {
|
|||||||
tw.scanAndRunTasks(l)
|
tw.scanAndRunTasks(l)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tw *TimingWheel) removeTask(key interface{}) {
|
func (tw *TimingWheel) removeTask(key any) {
|
||||||
val, ok := tw.timers.Get(key)
|
val, ok := tw.timers.Get(key)
|
||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -20,15 +20,14 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestNewTimingWheel(t *testing.T) {
|
func TestNewTimingWheel(t *testing.T) {
|
||||||
_, err := NewTimingWheel(0, 10, func(key, value interface{}) {})
|
_, err := NewTimingWheel(0, 10, func(key, value any) {})
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimingWheel_Drain(t *testing.T) {
|
func TestTimingWheel_Drain(t *testing.T) {
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
|
||||||
}, ticker)
|
}, ticker)
|
||||||
defer tw.Stop()
|
|
||||||
tw.SetTimer("first", 3, testStep*4)
|
tw.SetTimer("first", 3, testStep*4)
|
||||||
tw.SetTimer("second", 5, testStep*7)
|
tw.SetTimer("second", 5, testStep*7)
|
||||||
tw.SetTimer("third", 7, testStep*7)
|
tw.SetTimer("third", 7, testStep*7)
|
||||||
@@ -37,7 +36,7 @@ func TestTimingWheel_Drain(t *testing.T) {
|
|||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(3)
|
wg.Add(3)
|
||||||
tw.Drain(func(key, value interface{}) {
|
tw.Drain(func(key, value any) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
keys = append(keys, key.(string))
|
keys = append(keys, key.(string))
|
||||||
@@ -51,17 +50,19 @@ func TestTimingWheel_Drain(t *testing.T) {
|
|||||||
assert.EqualValues(t, []string{"first", "second", "third"}, keys)
|
assert.EqualValues(t, []string{"first", "second", "third"}, keys)
|
||||||
assert.EqualValues(t, []int{3, 5, 7}, vals)
|
assert.EqualValues(t, []int{3, 5, 7}, vals)
|
||||||
var count int
|
var count int
|
||||||
tw.Drain(func(key, value interface{}) {
|
tw.Drain(func(key, value any) {
|
||||||
count++
|
count++
|
||||||
})
|
})
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
assert.Equal(t, 0, count)
|
assert.Equal(t, 0, count)
|
||||||
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.Drain(func(key, value any) {}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimingWheel_SetTimerSoon(t *testing.T) {
|
func TestTimingWheel_SetTimerSoon(t *testing.T) {
|
||||||
run := syncx.NewAtomicBool()
|
run := syncx.NewAtomicBool()
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
|
||||||
assert.True(t, run.CompareAndSwap(false, true))
|
assert.True(t, run.CompareAndSwap(false, true))
|
||||||
assert.Equal(t, "any", k)
|
assert.Equal(t, "any", k)
|
||||||
assert.Equal(t, 3, v.(int))
|
assert.Equal(t, 3, v.(int))
|
||||||
@@ -77,7 +78,7 @@ func TestTimingWheel_SetTimerSoon(t *testing.T) {
|
|||||||
func TestTimingWheel_SetTimerTwice(t *testing.T) {
|
func TestTimingWheel_SetTimerTwice(t *testing.T) {
|
||||||
run := syncx.NewAtomicBool()
|
run := syncx.NewAtomicBool()
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
|
||||||
assert.True(t, run.CompareAndSwap(false, true))
|
assert.True(t, run.CompareAndSwap(false, true))
|
||||||
assert.Equal(t, "any", k)
|
assert.Equal(t, "any", k)
|
||||||
assert.Equal(t, 5, v.(int))
|
assert.Equal(t, 5, v.(int))
|
||||||
@@ -95,23 +96,29 @@ func TestTimingWheel_SetTimerTwice(t *testing.T) {
|
|||||||
|
|
||||||
func TestTimingWheel_SetTimerWrongDelay(t *testing.T) {
|
func TestTimingWheel_SetTimerWrongDelay(t *testing.T) {
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {}, ticker)
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {}, ticker)
|
||||||
defer tw.Stop()
|
defer tw.Stop()
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
tw.SetTimer("any", 3, -testStep)
|
tw.SetTimer("any", 3, -testStep)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTimingWheel_SetTimerAfterClose(t *testing.T) {
|
||||||
|
ticker := timex.NewFakeTicker()
|
||||||
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {}, ticker)
|
||||||
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.SetTimer("any", 3, testStep))
|
||||||
|
}
|
||||||
|
|
||||||
func TestTimingWheel_MoveTimer(t *testing.T) {
|
func TestTimingWheel_MoveTimer(t *testing.T) {
|
||||||
run := syncx.NewAtomicBool()
|
run := syncx.NewAtomicBool()
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 3, func(k, v interface{}) {
|
tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v any) {
|
||||||
assert.True(t, run.CompareAndSwap(false, true))
|
assert.True(t, run.CompareAndSwap(false, true))
|
||||||
assert.Equal(t, "any", k)
|
assert.Equal(t, "any", k)
|
||||||
assert.Equal(t, 3, v.(int))
|
assert.Equal(t, 3, v.(int))
|
||||||
ticker.Done()
|
ticker.Done()
|
||||||
}, ticker)
|
}, ticker)
|
||||||
defer tw.Stop()
|
|
||||||
tw.SetTimer("any", 3, testStep*4)
|
tw.SetTimer("any", 3, testStep*4)
|
||||||
tw.MoveTimer("any", testStep*7)
|
tw.MoveTimer("any", testStep*7)
|
||||||
tw.MoveTimer("any", -testStep)
|
tw.MoveTimer("any", -testStep)
|
||||||
@@ -125,12 +132,14 @@ func TestTimingWheel_MoveTimer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
assert.Nil(t, ticker.Wait(waitTime))
|
assert.Nil(t, ticker.Wait(waitTime))
|
||||||
assert.True(t, run.True())
|
assert.True(t, run.True())
|
||||||
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.MoveTimer("any", time.Millisecond))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimingWheel_MoveTimerSoon(t *testing.T) {
|
func TestTimingWheel_MoveTimerSoon(t *testing.T) {
|
||||||
run := syncx.NewAtomicBool()
|
run := syncx.NewAtomicBool()
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 3, func(k, v interface{}) {
|
tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v any) {
|
||||||
assert.True(t, run.CompareAndSwap(false, true))
|
assert.True(t, run.CompareAndSwap(false, true))
|
||||||
assert.Equal(t, "any", k)
|
assert.Equal(t, "any", k)
|
||||||
assert.Equal(t, 3, v.(int))
|
assert.Equal(t, 3, v.(int))
|
||||||
@@ -146,7 +155,7 @@ func TestTimingWheel_MoveTimerSoon(t *testing.T) {
|
|||||||
func TestTimingWheel_MoveTimerEarlier(t *testing.T) {
|
func TestTimingWheel_MoveTimerEarlier(t *testing.T) {
|
||||||
run := syncx.NewAtomicBool()
|
run := syncx.NewAtomicBool()
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
|
||||||
assert.True(t, run.CompareAndSwap(false, true))
|
assert.True(t, run.CompareAndSwap(false, true))
|
||||||
assert.Equal(t, "any", k)
|
assert.Equal(t, "any", k)
|
||||||
assert.Equal(t, 3, v.(int))
|
assert.Equal(t, 3, v.(int))
|
||||||
@@ -164,7 +173,7 @@ func TestTimingWheel_MoveTimerEarlier(t *testing.T) {
|
|||||||
|
|
||||||
func TestTimingWheel_RemoveTimer(t *testing.T) {
|
func TestTimingWheel_RemoveTimer(t *testing.T) {
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {}, ticker)
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {}, ticker)
|
||||||
tw.SetTimer("any", 3, testStep)
|
tw.SetTimer("any", 3, testStep)
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
tw.RemoveTimer("any")
|
tw.RemoveTimer("any")
|
||||||
@@ -175,6 +184,7 @@ func TestTimingWheel_RemoveTimer(t *testing.T) {
|
|||||||
ticker.Tick()
|
ticker.Tick()
|
||||||
}
|
}
|
||||||
tw.Stop()
|
tw.Stop()
|
||||||
|
assert.Equal(t, ErrClosed, tw.RemoveTimer("any"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimingWheel_SetTimer(t *testing.T) {
|
func TestTimingWheel_SetTimer(t *testing.T) {
|
||||||
@@ -226,7 +236,7 @@ func TestTimingWheel_SetTimer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
var actual int32
|
var actual int32
|
||||||
done := make(chan lang.PlaceholderType)
|
done := make(chan lang.PlaceholderType)
|
||||||
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
|
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
|
||||||
assert.Equal(t, 1, key.(int))
|
assert.Equal(t, 1, key.(int))
|
||||||
assert.Equal(t, 2, value.(int))
|
assert.Equal(t, 2, value.(int))
|
||||||
actual = atomic.LoadInt32(&count)
|
actual = atomic.LoadInt32(&count)
|
||||||
@@ -307,7 +317,7 @@ func TestTimingWheel_SetAndMoveThenStart(t *testing.T) {
|
|||||||
}
|
}
|
||||||
var actual int32
|
var actual int32
|
||||||
done := make(chan lang.PlaceholderType)
|
done := make(chan lang.PlaceholderType)
|
||||||
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
|
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
|
||||||
actual = atomic.LoadInt32(&count)
|
actual = atomic.LoadInt32(&count)
|
||||||
close(done)
|
close(done)
|
||||||
}, ticker)
|
}, ticker)
|
||||||
@@ -395,7 +405,7 @@ func TestTimingWheel_SetAndMoveTwice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
var actual int32
|
var actual int32
|
||||||
done := make(chan lang.PlaceholderType)
|
done := make(chan lang.PlaceholderType)
|
||||||
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
|
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
|
||||||
actual = atomic.LoadInt32(&count)
|
actual = atomic.LoadInt32(&count)
|
||||||
close(done)
|
close(done)
|
||||||
}, ticker)
|
}, ticker)
|
||||||
@@ -476,7 +486,7 @@ func TestTimingWheel_ElapsedAndSet(t *testing.T) {
|
|||||||
}
|
}
|
||||||
var actual int32
|
var actual int32
|
||||||
done := make(chan lang.PlaceholderType)
|
done := make(chan lang.PlaceholderType)
|
||||||
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
|
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
|
||||||
actual = atomic.LoadInt32(&count)
|
actual = atomic.LoadInt32(&count)
|
||||||
close(done)
|
close(done)
|
||||||
}, ticker)
|
}, ticker)
|
||||||
@@ -567,7 +577,7 @@ func TestTimingWheel_ElapsedAndSetThenMove(t *testing.T) {
|
|||||||
}
|
}
|
||||||
var actual int32
|
var actual int32
|
||||||
done := make(chan lang.PlaceholderType)
|
done := make(chan lang.PlaceholderType)
|
||||||
tw, err := newTimingWheelWithClock(testStep, test.slots, func(key, value interface{}) {
|
tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
|
||||||
actual = atomic.LoadInt32(&count)
|
actual = atomic.LoadInt32(&count)
|
||||||
close(done)
|
close(done)
|
||||||
}, ticker)
|
}, ticker)
|
||||||
@@ -602,7 +612,7 @@ func TestMoveAndRemoveTask(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
var keys []int
|
var keys []int
|
||||||
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
|
||||||
assert.Equal(t, "any", k)
|
assert.Equal(t, "any", k)
|
||||||
assert.Equal(t, 3, v.(int))
|
assert.Equal(t, 3, v.(int))
|
||||||
keys = append(keys, v.(int))
|
keys = append(keys, v.(int))
|
||||||
@@ -622,7 +632,7 @@ func TestMoveAndRemoveTask(t *testing.T) {
|
|||||||
func BenchmarkTimingWheel(b *testing.B) {
|
func BenchmarkTimingWheel(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
|
||||||
tw, _ := NewTimingWheel(time.Second, 100, func(k, v interface{}) {})
|
tw, _ := NewTimingWheel(time.Second, 100, func(k, v any) {})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
tw.SetTimer(i, i, time.Second)
|
tw.SetTimer(i, i, time.Second)
|
||||||
tw.SetTimer(b.N+i, b.N+i, time.Second)
|
tw.SetTimer(b.N+i, b.N+i, time.Second)
|
||||||
|
|||||||
73
core/color/color.go
Normal file
73
core/color/color.go
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package color
|
||||||
|
|
||||||
|
import "github.com/fatih/color"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// NoColor is no color for both foreground and background.
|
||||||
|
NoColor Color = iota
|
||||||
|
// FgBlack is the foreground color black.
|
||||||
|
FgBlack
|
||||||
|
// FgRed is the foreground color red.
|
||||||
|
FgRed
|
||||||
|
// FgGreen is the foreground color green.
|
||||||
|
FgGreen
|
||||||
|
// FgYellow is the foreground color yellow.
|
||||||
|
FgYellow
|
||||||
|
// FgBlue is the foreground color blue.
|
||||||
|
FgBlue
|
||||||
|
// FgMagenta is the foreground color magenta.
|
||||||
|
FgMagenta
|
||||||
|
// FgCyan is the foreground color cyan.
|
||||||
|
FgCyan
|
||||||
|
// FgWhite is the foreground color white.
|
||||||
|
FgWhite
|
||||||
|
|
||||||
|
// BgBlack is the background color black.
|
||||||
|
BgBlack
|
||||||
|
// BgRed is the background color red.
|
||||||
|
BgRed
|
||||||
|
// BgGreen is the background color green.
|
||||||
|
BgGreen
|
||||||
|
// BgYellow is the background color yellow.
|
||||||
|
BgYellow
|
||||||
|
// BgBlue is the background color blue.
|
||||||
|
BgBlue
|
||||||
|
// BgMagenta is the background color magenta.
|
||||||
|
BgMagenta
|
||||||
|
// BgCyan is the background color cyan.
|
||||||
|
BgCyan
|
||||||
|
// BgWhite is the background color white.
|
||||||
|
BgWhite
|
||||||
|
)
|
||||||
|
|
||||||
|
var colors = map[Color][]color.Attribute{
|
||||||
|
FgBlack: {color.FgBlack, color.Bold},
|
||||||
|
FgRed: {color.FgRed, color.Bold},
|
||||||
|
FgGreen: {color.FgGreen, color.Bold},
|
||||||
|
FgYellow: {color.FgYellow, color.Bold},
|
||||||
|
FgBlue: {color.FgBlue, color.Bold},
|
||||||
|
FgMagenta: {color.FgMagenta, color.Bold},
|
||||||
|
FgCyan: {color.FgCyan, color.Bold},
|
||||||
|
FgWhite: {color.FgWhite, color.Bold},
|
||||||
|
BgBlack: {color.BgBlack, color.FgHiWhite, color.Bold},
|
||||||
|
BgRed: {color.BgRed, color.FgHiWhite, color.Bold},
|
||||||
|
BgGreen: {color.BgGreen, color.FgHiWhite, color.Bold},
|
||||||
|
BgYellow: {color.BgHiYellow, color.FgHiBlack, color.Bold},
|
||||||
|
BgBlue: {color.BgBlue, color.FgHiWhite, color.Bold},
|
||||||
|
BgMagenta: {color.BgMagenta, color.FgHiWhite, color.Bold},
|
||||||
|
BgCyan: {color.BgCyan, color.FgHiWhite, color.Bold},
|
||||||
|
BgWhite: {color.BgHiWhite, color.FgHiBlack, color.Bold},
|
||||||
|
}
|
||||||
|
|
||||||
|
type Color uint32
|
||||||
|
|
||||||
|
// WithColor returns a string with the given color applied.
|
||||||
|
func WithColor(text string, colour Color) string {
|
||||||
|
c := color.New(colors[colour]...)
|
||||||
|
return c.Sprint(text)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithColorPadding returns a string with the given color applied with leading and trailing spaces.
|
||||||
|
func WithColorPadding(text string, colour Color) string {
|
||||||
|
return WithColor(" "+text+" ", colour)
|
||||||
|
}
|
||||||
17
core/color/color_test.go
Normal file
17
core/color/color_test.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
package color
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithColor(t *testing.T) {
|
||||||
|
output := WithColor("Hello", BgRed)
|
||||||
|
assert.Equal(t, "Hello", output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithColorPadding(t *testing.T) {
|
||||||
|
output := WithColorPadding("Hello", BgRed)
|
||||||
|
assert.Equal(t, " Hello ", output)
|
||||||
|
}
|
||||||
@@ -2,28 +2,53 @@ package conf
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/jsonx"
|
||||||
"github.com/zeromicro/go-zero/core/mapping"
|
"github.com/zeromicro/go-zero/core/mapping"
|
||||||
|
"github.com/zeromicro/go-zero/internal/encoding"
|
||||||
)
|
)
|
||||||
|
|
||||||
var loaders = map[string]func([]byte, interface{}) error{
|
const (
|
||||||
".json": LoadConfigFromJsonBytes,
|
jsonTagKey = "json"
|
||||||
".yaml": LoadConfigFromYamlBytes,
|
jsonTagSep = ','
|
||||||
".yml": LoadConfigFromYamlBytes,
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
fillDefaultUnmarshaler = mapping.NewUnmarshaler(jsonTagKey, mapping.WithDefault())
|
||||||
|
loaders = map[string]func([]byte, any) error{
|
||||||
|
".json": LoadFromJsonBytes,
|
||||||
|
".toml": LoadFromTomlBytes,
|
||||||
|
".yaml": LoadFromYamlBytes,
|
||||||
|
".yml": LoadFromYamlBytes,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// children and mapField should not be both filled.
|
||||||
|
// named fields and map cannot be bound to the same field name.
|
||||||
|
type fieldInfo struct {
|
||||||
|
children map[string]*fieldInfo
|
||||||
|
mapField *fieldInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
|
// FillDefault fills the default values for the given v,
|
||||||
func LoadConfig(file string, v interface{}, opts ...Option) error {
|
// and the premise is that the value of v must be guaranteed to be empty.
|
||||||
content, err := ioutil.ReadFile(file)
|
func FillDefault(v any) error {
|
||||||
|
return fillDefaultUnmarshaler.Unmarshal(map[string]any{}, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load loads config into v from file, .json, .yaml and .yml are acceptable.
|
||||||
|
func Load(file string, v any, opts ...Option) error {
|
||||||
|
content, err := os.ReadFile(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
loader, ok := loaders[path.Ext(file)]
|
loader, ok := loaders[strings.ToLower(path.Ext(file))]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("unrecognized file type: %s", file)
|
return fmt.Errorf("unrecognized file type: %s", file)
|
||||||
}
|
}
|
||||||
@@ -40,19 +65,297 @@ func LoadConfig(file string, v interface{}, opts ...Option) error {
|
|||||||
return loader(content, v)
|
return loader(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
|
||||||
|
// Deprecated: use Load instead.
|
||||||
|
func LoadConfig(file string, v any, opts ...Option) error {
|
||||||
|
return Load(file, v, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromJsonBytes loads config into v from content json bytes.
|
||||||
|
func LoadFromJsonBytes(content []byte, v any) error {
|
||||||
|
info, err := buildFieldsInfo(reflect.TypeOf(v), "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var m map[string]any
|
||||||
|
if err = jsonx.Unmarshal(content, &m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
lowerCaseKeyMap := toLowerCaseKeyMap(m, info)
|
||||||
|
|
||||||
|
return mapping.UnmarshalJsonMap(lowerCaseKeyMap, v, mapping.WithCanonicalKeyFunc(toLowerCase))
|
||||||
|
}
|
||||||
|
|
||||||
// LoadConfigFromJsonBytes loads config into v from content json bytes.
|
// LoadConfigFromJsonBytes loads config into v from content json bytes.
|
||||||
func LoadConfigFromJsonBytes(content []byte, v interface{}) error {
|
// Deprecated: use LoadFromJsonBytes instead.
|
||||||
return mapping.UnmarshalJsonBytes(content, v)
|
func LoadConfigFromJsonBytes(content []byte, v any) error {
|
||||||
|
return LoadFromJsonBytes(content, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromTomlBytes loads config into v from content toml bytes.
|
||||||
|
func LoadFromTomlBytes(content []byte, v any) error {
|
||||||
|
b, err := encoding.TomlToJson(content)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return LoadFromJsonBytes(b, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromYamlBytes loads config into v from content yaml bytes.
|
||||||
|
func LoadFromYamlBytes(content []byte, v any) error {
|
||||||
|
b, err := encoding.YamlToJson(content)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return LoadFromJsonBytes(b, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadConfigFromYamlBytes loads config into v from content yaml bytes.
|
// LoadConfigFromYamlBytes loads config into v from content yaml bytes.
|
||||||
func LoadConfigFromYamlBytes(content []byte, v interface{}) error {
|
// Deprecated: use LoadFromYamlBytes instead.
|
||||||
return mapping.UnmarshalYamlBytes(content, v)
|
func LoadConfigFromYamlBytes(content []byte, v any) error {
|
||||||
|
return LoadFromYamlBytes(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MustLoad loads config into v from path, exits on error.
|
// MustLoad loads config into v from path, exits on error.
|
||||||
func MustLoad(path string, v interface{}, opts ...Option) {
|
func MustLoad(path string, v any, opts ...Option) {
|
||||||
if err := LoadConfig(path, v, opts...); err != nil {
|
if err := Load(path, v, opts...); err != nil {
|
||||||
log.Fatalf("error: config file %s, %s", path, err.Error())
|
log.Fatalf("error: config file %s, %s", path, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func addOrMergeFields(info *fieldInfo, key string, child *fieldInfo, fullName string) error {
|
||||||
|
if prev, ok := info.children[key]; ok {
|
||||||
|
if child.mapField != nil {
|
||||||
|
return newConflictKeyError(fullName)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := mergeFields(prev, key, child.children, fullName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
info.children[key] = child
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildAnonymousFieldInfo(info *fieldInfo, lowerCaseName string, ft reflect.Type, fullName string) error {
|
||||||
|
switch ft.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
fields, err := buildFieldsInfo(ft, fullName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range fields.children {
|
||||||
|
if err = addOrMergeFields(info, k, v, fullName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
elemField, err := buildFieldsInfo(mapping.Deref(ft.Elem()), fullName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := info.children[lowerCaseName]; ok {
|
||||||
|
return newConflictKeyError(fullName)
|
||||||
|
}
|
||||||
|
|
||||||
|
info.children[lowerCaseName] = &fieldInfo{
|
||||||
|
children: make(map[string]*fieldInfo),
|
||||||
|
mapField: elemField,
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if _, ok := info.children[lowerCaseName]; ok {
|
||||||
|
return newConflictKeyError(fullName)
|
||||||
|
}
|
||||||
|
|
||||||
|
info.children[lowerCaseName] = &fieldInfo{
|
||||||
|
children: make(map[string]*fieldInfo),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildFieldsInfo(tp reflect.Type, fullName string) (*fieldInfo, error) {
|
||||||
|
tp = mapping.Deref(tp)
|
||||||
|
|
||||||
|
switch tp.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
return buildStructFieldsInfo(tp, fullName)
|
||||||
|
case reflect.Array, reflect.Slice:
|
||||||
|
return buildFieldsInfo(mapping.Deref(tp.Elem()), fullName)
|
||||||
|
case reflect.Chan, reflect.Func:
|
||||||
|
return nil, fmt.Errorf("unsupported type: %s", tp.Kind())
|
||||||
|
default:
|
||||||
|
return &fieldInfo{
|
||||||
|
children: make(map[string]*fieldInfo),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildNamedFieldInfo(info *fieldInfo, lowerCaseName string, ft reflect.Type, fullName string) error {
|
||||||
|
var finfo *fieldInfo
|
||||||
|
var err error
|
||||||
|
|
||||||
|
switch ft.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
finfo, err = buildFieldsInfo(ft, fullName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case reflect.Array, reflect.Slice:
|
||||||
|
finfo, err = buildFieldsInfo(ft.Elem(), fullName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
elemInfo, err := buildFieldsInfo(mapping.Deref(ft.Elem()), fullName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
finfo = &fieldInfo{
|
||||||
|
children: make(map[string]*fieldInfo),
|
||||||
|
mapField: elemInfo,
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
finfo, err = buildFieldsInfo(ft, fullName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return addOrMergeFields(info, lowerCaseName, finfo, fullName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildStructFieldsInfo(tp reflect.Type, fullName string) (*fieldInfo, error) {
|
||||||
|
info := &fieldInfo{
|
||||||
|
children: make(map[string]*fieldInfo),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < tp.NumField(); i++ {
|
||||||
|
field := tp.Field(i)
|
||||||
|
if !field.IsExported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name := getTagName(field)
|
||||||
|
lowerCaseName := toLowerCase(name)
|
||||||
|
ft := mapping.Deref(field.Type)
|
||||||
|
// flatten anonymous fields
|
||||||
|
if field.Anonymous {
|
||||||
|
if err := buildAnonymousFieldInfo(info, lowerCaseName, ft,
|
||||||
|
getFullName(fullName, lowerCaseName)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else if err := buildNamedFieldInfo(info, lowerCaseName, ft,
|
||||||
|
getFullName(fullName, lowerCaseName)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getTagName get the tag name of the given field, if no tag name, use file.Name.
|
||||||
|
// field.Name is returned on tags like `json:""` and `json:",optional"`.
|
||||||
|
func getTagName(field reflect.StructField) string {
|
||||||
|
if tag, ok := field.Tag.Lookup(jsonTagKey); ok {
|
||||||
|
if pos := strings.IndexByte(tag, jsonTagSep); pos >= 0 {
|
||||||
|
tag = tag[:pos]
|
||||||
|
}
|
||||||
|
|
||||||
|
tag = strings.TrimSpace(tag)
|
||||||
|
if len(tag) > 0 {
|
||||||
|
return tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return field.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeFields(prev *fieldInfo, key string, children map[string]*fieldInfo, fullName string) error {
|
||||||
|
if len(prev.children) == 0 || len(children) == 0 {
|
||||||
|
return newConflictKeyError(fullName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// merge fields
|
||||||
|
for k, v := range children {
|
||||||
|
if _, ok := prev.children[k]; ok {
|
||||||
|
return newConflictKeyError(fullName)
|
||||||
|
}
|
||||||
|
|
||||||
|
prev.children[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toLowerCase(s string) string {
|
||||||
|
return strings.ToLower(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func toLowerCaseInterface(v any, info *fieldInfo) any {
|
||||||
|
switch vv := v.(type) {
|
||||||
|
case map[string]any:
|
||||||
|
return toLowerCaseKeyMap(vv, info)
|
||||||
|
case []any:
|
||||||
|
var arr []any
|
||||||
|
for _, vvv := range vv {
|
||||||
|
arr = append(arr, toLowerCaseInterface(vvv, info))
|
||||||
|
}
|
||||||
|
return arr
|
||||||
|
default:
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toLowerCaseKeyMap(m map[string]any, info *fieldInfo) map[string]any {
|
||||||
|
res := make(map[string]any)
|
||||||
|
|
||||||
|
for k, v := range m {
|
||||||
|
ti, ok := info.children[k]
|
||||||
|
if ok {
|
||||||
|
res[k] = toLowerCaseInterface(v, ti)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
lk := toLowerCase(k)
|
||||||
|
if ti, ok = info.children[lk]; ok {
|
||||||
|
res[lk] = toLowerCaseInterface(v, ti)
|
||||||
|
} else if info.mapField != nil {
|
||||||
|
res[k] = toLowerCaseInterface(v, info.mapField)
|
||||||
|
} else {
|
||||||
|
res[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
type conflictKeyError struct {
|
||||||
|
key string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newConflictKeyError(key string) conflictKeyError {
|
||||||
|
return conflictKeyError{key: key}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e conflictKeyError) Error() string {
|
||||||
|
return fmt.Sprintf("conflict key %s, pay attention to anonymous fields", e.key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFullName(parent, child string) string {
|
||||||
|
if len(parent) == 0 {
|
||||||
|
return child
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join([]string{parent, child}, ".")
|
||||||
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -12,7 +12,6 @@ import (
|
|||||||
|
|
||||||
// PropertyError represents a configuration error message.
|
// PropertyError represents a configuration error message.
|
||||||
type PropertyError struct {
|
type PropertyError struct {
|
||||||
error
|
|
||||||
message string
|
message string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
58
core/conf/readme.md
Normal file
58
core/conf/readme.md
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
## How to use
|
||||||
|
|
||||||
|
1. Define a config structure, like below:
|
||||||
|
|
||||||
|
```go
|
||||||
|
type RestfulConf struct {
|
||||||
|
ServiceName string `json:",env=SERVICE_NAME"` // read from env automatically
|
||||||
|
Host string `json:",default=0.0.0.0"`
|
||||||
|
Port int
|
||||||
|
LogMode string `json:",options=[file,console]"`
|
||||||
|
Verbose bool `json:",optional"`
|
||||||
|
MaxConns int `json:",default=10000"`
|
||||||
|
MaxBytes int64 `json:",default=1048576"`
|
||||||
|
Timeout time.Duration `json:",default=3s"`
|
||||||
|
CpuThreshold int64 `json:",default=900,range=[0:1000]"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Write the yaml, toml or json config file:
|
||||||
|
|
||||||
|
- yaml example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# most fields are optional or have default values
|
||||||
|
port: 8080
|
||||||
|
logMode: console
|
||||||
|
# you can use env settings
|
||||||
|
maxBytes: ${MAX_BYTES}
|
||||||
|
```
|
||||||
|
|
||||||
|
- toml example
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# most fields are optional or have default values
|
||||||
|
port = 8_080
|
||||||
|
logMode = "console"
|
||||||
|
# you can use env settings
|
||||||
|
maxBytes = "${MAX_BYTES}"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Load the config from a file:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// exit on error
|
||||||
|
var config RestfulConf
|
||||||
|
conf.MustLoad(configFile, &config)
|
||||||
|
|
||||||
|
// or handle the error on your own
|
||||||
|
var config RestfulConf
|
||||||
|
if err := conf.Load(configFile, &config); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// enable reading from environments
|
||||||
|
var config RestfulConf
|
||||||
|
conf.MustLoad(configFile, &config, conf.UseEnv())
|
||||||
|
```
|
||||||
|
|
||||||
@@ -14,13 +14,13 @@ type contextValuer struct {
|
|||||||
context.Context
|
context.Context
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cv contextValuer) Value(key string) (interface{}, bool) {
|
func (cv contextValuer) Value(key string) (any, bool) {
|
||||||
v := cv.Context.Value(key)
|
v := cv.Context.Value(key)
|
||||||
return v, v != nil
|
return v, v != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// For unmarshals ctx into v.
|
// For unmarshals ctx into v.
|
||||||
func For(ctx context.Context, v interface{}) error {
|
func For(ctx context.Context, v any) error {
|
||||||
return unmarshaler.UnmarshalValuer(contextValuer{
|
return unmarshaler.UnmarshalValuer(contextValuer{
|
||||||
Context: ctx,
|
Context: ctx,
|
||||||
}, v)
|
}, v)
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ var (
|
|||||||
type EtcdConf struct {
|
type EtcdConf struct {
|
||||||
Hosts []string
|
Hosts []string
|
||||||
Key string
|
Key string
|
||||||
|
ID int64 `json:",optional"`
|
||||||
User string `json:",optional"`
|
User string `json:",optional"`
|
||||||
Pass string `json:",optional"`
|
Pass string `json:",optional"`
|
||||||
CertFile string `json:",optional"`
|
CertFile string `json:",optional"`
|
||||||
@@ -26,6 +27,11 @@ func (c EtcdConf) HasAccount() bool {
|
|||||||
return len(c.User) > 0 && len(c.Pass) > 0
|
return len(c.User) > 0 && len(c.Pass) > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasID returns if ID provided.
|
||||||
|
func (c EtcdConf) HasID() bool {
|
||||||
|
return c.ID > 0
|
||||||
|
}
|
||||||
|
|
||||||
// HasTLS returns if TLS CertFile/CertKeyFile/CACertFile are provided.
|
// HasTLS returns if TLS CertFile/CertKeyFile/CACertFile are provided.
|
||||||
func (c EtcdConf) HasTLS() bool {
|
func (c EtcdConf) HasTLS() bool {
|
||||||
return len(c.CertFile) > 0 && len(c.CertKeyFile) > 0 && len(c.CACertFile) > 0
|
return len(c.CertFile) > 0 && len(c.CertKeyFile) > 0 && len(c.CACertFile) > 0
|
||||||
|
|||||||
@@ -80,3 +80,90 @@ func TestEtcdConf_HasAccount(t *testing.T) {
|
|||||||
assert.Equal(t, test.hasAccount, test.EtcdConf.HasAccount())
|
assert.Equal(t, test.hasAccount, test.EtcdConf.HasAccount())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestEtcdConf_HasID(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
EtcdConf
|
||||||
|
hasServerID bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
EtcdConf: EtcdConf{
|
||||||
|
Hosts: []string{"any"},
|
||||||
|
ID: -1,
|
||||||
|
},
|
||||||
|
hasServerID: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
EtcdConf: EtcdConf{
|
||||||
|
Hosts: []string{"any"},
|
||||||
|
ID: 0,
|
||||||
|
},
|
||||||
|
hasServerID: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
EtcdConf: EtcdConf{
|
||||||
|
Hosts: []string{"any"},
|
||||||
|
ID: 10000,
|
||||||
|
},
|
||||||
|
hasServerID: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
assert.Equal(t, test.hasServerID, test.EtcdConf.HasID())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEtcdConf_HasTLS(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
conf EtcdConf
|
||||||
|
want bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty config",
|
||||||
|
conf: EtcdConf{},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing CertFile",
|
||||||
|
conf: EtcdConf{
|
||||||
|
CertKeyFile: "key",
|
||||||
|
CACertFile: "ca",
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing CertKeyFile",
|
||||||
|
conf: EtcdConf{
|
||||||
|
CertFile: "cert",
|
||||||
|
CACertFile: "ca",
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing CACertFile",
|
||||||
|
conf: EtcdConf{
|
||||||
|
CertFile: "cert",
|
||||||
|
CertKeyFile: "key",
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid config",
|
||||||
|
conf: EtcdConf{
|
||||||
|
CertFile: "cert",
|
||||||
|
CertKeyFile: "key",
|
||||||
|
CACertFile: "ca",
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := tt.conf.HasTLS()
|
||||||
|
assert.Equal(t, tt.want, got)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package internal
|
|||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"io/ioutil"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -37,7 +37,7 @@ func AddTLS(endpoints []string, certFile, certKeyFile, caFile string, insecureSk
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
caData, err := ioutil.ReadFile(caFile)
|
caData, err := os.ReadFile(caFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,12 +1,85 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
certContent = `-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDazCCAlOgAwIBAgIUEg9GVO2oaPn+YSmiqmFIuAo10WIwDQYJKoZIhvcNAQEM
|
||||||
|
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||||
|
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAgFw0yMzAzMTExMzIxMjNaGA8yMTIz
|
||||||
|
MDIxNTEzMjEyM1owRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
|
||||||
|
ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
|
||||||
|
AQEBBQADggEPADCCAQoCggEBALplXlWsIf0O/IgnIplmiZHKGnxyfyufyE2FBRNk
|
||||||
|
OofRqbKuPH8GNqbkvZm7N29fwTDAQ+mViAggCkDht4hOzoWJMA7KYJt8JnTSWL48
|
||||||
|
M1lcrpc9DL2gszC/JF/FGvyANbBtLklkZPFBGdHUX14pjrT937wqPtm+SqUHSvRT
|
||||||
|
B7bmwmm2drRcmhpVm98LSlV7uQ2EgnJgsLjBPITKUejLmVLHfgX0RwQ2xIpX9pS4
|
||||||
|
FCe1BTacwl2gGp7Mje7y4Mfv3o0ArJW6Tuwbjx59ZXwb1KIP71b7bT04AVS8ZeYO
|
||||||
|
UMLKKuB5UR9x9Rn6cLXOTWBpcMVyzDgrAFLZjnE9LPUolZMCAwEAAaNRME8wHwYD
|
||||||
|
VR0jBBgwFoAUeW8w8pmhncbRgTsl48k4/7wnfx8wCQYDVR0TBAIwADALBgNVHQ8E
|
||||||
|
BAMCBPAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBDAUAA4IBAQAI
|
||||||
|
y9xaoS88CLPBsX6mxfcTAFVfGNTRW9VN9Ng1cCnUR+YGoXGM/l+qP4f7p8ocdGwK
|
||||||
|
iYZErVTzXYIn+D27//wpY3klJk3gAnEUBT3QRkStBw7XnpbeZ2oPBK+cmDnCnZPS
|
||||||
|
BIF1wxPX7vIgaxs5Zsdqwk3qvZ4Djr2wP7LabNWTLSBKgQoUY45Liw6pffLwcGF9
|
||||||
|
UKlu54bvGze2SufISCR3ib+I+FLvqpvJhXToZWYb/pfI/HccuCL1oot1x8vx6DQy
|
||||||
|
U+TYxlZsKS5mdNxAX3dqEkEMsgEi+g/tzDPXJImfeCGGBhIOXLm8SRypiuGdEbc9
|
||||||
|
xkWYxRPegajuEZGvCqVs
|
||||||
|
-----END CERTIFICATE-----`
|
||||||
|
keyContent = `-----BEGIN RSA PRIVATE KEY-----
|
||||||
|
MIIEowIBAAKCAQEAumVeVawh/Q78iCcimWaJkcoafHJ/K5/ITYUFE2Q6h9Gpsq48
|
||||||
|
fwY2puS9mbs3b1/BMMBD6ZWICCAKQOG3iE7OhYkwDspgm3wmdNJYvjwzWVyulz0M
|
||||||
|
vaCzML8kX8Ua/IA1sG0uSWRk8UEZ0dRfXimOtP3fvCo+2b5KpQdK9FMHtubCabZ2
|
||||||
|
tFyaGlWb3wtKVXu5DYSCcmCwuME8hMpR6MuZUsd+BfRHBDbEilf2lLgUJ7UFNpzC
|
||||||
|
XaAansyN7vLgx+/ejQCslbpO7BuPHn1lfBvUog/vVvttPTgBVLxl5g5Qwsoq4HlR
|
||||||
|
H3H1Gfpwtc5NYGlwxXLMOCsAUtmOcT0s9SiVkwIDAQABAoIBAD5meTJNMgO55Kjg
|
||||||
|
ESExxpRcCIno+tHr5+6rvYtEXqPheOIsmmwb9Gfi4+Z3WpOaht5/Pz0Ppj6yGzyl
|
||||||
|
U//6AgGKb+BDuBvVcDpjwPnOxZIBCSHwejdxeQu0scSuA97MPS0XIAvJ5FEv7ijk
|
||||||
|
5Bht6SyGYURpECltHygoTNuGgGqmO+McCJRLE9L09lTBI6UQ/JQwWJqSr7wx6iPU
|
||||||
|
M1Ze/srIV+7cyEPu6i0DGjS1gSQKkX68Lqn1w6oE290O+OZvleO0gZ02fLDWCZke
|
||||||
|
aeD9+EU/Pw+rqm3H6o0szOFIpzhRp41FUdW9sybB3Yp3u7c/574E+04Z/e30LMKs
|
||||||
|
TCtE1QECgYEA3K7KIpw0NH2HXL5C3RHcLmr204xeBfS70riBQQuVUgYdmxak2ima
|
||||||
|
80RInskY8hRhSGTg0l+VYIH8cmjcUyqMSOELS5XfRH99r4QPiK8AguXg80T4VumY
|
||||||
|
W3Pf+zEC2ssgP/gYthV0g0Xj5m2QxktOF9tRw5nkg739ZR4dI9lm/iECgYEA2Dnf
|
||||||
|
uwEDGqHiQRF6/fh5BG/nGVMvrefkqx6WvTJQ3k/M/9WhxB+lr/8yH46TuS8N2b29
|
||||||
|
FoTf3Mr9T7pr/PWkOPzoY3P56nYbKU8xSwCim9xMzhBMzj8/N9ukJvXy27/VOz56
|
||||||
|
eQaKqnvdXNGtPJrIMDGHps2KKWlKLyAlapzjVTMCgYAA/W++tACv85g13EykfT4F
|
||||||
|
n0k4LbsGP9DP4zABQLIMyiY72eAncmRVjwrcW36XJ2xATOONTgx3gF3HjZzfaqNy
|
||||||
|
eD/6uNNllUTVEryXGmHgNHPL45VRnn6memCY2eFvZdXhM5W4y2PYaunY0MkDercA
|
||||||
|
+GTngbs6tBF88KOk04bYwQKBgFl68cRgsdkmnwwQYNaTKfmVGYzYaQXNzkqmWPko
|
||||||
|
xmCJo6tHzC7ubdG8iRCYHzfmahPuuj6EdGPZuSRyYFgJi5Ftz/nAN+84OxtIQ3zn
|
||||||
|
YWOgskQgaLh9YfsKsQ7Sf1NDOsnOnD5TX7UXl07fEpLe9vNCvAFiU8e5Y9LGudU5
|
||||||
|
4bYTAoGBAMdX3a3bXp4cZvXNBJ/QLVyxC6fP1Q4haCR1Od3m+T00Jth2IX2dk/fl
|
||||||
|
p6xiJT1av5JtYabv1dFKaXOS5s1kLGGuCCSKpkvFZm826aQ2AFm0XGqEQDLeei5b
|
||||||
|
A52Kpy/YJ+RkG4BTFtAooFq6DmA0cnoP6oPvG2h6XtDJwDTPInJb
|
||||||
|
-----END RSA PRIVATE KEY-----`
|
||||||
|
caContent = `-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDbTCCAlWgAwIBAgIUBJvFoCowKich7MMfseJ+DYzzirowDQYJKoZIhvcNAQEM
|
||||||
|
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||||
|
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAgFw0yMzAzMTExMzIxMDNaGA8yMTIz
|
||||||
|
MDIxNTEzMjEwM1owRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
|
||||||
|
ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
|
||||||
|
AQEBBQADggEPADCCAQoCggEBAO4to2YMYj0bxgr2FCiweSTSFuPx33zSw2x/s9Wf
|
||||||
|
OR41bm2DFsyYT5f3sOIKlXZEdLmOKty2e3ho3yC0EyNpVHdykkkHT3aDI17quZax
|
||||||
|
kYi/URqqtl1Z08A22txolc04hAZisg2BypGi3vql81UW1t3zyloGnJoIAeXR9uca
|
||||||
|
ljP6Bk3bwsxoVBLi1JtHrO0hHLQaeHmKhAyrys06X0LRdn7Px48yRZlt6FaLSa8X
|
||||||
|
YiRM0G44bVy/h6BkoQjMYGwVmCVk6zjJ9U7ZPFqdnDMNxAfR+hjDnYodqdLDMTTR
|
||||||
|
1NPVrnEnNwFx0AMLvgt/ba/45vZCEAmSZnFXFAJJcM7ai9ECAwEAAaNTMFEwHQYD
|
||||||
|
VR0OBBYEFHlvMPKZoZ3G0YE7JePJOP+8J38fMB8GA1UdIwQYMBaAFHlvMPKZoZ3G
|
||||||
|
0YE7JePJOP+8J38fMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggEB
|
||||||
|
AMX8dNulADOo9uQgBMyFb9TVra7iY0zZjzv4GY5XY7scd52n6CnfAPvYBBDnTr/O
|
||||||
|
BgNp5jaujb4+9u/2qhV3f9n+/3WOb2CmPehBgVSzlXqHeQ9lshmgwZPeem2T+8Tm
|
||||||
|
Nnc/xQnsUfCFszUDxpkr55+aLVM22j02RWqcZ4q7TAaVYL+kdFVMc8FoqG/0ro6A
|
||||||
|
BjE/Qn0Nn7ciX1VUjDt8l+k7ummPJTmzdi6i6E4AwO9dzrGNgGJ4aWL8cC6xYcIX
|
||||||
|
goVIRTFeONXSDno/oPjWHpIPt7L15heMpKBHNuzPkKx2YVqPHE5QZxWfS+Lzgx+Q
|
||||||
|
E2oTTM0rYKOZ8p6000mhvKI=
|
||||||
|
-----END CERTIFICATE-----`
|
||||||
|
)
|
||||||
|
|
||||||
func TestAccount(t *testing.T) {
|
func TestAccount(t *testing.T) {
|
||||||
endpoints := []string{
|
endpoints := []string{
|
||||||
"192.168.0.2:2379",
|
"192.168.0.2:2379",
|
||||||
@@ -32,3 +105,34 @@ func TestAccount(t *testing.T) {
|
|||||||
assert.Equal(t, username, account.User)
|
assert.Equal(t, username, account.User)
|
||||||
assert.Equal(t, anotherPassword, account.Pass)
|
assert.Equal(t, anotherPassword, account.Pass)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTLSMethods(t *testing.T) {
|
||||||
|
certFile := createTempFile(t, []byte(certContent))
|
||||||
|
defer os.Remove(certFile)
|
||||||
|
keyFile := createTempFile(t, []byte(keyContent))
|
||||||
|
defer os.Remove(keyFile)
|
||||||
|
caFile := createTempFile(t, []byte(caContent))
|
||||||
|
defer os.Remove(caFile)
|
||||||
|
|
||||||
|
assert.NoError(t, AddTLS([]string{"foo"}, certFile, keyFile, caFile, false))
|
||||||
|
cfg, ok := GetTLS([]string{"foo"})
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.NotNil(t, cfg)
|
||||||
|
|
||||||
|
assert.Error(t, AddTLS([]string{"bar"}, "bad-file", keyFile, caFile, false))
|
||||||
|
assert.Error(t, AddTLS([]string{"bar"}, certFile, keyFile, "bad-file", false))
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTempFile(t *testing.T, body []byte) string {
|
||||||
|
tmpFile, err := os.CreateTemp(os.TempDir(), "go-unit-*.tmp")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tmpFile.Close()
|
||||||
|
if err = os.WriteFile(tmpFile.Name(), body, os.ModePerm); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tmpFile.Name()
|
||||||
|
}
|
||||||
|
|||||||
@@ -81,7 +81,7 @@ func (mr *MockEtcdClientMockRecorder) Ctx() *gomock.Call {
|
|||||||
// Get mocks base method
|
// Get mocks base method
|
||||||
func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
|
func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{ctx, key}
|
varargs := []any{ctx, key}
|
||||||
for _, a := range opts {
|
for _, a := range opts {
|
||||||
varargs = append(varargs, a)
|
varargs = append(varargs, a)
|
||||||
}
|
}
|
||||||
@@ -92,9 +92,9 @@ func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.O
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get indicates an expected call of Get
|
// Get indicates an expected call of Get
|
||||||
func (mr *MockEtcdClientMockRecorder) Get(ctx, key interface{}, opts ...interface{}) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Get(ctx, key any, opts ...any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{ctx, key}, opts...)
|
varargs := append([]any{ctx, key}, opts...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockEtcdClient)(nil).Get), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockEtcdClient)(nil).Get), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -108,7 +108,7 @@ func (m *MockEtcdClient) Grant(ctx context.Context, ttl int64) (*clientv3.LeaseG
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Grant indicates an expected call of Grant
|
// Grant indicates an expected call of Grant
|
||||||
func (mr *MockEtcdClientMockRecorder) Grant(ctx, ttl interface{}) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Grant(ctx, ttl any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Grant", reflect.TypeOf((*MockEtcdClient)(nil).Grant), ctx, ttl)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Grant", reflect.TypeOf((*MockEtcdClient)(nil).Grant), ctx, ttl)
|
||||||
}
|
}
|
||||||
@@ -123,7 +123,7 @@ func (m *MockEtcdClient) KeepAlive(ctx context.Context, id clientv3.LeaseID) (<-
|
|||||||
}
|
}
|
||||||
|
|
||||||
// KeepAlive indicates an expected call of KeepAlive
|
// KeepAlive indicates an expected call of KeepAlive
|
||||||
func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id interface{}) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockEtcdClient)(nil).KeepAlive), ctx, id)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockEtcdClient)(nil).KeepAlive), ctx, id)
|
||||||
}
|
}
|
||||||
@@ -131,7 +131,7 @@ func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id interface{}) *gomock.Cal
|
|||||||
// Put mocks base method
|
// Put mocks base method
|
||||||
func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
|
func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{ctx, key, val}
|
varargs := []any{ctx, key, val}
|
||||||
for _, a := range opts {
|
for _, a := range opts {
|
||||||
varargs = append(varargs, a)
|
varargs = append(varargs, a)
|
||||||
}
|
}
|
||||||
@@ -142,9 +142,9 @@ func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clien
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Put indicates an expected call of Put
|
// Put indicates an expected call of Put
|
||||||
func (mr *MockEtcdClientMockRecorder) Put(ctx, key, val interface{}, opts ...interface{}) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Put(ctx, key, val any, opts ...any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{ctx, key, val}, opts...)
|
varargs := append([]any{ctx, key, val}, opts...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockEtcdClient)(nil).Put), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockEtcdClient)(nil).Put), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -158,7 +158,7 @@ func (m *MockEtcdClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clie
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Revoke indicates an expected call of Revoke
|
// Revoke indicates an expected call of Revoke
|
||||||
func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id interface{}) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revoke", reflect.TypeOf((*MockEtcdClient)(nil).Revoke), ctx, id)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revoke", reflect.TypeOf((*MockEtcdClient)(nil).Revoke), ctx, id)
|
||||||
}
|
}
|
||||||
@@ -166,7 +166,7 @@ func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id interface{}) *gomock.Call {
|
|||||||
// Watch mocks base method
|
// Watch mocks base method
|
||||||
func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
|
func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{ctx, key}
|
varargs := []any{ctx, key}
|
||||||
for _, a := range opts {
|
for _, a := range opts {
|
||||||
varargs = append(varargs, a)
|
varargs = append(varargs, a)
|
||||||
}
|
}
|
||||||
@@ -176,8 +176,8 @@ func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Watch indicates an expected call of Watch
|
// Watch indicates an expected call of Watch
|
||||||
func (mr *MockEtcdClientMockRecorder) Watch(ctx, key interface{}, opts ...interface{}) *gomock.Call {
|
func (mr *MockEtcdClientMockRecorder) Watch(ctx, key any, opts ...any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{ctx, key}, opts...)
|
varargs := append([]any{ctx, key}, opts...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockEtcdClient)(nil).Watch), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockEtcdClient)(nil).Watch), varargs...)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -191,9 +191,11 @@ func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
case clientv3.EventTypeDelete:
|
case clientv3.EventTypeDelete:
|
||||||
|
c.lock.Lock()
|
||||||
if vals, ok := c.values[key]; ok {
|
if vals, ok := c.values[key]; ok {
|
||||||
delete(vals, string(ev.Kv.Key))
|
delete(vals, string(ev.Kv.Key))
|
||||||
}
|
}
|
||||||
|
c.lock.Unlock()
|
||||||
for _, l := range listeners {
|
for _, l := range listeners {
|
||||||
l.OnDelete(KV{
|
l.OnDelete(KV{
|
||||||
Key: string(ev.Kv.Key),
|
Key: string(ev.Kv.Key),
|
||||||
@@ -206,7 +208,7 @@ func (c *cluster) handleWatchEvents(key string, events []*clientv3.Event) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) load(cli EtcdClient, key string) {
|
func (c *cluster) load(cli EtcdClient, key string) int64 {
|
||||||
var resp *clientv3.GetResponse
|
var resp *clientv3.GetResponse
|
||||||
for {
|
for {
|
||||||
var err error
|
var err error
|
||||||
@@ -230,6 +232,8 @@ func (c *cluster) load(cli EtcdClient, key string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
c.handleChanges(key, kvs)
|
c.handleChanges(key, kvs)
|
||||||
|
|
||||||
|
return resp.Header.Revision
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) monitor(key string, l UpdateListener) error {
|
func (c *cluster) monitor(key string, l UpdateListener) error {
|
||||||
@@ -242,9 +246,9 @@ func (c *cluster) monitor(key string, l UpdateListener) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
c.load(cli, key)
|
rev := c.load(cli, key)
|
||||||
c.watchGroup.Run(func() {
|
c.watchGroup.Run(func() {
|
||||||
c.watch(cli, key)
|
c.watch(cli, key, rev)
|
||||||
})
|
})
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -276,22 +280,29 @@ func (c *cluster) reload(cli EtcdClient) {
|
|||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
k := key
|
k := key
|
||||||
c.watchGroup.Run(func() {
|
c.watchGroup.Run(func() {
|
||||||
c.load(cli, k)
|
rev := c.load(cli, k)
|
||||||
c.watch(cli, k)
|
c.watch(cli, k, rev)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) watch(cli EtcdClient, key string) {
|
func (c *cluster) watch(cli EtcdClient, key string, rev int64) {
|
||||||
for {
|
for {
|
||||||
if c.watchStream(cli, key) {
|
if c.watchStream(cli, key, rev) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) watchStream(cli EtcdClient, key string) bool {
|
func (c *cluster) watchStream(cli EtcdClient, key string, rev int64) bool {
|
||||||
rch := cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix())
|
var rch clientv3.WatchChan
|
||||||
|
if rev != 0 {
|
||||||
|
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix(),
|
||||||
|
clientv3.WithRev(rev+1))
|
||||||
|
} else {
|
||||||
|
rch = cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix())
|
||||||
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case wresp, ok := <-rch:
|
case wresp, ok := <-rch:
|
||||||
@@ -332,6 +343,7 @@ func DialClient(endpoints []string) (EtcdClient, error) {
|
|||||||
DialKeepAliveTime: dialKeepAliveTime,
|
DialKeepAliveTime: dialKeepAliveTime,
|
||||||
DialKeepAliveTimeout: DialTimeout,
|
DialKeepAliveTimeout: DialTimeout,
|
||||||
RejectOldCluster: true,
|
RejectOldCluster: true,
|
||||||
|
PermitWithoutStream: true,
|
||||||
}
|
}
|
||||||
if account, ok := GetAccount(endpoints); ok {
|
if account, ok := GetAccount(endpoints); ok {
|
||||||
cfg.Username = account.User
|
cfg.Username = account.User
|
||||||
|
|||||||
@@ -2,8 +2,10 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -11,8 +13,10 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
|
"go.etcd.io/etcd/api/v3/etcdserverpb"
|
||||||
"go.etcd.io/etcd/api/v3/mvccpb"
|
"go.etcd.io/etcd/api/v3/mvccpb"
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
|
"go.etcd.io/etcd/client/v3/mock/mockserver"
|
||||||
)
|
)
|
||||||
|
|
||||||
var mockLock sync.Mutex
|
var mockLock sync.Mutex
|
||||||
@@ -112,6 +116,7 @@ func TestCluster_Load(t *testing.T) {
|
|||||||
restore := setMockClient(cli)
|
restore := setMockClient(cli)
|
||||||
defer restore()
|
defer restore()
|
||||||
cli.EXPECT().Get(gomock.Any(), "any/", gomock.Any()).Return(&clientv3.GetResponse{
|
cli.EXPECT().Get(gomock.Any(), "any/", gomock.Any()).Return(&clientv3.GetResponse{
|
||||||
|
Header: &etcdserverpb.ResponseHeader{},
|
||||||
Kvs: []*mvccpb.KeyValue{
|
Kvs: []*mvccpb.KeyValue{
|
||||||
{
|
{
|
||||||
Key: []byte("hello"),
|
Key: []byte("hello"),
|
||||||
@@ -165,10 +170,10 @@ func TestCluster_Watch(t *testing.T) {
|
|||||||
assert.Equal(t, "world", kv.Val)
|
assert.Equal(t, "world", kv.Val)
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}).MaxTimes(1)
|
}).MaxTimes(1)
|
||||||
listener.EXPECT().OnDelete(gomock.Any()).Do(func(_ interface{}) {
|
listener.EXPECT().OnDelete(gomock.Any()).Do(func(_ any) {
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}).MaxTimes(1)
|
}).MaxTimes(1)
|
||||||
go c.watch(cli, "any")
|
go c.watch(cli, "any", 0)
|
||||||
ch <- clientv3.WatchResponse{
|
ch <- clientv3.WatchResponse{
|
||||||
Events: []*clientv3.Event{
|
Events: []*clientv3.Event{
|
||||||
{
|
{
|
||||||
@@ -212,7 +217,7 @@ func TestClusterWatch_RespFailures(t *testing.T) {
|
|||||||
ch <- resp
|
ch <- resp
|
||||||
close(c.done)
|
close(c.done)
|
||||||
}()
|
}()
|
||||||
c.watch(cli, "any")
|
c.watch(cli, "any", 0)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -232,7 +237,7 @@ func TestClusterWatch_CloseChan(t *testing.T) {
|
|||||||
close(ch)
|
close(ch)
|
||||||
close(c.done)
|
close(c.done)
|
||||||
}()
|
}()
|
||||||
c.watch(cli, "any")
|
c.watch(cli, "any", 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValueOnlyContext(t *testing.T) {
|
func TestValueOnlyContext(t *testing.T) {
|
||||||
@@ -240,3 +245,58 @@ func TestValueOnlyContext(t *testing.T) {
|
|||||||
ctx.Done()
|
ctx.Done()
|
||||||
assert.Nil(t, ctx.Err())
|
assert.Nil(t, ctx.Err())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDialClient(t *testing.T) {
|
||||||
|
svr, err := mockserver.StartMockServers(1)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
svr.StartAt(0)
|
||||||
|
|
||||||
|
certFile := createTempFile(t, []byte(certContent))
|
||||||
|
defer os.Remove(certFile)
|
||||||
|
keyFile := createTempFile(t, []byte(keyContent))
|
||||||
|
defer os.Remove(keyFile)
|
||||||
|
caFile := createTempFile(t, []byte(caContent))
|
||||||
|
defer os.Remove(caFile)
|
||||||
|
|
||||||
|
endpoints := []string{svr.Servers[0].Address}
|
||||||
|
AddAccount(endpoints, "foo", "bar")
|
||||||
|
assert.NoError(t, AddTLS(endpoints, certFile, keyFile, caFile, false))
|
||||||
|
|
||||||
|
old := DialTimeout
|
||||||
|
DialTimeout = time.Millisecond
|
||||||
|
defer func() {
|
||||||
|
DialTimeout = old
|
||||||
|
}()
|
||||||
|
_, err = DialClient(endpoints)
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRegistry_Monitor(t *testing.T) {
|
||||||
|
svr, err := mockserver.StartMockServers(1)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
svr.StartAt(0)
|
||||||
|
|
||||||
|
endpoints := []string{svr.Servers[0].Address}
|
||||||
|
GetRegistry().lock.Lock()
|
||||||
|
GetRegistry().clusters = map[string]*cluster{
|
||||||
|
getClusterKey(endpoints): {
|
||||||
|
listeners: map[string][]UpdateListener{},
|
||||||
|
values: map[string]map[string]string{
|
||||||
|
"foo": {
|
||||||
|
"bar": "baz",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
GetRegistry().lock.Unlock()
|
||||||
|
assert.Error(t, GetRegistry().Monitor(endpoints, "foo", new(mockListener)))
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockListener struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockListener) OnAdd(_ KV) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockListener) OnDelete(_ KV) {
|
||||||
|
}
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ func (m *MocketcdConn) WaitForStateChange(ctx context.Context, sourceState conne
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WaitForStateChange indicates an expected call of WaitForStateChange
|
// WaitForStateChange indicates an expected call of WaitForStateChange
|
||||||
func (mr *MocketcdConnMockRecorder) WaitForStateChange(ctx, sourceState interface{}) *gomock.Call {
|
func (mr *MocketcdConnMockRecorder) WaitForStateChange(ctx, sourceState any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForStateChange", reflect.TypeOf((*MocketcdConn)(nil).WaitForStateChange), ctx, sourceState)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForStateChange", reflect.TypeOf((*MocketcdConn)(nil).WaitForStateChange), ctx, sourceState)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ func (m *MockUpdateListener) OnAdd(kv KV) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// OnAdd indicates an expected call of OnAdd
|
// OnAdd indicates an expected call of OnAdd
|
||||||
func (mr *MockUpdateListenerMockRecorder) OnAdd(kv interface{}) *gomock.Call {
|
func (mr *MockUpdateListenerMockRecorder) OnAdd(kv any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnAdd", reflect.TypeOf((*MockUpdateListener)(nil).OnAdd), kv)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnAdd", reflect.TypeOf((*MockUpdateListener)(nil).OnAdd), kv)
|
||||||
}
|
}
|
||||||
@@ -52,7 +52,7 @@ func (m *MockUpdateListener) OnDelete(kv KV) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// OnDelete indicates an expected call of OnDelete
|
// OnDelete indicates an expected call of OnDelete
|
||||||
func (mr *MockUpdateListenerMockRecorder) OnDelete(kv interface{}) *gomock.Call {
|
func (mr *MockUpdateListenerMockRecorder) OnDelete(kv any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnDelete", reflect.TypeOf((*MockUpdateListener)(nil).OnDelete), kv)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnDelete", reflect.TypeOf((*MockUpdateListener)(nil).OnDelete), kv)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
package discov
|
package discov
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/discov/internal"
|
"github.com/zeromicro/go-zero/core/discov/internal"
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
@@ -51,12 +53,7 @@ func NewPublisher(endpoints []string, key, value string, opts ...PubOption) *Pub
|
|||||||
|
|
||||||
// KeepAlive keeps key:value alive.
|
// KeepAlive keeps key:value alive.
|
||||||
func (p *Publisher) KeepAlive() error {
|
func (p *Publisher) KeepAlive() error {
|
||||||
cli, err := internal.GetRegistry().GetConn(p.endpoints)
|
cli, err := p.doRegister()
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
p.lease, err = p.register(cli)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -83,6 +80,43 @@ func (p *Publisher) Stop() {
|
|||||||
p.quit.Close()
|
p.quit.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *Publisher) doKeepAlive() error {
|
||||||
|
ticker := time.NewTicker(time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for range ticker.C {
|
||||||
|
select {
|
||||||
|
case <-p.quit.Done():
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
cli, err := p.doRegister()
|
||||||
|
if err != nil {
|
||||||
|
logx.Errorf("etcd publisher doRegister: %s", err.Error())
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.keepAliveAsync(cli); err != nil {
|
||||||
|
logx.Errorf("etcd publisher keepAliveAsync: %s", err.Error())
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Publisher) doRegister() (internal.EtcdClient, error) {
|
||||||
|
cli, err := internal.GetRegistry().GetConn(p.endpoints)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.lease, err = p.register(cli)
|
||||||
|
return cli, err
|
||||||
|
}
|
||||||
|
|
||||||
func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
|
func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
|
||||||
ch, err := cli.KeepAlive(cli.Ctx(), p.lease)
|
ch, err := cli.KeepAlive(cli.Ctx(), p.lease)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -95,8 +129,8 @@ func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
|
|||||||
case _, ok := <-ch:
|
case _, ok := <-ch:
|
||||||
if !ok {
|
if !ok {
|
||||||
p.revoke(cli)
|
p.revoke(cli)
|
||||||
if err := p.KeepAlive(); err != nil {
|
if err := p.doKeepAlive(); err != nil {
|
||||||
logx.Errorf("KeepAlive: %s", err.Error())
|
logx.Errorf("etcd publisher KeepAlive: %s", err.Error())
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -105,8 +139,8 @@ func (p *Publisher) keepAliveAsync(cli internal.EtcdClient) error {
|
|||||||
p.revoke(cli)
|
p.revoke(cli)
|
||||||
select {
|
select {
|
||||||
case <-p.resumeChan:
|
case <-p.resumeChan:
|
||||||
if err := p.KeepAlive(); err != nil {
|
if err := p.doKeepAlive(); err != nil {
|
||||||
logx.Errorf("KeepAlive: %s", err.Error())
|
logx.Errorf("etcd publisher KeepAlive: %s", err.Error())
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
case <-p.quit.Done():
|
case <-p.quit.Done():
|
||||||
@@ -141,7 +175,7 @@ func (p *Publisher) register(client internal.EtcdClient) (clientv3.LeaseID, erro
|
|||||||
|
|
||||||
func (p *Publisher) revoke(cli internal.EtcdClient) {
|
func (p *Publisher) revoke(cli internal.EtcdClient) {
|
||||||
if _, err := cli.Revoke(cli.Ctx(), p.lease); err != nil {
|
if _, err := cli.Revoke(cli.Ctx(), p.lease); err != nil {
|
||||||
logx.Error(err)
|
logx.Errorf("etcd publisher revoke: %s", err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,10 @@
|
|||||||
package discov
|
package discov
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -13,6 +16,83 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/logx"
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
"github.com/zeromicro/go-zero/core/stringx"
|
"github.com/zeromicro/go-zero/core/stringx"
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/resolver"
|
||||||
|
"google.golang.org/grpc/resolver/manual"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
certContent = `-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDazCCAlOgAwIBAgIUEg9GVO2oaPn+YSmiqmFIuAo10WIwDQYJKoZIhvcNAQEM
|
||||||
|
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||||
|
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAgFw0yMzAzMTExMzIxMjNaGA8yMTIz
|
||||||
|
MDIxNTEzMjEyM1owRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
|
||||||
|
ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
|
||||||
|
AQEBBQADggEPADCCAQoCggEBALplXlWsIf0O/IgnIplmiZHKGnxyfyufyE2FBRNk
|
||||||
|
OofRqbKuPH8GNqbkvZm7N29fwTDAQ+mViAggCkDht4hOzoWJMA7KYJt8JnTSWL48
|
||||||
|
M1lcrpc9DL2gszC/JF/FGvyANbBtLklkZPFBGdHUX14pjrT937wqPtm+SqUHSvRT
|
||||||
|
B7bmwmm2drRcmhpVm98LSlV7uQ2EgnJgsLjBPITKUejLmVLHfgX0RwQ2xIpX9pS4
|
||||||
|
FCe1BTacwl2gGp7Mje7y4Mfv3o0ArJW6Tuwbjx59ZXwb1KIP71b7bT04AVS8ZeYO
|
||||||
|
UMLKKuB5UR9x9Rn6cLXOTWBpcMVyzDgrAFLZjnE9LPUolZMCAwEAAaNRME8wHwYD
|
||||||
|
VR0jBBgwFoAUeW8w8pmhncbRgTsl48k4/7wnfx8wCQYDVR0TBAIwADALBgNVHQ8E
|
||||||
|
BAMCBPAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBDAUAA4IBAQAI
|
||||||
|
y9xaoS88CLPBsX6mxfcTAFVfGNTRW9VN9Ng1cCnUR+YGoXGM/l+qP4f7p8ocdGwK
|
||||||
|
iYZErVTzXYIn+D27//wpY3klJk3gAnEUBT3QRkStBw7XnpbeZ2oPBK+cmDnCnZPS
|
||||||
|
BIF1wxPX7vIgaxs5Zsdqwk3qvZ4Djr2wP7LabNWTLSBKgQoUY45Liw6pffLwcGF9
|
||||||
|
UKlu54bvGze2SufISCR3ib+I+FLvqpvJhXToZWYb/pfI/HccuCL1oot1x8vx6DQy
|
||||||
|
U+TYxlZsKS5mdNxAX3dqEkEMsgEi+g/tzDPXJImfeCGGBhIOXLm8SRypiuGdEbc9
|
||||||
|
xkWYxRPegajuEZGvCqVs
|
||||||
|
-----END CERTIFICATE-----`
|
||||||
|
keyContent = `-----BEGIN RSA PRIVATE KEY-----
|
||||||
|
MIIEowIBAAKCAQEAumVeVawh/Q78iCcimWaJkcoafHJ/K5/ITYUFE2Q6h9Gpsq48
|
||||||
|
fwY2puS9mbs3b1/BMMBD6ZWICCAKQOG3iE7OhYkwDspgm3wmdNJYvjwzWVyulz0M
|
||||||
|
vaCzML8kX8Ua/IA1sG0uSWRk8UEZ0dRfXimOtP3fvCo+2b5KpQdK9FMHtubCabZ2
|
||||||
|
tFyaGlWb3wtKVXu5DYSCcmCwuME8hMpR6MuZUsd+BfRHBDbEilf2lLgUJ7UFNpzC
|
||||||
|
XaAansyN7vLgx+/ejQCslbpO7BuPHn1lfBvUog/vVvttPTgBVLxl5g5Qwsoq4HlR
|
||||||
|
H3H1Gfpwtc5NYGlwxXLMOCsAUtmOcT0s9SiVkwIDAQABAoIBAD5meTJNMgO55Kjg
|
||||||
|
ESExxpRcCIno+tHr5+6rvYtEXqPheOIsmmwb9Gfi4+Z3WpOaht5/Pz0Ppj6yGzyl
|
||||||
|
U//6AgGKb+BDuBvVcDpjwPnOxZIBCSHwejdxeQu0scSuA97MPS0XIAvJ5FEv7ijk
|
||||||
|
5Bht6SyGYURpECltHygoTNuGgGqmO+McCJRLE9L09lTBI6UQ/JQwWJqSr7wx6iPU
|
||||||
|
M1Ze/srIV+7cyEPu6i0DGjS1gSQKkX68Lqn1w6oE290O+OZvleO0gZ02fLDWCZke
|
||||||
|
aeD9+EU/Pw+rqm3H6o0szOFIpzhRp41FUdW9sybB3Yp3u7c/574E+04Z/e30LMKs
|
||||||
|
TCtE1QECgYEA3K7KIpw0NH2HXL5C3RHcLmr204xeBfS70riBQQuVUgYdmxak2ima
|
||||||
|
80RInskY8hRhSGTg0l+VYIH8cmjcUyqMSOELS5XfRH99r4QPiK8AguXg80T4VumY
|
||||||
|
W3Pf+zEC2ssgP/gYthV0g0Xj5m2QxktOF9tRw5nkg739ZR4dI9lm/iECgYEA2Dnf
|
||||||
|
uwEDGqHiQRF6/fh5BG/nGVMvrefkqx6WvTJQ3k/M/9WhxB+lr/8yH46TuS8N2b29
|
||||||
|
FoTf3Mr9T7pr/PWkOPzoY3P56nYbKU8xSwCim9xMzhBMzj8/N9ukJvXy27/VOz56
|
||||||
|
eQaKqnvdXNGtPJrIMDGHps2KKWlKLyAlapzjVTMCgYAA/W++tACv85g13EykfT4F
|
||||||
|
n0k4LbsGP9DP4zABQLIMyiY72eAncmRVjwrcW36XJ2xATOONTgx3gF3HjZzfaqNy
|
||||||
|
eD/6uNNllUTVEryXGmHgNHPL45VRnn6memCY2eFvZdXhM5W4y2PYaunY0MkDercA
|
||||||
|
+GTngbs6tBF88KOk04bYwQKBgFl68cRgsdkmnwwQYNaTKfmVGYzYaQXNzkqmWPko
|
||||||
|
xmCJo6tHzC7ubdG8iRCYHzfmahPuuj6EdGPZuSRyYFgJi5Ftz/nAN+84OxtIQ3zn
|
||||||
|
YWOgskQgaLh9YfsKsQ7Sf1NDOsnOnD5TX7UXl07fEpLe9vNCvAFiU8e5Y9LGudU5
|
||||||
|
4bYTAoGBAMdX3a3bXp4cZvXNBJ/QLVyxC6fP1Q4haCR1Od3m+T00Jth2IX2dk/fl
|
||||||
|
p6xiJT1av5JtYabv1dFKaXOS5s1kLGGuCCSKpkvFZm826aQ2AFm0XGqEQDLeei5b
|
||||||
|
A52Kpy/YJ+RkG4BTFtAooFq6DmA0cnoP6oPvG2h6XtDJwDTPInJb
|
||||||
|
-----END RSA PRIVATE KEY-----`
|
||||||
|
caContent = `-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDbTCCAlWgAwIBAgIUBJvFoCowKich7MMfseJ+DYzzirowDQYJKoZIhvcNAQEM
|
||||||
|
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||||
|
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAgFw0yMzAzMTExMzIxMDNaGA8yMTIz
|
||||||
|
MDIxNTEzMjEwM1owRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
|
||||||
|
ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
|
||||||
|
AQEBBQADggEPADCCAQoCggEBAO4to2YMYj0bxgr2FCiweSTSFuPx33zSw2x/s9Wf
|
||||||
|
OR41bm2DFsyYT5f3sOIKlXZEdLmOKty2e3ho3yC0EyNpVHdykkkHT3aDI17quZax
|
||||||
|
kYi/URqqtl1Z08A22txolc04hAZisg2BypGi3vql81UW1t3zyloGnJoIAeXR9uca
|
||||||
|
ljP6Bk3bwsxoVBLi1JtHrO0hHLQaeHmKhAyrys06X0LRdn7Px48yRZlt6FaLSa8X
|
||||||
|
YiRM0G44bVy/h6BkoQjMYGwVmCVk6zjJ9U7ZPFqdnDMNxAfR+hjDnYodqdLDMTTR
|
||||||
|
1NPVrnEnNwFx0AMLvgt/ba/45vZCEAmSZnFXFAJJcM7ai9ECAwEAAaNTMFEwHQYD
|
||||||
|
VR0OBBYEFHlvMPKZoZ3G0YE7JePJOP+8J38fMB8GA1UdIwQYMBaAFHlvMPKZoZ3G
|
||||||
|
0YE7JePJOP+8J38fMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggEB
|
||||||
|
AMX8dNulADOo9uQgBMyFb9TVra7iY0zZjzv4GY5XY7scd52n6CnfAPvYBBDnTr/O
|
||||||
|
BgNp5jaujb4+9u/2qhV3f9n+/3WOb2CmPehBgVSzlXqHeQ9lshmgwZPeem2T+8Tm
|
||||||
|
Nnc/xQnsUfCFszUDxpkr55+aLVM22j02RWqcZ4q7TAaVYL+kdFVMc8FoqG/0ro6A
|
||||||
|
BjE/Qn0Nn7ciX1VUjDt8l+k7ummPJTmzdi6i6E4AwO9dzrGNgGJ4aWL8cC6xYcIX
|
||||||
|
goVIRTFeONXSDno/oPjWHpIPt7L15heMpKBHNuzPkKx2YVqPHE5QZxWfS+Lzgx+Q
|
||||||
|
E2oTTM0rYKOZ8p6000mhvKI=
|
||||||
|
-----END CERTIFICATE-----`
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -37,7 +117,7 @@ func TestPublisher_register(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublisher_registerWithId(t *testing.T) {
|
func TestPublisher_registerWithOptions(t *testing.T) {
|
||||||
ctrl := gomock.NewController(t)
|
ctrl := gomock.NewController(t)
|
||||||
defer ctrl.Finish()
|
defer ctrl.Finish()
|
||||||
const id = 2
|
const id = 2
|
||||||
@@ -49,7 +129,15 @@ func TestPublisher_registerWithId(t *testing.T) {
|
|||||||
ID: 1,
|
ID: 1,
|
||||||
}, nil)
|
}, nil)
|
||||||
cli.EXPECT().Put(gomock.Any(), makeEtcdKey("thekey", id), "thevalue", gomock.Any())
|
cli.EXPECT().Put(gomock.Any(), makeEtcdKey("thekey", id), "thevalue", gomock.Any())
|
||||||
pub := NewPublisher(nil, "thekey", "thevalue", WithId(id))
|
|
||||||
|
certFile := createTempFile(t, []byte(certContent))
|
||||||
|
defer os.Remove(certFile)
|
||||||
|
keyFile := createTempFile(t, []byte(keyContent))
|
||||||
|
defer os.Remove(keyFile)
|
||||||
|
caFile := createTempFile(t, []byte(caContent))
|
||||||
|
defer os.Remove(caFile)
|
||||||
|
pub := NewPublisher(nil, "thekey", "thevalue", WithId(id),
|
||||||
|
WithPubEtcdTLS(certFile, keyFile, caFile, true))
|
||||||
_, err := pub.register(cli)
|
_, err := pub.register(cli)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
}
|
}
|
||||||
@@ -125,7 +213,7 @@ func TestPublisher_keepAliveAsyncQuit(t *testing.T) {
|
|||||||
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ interface{}) {
|
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
|
||||||
wg.Done()
|
wg.Done()
|
||||||
})
|
})
|
||||||
pub := NewPublisher(nil, "thekey", "thevalue")
|
pub := NewPublisher(nil, "thekey", "thevalue")
|
||||||
@@ -147,7 +235,7 @@ func TestPublisher_keepAliveAsyncPause(t *testing.T) {
|
|||||||
pub := NewPublisher(nil, "thekey", "thevalue")
|
pub := NewPublisher(nil, "thekey", "thevalue")
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ interface{}) {
|
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
|
||||||
pub.Stop()
|
pub.Stop()
|
||||||
wg.Done()
|
wg.Done()
|
||||||
})
|
})
|
||||||
@@ -169,3 +257,92 @@ func TestPublisher_Resume(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
<-publisher.resumeChan
|
<-publisher.resumeChan
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPublisher_keepAliveAsync(t *testing.T) {
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
defer ctrl.Finish()
|
||||||
|
const id clientv3.LeaseID = 1
|
||||||
|
conn := createMockConn(t)
|
||||||
|
defer conn.Close()
|
||||||
|
cli := internal.NewMockEtcdClient(ctrl)
|
||||||
|
cli.EXPECT().ActiveConnection().Return(conn).AnyTimes()
|
||||||
|
cli.EXPECT().Close()
|
||||||
|
defer cli.Close()
|
||||||
|
cli.ActiveConnection()
|
||||||
|
restore := setMockClient(cli)
|
||||||
|
defer restore()
|
||||||
|
cli.EXPECT().Ctx().AnyTimes()
|
||||||
|
cli.EXPECT().KeepAlive(gomock.Any(), id)
|
||||||
|
cli.EXPECT().Grant(gomock.Any(), timeToLive).Return(&clientv3.LeaseGrantResponse{
|
||||||
|
ID: 1,
|
||||||
|
}, nil)
|
||||||
|
cli.EXPECT().Put(gomock.Any(), makeEtcdKey("thekey", int64(id)), "thevalue", gomock.Any())
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
|
||||||
|
wg.Done()
|
||||||
|
})
|
||||||
|
pub := NewPublisher([]string{"the-endpoint"}, "thekey", "thevalue")
|
||||||
|
pub.lease = id
|
||||||
|
assert.Nil(t, pub.KeepAlive())
|
||||||
|
pub.Stop()
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func createMockConn(t *testing.T) *grpc.ClientConn {
|
||||||
|
lis, err := net.Listen("tcp", "localhost:0")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error while listening. Err: %v", err)
|
||||||
|
}
|
||||||
|
defer lis.Close()
|
||||||
|
lisAddr := resolver.Address{Addr: lis.Addr().String()}
|
||||||
|
lisDone := make(chan struct{})
|
||||||
|
dialDone := make(chan struct{})
|
||||||
|
// 1st listener accepts the connection and then does nothing
|
||||||
|
go func() {
|
||||||
|
defer close(lisDone)
|
||||||
|
conn, err := lis.Accept()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error while accepting. Err: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
framer := http2.NewFramer(conn, conn)
|
||||||
|
if err := framer.WriteSettings(http2.Setting{}); err != nil {
|
||||||
|
t.Errorf("Error while writing settings. Err: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
<-dialDone // Close conn only after dial returns.
|
||||||
|
}()
|
||||||
|
|
||||||
|
r := manual.NewBuilderWithScheme("whatever")
|
||||||
|
r.InitialState(resolver.State{Addresses: []resolver.Address{lisAddr}})
|
||||||
|
client, err := grpc.DialContext(context.Background(), r.Scheme()+":///test.server",
|
||||||
|
grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))
|
||||||
|
close(dialDone)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Dial failed. Err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
timeout := time.After(1 * time.Second)
|
||||||
|
select {
|
||||||
|
case <-timeout:
|
||||||
|
t.Fatal("timed out waiting for server to finish")
|
||||||
|
case <-lisDone:
|
||||||
|
}
|
||||||
|
|
||||||
|
return client
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTempFile(t *testing.T, body []byte) string {
|
||||||
|
tmpFile, err := os.CreateTemp(os.TempDir(), "go-unit-*.tmp")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tmpFile.Close()
|
||||||
|
if err = os.WriteFile(tmpFile.Name(), body, os.ModePerm); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tmpFile.Name()
|
||||||
|
}
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ type (
|
|||||||
// SubOption defines the method to customize a Subscriber.
|
// SubOption defines the method to customize a Subscriber.
|
||||||
SubOption func(sub *Subscriber)
|
SubOption func(sub *Subscriber)
|
||||||
|
|
||||||
// A Subscriber is used to subscribe the given key on a etcd cluster.
|
// A Subscriber is used to subscribe the given key on an etcd cluster.
|
||||||
Subscriber struct {
|
Subscriber struct {
|
||||||
endpoints []string
|
endpoints []string
|
||||||
exclusive bool
|
exclusive bool
|
||||||
|
|||||||
@@ -11,10 +11,12 @@ type (
|
|||||||
errorArray []error
|
errorArray []error
|
||||||
)
|
)
|
||||||
|
|
||||||
// Add adds err to be.
|
// Add adds errs to be, nil errors are ignored.
|
||||||
func (be *BatchError) Add(err error) {
|
func (be *BatchError) Add(errs ...error) {
|
||||||
if err != nil {
|
for _, err := range errs {
|
||||||
be.errs = append(be.errs, err)
|
if err != nil {
|
||||||
|
be.errs = append(be.errs, err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
21
core/errorx/wrap.go
Normal file
21
core/errorx/wrap.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
package errorx
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// Wrap returns an error that wraps err with given message.
|
||||||
|
func Wrap(err error, message string) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("%s: %w", message, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wrapf returns an error that wraps err with given format and args.
|
||||||
|
func Wrapf(err error, format string, args ...any) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)
|
||||||
|
}
|
||||||
24
core/errorx/wrap_test.go
Normal file
24
core/errorx/wrap_test.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
package errorx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWrap(t *testing.T) {
|
||||||
|
assert.Nil(t, Wrap(nil, "test"))
|
||||||
|
assert.Equal(t, "foo: bar", Wrap(errors.New("bar"), "foo").Error())
|
||||||
|
|
||||||
|
err := errors.New("foo")
|
||||||
|
assert.True(t, errors.Is(Wrap(err, "bar"), err))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWrapf(t *testing.T) {
|
||||||
|
assert.Nil(t, Wrapf(nil, "%s", "test"))
|
||||||
|
assert.Equal(t, "foo bar: quz", Wrapf(errors.New("quz"), "foo %s", "bar").Error())
|
||||||
|
|
||||||
|
err := errors.New("foo")
|
||||||
|
assert.True(t, errors.Is(Wrapf(err, "foo %s", "bar"), err))
|
||||||
|
}
|
||||||
@@ -42,7 +42,7 @@ func NewBulkExecutor(execute Execute, opts ...BulkOption) *BulkExecutor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add adds task into be.
|
// Add adds task into be.
|
||||||
func (be *BulkExecutor) Add(task interface{}) error {
|
func (be *BulkExecutor) Add(task any) error {
|
||||||
be.executor.Add(task)
|
be.executor.Add(task)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -79,22 +79,22 @@ func newBulkOptions() bulkOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type bulkContainer struct {
|
type bulkContainer struct {
|
||||||
tasks []interface{}
|
tasks []any
|
||||||
execute Execute
|
execute Execute
|
||||||
maxTasks int
|
maxTasks int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *bulkContainer) AddTask(task interface{}) bool {
|
func (bc *bulkContainer) AddTask(task any) bool {
|
||||||
bc.tasks = append(bc.tasks, task)
|
bc.tasks = append(bc.tasks, task)
|
||||||
return len(bc.tasks) >= bc.maxTasks
|
return len(bc.tasks) >= bc.maxTasks
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *bulkContainer) Execute(tasks interface{}) {
|
func (bc *bulkContainer) Execute(tasks any) {
|
||||||
vals := tasks.([]interface{})
|
vals := tasks.([]any)
|
||||||
bc.execute(vals)
|
bc.execute(vals)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *bulkContainer) RemoveAll() interface{} {
|
func (bc *bulkContainer) RemoveAll() any {
|
||||||
tasks := bc.tasks
|
tasks := bc.tasks
|
||||||
bc.tasks = nil
|
bc.tasks = nil
|
||||||
return tasks
|
return tasks
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ func TestBulkExecutor(t *testing.T) {
|
|||||||
var values []int
|
var values []int
|
||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
|
|
||||||
executor := NewBulkExecutor(func(items []interface{}) {
|
executor := NewBulkExecutor(func(items []any) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
values = append(values, len(items))
|
values = append(values, len(items))
|
||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
@@ -40,7 +40,7 @@ func TestBulkExecutorFlushInterval(t *testing.T) {
|
|||||||
var wait sync.WaitGroup
|
var wait sync.WaitGroup
|
||||||
|
|
||||||
wait.Add(1)
|
wait.Add(1)
|
||||||
executor := NewBulkExecutor(func(items []interface{}) {
|
executor := NewBulkExecutor(func(items []any) {
|
||||||
assert.Equal(t, size, len(items))
|
assert.Equal(t, size, len(items))
|
||||||
wait.Done()
|
wait.Done()
|
||||||
}, WithBulkTasks(caches), WithBulkInterval(time.Millisecond*100))
|
}, WithBulkTasks(caches), WithBulkInterval(time.Millisecond*100))
|
||||||
@@ -53,7 +53,7 @@ func TestBulkExecutorFlushInterval(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBulkExecutorEmpty(t *testing.T) {
|
func TestBulkExecutorEmpty(t *testing.T) {
|
||||||
NewBulkExecutor(func(items []interface{}) {
|
NewBulkExecutor(func(items []any) {
|
||||||
assert.Fail(t, "should not called")
|
assert.Fail(t, "should not called")
|
||||||
}, WithBulkTasks(10), WithBulkInterval(time.Millisecond))
|
}, WithBulkTasks(10), WithBulkInterval(time.Millisecond))
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
@@ -67,7 +67,7 @@ func TestBulkExecutorFlush(t *testing.T) {
|
|||||||
|
|
||||||
var wait sync.WaitGroup
|
var wait sync.WaitGroup
|
||||||
wait.Add(1)
|
wait.Add(1)
|
||||||
be := NewBulkExecutor(func(items []interface{}) {
|
be := NewBulkExecutor(func(items []any) {
|
||||||
assert.Equal(t, tasks, len(items))
|
assert.Equal(t, tasks, len(items))
|
||||||
wait.Done()
|
wait.Done()
|
||||||
}, WithBulkTasks(caches), WithBulkInterval(time.Minute))
|
}, WithBulkTasks(caches), WithBulkInterval(time.Minute))
|
||||||
@@ -81,8 +81,8 @@ func TestBulkExecutorFlush(t *testing.T) {
|
|||||||
func TestBuldExecutorFlushSlowTasks(t *testing.T) {
|
func TestBuldExecutorFlushSlowTasks(t *testing.T) {
|
||||||
const total = 1500
|
const total = 1500
|
||||||
lock := new(sync.Mutex)
|
lock := new(sync.Mutex)
|
||||||
result := make([]interface{}, 0, 10000)
|
result := make([]any, 0, 10000)
|
||||||
exec := NewBulkExecutor(func(tasks []interface{}) {
|
exec := NewBulkExecutor(func(tasks []any) {
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
@@ -100,7 +100,7 @@ func TestBuldExecutorFlushSlowTasks(t *testing.T) {
|
|||||||
func BenchmarkBulkExecutor(b *testing.B) {
|
func BenchmarkBulkExecutor(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
|
||||||
be := NewBulkExecutor(func(tasks []interface{}) {
|
be := NewBulkExecutor(func(tasks []any) {
|
||||||
time.Sleep(time.Millisecond * time.Duration(len(tasks)))
|
time.Sleep(time.Millisecond * time.Duration(len(tasks)))
|
||||||
})
|
})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ func NewChunkExecutor(execute Execute, opts ...ChunkOption) *ChunkExecutor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add adds task with given chunk size into ce.
|
// Add adds task with given chunk size into ce.
|
||||||
func (ce *ChunkExecutor) Add(task interface{}, size int) error {
|
func (ce *ChunkExecutor) Add(task any, size int) error {
|
||||||
ce.executor.Add(chunk{
|
ce.executor.Add(chunk{
|
||||||
val: task,
|
val: task,
|
||||||
size: size,
|
size: size,
|
||||||
@@ -82,25 +82,25 @@ func newChunkOptions() chunkOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type chunkContainer struct {
|
type chunkContainer struct {
|
||||||
tasks []interface{}
|
tasks []any
|
||||||
execute Execute
|
execute Execute
|
||||||
size int
|
size int
|
||||||
maxChunkSize int
|
maxChunkSize int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *chunkContainer) AddTask(task interface{}) bool {
|
func (bc *chunkContainer) AddTask(task any) bool {
|
||||||
ck := task.(chunk)
|
ck := task.(chunk)
|
||||||
bc.tasks = append(bc.tasks, ck.val)
|
bc.tasks = append(bc.tasks, ck.val)
|
||||||
bc.size += ck.size
|
bc.size += ck.size
|
||||||
return bc.size >= bc.maxChunkSize
|
return bc.size >= bc.maxChunkSize
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *chunkContainer) Execute(tasks interface{}) {
|
func (bc *chunkContainer) Execute(tasks any) {
|
||||||
vals := tasks.([]interface{})
|
vals := tasks.([]any)
|
||||||
bc.execute(vals)
|
bc.execute(vals)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *chunkContainer) RemoveAll() interface{} {
|
func (bc *chunkContainer) RemoveAll() any {
|
||||||
tasks := bc.tasks
|
tasks := bc.tasks
|
||||||
bc.tasks = nil
|
bc.tasks = nil
|
||||||
bc.size = 0
|
bc.size = 0
|
||||||
@@ -108,6 +108,6 @@ func (bc *chunkContainer) RemoveAll() interface{} {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type chunk struct {
|
type chunk struct {
|
||||||
val interface{}
|
val any
|
||||||
size int
|
size int
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ func TestChunkExecutor(t *testing.T) {
|
|||||||
var values []int
|
var values []int
|
||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
|
|
||||||
executor := NewChunkExecutor(func(items []interface{}) {
|
executor := NewChunkExecutor(func(items []any) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
values = append(values, len(items))
|
values = append(values, len(items))
|
||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
@@ -40,7 +40,7 @@ func TestChunkExecutorFlushInterval(t *testing.T) {
|
|||||||
var wait sync.WaitGroup
|
var wait sync.WaitGroup
|
||||||
|
|
||||||
wait.Add(1)
|
wait.Add(1)
|
||||||
executor := NewChunkExecutor(func(items []interface{}) {
|
executor := NewChunkExecutor(func(items []any) {
|
||||||
assert.Equal(t, size, len(items))
|
assert.Equal(t, size, len(items))
|
||||||
wait.Done()
|
wait.Done()
|
||||||
}, WithChunkBytes(caches), WithFlushInterval(time.Millisecond*100))
|
}, WithChunkBytes(caches), WithFlushInterval(time.Millisecond*100))
|
||||||
@@ -53,10 +53,11 @@ func TestChunkExecutorFlushInterval(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestChunkExecutorEmpty(t *testing.T) {
|
func TestChunkExecutorEmpty(t *testing.T) {
|
||||||
NewChunkExecutor(func(items []interface{}) {
|
executor := NewChunkExecutor(func(items []any) {
|
||||||
assert.Fail(t, "should not called")
|
assert.Fail(t, "should not called")
|
||||||
}, WithChunkBytes(10), WithFlushInterval(time.Millisecond))
|
}, WithChunkBytes(10), WithFlushInterval(time.Millisecond))
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
executor.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestChunkExecutorFlush(t *testing.T) {
|
func TestChunkExecutorFlush(t *testing.T) {
|
||||||
@@ -67,7 +68,7 @@ func TestChunkExecutorFlush(t *testing.T) {
|
|||||||
|
|
||||||
var wait sync.WaitGroup
|
var wait sync.WaitGroup
|
||||||
wait.Add(1)
|
wait.Add(1)
|
||||||
be := NewChunkExecutor(func(items []interface{}) {
|
be := NewChunkExecutor(func(items []any) {
|
||||||
assert.Equal(t, tasks, len(items))
|
assert.Equal(t, tasks, len(items))
|
||||||
wait.Done()
|
wait.Done()
|
||||||
}, WithChunkBytes(caches), WithFlushInterval(time.Minute))
|
}, WithChunkBytes(caches), WithFlushInterval(time.Minute))
|
||||||
@@ -81,7 +82,7 @@ func TestChunkExecutorFlush(t *testing.T) {
|
|||||||
func BenchmarkChunkExecutor(b *testing.B) {
|
func BenchmarkChunkExecutor(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
|
||||||
be := NewChunkExecutor(func(tasks []interface{}) {
|
be := NewChunkExecutor(func(tasks []any) {
|
||||||
time.Sleep(time.Millisecond * time.Duration(len(tasks)))
|
time.Sleep(time.Millisecond * time.Duration(len(tasks)))
|
||||||
})
|
})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
|
|||||||
@@ -21,16 +21,16 @@ type (
|
|||||||
TaskContainer interface {
|
TaskContainer interface {
|
||||||
// AddTask adds the task into the container.
|
// AddTask adds the task into the container.
|
||||||
// Returns true if the container needs to be flushed after the addition.
|
// Returns true if the container needs to be flushed after the addition.
|
||||||
AddTask(task interface{}) bool
|
AddTask(task any) bool
|
||||||
// Execute handles the collected tasks by the container when flushing.
|
// Execute handles the collected tasks by the container when flushing.
|
||||||
Execute(tasks interface{})
|
Execute(tasks any)
|
||||||
// RemoveAll removes the contained tasks, and return them.
|
// RemoveAll removes the contained tasks, and return them.
|
||||||
RemoveAll() interface{}
|
RemoveAll() any
|
||||||
}
|
}
|
||||||
|
|
||||||
// A PeriodicalExecutor is an executor that periodically execute tasks.
|
// A PeriodicalExecutor is an executor that periodically execute tasks.
|
||||||
PeriodicalExecutor struct {
|
PeriodicalExecutor struct {
|
||||||
commander chan interface{}
|
commander chan any
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
container TaskContainer
|
container TaskContainer
|
||||||
waitGroup sync.WaitGroup
|
waitGroup sync.WaitGroup
|
||||||
@@ -48,7 +48,7 @@ type (
|
|||||||
func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *PeriodicalExecutor {
|
func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *PeriodicalExecutor {
|
||||||
executor := &PeriodicalExecutor{
|
executor := &PeriodicalExecutor{
|
||||||
// buffer 1 to let the caller go quickly
|
// buffer 1 to let the caller go quickly
|
||||||
commander: make(chan interface{}, 1),
|
commander: make(chan any, 1),
|
||||||
interval: interval,
|
interval: interval,
|
||||||
container: container,
|
container: container,
|
||||||
confirmChan: make(chan lang.PlaceholderType),
|
confirmChan: make(chan lang.PlaceholderType),
|
||||||
@@ -64,7 +64,7 @@ func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *Per
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add adds tasks into pe.
|
// Add adds tasks into pe.
|
||||||
func (pe *PeriodicalExecutor) Add(task interface{}) {
|
func (pe *PeriodicalExecutor) Add(task any) {
|
||||||
if vals, ok := pe.addAndCheck(task); ok {
|
if vals, ok := pe.addAndCheck(task); ok {
|
||||||
pe.commander <- vals
|
pe.commander <- vals
|
||||||
<-pe.confirmChan
|
<-pe.confirmChan
|
||||||
@@ -74,14 +74,14 @@ func (pe *PeriodicalExecutor) Add(task interface{}) {
|
|||||||
// Flush forces pe to execute tasks.
|
// Flush forces pe to execute tasks.
|
||||||
func (pe *PeriodicalExecutor) Flush() bool {
|
func (pe *PeriodicalExecutor) Flush() bool {
|
||||||
pe.enterExecution()
|
pe.enterExecution()
|
||||||
return pe.executeTasks(func() interface{} {
|
return pe.executeTasks(func() any {
|
||||||
pe.lock.Lock()
|
pe.lock.Lock()
|
||||||
defer pe.lock.Unlock()
|
defer pe.lock.Unlock()
|
||||||
return pe.container.RemoveAll()
|
return pe.container.RemoveAll()
|
||||||
}())
|
}())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync lets caller to run fn thread-safe with pe, especially for the underlying container.
|
// Sync lets caller run fn thread-safe with pe, especially for the underlying container.
|
||||||
func (pe *PeriodicalExecutor) Sync(fn func()) {
|
func (pe *PeriodicalExecutor) Sync(fn func()) {
|
||||||
pe.lock.Lock()
|
pe.lock.Lock()
|
||||||
defer pe.lock.Unlock()
|
defer pe.lock.Unlock()
|
||||||
@@ -96,7 +96,7 @@ func (pe *PeriodicalExecutor) Wait() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pe *PeriodicalExecutor) addAndCheck(task interface{}) (interface{}, bool) {
|
func (pe *PeriodicalExecutor) addAndCheck(task any) (any, bool) {
|
||||||
pe.lock.Lock()
|
pe.lock.Lock()
|
||||||
defer func() {
|
defer func() {
|
||||||
if !pe.guarded {
|
if !pe.guarded {
|
||||||
@@ -116,7 +116,7 @@ func (pe *PeriodicalExecutor) addAndCheck(task interface{}) (interface{}, bool)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (pe *PeriodicalExecutor) backgroundFlush() {
|
func (pe *PeriodicalExecutor) backgroundFlush() {
|
||||||
threading.GoSafe(func() {
|
go func() {
|
||||||
// flush before quit goroutine to avoid missing tasks
|
// flush before quit goroutine to avoid missing tasks
|
||||||
defer pe.Flush()
|
defer pe.Flush()
|
||||||
|
|
||||||
@@ -144,7 +144,7 @@ func (pe *PeriodicalExecutor) backgroundFlush() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pe *PeriodicalExecutor) doneExecution() {
|
func (pe *PeriodicalExecutor) doneExecution() {
|
||||||
@@ -157,18 +157,20 @@ func (pe *PeriodicalExecutor) enterExecution() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pe *PeriodicalExecutor) executeTasks(tasks interface{}) bool {
|
func (pe *PeriodicalExecutor) executeTasks(tasks any) bool {
|
||||||
defer pe.doneExecution()
|
defer pe.doneExecution()
|
||||||
|
|
||||||
ok := pe.hasTasks(tasks)
|
ok := pe.hasTasks(tasks)
|
||||||
if ok {
|
if ok {
|
||||||
pe.container.Execute(tasks)
|
threading.RunSafe(func() {
|
||||||
|
pe.container.Execute(tasks)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pe *PeriodicalExecutor) hasTasks(tasks interface{}) bool {
|
func (pe *PeriodicalExecutor) hasTasks(tasks any) bool {
|
||||||
if tasks == nil {
|
if tasks == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/proc"
|
||||||
"github.com/zeromicro/go-zero/core/timex"
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -16,22 +17,22 @@ const threshold = 10
|
|||||||
type container struct {
|
type container struct {
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
tasks []int
|
tasks []int
|
||||||
execute func(tasks interface{})
|
execute func(tasks any)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newContainer(interval time.Duration, execute func(tasks interface{})) *container {
|
func newContainer(interval time.Duration, execute func(tasks any)) *container {
|
||||||
return &container{
|
return &container{
|
||||||
interval: interval,
|
interval: interval,
|
||||||
execute: execute,
|
execute: execute,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *container) AddTask(task interface{}) bool {
|
func (c *container) AddTask(task any) bool {
|
||||||
c.tasks = append(c.tasks, task.(int))
|
c.tasks = append(c.tasks, task.(int))
|
||||||
return len(c.tasks) > threshold
|
return len(c.tasks) > threshold
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *container) Execute(tasks interface{}) {
|
func (c *container) Execute(tasks any) {
|
||||||
if c.execute != nil {
|
if c.execute != nil {
|
||||||
c.execute(tasks)
|
c.execute(tasks)
|
||||||
} else {
|
} else {
|
||||||
@@ -39,7 +40,7 @@ func (c *container) Execute(tasks interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *container) RemoveAll() interface{} {
|
func (c *container) RemoveAll() any {
|
||||||
tasks := c.tasks
|
tasks := c.tasks
|
||||||
c.tasks = nil
|
c.tasks = nil
|
||||||
return tasks
|
return tasks
|
||||||
@@ -67,6 +68,7 @@ func TestPeriodicalExecutor_QuitGoroutine(t *testing.T) {
|
|||||||
ticker.Tick()
|
ticker.Tick()
|
||||||
ticker.Wait(time.Millisecond * idleRound)
|
ticker.Wait(time.Millisecond * idleRound)
|
||||||
assert.Equal(t, routines, runtime.NumGoroutine())
|
assert.Equal(t, routines, runtime.NumGoroutine())
|
||||||
|
proc.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPeriodicalExecutor_Bulk(t *testing.T) {
|
func TestPeriodicalExecutor_Bulk(t *testing.T) {
|
||||||
@@ -74,7 +76,7 @@ func TestPeriodicalExecutor_Bulk(t *testing.T) {
|
|||||||
var vals []int
|
var vals []int
|
||||||
// avoid data race
|
// avoid data race
|
||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, func(tasks interface{}) {
|
exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, func(tasks any) {
|
||||||
t := tasks.([]int)
|
t := tasks.([]int)
|
||||||
for _, each := range t {
|
for _, each := range t {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
@@ -106,9 +108,67 @@ func TestPeriodicalExecutor_Bulk(t *testing.T) {
|
|||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPeriodicalExecutor_Panic(t *testing.T) {
|
||||||
|
// avoid data race
|
||||||
|
var lock sync.Mutex
|
||||||
|
ticker := timex.NewFakeTicker()
|
||||||
|
|
||||||
|
var (
|
||||||
|
executedTasks []int
|
||||||
|
expected []int
|
||||||
|
)
|
||||||
|
executor := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, func(tasks any) {
|
||||||
|
tt := tasks.([]int)
|
||||||
|
lock.Lock()
|
||||||
|
executedTasks = append(executedTasks, tt...)
|
||||||
|
lock.Unlock()
|
||||||
|
if tt[0] == 0 {
|
||||||
|
panic("test")
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
executor.newTicker = func(duration time.Duration) timex.Ticker {
|
||||||
|
return ticker
|
||||||
|
}
|
||||||
|
for i := 0; i < 30; i++ {
|
||||||
|
executor.Add(i)
|
||||||
|
expected = append(expected, i)
|
||||||
|
}
|
||||||
|
ticker.Tick()
|
||||||
|
ticker.Tick()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
lock.Lock()
|
||||||
|
assert.Equal(t, expected, executedTasks)
|
||||||
|
lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPeriodicalExecutor_FlushPanic(t *testing.T) {
|
||||||
|
var (
|
||||||
|
executedTasks []int
|
||||||
|
expected []int
|
||||||
|
lock sync.Mutex
|
||||||
|
)
|
||||||
|
executor := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, func(tasks any) {
|
||||||
|
tt := tasks.([]int)
|
||||||
|
lock.Lock()
|
||||||
|
executedTasks = append(executedTasks, tt...)
|
||||||
|
lock.Unlock()
|
||||||
|
if tt[0] == 0 {
|
||||||
|
panic("flush panic")
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
for i := 0; i < 8; i++ {
|
||||||
|
executor.Add(i)
|
||||||
|
expected = append(expected, i)
|
||||||
|
}
|
||||||
|
executor.Flush()
|
||||||
|
lock.Lock()
|
||||||
|
assert.Equal(t, expected, executedTasks)
|
||||||
|
lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
func TestPeriodicalExecutor_Wait(t *testing.T) {
|
func TestPeriodicalExecutor_Wait(t *testing.T) {
|
||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
executer := NewBulkExecutor(func(tasks []interface{}) {
|
executer := NewBulkExecutor(func(tasks []any) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
time.Sleep(10 * time.Millisecond)
|
time.Sleep(10 * time.Millisecond)
|
||||||
@@ -124,7 +184,7 @@ func TestPeriodicalExecutor_WaitFast(t *testing.T) {
|
|||||||
const total = 3
|
const total = 3
|
||||||
var cnt int
|
var cnt int
|
||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
executer := NewBulkExecutor(func(tasks []interface{}) {
|
executer := NewBulkExecutor(func(tasks []any) {
|
||||||
defer func() {
|
defer func() {
|
||||||
cnt++
|
cnt++
|
||||||
}()
|
}()
|
||||||
@@ -141,7 +201,7 @@ func TestPeriodicalExecutor_WaitFast(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPeriodicalExecutor_Deadlock(t *testing.T) {
|
func TestPeriodicalExecutor_Deadlock(t *testing.T) {
|
||||||
executor := NewBulkExecutor(func(tasks []interface{}) {
|
executor := NewBulkExecutor(func(tasks []any) {
|
||||||
}, WithBulkTasks(1), WithBulkInterval(time.Millisecond))
|
}, WithBulkTasks(1), WithBulkInterval(time.Millisecond))
|
||||||
for i := 0; i < 1e5; i++ {
|
for i := 0; i < 1e5; i++ {
|
||||||
executor.Add(1)
|
executor.Add(1)
|
||||||
@@ -149,13 +209,7 @@ func TestPeriodicalExecutor_Deadlock(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPeriodicalExecutor_hasTasks(t *testing.T) {
|
func TestPeriodicalExecutor_hasTasks(t *testing.T) {
|
||||||
ticker := timex.NewFakeTicker()
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, nil))
|
exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, nil))
|
||||||
exec.newTicker = func(d time.Duration) timex.Ticker {
|
|
||||||
return ticker
|
|
||||||
}
|
|
||||||
assert.False(t, exec.hasTasks(nil))
|
assert.False(t, exec.hasTasks(nil))
|
||||||
assert.True(t, exec.hasTasks(1))
|
assert.True(t, exec.hasTasks(1))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,4 +5,4 @@ import "time"
|
|||||||
const defaultFlushInterval = time.Second
|
const defaultFlushInterval = time.Second
|
||||||
|
|
||||||
// Execute defines the method to execute tasks.
|
// Execute defines the method to execute tasks.
|
||||||
type Execute func(tasks []interface{})
|
type Execute func(tasks []any)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build windows
|
//go:build windows
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package fs
|
package fs
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build linux || darwin
|
//go:build linux || darwin
|
||||||
// +build linux darwin
|
|
||||||
|
|
||||||
package fs
|
package fs
|
||||||
|
|
||||||
|
|||||||
15
core/fs/files_test.go
Normal file
15
core/fs/files_test.go
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCloseOnExec(t *testing.T) {
|
||||||
|
file := os.NewFile(0, os.DevNull)
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
CloseOnExec(file)
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -1,7 +1,6 @@
|
|||||||
package fs
|
package fs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/hash"
|
"github.com/zeromicro/go-zero/core/hash"
|
||||||
@@ -12,12 +11,12 @@ import (
|
|||||||
// The file is kept as open, the caller should close the file handle,
|
// The file is kept as open, the caller should close the file handle,
|
||||||
// and remove the file by name.
|
// and remove the file by name.
|
||||||
func TempFileWithText(text string) (*os.File, error) {
|
func TempFileWithText(text string) (*os.File, error) {
|
||||||
tmpfile, err := ioutil.TempFile(os.TempDir(), hash.Md5Hex([]byte(text)))
|
tmpfile, err := os.CreateTemp(os.TempDir(), hash.Md5Hex([]byte(text)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := ioutil.WriteFile(tmpfile.Name(), []byte(text), os.ModeTemporary); err != nil {
|
if err := os.WriteFile(tmpfile.Name(), []byte(text), os.ModeTemporary); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
49
core/fs/temps_test.go
Normal file
49
core/fs/temps_test.go
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTempFileWithText(t *testing.T) {
|
||||||
|
f, err := TempFileWithText("test")
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if f == nil {
|
||||||
|
t.Error("TempFileWithText returned nil")
|
||||||
|
}
|
||||||
|
if f.Name() == "" {
|
||||||
|
t.Error("TempFileWithText returned empty file name")
|
||||||
|
}
|
||||||
|
defer os.Remove(f.Name())
|
||||||
|
|
||||||
|
bs, err := io.ReadAll(f)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
if len(bs) != 4 {
|
||||||
|
t.Error("TempFileWithText returned wrong file size")
|
||||||
|
}
|
||||||
|
if f.Close() != nil {
|
||||||
|
t.Error("TempFileWithText returned error on close")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTempFilenameWithText(t *testing.T) {
|
||||||
|
f, err := TempFilenameWithText("test")
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if f == "" {
|
||||||
|
t.Error("TempFilenameWithText returned empty file name")
|
||||||
|
}
|
||||||
|
defer os.Remove(f)
|
||||||
|
|
||||||
|
bs, err := os.ReadFile(f)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
if len(bs) != 4 {
|
||||||
|
t.Error("TempFilenameWithText returned wrong file size")
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -21,31 +21,31 @@ type (
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FilterFunc defines the method to filter a Stream.
|
// FilterFunc defines the method to filter a Stream.
|
||||||
FilterFunc func(item interface{}) bool
|
FilterFunc func(item any) bool
|
||||||
// ForAllFunc defines the method to handle all elements in a Stream.
|
// ForAllFunc defines the method to handle all elements in a Stream.
|
||||||
ForAllFunc func(pipe <-chan interface{})
|
ForAllFunc func(pipe <-chan any)
|
||||||
// ForEachFunc defines the method to handle each element in a Stream.
|
// ForEachFunc defines the method to handle each element in a Stream.
|
||||||
ForEachFunc func(item interface{})
|
ForEachFunc func(item any)
|
||||||
// GenerateFunc defines the method to send elements into a Stream.
|
// GenerateFunc defines the method to send elements into a Stream.
|
||||||
GenerateFunc func(source chan<- interface{})
|
GenerateFunc func(source chan<- any)
|
||||||
// KeyFunc defines the method to generate keys for the elements in a Stream.
|
// KeyFunc defines the method to generate keys for the elements in a Stream.
|
||||||
KeyFunc func(item interface{}) interface{}
|
KeyFunc func(item any) any
|
||||||
// LessFunc defines the method to compare the elements in a Stream.
|
// LessFunc defines the method to compare the elements in a Stream.
|
||||||
LessFunc func(a, b interface{}) bool
|
LessFunc func(a, b any) bool
|
||||||
// MapFunc defines the method to map each element to another object in a Stream.
|
// MapFunc defines the method to map each element to another object in a Stream.
|
||||||
MapFunc func(item interface{}) interface{}
|
MapFunc func(item any) any
|
||||||
// Option defines the method to customize a Stream.
|
// Option defines the method to customize a Stream.
|
||||||
Option func(opts *rxOptions)
|
Option func(opts *rxOptions)
|
||||||
// ParallelFunc defines the method to handle elements parallelly.
|
// ParallelFunc defines the method to handle elements parallelly.
|
||||||
ParallelFunc func(item interface{})
|
ParallelFunc func(item any)
|
||||||
// ReduceFunc defines the method to reduce all the elements in a Stream.
|
// ReduceFunc defines the method to reduce all the elements in a Stream.
|
||||||
ReduceFunc func(pipe <-chan interface{}) (interface{}, error)
|
ReduceFunc func(pipe <-chan any) (any, error)
|
||||||
// WalkFunc defines the method to walk through all the elements in a Stream.
|
// WalkFunc defines the method to walk through all the elements in a Stream.
|
||||||
WalkFunc func(item interface{}, pipe chan<- interface{})
|
WalkFunc func(item any, pipe chan<- any)
|
||||||
|
|
||||||
// A Stream is a stream that can be used to do stream processing.
|
// A Stream is a stream that can be used to do stream processing.
|
||||||
Stream struct {
|
Stream struct {
|
||||||
source <-chan interface{}
|
source <-chan any
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -56,7 +56,7 @@ func Concat(s Stream, others ...Stream) Stream {
|
|||||||
|
|
||||||
// From constructs a Stream from the given GenerateFunc.
|
// From constructs a Stream from the given GenerateFunc.
|
||||||
func From(generate GenerateFunc) Stream {
|
func From(generate GenerateFunc) Stream {
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
|
|
||||||
threading.GoSafe(func() {
|
threading.GoSafe(func() {
|
||||||
defer close(source)
|
defer close(source)
|
||||||
@@ -67,8 +67,8 @@ func From(generate GenerateFunc) Stream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Just converts the given arbitrary items to a Stream.
|
// Just converts the given arbitrary items to a Stream.
|
||||||
func Just(items ...interface{}) Stream {
|
func Just(items ...any) Stream {
|
||||||
source := make(chan interface{}, len(items))
|
source := make(chan any, len(items))
|
||||||
for _, item := range items {
|
for _, item := range items {
|
||||||
source <- item
|
source <- item
|
||||||
}
|
}
|
||||||
@@ -78,7 +78,7 @@ func Just(items ...interface{}) Stream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Range converts the given channel to a Stream.
|
// Range converts the given channel to a Stream.
|
||||||
func Range(source <-chan interface{}) Stream {
|
func Range(source <-chan any) Stream {
|
||||||
return Stream{
|
return Stream{
|
||||||
source: source,
|
source: source,
|
||||||
}
|
}
|
||||||
@@ -87,7 +87,7 @@ func Range(source <-chan interface{}) Stream {
|
|||||||
// AllMach returns whether all elements of this stream match the provided predicate.
|
// AllMach returns whether all elements of this stream match the provided predicate.
|
||||||
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
||||||
// If the stream is empty then true is returned and the predicate is not evaluated.
|
// If the stream is empty then true is returned and the predicate is not evaluated.
|
||||||
func (s Stream) AllMach(predicate func(item interface{}) bool) bool {
|
func (s Stream) AllMach(predicate func(item any) bool) bool {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
if !predicate(item) {
|
if !predicate(item) {
|
||||||
// make sure the former goroutine not block, and current func returns fast.
|
// make sure the former goroutine not block, and current func returns fast.
|
||||||
@@ -102,7 +102,7 @@ func (s Stream) AllMach(predicate func(item interface{}) bool) bool {
|
|||||||
// AnyMach returns whether any elements of this stream match the provided predicate.
|
// AnyMach returns whether any elements of this stream match the provided predicate.
|
||||||
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
||||||
// If the stream is empty then false is returned and the predicate is not evaluated.
|
// If the stream is empty then false is returned and the predicate is not evaluated.
|
||||||
func (s Stream) AnyMach(predicate func(item interface{}) bool) bool {
|
func (s Stream) AnyMach(predicate func(item any) bool) bool {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
if predicate(item) {
|
if predicate(item) {
|
||||||
// make sure the former goroutine not block, and current func returns fast.
|
// make sure the former goroutine not block, and current func returns fast.
|
||||||
@@ -121,7 +121,7 @@ func (s Stream) Buffer(n int) Stream {
|
|||||||
n = 0
|
n = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
source := make(chan interface{}, n)
|
source := make(chan any, n)
|
||||||
go func() {
|
go func() {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
source <- item
|
source <- item
|
||||||
@@ -134,7 +134,7 @@ func (s Stream) Buffer(n int) Stream {
|
|||||||
|
|
||||||
// Concat returns a Stream that concatenated other streams
|
// Concat returns a Stream that concatenated other streams
|
||||||
func (s Stream) Concat(others ...Stream) Stream {
|
func (s Stream) Concat(others ...Stream) Stream {
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
group := threading.NewRoutineGroup()
|
group := threading.NewRoutineGroup()
|
||||||
@@ -170,12 +170,12 @@ func (s Stream) Count() (count int) {
|
|||||||
|
|
||||||
// Distinct removes the duplicated items base on the given KeyFunc.
|
// Distinct removes the duplicated items base on the given KeyFunc.
|
||||||
func (s Stream) Distinct(fn KeyFunc) Stream {
|
func (s Stream) Distinct(fn KeyFunc) Stream {
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
|
|
||||||
threading.GoSafe(func() {
|
threading.GoSafe(func() {
|
||||||
defer close(source)
|
defer close(source)
|
||||||
|
|
||||||
keys := make(map[interface{}]lang.PlaceholderType)
|
keys := make(map[any]lang.PlaceholderType)
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
key := fn(item)
|
key := fn(item)
|
||||||
if _, ok := keys[key]; !ok {
|
if _, ok := keys[key]; !ok {
|
||||||
@@ -195,7 +195,7 @@ func (s Stream) Done() {
|
|||||||
|
|
||||||
// Filter filters the items by the given FilterFunc.
|
// Filter filters the items by the given FilterFunc.
|
||||||
func (s Stream) Filter(fn FilterFunc, opts ...Option) Stream {
|
func (s Stream) Filter(fn FilterFunc, opts ...Option) Stream {
|
||||||
return s.Walk(func(item interface{}, pipe chan<- interface{}) {
|
return s.Walk(func(item any, pipe chan<- any) {
|
||||||
if fn(item) {
|
if fn(item) {
|
||||||
pipe <- item
|
pipe <- item
|
||||||
}
|
}
|
||||||
@@ -203,7 +203,7 @@ func (s Stream) Filter(fn FilterFunc, opts ...Option) Stream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// First returns the first item, nil if no items.
|
// First returns the first item, nil if no items.
|
||||||
func (s Stream) First() interface{} {
|
func (s Stream) First() any {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
// make sure the former goroutine not block, and current func returns fast.
|
// make sure the former goroutine not block, and current func returns fast.
|
||||||
go drain(s.source)
|
go drain(s.source)
|
||||||
@@ -229,13 +229,13 @@ func (s Stream) ForEach(fn ForEachFunc) {
|
|||||||
|
|
||||||
// Group groups the elements into different groups based on their keys.
|
// Group groups the elements into different groups based on their keys.
|
||||||
func (s Stream) Group(fn KeyFunc) Stream {
|
func (s Stream) Group(fn KeyFunc) Stream {
|
||||||
groups := make(map[interface{}][]interface{})
|
groups := make(map[any][]any)
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
key := fn(item)
|
key := fn(item)
|
||||||
groups[key] = append(groups[key], item)
|
groups[key] = append(groups[key], item)
|
||||||
}
|
}
|
||||||
|
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
go func() {
|
go func() {
|
||||||
for _, group := range groups {
|
for _, group := range groups {
|
||||||
source <- group
|
source <- group
|
||||||
@@ -252,7 +252,7 @@ func (s Stream) Head(n int64) Stream {
|
|||||||
panic("n must be greater than 0")
|
panic("n must be greater than 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
@@ -279,7 +279,7 @@ func (s Stream) Head(n int64) Stream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Last returns the last item, or nil if no items.
|
// Last returns the last item, or nil if no items.
|
||||||
func (s Stream) Last() (item interface{}) {
|
func (s Stream) Last() (item any) {
|
||||||
for item = range s.source {
|
for item = range s.source {
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
@@ -287,29 +287,53 @@ func (s Stream) Last() (item interface{}) {
|
|||||||
|
|
||||||
// Map converts each item to another corresponding item, which means it's a 1:1 model.
|
// Map converts each item to another corresponding item, which means it's a 1:1 model.
|
||||||
func (s Stream) Map(fn MapFunc, opts ...Option) Stream {
|
func (s Stream) Map(fn MapFunc, opts ...Option) Stream {
|
||||||
return s.Walk(func(item interface{}, pipe chan<- interface{}) {
|
return s.Walk(func(item any, pipe chan<- any) {
|
||||||
pipe <- fn(item)
|
pipe <- fn(item)
|
||||||
}, opts...)
|
}, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Max returns the maximum item from the underlying source.
|
||||||
|
func (s Stream) Max(less LessFunc) any {
|
||||||
|
var max any
|
||||||
|
for item := range s.source {
|
||||||
|
if max == nil || less(max, item) {
|
||||||
|
max = item
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return max
|
||||||
|
}
|
||||||
|
|
||||||
// Merge merges all the items into a slice and generates a new stream.
|
// Merge merges all the items into a slice and generates a new stream.
|
||||||
func (s Stream) Merge() Stream {
|
func (s Stream) Merge() Stream {
|
||||||
var items []interface{}
|
var items []any
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
items = append(items, item)
|
items = append(items, item)
|
||||||
}
|
}
|
||||||
|
|
||||||
source := make(chan interface{}, 1)
|
source := make(chan any, 1)
|
||||||
source <- items
|
source <- items
|
||||||
close(source)
|
close(source)
|
||||||
|
|
||||||
return Range(source)
|
return Range(source)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Min returns the minimum item from the underlying source.
|
||||||
|
func (s Stream) Min(less LessFunc) any {
|
||||||
|
var min any
|
||||||
|
for item := range s.source {
|
||||||
|
if min == nil || less(item, min) {
|
||||||
|
min = item
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return min
|
||||||
|
}
|
||||||
|
|
||||||
// NoneMatch returns whether all elements of this stream don't match the provided predicate.
|
// NoneMatch returns whether all elements of this stream don't match the provided predicate.
|
||||||
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
// May not evaluate the predicate on all elements if not necessary for determining the result.
|
||||||
// If the stream is empty then true is returned and the predicate is not evaluated.
|
// If the stream is empty then true is returned and the predicate is not evaluated.
|
||||||
func (s Stream) NoneMatch(predicate func(item interface{}) bool) bool {
|
func (s Stream) NoneMatch(predicate func(item any) bool) bool {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
if predicate(item) {
|
if predicate(item) {
|
||||||
// make sure the former goroutine not block, and current func returns fast.
|
// make sure the former goroutine not block, and current func returns fast.
|
||||||
@@ -323,19 +347,19 @@ func (s Stream) NoneMatch(predicate func(item interface{}) bool) bool {
|
|||||||
|
|
||||||
// Parallel applies the given ParallelFunc to each item concurrently with given number of workers.
|
// Parallel applies the given ParallelFunc to each item concurrently with given number of workers.
|
||||||
func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
|
func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
|
||||||
s.Walk(func(item interface{}, pipe chan<- interface{}) {
|
s.Walk(func(item any, pipe chan<- any) {
|
||||||
fn(item)
|
fn(item)
|
||||||
}, opts...).Done()
|
}, opts...).Done()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reduce is a utility method to let the caller deal with the underlying channel.
|
// Reduce is an utility method to let the caller deal with the underlying channel.
|
||||||
func (s Stream) Reduce(fn ReduceFunc) (interface{}, error) {
|
func (s Stream) Reduce(fn ReduceFunc) (any, error) {
|
||||||
return fn(s.source)
|
return fn(s.source)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reverse reverses the elements in the stream.
|
// Reverse reverses the elements in the stream.
|
||||||
func (s Stream) Reverse() Stream {
|
func (s Stream) Reverse() Stream {
|
||||||
var items []interface{}
|
var items []any
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
items = append(items, item)
|
items = append(items, item)
|
||||||
}
|
}
|
||||||
@@ -357,7 +381,7 @@ func (s Stream) Skip(n int64) Stream {
|
|||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
@@ -376,7 +400,7 @@ func (s Stream) Skip(n int64) Stream {
|
|||||||
|
|
||||||
// Sort sorts the items from the underlying source.
|
// Sort sorts the items from the underlying source.
|
||||||
func (s Stream) Sort(less LessFunc) Stream {
|
func (s Stream) Sort(less LessFunc) Stream {
|
||||||
var items []interface{}
|
var items []any
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
items = append(items, item)
|
items = append(items, item)
|
||||||
}
|
}
|
||||||
@@ -394,9 +418,9 @@ func (s Stream) Split(n int) Stream {
|
|||||||
panic("n should be greater than 0")
|
panic("n should be greater than 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
go func() {
|
go func() {
|
||||||
var chunk []interface{}
|
var chunk []any
|
||||||
for item := range s.source {
|
for item := range s.source {
|
||||||
chunk = append(chunk, item)
|
chunk = append(chunk, item)
|
||||||
if len(chunk) == n {
|
if len(chunk) == n {
|
||||||
@@ -419,7 +443,7 @@ func (s Stream) Tail(n int64) Stream {
|
|||||||
panic("n should be greater than 0")
|
panic("n should be greater than 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
source := make(chan interface{})
|
source := make(chan any)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
ring := collection.NewRing(int(n))
|
ring := collection.NewRing(int(n))
|
||||||
@@ -446,7 +470,7 @@ func (s Stream) Walk(fn WalkFunc, opts ...Option) Stream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
|
func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
|
||||||
pipe := make(chan interface{}, option.workers)
|
pipe := make(chan any, option.workers)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
@@ -477,7 +501,7 @@ func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream {
|
func (s Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream {
|
||||||
pipe := make(chan interface{}, option.workers)
|
pipe := make(chan any, option.workers)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
@@ -529,7 +553,7 @@ func buildOptions(opts ...Option) *rxOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// drain drains the given channel.
|
// drain drains the given channel.
|
||||||
func drain(channel <-chan interface{}) {
|
func drain(channel <-chan any) {
|
||||||
for range channel {
|
for range channel {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
package fx
|
package fx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"reflect"
|
"reflect"
|
||||||
@@ -23,7 +23,7 @@ func TestBuffer(t *testing.T) {
|
|||||||
var count int32
|
var count int32
|
||||||
var wait sync.WaitGroup
|
var wait sync.WaitGroup
|
||||||
wait.Add(1)
|
wait.Add(1)
|
||||||
From(func(source chan<- interface{}) {
|
From(func(source chan<- any) {
|
||||||
ticker := time.NewTicker(10 * time.Millisecond)
|
ticker := time.NewTicker(10 * time.Millisecond)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
@@ -36,7 +36,7 @@ func TestBuffer(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}).Buffer(N).ForAll(func(pipe <-chan interface{}) {
|
}).Buffer(N).ForAll(func(pipe <-chan any) {
|
||||||
wait.Wait()
|
wait.Wait()
|
||||||
// why N+1, because take one more to wait for sending into the channel
|
// why N+1, because take one more to wait for sending into the channel
|
||||||
assert.Equal(t, int32(N+1), atomic.LoadInt32(&count))
|
assert.Equal(t, int32(N+1), atomic.LoadInt32(&count))
|
||||||
@@ -47,7 +47,7 @@ func TestBuffer(t *testing.T) {
|
|||||||
func TestBufferNegative(t *testing.T) {
|
func TestBufferNegative(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4).Buffer(-1).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
Just(1, 2, 3, 4).Buffer(-1).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -61,22 +61,22 @@ func TestCount(t *testing.T) {
|
|||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
elements []interface{}
|
elements []any
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "no elements with nil",
|
name: "no elements with nil",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "no elements",
|
name: "no elements",
|
||||||
elements: []interface{}{},
|
elements: []any{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "1 element",
|
name: "1 element",
|
||||||
elements: []interface{}{1},
|
elements: []any{1},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "multiple elements",
|
name: "multiple elements",
|
||||||
elements: []interface{}{1, 2, 3},
|
elements: []any{1, 2, 3},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -92,7 +92,7 @@ func TestCount(t *testing.T) {
|
|||||||
func TestDone(t *testing.T) {
|
func TestDone(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var count int32
|
var count int32
|
||||||
Just(1, 2, 3).Walk(func(item interface{}, pipe chan<- interface{}) {
|
Just(1, 2, 3).Walk(func(item any, pipe chan<- any) {
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
atomic.AddInt32(&count, int32(item.(int)))
|
atomic.AddInt32(&count, int32(item.(int)))
|
||||||
}).Done()
|
}).Done()
|
||||||
@@ -103,7 +103,7 @@ func TestDone(t *testing.T) {
|
|||||||
func TestJust(t *testing.T) {
|
func TestJust(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
Just(1, 2, 3, 4).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -116,9 +116,9 @@ func TestJust(t *testing.T) {
|
|||||||
func TestDistinct(t *testing.T) {
|
func TestDistinct(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(4, 1, 3, 2, 3, 4).Distinct(func(item interface{}) interface{} {
|
Just(4, 1, 3, 2, 3, 4).Distinct(func(item any) any {
|
||||||
return item
|
return item
|
||||||
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
}).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -131,9 +131,9 @@ func TestDistinct(t *testing.T) {
|
|||||||
func TestFilter(t *testing.T) {
|
func TestFilter(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
|
Just(1, 2, 3, 4).Filter(func(item any) bool {
|
||||||
return item.(int)%2 == 0
|
return item.(int)%2 == 0
|
||||||
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
}).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -154,9 +154,9 @@ func TestFirst(t *testing.T) {
|
|||||||
func TestForAll(t *testing.T) {
|
func TestForAll(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
|
Just(1, 2, 3, 4).Filter(func(item any) bool {
|
||||||
return item.(int)%2 == 0
|
return item.(int)%2 == 0
|
||||||
}).ForAll(func(pipe <-chan interface{}) {
|
}).ForAll(func(pipe <-chan any) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -168,11 +168,11 @@ func TestForAll(t *testing.T) {
|
|||||||
func TestGroup(t *testing.T) {
|
func TestGroup(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var groups [][]int
|
var groups [][]int
|
||||||
Just(10, 11, 20, 21).Group(func(item interface{}) interface{} {
|
Just(10, 11, 20, 21).Group(func(item any) any {
|
||||||
v := item.(int)
|
v := item.(int)
|
||||||
return v / 10
|
return v / 10
|
||||||
}).ForEach(func(item interface{}) {
|
}).ForEach(func(item any) {
|
||||||
v := item.([]interface{})
|
v := item.([]any)
|
||||||
var group []int
|
var group []int
|
||||||
for _, each := range v {
|
for _, each := range v {
|
||||||
group = append(group, each.(int))
|
group = append(group, each.(int))
|
||||||
@@ -191,7 +191,7 @@ func TestGroup(t *testing.T) {
|
|||||||
func TestHead(t *testing.T) {
|
func TestHead(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4).Head(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
Just(1, 2, 3, 4).Head(2).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -204,7 +204,7 @@ func TestHead(t *testing.T) {
|
|||||||
func TestHeadZero(t *testing.T) {
|
func TestHeadZero(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
assert.Panics(t, func() {
|
assert.Panics(t, func() {
|
||||||
Just(1, 2, 3, 4).Head(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
Just(1, 2, 3, 4).Head(0).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -214,7 +214,7 @@ func TestHeadZero(t *testing.T) {
|
|||||||
func TestHeadMore(t *testing.T) {
|
func TestHeadMore(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -238,21 +238,21 @@ func TestLast(t *testing.T) {
|
|||||||
|
|
||||||
func TestMap(t *testing.T) {
|
func TestMap(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
log.SetOutput(ioutil.Discard)
|
log.SetOutput(io.Discard)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
mapper MapFunc
|
mapper MapFunc
|
||||||
expect int
|
expect int
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
mapper: func(item interface{}) interface{} {
|
mapper: func(item any) any {
|
||||||
v := item.(int)
|
v := item.(int)
|
||||||
return v * v
|
return v * v
|
||||||
},
|
},
|
||||||
expect: 30,
|
expect: 30,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mapper: func(item interface{}) interface{} {
|
mapper: func(item any) any {
|
||||||
v := item.(int)
|
v := item.(int)
|
||||||
if v%2 == 0 {
|
if v%2 == 0 {
|
||||||
return 0
|
return 0
|
||||||
@@ -262,7 +262,7 @@ func TestMap(t *testing.T) {
|
|||||||
expect: 10,
|
expect: 10,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
mapper: func(item interface{}) interface{} {
|
mapper: func(item any) any {
|
||||||
v := item.(int)
|
v := item.(int)
|
||||||
if v%2 == 0 {
|
if v%2 == 0 {
|
||||||
panic(v)
|
panic(v)
|
||||||
@@ -283,12 +283,12 @@ func TestMap(t *testing.T) {
|
|||||||
} else {
|
} else {
|
||||||
workers = runtime.NumCPU()
|
workers = runtime.NumCPU()
|
||||||
}
|
}
|
||||||
From(func(source chan<- interface{}) {
|
From(func(source chan<- any) {
|
||||||
for i := 1; i < 5; i++ {
|
for i := 1; i < 5; i++ {
|
||||||
source <- i
|
source <- i
|
||||||
}
|
}
|
||||||
}).Map(test.mapper, WithWorkers(workers)).Reduce(
|
}).Map(test.mapper, WithWorkers(workers)).Reduce(
|
||||||
func(pipe <-chan interface{}) (interface{}, error) {
|
func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -303,8 +303,8 @@ func TestMap(t *testing.T) {
|
|||||||
|
|
||||||
func TestMerge(t *testing.T) {
|
func TestMerge(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
Just(1, 2, 3, 4).Merge().ForEach(func(item interface{}) {
|
Just(1, 2, 3, 4).Merge().ForEach(func(item any) {
|
||||||
assert.ElementsMatch(t, []interface{}{1, 2, 3, 4}, item.([]interface{}))
|
assert.ElementsMatch(t, []any{1, 2, 3, 4}, item.([]any))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -312,7 +312,7 @@ func TestMerge(t *testing.T) {
|
|||||||
func TestParallelJust(t *testing.T) {
|
func TestParallelJust(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var count int32
|
var count int32
|
||||||
Just(1, 2, 3).Parallel(func(item interface{}) {
|
Just(1, 2, 3).Parallel(func(item any) {
|
||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
atomic.AddInt32(&count, int32(item.(int)))
|
atomic.AddInt32(&count, int32(item.(int)))
|
||||||
}, UnlimitedWorkers())
|
}, UnlimitedWorkers())
|
||||||
@@ -322,8 +322,8 @@ func TestParallelJust(t *testing.T) {
|
|||||||
|
|
||||||
func TestReverse(t *testing.T) {
|
func TestReverse(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
Just(1, 2, 3, 4).Reverse().Merge().ForEach(func(item interface{}) {
|
Just(1, 2, 3, 4).Reverse().Merge().ForEach(func(item any) {
|
||||||
assert.ElementsMatch(t, []interface{}{4, 3, 2, 1}, item.([]interface{}))
|
assert.ElementsMatch(t, []any{4, 3, 2, 1}, item.([]any))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -331,9 +331,9 @@ func TestReverse(t *testing.T) {
|
|||||||
func TestSort(t *testing.T) {
|
func TestSort(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var prev int
|
var prev int
|
||||||
Just(5, 3, 7, 1, 9, 6, 4, 8, 2).Sort(func(a, b interface{}) bool {
|
Just(5, 3, 7, 1, 9, 6, 4, 8, 2).Sort(func(a, b any) bool {
|
||||||
return a.(int) < b.(int)
|
return a.(int) < b.(int)
|
||||||
}).ForEach(func(item interface{}) {
|
}).ForEach(func(item any) {
|
||||||
next := item.(int)
|
next := item.(int)
|
||||||
assert.True(t, prev < next)
|
assert.True(t, prev < next)
|
||||||
prev = next
|
prev = next
|
||||||
@@ -346,12 +346,12 @@ func TestSplit(t *testing.T) {
|
|||||||
assert.Panics(t, func() {
|
assert.Panics(t, func() {
|
||||||
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(0).Done()
|
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(0).Done()
|
||||||
})
|
})
|
||||||
var chunks [][]interface{}
|
var chunks [][]any
|
||||||
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item interface{}) {
|
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item any) {
|
||||||
chunk := item.([]interface{})
|
chunk := item.([]any)
|
||||||
chunks = append(chunks, chunk)
|
chunks = append(chunks, chunk)
|
||||||
})
|
})
|
||||||
assert.EqualValues(t, [][]interface{}{
|
assert.EqualValues(t, [][]any{
|
||||||
{1, 2, 3, 4},
|
{1, 2, 3, 4},
|
||||||
{5, 6, 7, 8},
|
{5, 6, 7, 8},
|
||||||
{9, 10},
|
{9, 10},
|
||||||
@@ -362,7 +362,7 @@ func TestSplit(t *testing.T) {
|
|||||||
func TestTail(t *testing.T) {
|
func TestTail(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
for item := range pipe {
|
for item := range pipe {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
}
|
}
|
||||||
@@ -375,7 +375,7 @@ func TestTail(t *testing.T) {
|
|||||||
func TestTailZero(t *testing.T) {
|
func TestTailZero(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
assert.Panics(t, func() {
|
assert.Panics(t, func() {
|
||||||
Just(1, 2, 3, 4).Tail(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
|
Just(1, 2, 3, 4).Tail(0).Reduce(func(pipe <-chan any) (any, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -385,11 +385,11 @@ func TestTailZero(t *testing.T) {
|
|||||||
func TestWalk(t *testing.T) {
|
func TestWalk(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
var result int
|
var result int
|
||||||
Just(1, 2, 3, 4, 5).Walk(func(item interface{}, pipe chan<- interface{}) {
|
Just(1, 2, 3, 4, 5).Walk(func(item any, pipe chan<- any) {
|
||||||
if item.(int)%2 != 0 {
|
if item.(int)%2 != 0 {
|
||||||
pipe <- item
|
pipe <- item
|
||||||
}
|
}
|
||||||
}, UnlimitedWorkers()).ForEach(func(item interface{}) {
|
}, UnlimitedWorkers()).ForEach(func(item any) {
|
||||||
result += item.(int)
|
result += item.(int)
|
||||||
})
|
})
|
||||||
assert.Equal(t, 9, result)
|
assert.Equal(t, 9, result)
|
||||||
@@ -398,16 +398,16 @@ func TestWalk(t *testing.T) {
|
|||||||
|
|
||||||
func TestStream_AnyMach(t *testing.T) {
|
func TestStream_AnyMach(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
|
||||||
return item.(int) == 4
|
return item.(int) == 4
|
||||||
}))
|
}))
|
||||||
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
|
||||||
return item.(int) == 0
|
return item.(int) == 0
|
||||||
}))
|
}))
|
||||||
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
|
||||||
return item.(int) == 2
|
return item.(int) == 2
|
||||||
}))
|
}))
|
||||||
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
|
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
|
||||||
return item.(int) == 2
|
return item.(int) == 2
|
||||||
}))
|
}))
|
||||||
})
|
})
|
||||||
@@ -416,17 +416,17 @@ func TestStream_AnyMach(t *testing.T) {
|
|||||||
func TestStream_AllMach(t *testing.T) {
|
func TestStream_AllMach(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, true, Just(1, 2, 3).AllMach(func(item interface{}) bool {
|
t, true, Just(1, 2, 3).AllMach(func(item any) bool {
|
||||||
return true
|
return true
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool {
|
t, false, Just(1, 2, 3).AllMach(func(item any) bool {
|
||||||
return false
|
return false
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool {
|
t, false, Just(1, 2, 3).AllMach(func(item any) bool {
|
||||||
return item.(int) == 1
|
return item.(int) == 1
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
@@ -436,17 +436,17 @@ func TestStream_AllMach(t *testing.T) {
|
|||||||
func TestStream_NoneMatch(t *testing.T) {
|
func TestStream_NoneMatch(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, true, Just(1, 2, 3).NoneMatch(func(item interface{}) bool {
|
t, true, Just(1, 2, 3).NoneMatch(func(item any) bool {
|
||||||
return false
|
return false
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, false, Just(1, 2, 3).NoneMatch(func(item interface{}) bool {
|
t, false, Just(1, 2, 3).NoneMatch(func(item any) bool {
|
||||||
return true
|
return true
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
assetEqual(
|
assetEqual(
|
||||||
t, true, Just(1, 2, 3).NoneMatch(func(item interface{}) bool {
|
t, true, Just(1, 2, 3).NoneMatch(func(item any) bool {
|
||||||
return item.(int) == 4
|
return item.(int) == 4
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
@@ -455,19 +455,19 @@ func TestStream_NoneMatch(t *testing.T) {
|
|||||||
|
|
||||||
func TestConcat(t *testing.T) {
|
func TestConcat(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
a1 := []interface{}{1, 2, 3}
|
a1 := []any{1, 2, 3}
|
||||||
a2 := []interface{}{4, 5, 6}
|
a2 := []any{4, 5, 6}
|
||||||
s1 := Just(a1...)
|
s1 := Just(a1...)
|
||||||
s2 := Just(a2...)
|
s2 := Just(a2...)
|
||||||
stream := Concat(s1, s2)
|
stream := Concat(s1, s2)
|
||||||
var items []interface{}
|
var items []any
|
||||||
for item := range stream.source {
|
for item := range stream.source {
|
||||||
items = append(items, item)
|
items = append(items, item)
|
||||||
}
|
}
|
||||||
sort.Slice(items, func(i, j int) bool {
|
sort.Slice(items, func(i, j int) bool {
|
||||||
return items[i].(int) < items[j].(int)
|
return items[i].(int) < items[j].(int)
|
||||||
})
|
})
|
||||||
ints := make([]interface{}, 0)
|
ints := make([]any, 0)
|
||||||
ints = append(ints, a1...)
|
ints = append(ints, a1...)
|
||||||
ints = append(ints, a2...)
|
ints = append(ints, a2...)
|
||||||
assetEqual(t, ints, items)
|
assetEqual(t, ints, items)
|
||||||
@@ -479,7 +479,7 @@ func TestStream_Skip(t *testing.T) {
|
|||||||
assetEqual(t, 3, Just(1, 2, 3, 4).Skip(1).Count())
|
assetEqual(t, 3, Just(1, 2, 3, 4).Skip(1).Count())
|
||||||
assetEqual(t, 1, Just(1, 2, 3, 4).Skip(3).Count())
|
assetEqual(t, 1, Just(1, 2, 3, 4).Skip(3).Count())
|
||||||
assetEqual(t, 4, Just(1, 2, 3, 4).Skip(0).Count())
|
assetEqual(t, 4, Just(1, 2, 3, 4).Skip(0).Count())
|
||||||
equal(t, Just(1, 2, 3, 4).Skip(3), []interface{}{4})
|
equal(t, Just(1, 2, 3, 4).Skip(3), []any{4})
|
||||||
assert.Panics(t, func() {
|
assert.Panics(t, func() {
|
||||||
Just(1, 2, 3, 4).Skip(-1)
|
Just(1, 2, 3, 4).Skip(-1)
|
||||||
})
|
})
|
||||||
@@ -489,27 +489,104 @@ func TestStream_Skip(t *testing.T) {
|
|||||||
func TestStream_Concat(t *testing.T) {
|
func TestStream_Concat(t *testing.T) {
|
||||||
runCheckedTest(t, func(t *testing.T) {
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
stream := Just(1).Concat(Just(2), Just(3))
|
stream := Just(1).Concat(Just(2), Just(3))
|
||||||
var items []interface{}
|
var items []any
|
||||||
for item := range stream.source {
|
for item := range stream.source {
|
||||||
items = append(items, item)
|
items = append(items, item)
|
||||||
}
|
}
|
||||||
sort.Slice(items, func(i, j int) bool {
|
sort.Slice(items, func(i, j int) bool {
|
||||||
return items[i].(int) < items[j].(int)
|
return items[i].(int) < items[j].(int)
|
||||||
})
|
})
|
||||||
assetEqual(t, []interface{}{1, 2, 3}, items)
|
assetEqual(t, []any{1, 2, 3}, items)
|
||||||
|
|
||||||
just := Just(1)
|
just := Just(1)
|
||||||
equal(t, just.Concat(just), []interface{}{1})
|
equal(t, just.Concat(just), []any{1})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStream_Max(t *testing.T) {
|
||||||
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
elements []any
|
||||||
|
max any
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no elements with nil",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no elements",
|
||||||
|
elements: []any{},
|
||||||
|
max: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "1 element",
|
||||||
|
elements: []any{1},
|
||||||
|
max: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiple elements",
|
||||||
|
elements: []any{1, 2, 9, 5, 8},
|
||||||
|
max: 9,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
val := Just(test.elements...).Max(func(a, b any) bool {
|
||||||
|
return a.(int) < b.(int)
|
||||||
|
})
|
||||||
|
assetEqual(t, test.max, val)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStream_Min(t *testing.T) {
|
||||||
|
runCheckedTest(t, func(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
elements []any
|
||||||
|
min any
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no elements with nil",
|
||||||
|
min: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no elements",
|
||||||
|
elements: []any{},
|
||||||
|
min: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "1 element",
|
||||||
|
elements: []any{1},
|
||||||
|
min: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiple elements",
|
||||||
|
elements: []any{-1, 1, 2, 9, 5, 8},
|
||||||
|
min: -1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
val := Just(test.elements...).Min(func(a, b any) bool {
|
||||||
|
return a.(int) < b.(int)
|
||||||
|
})
|
||||||
|
assetEqual(t, test.min, val)
|
||||||
|
})
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkParallelMapReduce(b *testing.B) {
|
func BenchmarkParallelMapReduce(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
|
||||||
mapper := func(v interface{}) interface{} {
|
mapper := func(v any) any {
|
||||||
return v.(int64) * v.(int64)
|
return v.(int64) * v.(int64)
|
||||||
}
|
}
|
||||||
reducer := func(input <-chan interface{}) (interface{}, error) {
|
reducer := func(input <-chan any) (any, error) {
|
||||||
var result int64
|
var result int64
|
||||||
for v := range input {
|
for v := range input {
|
||||||
result += v.(int64)
|
result += v.(int64)
|
||||||
@@ -517,7 +594,7 @@ func BenchmarkParallelMapReduce(b *testing.B) {
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
From(func(input chan<- interface{}) {
|
From(func(input chan<- any) {
|
||||||
b.RunParallel(func(pb *testing.PB) {
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
input <- int64(rand.Int())
|
input <- int64(rand.Int())
|
||||||
@@ -529,10 +606,10 @@ func BenchmarkParallelMapReduce(b *testing.B) {
|
|||||||
func BenchmarkMapReduce(b *testing.B) {
|
func BenchmarkMapReduce(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
|
||||||
mapper := func(v interface{}) interface{} {
|
mapper := func(v any) any {
|
||||||
return v.(int64) * v.(int64)
|
return v.(int64) * v.(int64)
|
||||||
}
|
}
|
||||||
reducer := func(input <-chan interface{}) (interface{}, error) {
|
reducer := func(input <-chan any) (any, error) {
|
||||||
var result int64
|
var result int64
|
||||||
for v := range input {
|
for v := range input {
|
||||||
result += v.(int64)
|
result += v.(int64)
|
||||||
@@ -540,21 +617,21 @@ func BenchmarkMapReduce(b *testing.B) {
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
From(func(input chan<- interface{}) {
|
From(func(input chan<- any) {
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
input <- int64(rand.Int())
|
input <- int64(rand.Int())
|
||||||
}
|
}
|
||||||
}).Map(mapper).Reduce(reducer)
|
}).Map(mapper).Reduce(reducer)
|
||||||
}
|
}
|
||||||
|
|
||||||
func assetEqual(t *testing.T, except, data interface{}) {
|
func assetEqual(t *testing.T, except, data any) {
|
||||||
if !reflect.DeepEqual(except, data) {
|
if !reflect.DeepEqual(except, data) {
|
||||||
t.Errorf(" %v, want %v", data, except)
|
t.Errorf(" %v, want %v", data, except)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func equal(t *testing.T, stream Stream, data []interface{}) {
|
func equal(t *testing.T, stream Stream, data []any) {
|
||||||
items := make([]interface{}, 0)
|
items := make([]any, 0)
|
||||||
for item := range stream.source {
|
for item := range stream.source {
|
||||||
items = append(items, item)
|
items = append(items, item)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ func DoWithTimeout(fn func() error, timeout time.Duration, opts ...DoOption) err
|
|||||||
|
|
||||||
// create channel with buffer size 1 to avoid goroutine leak
|
// create channel with buffer size 1 to avoid goroutine leak
|
||||||
done := make(chan error, 1)
|
done := make(chan error, 1)
|
||||||
panicChan := make(chan interface{}, 1)
|
panicChan := make(chan any, 1)
|
||||||
go func() {
|
go func() {
|
||||||
defer func() {
|
defer func() {
|
||||||
if p := recover(); p != nil {
|
if p := recover(); p != nil {
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/zeromicro/go-zero/core/lang"
|
"github.com/zeromicro/go-zero/core/lang"
|
||||||
"github.com/zeromicro/go-zero/core/mapping"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -27,7 +26,7 @@ type (
|
|||||||
hashFunc Func
|
hashFunc Func
|
||||||
replicas int
|
replicas int
|
||||||
keys []uint64
|
keys []uint64
|
||||||
ring map[uint64][]interface{}
|
ring map[uint64][]any
|
||||||
nodes map[string]lang.PlaceholderType
|
nodes map[string]lang.PlaceholderType
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
}
|
}
|
||||||
@@ -51,21 +50,21 @@ func NewCustomConsistentHash(replicas int, fn Func) *ConsistentHash {
|
|||||||
return &ConsistentHash{
|
return &ConsistentHash{
|
||||||
hashFunc: fn,
|
hashFunc: fn,
|
||||||
replicas: replicas,
|
replicas: replicas,
|
||||||
ring: make(map[uint64][]interface{}),
|
ring: make(map[uint64][]any),
|
||||||
nodes: make(map[string]lang.PlaceholderType),
|
nodes: make(map[string]lang.PlaceholderType),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add adds the node with the number of h.replicas,
|
// Add adds the node with the number of h.replicas,
|
||||||
// the later call will overwrite the replicas of the former calls.
|
// the later call will overwrite the replicas of the former calls.
|
||||||
func (h *ConsistentHash) Add(node interface{}) {
|
func (h *ConsistentHash) Add(node any) {
|
||||||
h.AddWithReplicas(node, h.replicas)
|
h.AddWithReplicas(node, h.replicas)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddWithReplicas adds the node with the number of replicas,
|
// AddWithReplicas adds the node with the number of replicas,
|
||||||
// replicas will be truncated to h.replicas if it's larger than h.replicas,
|
// replicas will be truncated to h.replicas if it's larger than h.replicas,
|
||||||
// the later call will overwrite the replicas of the former calls.
|
// the later call will overwrite the replicas of the former calls.
|
||||||
func (h *ConsistentHash) AddWithReplicas(node interface{}, replicas int) {
|
func (h *ConsistentHash) AddWithReplicas(node any, replicas int) {
|
||||||
h.Remove(node)
|
h.Remove(node)
|
||||||
|
|
||||||
if replicas > h.replicas {
|
if replicas > h.replicas {
|
||||||
@@ -90,7 +89,7 @@ func (h *ConsistentHash) AddWithReplicas(node interface{}, replicas int) {
|
|||||||
|
|
||||||
// AddWithWeight adds the node with weight, the weight can be 1 to 100, indicates the percent,
|
// AddWithWeight adds the node with weight, the weight can be 1 to 100, indicates the percent,
|
||||||
// the later call will overwrite the replicas of the former calls.
|
// the later call will overwrite the replicas of the former calls.
|
||||||
func (h *ConsistentHash) AddWithWeight(node interface{}, weight int) {
|
func (h *ConsistentHash) AddWithWeight(node any, weight int) {
|
||||||
// don't need to make sure weight not larger than TopWeight,
|
// don't need to make sure weight not larger than TopWeight,
|
||||||
// because AddWithReplicas makes sure replicas cannot be larger than h.replicas
|
// because AddWithReplicas makes sure replicas cannot be larger than h.replicas
|
||||||
replicas := h.replicas * weight / TopWeight
|
replicas := h.replicas * weight / TopWeight
|
||||||
@@ -98,7 +97,7 @@ func (h *ConsistentHash) AddWithWeight(node interface{}, weight int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the corresponding node from h base on the given v.
|
// Get returns the corresponding node from h base on the given v.
|
||||||
func (h *ConsistentHash) Get(v interface{}) (interface{}, bool) {
|
func (h *ConsistentHash) Get(v any) (any, bool) {
|
||||||
h.lock.RLock()
|
h.lock.RLock()
|
||||||
defer h.lock.RUnlock()
|
defer h.lock.RUnlock()
|
||||||
|
|
||||||
@@ -125,7 +124,7 @@ func (h *ConsistentHash) Get(v interface{}) (interface{}, bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove removes the given node from h.
|
// Remove removes the given node from h.
|
||||||
func (h *ConsistentHash) Remove(node interface{}) {
|
func (h *ConsistentHash) Remove(node any) {
|
||||||
nodeRepr := repr(node)
|
nodeRepr := repr(node)
|
||||||
|
|
||||||
h.lock.Lock()
|
h.lock.Lock()
|
||||||
@@ -178,10 +177,10 @@ func (h *ConsistentHash) removeNode(nodeRepr string) {
|
|||||||
delete(h.nodes, nodeRepr)
|
delete(h.nodes, nodeRepr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func innerRepr(node interface{}) string {
|
func innerRepr(node any) string {
|
||||||
return fmt.Sprintf("%d:%v", prime, node)
|
return fmt.Sprintf("%d:%v", prime, node)
|
||||||
}
|
}
|
||||||
|
|
||||||
func repr(node interface{}) string {
|
func repr(node any) string {
|
||||||
return mapping.Repr(node)
|
return lang.Repr(node)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ func TestConsistentHash(t *testing.T) {
|
|||||||
keys[key.(string)]++
|
keys[key.(string)]++
|
||||||
}
|
}
|
||||||
|
|
||||||
mi := make(map[interface{}]int, len(keys))
|
mi := make(map[any]int, len(keys))
|
||||||
for k, v := range keys {
|
for k, v := range keys {
|
||||||
mi[k] = v
|
mi[k] = v
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ func NewBufferPool(capability int) *BufferPool {
|
|||||||
return &BufferPool{
|
return &BufferPool{
|
||||||
capability: capability,
|
capability: capability,
|
||||||
pool: &sync.Pool{
|
pool: &sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() any {
|
||||||
return new(bytes.Buffer)
|
return new(bytes.Buffer)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ func (nopCloser) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NopCloser returns a io.WriteCloser that does nothing on calling Close.
|
// NopCloser returns an io.WriteCloser that does nothing on calling Close.
|
||||||
func NopCloser(w io.Writer) io.WriteCloser {
|
func NopCloser(w io.Writer) io.WriteCloser {
|
||||||
return nopCloser{w}
|
return nopCloser{w}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@@ -26,7 +25,7 @@ type (
|
|||||||
func DupReadCloser(reader io.ReadCloser) (io.ReadCloser, io.ReadCloser) {
|
func DupReadCloser(reader io.ReadCloser) (io.ReadCloser, io.ReadCloser) {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
tee := io.TeeReader(reader, &buf)
|
tee := io.TeeReader(reader, &buf)
|
||||||
return ioutil.NopCloser(tee), ioutil.NopCloser(&buf)
|
return io.NopCloser(tee), io.NopCloser(&buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
// KeepSpace customizes the reading functions to keep leading and tailing spaces.
|
// KeepSpace customizes the reading functions to keep leading and tailing spaces.
|
||||||
@@ -54,7 +53,7 @@ func ReadBytes(reader io.Reader, buf []byte) error {
|
|||||||
|
|
||||||
// ReadText reads content from the given file with leading and tailing spaces trimmed.
|
// ReadText reads content from the given file with leading and tailing spaces trimmed.
|
||||||
func ReadText(filename string) (string, error) {
|
func ReadText(filename string) (string, error) {
|
||||||
content, err := ioutil.ReadFile(filename)
|
content, err := os.ReadFile(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package iox
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -97,10 +96,10 @@ func TestReadTextLines(t *testing.T) {
|
|||||||
|
|
||||||
func TestDupReadCloser(t *testing.T) {
|
func TestDupReadCloser(t *testing.T) {
|
||||||
input := "hello"
|
input := "hello"
|
||||||
reader := ioutil.NopCloser(bytes.NewBufferString(input))
|
reader := io.NopCloser(bytes.NewBufferString(input))
|
||||||
r1, r2 := DupReadCloser(reader)
|
r1, r2 := DupReadCloser(reader)
|
||||||
verify := func(r io.Reader) {
|
verify := func(r io.Reader) {
|
||||||
output, err := ioutil.ReadAll(r)
|
output, err := io.ReadAll(r)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, input, string(output))
|
assert.Equal(t, input, string(output))
|
||||||
}
|
}
|
||||||
@@ -110,7 +109,7 @@ func TestDupReadCloser(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReadBytes(t *testing.T) {
|
func TestReadBytes(t *testing.T) {
|
||||||
reader := ioutil.NopCloser(bytes.NewBufferString("helloworld"))
|
reader := io.NopCloser(bytes.NewBufferString("helloworld"))
|
||||||
buf := make([]byte, 5)
|
buf := make([]byte, 5)
|
||||||
err := ReadBytes(reader, buf)
|
err := ReadBytes(reader, buf)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -118,7 +117,7 @@ func TestReadBytes(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReadBytesNotEnough(t *testing.T) {
|
func TestReadBytesNotEnough(t *testing.T) {
|
||||||
reader := ioutil.NopCloser(bytes.NewBufferString("hell"))
|
reader := io.NopCloser(bytes.NewBufferString("hell"))
|
||||||
buf := make([]byte, 5)
|
buf := make([]byte, 5)
|
||||||
err := ReadBytes(reader, buf)
|
err := ReadBytes(reader, buf)
|
||||||
assert.Equal(t, io.EOF, err)
|
assert.Equal(t, io.EOF, err)
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package iox
|
package iox
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -13,7 +12,7 @@ func TestCountLines(t *testing.T) {
|
|||||||
2
|
2
|
||||||
3
|
3
|
||||||
4`
|
4`
|
||||||
file, err := ioutil.TempFile(os.TempDir(), "test-")
|
file, err := os.CreateTemp(os.TempDir(), "test-")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,44 +0,0 @@
|
|||||||
package jsontype
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/globalsign/mgo/bson"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MilliTime represents time.Time that works better with mongodb.
|
|
||||||
type MilliTime struct {
|
|
||||||
time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON marshals mt to json bytes.
|
|
||||||
func (mt MilliTime) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(mt.Milli())
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON unmarshals data into mt.
|
|
||||||
func (mt *MilliTime) UnmarshalJSON(data []byte) error {
|
|
||||||
var milli int64
|
|
||||||
if err := json.Unmarshal(data, &milli); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
mt.Time = time.Unix(0, milli*int64(time.Millisecond))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBSON returns BSON base on mt.
|
|
||||||
func (mt MilliTime) GetBSON() (interface{}, error) {
|
|
||||||
return mt.Time, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBSON sets raw into mt.
|
|
||||||
func (mt *MilliTime) SetBSON(raw bson.Raw) error {
|
|
||||||
return raw.Unmarshal(&mt.Time)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Milli returns milliseconds for mt.
|
|
||||||
func (mt MilliTime) Milli() int64 {
|
|
||||||
return mt.UnixNano() / int64(time.Millisecond)
|
|
||||||
}
|
|
||||||
@@ -1,126 +0,0 @@
|
|||||||
package jsontype
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/globalsign/mgo/bson"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMilliTime_GetBSON(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
tm time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "now",
|
|
||||||
tm: time.Now(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "future",
|
|
||||||
tm: time.Now().Add(time.Hour),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
t.Run(test.name, func(t *testing.T) {
|
|
||||||
got, err := MilliTime{test.tm}.GetBSON()
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, test.tm, got)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMilliTime_MarshalJSON(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
tm time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "now",
|
|
||||||
tm: time.Now(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "future",
|
|
||||||
tm: time.Now().Add(time.Hour),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
t.Run(test.name, func(t *testing.T) {
|
|
||||||
b, err := MilliTime{test.tm}.MarshalJSON()
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, strconv.FormatInt(test.tm.UnixNano()/1e6, 10), string(b))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMilliTime_Milli(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
tm time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "now",
|
|
||||||
tm: time.Now(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "future",
|
|
||||||
tm: time.Now().Add(time.Hour),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
t.Run(test.name, func(t *testing.T) {
|
|
||||||
n := MilliTime{test.tm}.Milli()
|
|
||||||
assert.Equal(t, test.tm.UnixNano()/1e6, n)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMilliTime_UnmarshalJSON(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
tm time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "now",
|
|
||||||
tm: time.Now(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "future",
|
|
||||||
tm: time.Now().Add(time.Hour),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
t.Run(test.name, func(t *testing.T) {
|
|
||||||
var mt MilliTime
|
|
||||||
s := strconv.FormatInt(test.tm.UnixNano()/1e6, 10)
|
|
||||||
err := mt.UnmarshalJSON([]byte(s))
|
|
||||||
assert.Nil(t, err)
|
|
||||||
s1, err := mt.MarshalJSON()
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, s, string(s1))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUnmarshalWithError(t *testing.T) {
|
|
||||||
var mt MilliTime
|
|
||||||
assert.NotNil(t, mt.UnmarshalJSON([]byte("hello")))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSetBSON(t *testing.T) {
|
|
||||||
data, err := bson.Marshal(time.Now())
|
|
||||||
assert.Nil(t, err)
|
|
||||||
|
|
||||||
var raw bson.Raw
|
|
||||||
assert.Nil(t, bson.Unmarshal(data, &raw))
|
|
||||||
|
|
||||||
var mt MilliTime
|
|
||||||
assert.Nil(t, mt.SetBSON(raw))
|
|
||||||
assert.NotNil(t, mt.SetBSON(bson.Raw{}))
|
|
||||||
}
|
|
||||||
@@ -9,12 +9,22 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Marshal marshals v into json bytes.
|
// Marshal marshals v into json bytes.
|
||||||
func Marshal(v interface{}) ([]byte, error) {
|
func Marshal(v any) ([]byte, error) {
|
||||||
return json.Marshal(v)
|
return json.Marshal(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MarshalToString marshals v into a string.
|
||||||
|
func MarshalToString(v any) (string, error) {
|
||||||
|
data, err := Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(data), nil
|
||||||
|
}
|
||||||
|
|
||||||
// Unmarshal unmarshals data bytes into v.
|
// Unmarshal unmarshals data bytes into v.
|
||||||
func Unmarshal(data []byte, v interface{}) error {
|
func Unmarshal(data []byte, v any) error {
|
||||||
decoder := json.NewDecoder(bytes.NewReader(data))
|
decoder := json.NewDecoder(bytes.NewReader(data))
|
||||||
if err := unmarshalUseNumber(decoder, v); err != nil {
|
if err := unmarshalUseNumber(decoder, v); err != nil {
|
||||||
return formatError(string(data), err)
|
return formatError(string(data), err)
|
||||||
@@ -24,7 +34,7 @@ func Unmarshal(data []byte, v interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalFromString unmarshals v from str.
|
// UnmarshalFromString unmarshals v from str.
|
||||||
func UnmarshalFromString(str string, v interface{}) error {
|
func UnmarshalFromString(str string, v any) error {
|
||||||
decoder := json.NewDecoder(strings.NewReader(str))
|
decoder := json.NewDecoder(strings.NewReader(str))
|
||||||
if err := unmarshalUseNumber(decoder, v); err != nil {
|
if err := unmarshalUseNumber(decoder, v); err != nil {
|
||||||
return formatError(str, err)
|
return formatError(str, err)
|
||||||
@@ -34,7 +44,7 @@ func UnmarshalFromString(str string, v interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalFromReader unmarshals v from reader.
|
// UnmarshalFromReader unmarshals v from reader.
|
||||||
func UnmarshalFromReader(reader io.Reader, v interface{}) error {
|
func UnmarshalFromReader(reader io.Reader, v any) error {
|
||||||
var buf strings.Builder
|
var buf strings.Builder
|
||||||
teeReader := io.TeeReader(reader, &buf)
|
teeReader := io.TeeReader(reader, &buf)
|
||||||
decoder := json.NewDecoder(teeReader)
|
decoder := json.NewDecoder(teeReader)
|
||||||
@@ -45,7 +55,7 @@ func UnmarshalFromReader(reader io.Reader, v interface{}) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmarshalUseNumber(decoder *json.Decoder, v interface{}) error {
|
func unmarshalUseNumber(decoder *json.Decoder, v any) error {
|
||||||
decoder.UseNumber()
|
decoder.UseNumber()
|
||||||
return decoder.Decode(v)
|
return decoder.Decode(v)
|
||||||
}
|
}
|
||||||
|
|||||||
103
core/jsonx/json_test.go
Normal file
103
core/jsonx/json_test.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
package jsonx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMarshal(t *testing.T) {
|
||||||
|
var v = struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}{
|
||||||
|
Name: "John",
|
||||||
|
Age: 30,
|
||||||
|
}
|
||||||
|
bs, err := Marshal(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, `{"name":"John","age":30}`, string(bs))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshalToString(t *testing.T) {
|
||||||
|
var v = struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}{
|
||||||
|
Name: "John",
|
||||||
|
Age: 30,
|
||||||
|
}
|
||||||
|
toString, err := MarshalToString(v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, `{"name":"John","age":30}`, toString)
|
||||||
|
|
||||||
|
_, err = MarshalToString(make(chan int))
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshal(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30}`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := Unmarshal([]byte(s), &v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", v.Name)
|
||||||
|
assert.Equal(t, 30, v.Age)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalError(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := Unmarshal([]byte(s), &v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromString(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30}`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromString(s, &v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", v.Name)
|
||||||
|
assert.Equal(t, 30, v.Age)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromStringError(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromString(s, &v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromRead(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30}`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromReader(strings.NewReader(s), &v)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "John", v.Name)
|
||||||
|
assert.Equal(t, 30, v.Age)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalFromReaderError(t *testing.T) {
|
||||||
|
const s = `{"name":"John","age":30`
|
||||||
|
var v struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
err := UnmarshalFromReader(strings.NewReader(s), &v)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
@@ -1,11 +1,78 @@
|
|||||||
package lang
|
package lang
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
// Placeholder is a placeholder object that can be used globally.
|
// Placeholder is a placeholder object that can be used globally.
|
||||||
var Placeholder PlaceholderType
|
var Placeholder PlaceholderType
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// AnyType can be used to hold any type.
|
// AnyType can be used to hold any type.
|
||||||
AnyType = interface{}
|
AnyType = any
|
||||||
// PlaceholderType represents a placeholder type.
|
// PlaceholderType represents a placeholder type.
|
||||||
PlaceholderType = struct{}
|
PlaceholderType = struct{}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Repr returns the string representation of v.
|
||||||
|
func Repr(v any) string {
|
||||||
|
if v == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// if func (v *Type) String() string, we can't use Elem()
|
||||||
|
switch vt := v.(type) {
|
||||||
|
case fmt.Stringer:
|
||||||
|
return vt.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
val := reflect.ValueOf(v)
|
||||||
|
for val.Kind() == reflect.Ptr && !val.IsNil() {
|
||||||
|
val = val.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
return reprOfValue(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func reprOfValue(val reflect.Value) string {
|
||||||
|
switch vt := val.Interface().(type) {
|
||||||
|
case bool:
|
||||||
|
return strconv.FormatBool(vt)
|
||||||
|
case error:
|
||||||
|
return vt.Error()
|
||||||
|
case float32:
|
||||||
|
return strconv.FormatFloat(float64(vt), 'f', -1, 32)
|
||||||
|
case float64:
|
||||||
|
return strconv.FormatFloat(vt, 'f', -1, 64)
|
||||||
|
case fmt.Stringer:
|
||||||
|
return vt.String()
|
||||||
|
case int:
|
||||||
|
return strconv.Itoa(vt)
|
||||||
|
case int8:
|
||||||
|
return strconv.Itoa(int(vt))
|
||||||
|
case int16:
|
||||||
|
return strconv.Itoa(int(vt))
|
||||||
|
case int32:
|
||||||
|
return strconv.Itoa(int(vt))
|
||||||
|
case int64:
|
||||||
|
return strconv.FormatInt(vt, 10)
|
||||||
|
case string:
|
||||||
|
return vt
|
||||||
|
case uint:
|
||||||
|
return strconv.FormatUint(uint64(vt), 10)
|
||||||
|
case uint8:
|
||||||
|
return strconv.FormatUint(uint64(vt), 10)
|
||||||
|
case uint16:
|
||||||
|
return strconv.FormatUint(uint64(vt), 10)
|
||||||
|
case uint32:
|
||||||
|
return strconv.FormatUint(uint64(vt), 10)
|
||||||
|
case uint64:
|
||||||
|
return strconv.FormatUint(vt, 10)
|
||||||
|
case []byte:
|
||||||
|
return string(vt)
|
||||||
|
default:
|
||||||
|
return fmt.Sprint(val.Interface())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
156
core/lang/lang_test.go
Normal file
156
core/lang/lang_test.go
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
package lang
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRepr(t *testing.T) {
|
||||||
|
var (
|
||||||
|
f32 float32 = 1.1
|
||||||
|
f64 = 2.2
|
||||||
|
i8 int8 = 1
|
||||||
|
i16 int16 = 2
|
||||||
|
i32 int32 = 3
|
||||||
|
i64 int64 = 4
|
||||||
|
u8 uint8 = 5
|
||||||
|
u16 uint16 = 6
|
||||||
|
u32 uint32 = 7
|
||||||
|
u64 uint64 = 8
|
||||||
|
)
|
||||||
|
tests := []struct {
|
||||||
|
v any
|
||||||
|
expect string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
mockStringable{},
|
||||||
|
"mocked",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
new(mockStringable),
|
||||||
|
"mocked",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
newMockPtr(),
|
||||||
|
"mockptr",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
&mockOpacity{
|
||||||
|
val: 1,
|
||||||
|
},
|
||||||
|
"{1}",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
true,
|
||||||
|
"true",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
false,
|
||||||
|
"false",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
f32,
|
||||||
|
"1.1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
f64,
|
||||||
|
"2.2",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
i8,
|
||||||
|
"1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
i16,
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
i32,
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
i64,
|
||||||
|
"4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
u8,
|
||||||
|
"5",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
u16,
|
||||||
|
"6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
u32,
|
||||||
|
"7",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
u64,
|
||||||
|
"8",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
[]byte(`abcd`),
|
||||||
|
"abcd",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
mockOpacity{val: 1},
|
||||||
|
"{1}",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.expect, func(t *testing.T) {
|
||||||
|
assert.Equal(t, test.expect, Repr(test.v))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReprOfValue(t *testing.T) {
|
||||||
|
t.Run("error", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "error", reprOfValue(reflect.ValueOf(errors.New("error"))))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("stringer", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "1.23", reprOfValue(reflect.ValueOf(json.Number("1.23"))))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("int", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "1", reprOfValue(reflect.ValueOf(1)))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("int", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "1", reprOfValue(reflect.ValueOf("1")))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("int", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "1", reprOfValue(reflect.ValueOf(uint(1))))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockStringable struct{}
|
||||||
|
|
||||||
|
func (m mockStringable) String() string {
|
||||||
|
return "mocked"
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockPtr struct{}
|
||||||
|
|
||||||
|
func newMockPtr() *mockPtr {
|
||||||
|
return new(mockPtr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockPtr) String() string {
|
||||||
|
return "mockptr"
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockOpacity struct {
|
||||||
|
val int
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package limit
|
package limit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
@@ -8,21 +9,6 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/stores/redis"
|
"github.com/zeromicro/go-zero/core/stores/redis"
|
||||||
)
|
)
|
||||||
|
|
||||||
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
|
||||||
const periodScript = `local limit = tonumber(ARGV[1])
|
|
||||||
local window = tonumber(ARGV[2])
|
|
||||||
local current = redis.call("INCRBY", KEYS[1], 1)
|
|
||||||
if current == 1 then
|
|
||||||
redis.call("expire", KEYS[1], window)
|
|
||||||
return 1
|
|
||||||
elseif current < limit then
|
|
||||||
return 1
|
|
||||||
elseif current == limit then
|
|
||||||
return 2
|
|
||||||
else
|
|
||||||
return 0
|
|
||||||
end`
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Unknown means not initialized state.
|
// Unknown means not initialized state.
|
||||||
Unknown = iota
|
Unknown = iota
|
||||||
@@ -38,8 +24,25 @@ const (
|
|||||||
internalHitQuota = 2
|
internalHitQuota = 2
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrUnknownCode is an error that represents unknown status code.
|
var (
|
||||||
var ErrUnknownCode = errors.New("unknown status code")
|
// ErrUnknownCode is an error that represents unknown status code.
|
||||||
|
ErrUnknownCode = errors.New("unknown status code")
|
||||||
|
|
||||||
|
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
||||||
|
periodScript = redis.NewScript(`local limit = tonumber(ARGV[1])
|
||||||
|
local window = tonumber(ARGV[2])
|
||||||
|
local current = redis.call("INCRBY", KEYS[1], 1)
|
||||||
|
if current == 1 then
|
||||||
|
redis.call("expire", KEYS[1], window)
|
||||||
|
end
|
||||||
|
if current < limit then
|
||||||
|
return 1
|
||||||
|
elseif current == limit then
|
||||||
|
return 2
|
||||||
|
else
|
||||||
|
return 0
|
||||||
|
end`)
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// PeriodOption defines the method to customize a PeriodLimit.
|
// PeriodOption defines the method to customize a PeriodLimit.
|
||||||
@@ -74,7 +77,12 @@ func NewPeriodLimit(period, quota int, limitStore *redis.Redis, keyPrefix string
|
|||||||
|
|
||||||
// Take requests a permit, it returns the permit state.
|
// Take requests a permit, it returns the permit state.
|
||||||
func (h *PeriodLimit) Take(key string) (int, error) {
|
func (h *PeriodLimit) Take(key string) (int, error) {
|
||||||
resp, err := h.limitStore.Eval(periodScript, []string{h.keyPrefix + key}, []string{
|
return h.TakeCtx(context.Background(), key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TakeCtx requests a permit with context, it returns the permit state.
|
||||||
|
func (h *PeriodLimit) TakeCtx(ctx context.Context, key string) (int, error) {
|
||||||
|
resp, err := h.limitStore.ScriptRunCtx(ctx, periodScript, []string{h.keyPrefix + key}, []string{
|
||||||
strconv.Itoa(h.quota),
|
strconv.Itoa(h.quota),
|
||||||
strconv.Itoa(h.calcExpireSeconds()),
|
strconv.Itoa(h.calcExpireSeconds()),
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -33,9 +33,7 @@ func TestPeriodLimit_RedisUnavailable(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testPeriodLimit(t *testing.T, opts ...PeriodOption) {
|
func testPeriodLimit(t *testing.T, opts ...PeriodOption) {
|
||||||
store, clean, err := redistest.CreateRedis()
|
store := redistest.CreateRedis(t)
|
||||||
assert.Nil(t, err)
|
|
||||||
defer clean()
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
seconds = 1
|
seconds = 1
|
||||||
@@ -65,3 +63,13 @@ func testPeriodLimit(t *testing.T, opts ...PeriodOption) {
|
|||||||
assert.Equal(t, 1, hitQuota)
|
assert.Equal(t, 1, hitQuota)
|
||||||
assert.Equal(t, total-quota, overQuota)
|
assert.Equal(t, total-quota, overQuota)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestQuotaFull(t *testing.T) {
|
||||||
|
s, err := miniredis.Run()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
l := NewPeriodLimit(1, 1, redis.New(s.Addr()), "periodlimit")
|
||||||
|
val, err := l.Take("first")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, HitQuota, val)
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
package limit
|
package limit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -13,10 +15,15 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
tokenFormat = "{%s}.tokens"
|
||||||
// KEYS[1] as tokens_key
|
timestampFormat = "{%s}.ts"
|
||||||
// KEYS[2] as timestamp_key
|
pingInterval = time.Millisecond * 100
|
||||||
script = `local rate = tonumber(ARGV[1])
|
)
|
||||||
|
|
||||||
|
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
|
||||||
|
// KEYS[1] as tokens_key
|
||||||
|
// KEYS[2] as timestamp_key
|
||||||
|
var script = redis.NewScript(`local rate = tonumber(ARGV[1])
|
||||||
local capacity = tonumber(ARGV[2])
|
local capacity = tonumber(ARGV[2])
|
||||||
local now = tonumber(ARGV[3])
|
local now = tonumber(ARGV[3])
|
||||||
local requested = tonumber(ARGV[4])
|
local requested = tonumber(ARGV[4])
|
||||||
@@ -43,11 +50,7 @@ end
|
|||||||
redis.call("setex", KEYS[1], ttl, new_tokens)
|
redis.call("setex", KEYS[1], ttl, new_tokens)
|
||||||
redis.call("setex", KEYS[2], ttl, now)
|
redis.call("setex", KEYS[2], ttl, now)
|
||||||
|
|
||||||
return allowed`
|
return allowed`)
|
||||||
tokenFormat = "{%s}.tokens"
|
|
||||||
timestampFormat = "{%s}.ts"
|
|
||||||
pingInterval = time.Millisecond * 100
|
|
||||||
)
|
|
||||||
|
|
||||||
// A TokenLimiter controls how frequently events are allowed to happen with in one second.
|
// A TokenLimiter controls how frequently events are allowed to happen with in one second.
|
||||||
type TokenLimiter struct {
|
type TokenLimiter struct {
|
||||||
@@ -58,8 +61,8 @@ type TokenLimiter struct {
|
|||||||
timestampKey string
|
timestampKey string
|
||||||
rescueLock sync.Mutex
|
rescueLock sync.Mutex
|
||||||
redisAlive uint32
|
redisAlive uint32
|
||||||
rescueLimiter *xrate.Limiter
|
|
||||||
monitorStarted bool
|
monitorStarted bool
|
||||||
|
rescueLimiter *xrate.Limiter
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTokenLimiter returns a new TokenLimiter that allows events up to rate and permits
|
// NewTokenLimiter returns a new TokenLimiter that allows events up to rate and permits
|
||||||
@@ -84,19 +87,31 @@ func (lim *TokenLimiter) Allow() bool {
|
|||||||
return lim.AllowN(time.Now(), 1)
|
return lim.AllowN(time.Now(), 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AllowCtx is shorthand for AllowNCtx(ctx,time.Now(), 1) with incoming context.
|
||||||
|
func (lim *TokenLimiter) AllowCtx(ctx context.Context) bool {
|
||||||
|
return lim.AllowNCtx(ctx, time.Now(), 1)
|
||||||
|
}
|
||||||
|
|
||||||
// AllowN reports whether n events may happen at time now.
|
// AllowN reports whether n events may happen at time now.
|
||||||
// Use this method if you intend to drop / skip events that exceed the rate.
|
// Use this method if you intend to drop / skip events that exceed the rate.
|
||||||
// Otherwise, use Reserve or Wait.
|
// Otherwise, use Reserve or Wait.
|
||||||
func (lim *TokenLimiter) AllowN(now time.Time, n int) bool {
|
func (lim *TokenLimiter) AllowN(now time.Time, n int) bool {
|
||||||
return lim.reserveN(now, n)
|
return lim.reserveN(context.Background(), now, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lim *TokenLimiter) reserveN(now time.Time, n int) bool {
|
// AllowNCtx reports whether n events may happen at time now with incoming context.
|
||||||
|
// Use this method if you intend to drop / skip events that exceed the rate.
|
||||||
|
// Otherwise, use Reserve or Wait.
|
||||||
|
func (lim *TokenLimiter) AllowNCtx(ctx context.Context, now time.Time, n int) bool {
|
||||||
|
return lim.reserveN(ctx, now, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (lim *TokenLimiter) reserveN(ctx context.Context, now time.Time, n int) bool {
|
||||||
if atomic.LoadUint32(&lim.redisAlive) == 0 {
|
if atomic.LoadUint32(&lim.redisAlive) == 0 {
|
||||||
return lim.rescueLimiter.AllowN(now, n)
|
return lim.rescueLimiter.AllowN(now, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := lim.store.Eval(
|
resp, err := lim.store.ScriptRunCtx(ctx,
|
||||||
script,
|
script,
|
||||||
[]string{
|
[]string{
|
||||||
lim.tokenKey,
|
lim.tokenKey,
|
||||||
@@ -113,6 +128,10 @@ func (lim *TokenLimiter) reserveN(now time.Time, n int) bool {
|
|||||||
if err == redis.Nil {
|
if err == redis.Nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
|
||||||
|
logx.Errorf("fail to use rate limiter: %s", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logx.Errorf("fail to use rate limiter: %s, use in-process limiter for rescue", err)
|
logx.Errorf("fail to use rate limiter: %s, use in-process limiter for rescue", err)
|
||||||
lim.startMonitor()
|
lim.startMonitor()
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package limit
|
package limit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -15,6 +16,30 @@ func init() {
|
|||||||
logx.Disable()
|
logx.Disable()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTokenLimit_WithCtx(t *testing.T) {
|
||||||
|
s, err := miniredis.Run()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
const (
|
||||||
|
total = 100
|
||||||
|
rate = 5
|
||||||
|
burst = 10
|
||||||
|
)
|
||||||
|
l := NewTokenLimiter(rate, burst, redis.New(s.Addr()), "tokenlimit")
|
||||||
|
defer s.Close()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
ok := l.AllowCtx(ctx)
|
||||||
|
assert.True(t, ok)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
for i := 0; i < total; i++ {
|
||||||
|
ok := l.AllowCtx(ctx)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.False(t, l.monitorStarted)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestTokenLimit_Rescue(t *testing.T) {
|
func TestTokenLimit_Rescue(t *testing.T) {
|
||||||
s, err := miniredis.Run()
|
s, err := miniredis.Run()
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -45,9 +70,7 @@ func TestTokenLimit_Rescue(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTokenLimit_Take(t *testing.T) {
|
func TestTokenLimit_Take(t *testing.T) {
|
||||||
store, clean, err := redistest.CreateRedis()
|
store := redistest.CreateRedis(t)
|
||||||
assert.Nil(t, err)
|
|
||||||
defer clean()
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
total = 100
|
total = 100
|
||||||
@@ -67,9 +90,7 @@ func TestTokenLimit_Take(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTokenLimit_TakeBurst(t *testing.T) {
|
func TestTokenLimit_TakeBurst(t *testing.T) {
|
||||||
store, clean, err := redistest.CreateRedis()
|
store := redistest.CreateRedis(t)
|
||||||
assert.Nil(t, err)
|
|
||||||
defer clean()
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
total = 100
|
total = 100
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ import (
|
|||||||
const (
|
const (
|
||||||
defaultBuckets = 50
|
defaultBuckets = 50
|
||||||
defaultWindow = time.Second * 5
|
defaultWindow = time.Second * 5
|
||||||
// using 1000m notation, 900m is like 80%, keep it as var for unit test
|
// using 1000m notation, 900m is like 90%, keep it as var for unit test
|
||||||
defaultCpuThreshold = 900
|
defaultCpuThreshold = 900
|
||||||
defaultMinRt = float64(time.Second / time.Millisecond)
|
defaultMinRt = float64(time.Second / time.Millisecond)
|
||||||
// moving average hyperparameter beta for calculating requests on the fly
|
// moving average hyperparameter beta for calculating requests on the fly
|
||||||
@@ -70,7 +70,7 @@ type (
|
|||||||
flying int64
|
flying int64
|
||||||
avgFlying float64
|
avgFlying float64
|
||||||
avgFlyingLock syncx.SpinLock
|
avgFlyingLock syncx.SpinLock
|
||||||
dropTime *syncx.AtomicDuration
|
overloadTime *syncx.AtomicDuration
|
||||||
droppedRecently *syncx.AtomicBool
|
droppedRecently *syncx.AtomicBool
|
||||||
passCounter *collection.RollingWindow
|
passCounter *collection.RollingWindow
|
||||||
rtCounter *collection.RollingWindow
|
rtCounter *collection.RollingWindow
|
||||||
@@ -106,7 +106,7 @@ func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
|
|||||||
return &adaptiveShedder{
|
return &adaptiveShedder{
|
||||||
cpuThreshold: options.cpuThreshold,
|
cpuThreshold: options.cpuThreshold,
|
||||||
windows: int64(time.Second / bucketDuration),
|
windows: int64(time.Second / bucketDuration),
|
||||||
dropTime: syncx.NewAtomicDuration(),
|
overloadTime: syncx.NewAtomicDuration(),
|
||||||
droppedRecently: syncx.NewAtomicBool(),
|
droppedRecently: syncx.NewAtomicBool(),
|
||||||
passCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
|
passCounter: collection.NewRollingWindow(options.buckets, bucketDuration,
|
||||||
collection.IgnoreCurrentBucket()),
|
collection.IgnoreCurrentBucket()),
|
||||||
@@ -118,7 +118,6 @@ func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
|
|||||||
// Allow implements Shedder.Allow.
|
// Allow implements Shedder.Allow.
|
||||||
func (as *adaptiveShedder) Allow() (Promise, error) {
|
func (as *adaptiveShedder) Allow() (Promise, error) {
|
||||||
if as.shouldDrop() {
|
if as.shouldDrop() {
|
||||||
as.dropTime.Set(timex.Now())
|
|
||||||
as.droppedRecently.Set(true)
|
as.droppedRecently.Set(true)
|
||||||
|
|
||||||
return nil, ErrServiceOverloaded
|
return nil, ErrServiceOverloaded
|
||||||
@@ -215,21 +214,26 @@ func (as *adaptiveShedder) stillHot() bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
dropTime := as.dropTime.Load()
|
overloadTime := as.overloadTime.Load()
|
||||||
if dropTime == 0 {
|
if overloadTime == 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
hot := timex.Since(dropTime) < coolOffDuration
|
if timex.Since(overloadTime) < coolOffDuration {
|
||||||
if !hot {
|
return true
|
||||||
as.droppedRecently.Set(false)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return hot
|
as.droppedRecently.Set(false)
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *adaptiveShedder) systemOverloaded() bool {
|
func (as *adaptiveShedder) systemOverloaded() bool {
|
||||||
return systemOverloadChecker(as.cpuThreshold)
|
if !systemOverloadChecker(as.cpuThreshold) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
as.overloadTime.Set(timex.Now())
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithBuckets customizes the Shedder with given number of buckets.
|
// WithBuckets customizes the Shedder with given number of buckets.
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
"github.com/zeromicro/go-zero/core/mathx"
|
"github.com/zeromicro/go-zero/core/mathx"
|
||||||
"github.com/zeromicro/go-zero/core/stat"
|
"github.com/zeromicro/go-zero/core/stat"
|
||||||
"github.com/zeromicro/go-zero/core/syncx"
|
"github.com/zeromicro/go-zero/core/syncx"
|
||||||
|
"github.com/zeromicro/go-zero/core/timex"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -136,7 +137,7 @@ func TestAdaptiveShedderShouldDrop(t *testing.T) {
|
|||||||
passCounter: passCounter,
|
passCounter: passCounter,
|
||||||
rtCounter: rtCounter,
|
rtCounter: rtCounter,
|
||||||
windows: buckets,
|
windows: buckets,
|
||||||
dropTime: syncx.NewAtomicDuration(),
|
overloadTime: syncx.NewAtomicDuration(),
|
||||||
droppedRecently: syncx.NewAtomicBool(),
|
droppedRecently: syncx.NewAtomicBool(),
|
||||||
}
|
}
|
||||||
// cpu >= 800, inflight < maxPass
|
// cpu >= 800, inflight < maxPass
|
||||||
@@ -190,12 +191,15 @@ func TestAdaptiveShedderStillHot(t *testing.T) {
|
|||||||
passCounter: passCounter,
|
passCounter: passCounter,
|
||||||
rtCounter: rtCounter,
|
rtCounter: rtCounter,
|
||||||
windows: buckets,
|
windows: buckets,
|
||||||
dropTime: syncx.NewAtomicDuration(),
|
overloadTime: syncx.NewAtomicDuration(),
|
||||||
droppedRecently: syncx.ForAtomicBool(true),
|
droppedRecently: syncx.ForAtomicBool(true),
|
||||||
}
|
}
|
||||||
assert.False(t, shedder.stillHot())
|
assert.False(t, shedder.stillHot())
|
||||||
shedder.dropTime.Set(-coolOffDuration * 2)
|
shedder.overloadTime.Set(-coolOffDuration * 2)
|
||||||
assert.False(t, shedder.stillHot())
|
assert.False(t, shedder.stillHot())
|
||||||
|
shedder.droppedRecently.Set(true)
|
||||||
|
shedder.overloadTime.Set(timex.Now())
|
||||||
|
assert.True(t, shedder.stillHot())
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkAdaptiveShedder_Allow(b *testing.B) {
|
func BenchmarkAdaptiveShedder_Allow(b *testing.B) {
|
||||||
|
|||||||
142
core/logc/logs.go
Normal file
142
core/logc/logs.go
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
package logc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
LogConf = logx.LogConf
|
||||||
|
LogField = logx.LogField
|
||||||
|
)
|
||||||
|
|
||||||
|
// AddGlobalFields adds global fields.
|
||||||
|
func AddGlobalFields(fields ...LogField) {
|
||||||
|
logx.AddGlobalFields(fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Alert alerts v in alert level, and the message is written to error log.
|
||||||
|
func Alert(_ context.Context, v string) {
|
||||||
|
logx.Alert(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the logging.
|
||||||
|
func Close() error {
|
||||||
|
return logx.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debug writes v into access log.
|
||||||
|
func Debug(ctx context.Context, v ...interface{}) {
|
||||||
|
getLogger(ctx).Debug(v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debugf writes v with format into access log.
|
||||||
|
func Debugf(ctx context.Context, format string, v ...interface{}) {
|
||||||
|
getLogger(ctx).Debugf(format, v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debugv writes v into access log with json content.
|
||||||
|
func Debugv(ctx context.Context, v interface{}) {
|
||||||
|
getLogger(ctx).Debugv(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debugw writes msg along with fields into access log.
|
||||||
|
func Debugw(ctx context.Context, msg string, fields ...LogField) {
|
||||||
|
getLogger(ctx).Debugw(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error writes v into error log.
|
||||||
|
func Error(ctx context.Context, v ...any) {
|
||||||
|
getLogger(ctx).Error(v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorf writes v with format into error log.
|
||||||
|
func Errorf(ctx context.Context, format string, v ...any) {
|
||||||
|
getLogger(ctx).Errorf(fmt.Errorf(format, v...).Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorv writes v into error log with json content.
|
||||||
|
// No call stack attached, because not elegant to pack the messages.
|
||||||
|
func Errorv(ctx context.Context, v any) {
|
||||||
|
getLogger(ctx).Errorv(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorw writes msg along with fields into error log.
|
||||||
|
func Errorw(ctx context.Context, msg string, fields ...LogField) {
|
||||||
|
getLogger(ctx).Errorw(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field returns a LogField for the given key and value.
|
||||||
|
func Field(key string, value any) LogField {
|
||||||
|
return logx.Field(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info writes v into access log.
|
||||||
|
func Info(ctx context.Context, v ...any) {
|
||||||
|
getLogger(ctx).Info(v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infof writes v with format into access log.
|
||||||
|
func Infof(ctx context.Context, format string, v ...any) {
|
||||||
|
getLogger(ctx).Infof(format, v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infov writes v into access log with json content.
|
||||||
|
func Infov(ctx context.Context, v any) {
|
||||||
|
getLogger(ctx).Infov(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infow writes msg along with fields into access log.
|
||||||
|
func Infow(ctx context.Context, msg string, fields ...LogField) {
|
||||||
|
getLogger(ctx).Infow(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must checks if err is nil, otherwise logs the error and exits.
|
||||||
|
func Must(err error) {
|
||||||
|
logx.Must(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustSetup sets up logging with given config c. It exits on error.
|
||||||
|
func MustSetup(c logx.LogConf) {
|
||||||
|
logx.MustSetup(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetLevel sets the logging level. It can be used to suppress some logs.
|
||||||
|
func SetLevel(level uint32) {
|
||||||
|
logx.SetLevel(level)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUp sets up the logx. If already set up, just return nil.
|
||||||
|
// we allow SetUp to be called multiple times, because for example
|
||||||
|
// we need to allow different service frameworks to initialize logx respectively.
|
||||||
|
// the same logic for SetUp
|
||||||
|
func SetUp(c LogConf) error {
|
||||||
|
return logx.SetUp(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slow writes v into slow log.
|
||||||
|
func Slow(ctx context.Context, v ...any) {
|
||||||
|
getLogger(ctx).Slow(v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slowf writes v with format into slow log.
|
||||||
|
func Slowf(ctx context.Context, format string, v ...any) {
|
||||||
|
getLogger(ctx).Slowf(format, v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slowv writes v into slow log with json content.
|
||||||
|
func Slowv(ctx context.Context, v any) {
|
||||||
|
getLogger(ctx).Slowv(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sloww writes msg along with fields into slow log.
|
||||||
|
func Sloww(ctx context.Context, msg string, fields ...LogField) {
|
||||||
|
getLogger(ctx).Sloww(msg, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getLogger returns the logx.Logger with the given ctx and correct caller.
|
||||||
|
func getLogger(ctx context.Context) logx.Logger {
|
||||||
|
return logx.WithContext(ctx).WithCallerSkip(1)
|
||||||
|
}
|
||||||
266
core/logc/logs_test.go
Normal file
266
core/logc/logs_test.go
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
package logc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/logx"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAddGlobalFields(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
Info(context.Background(), "hello")
|
||||||
|
buf.Reset()
|
||||||
|
|
||||||
|
AddGlobalFields(Field("a", "1"), Field("b", "2"))
|
||||||
|
AddGlobalFields(Field("c", "3"))
|
||||||
|
Info(context.Background(), "world")
|
||||||
|
var m map[string]any
|
||||||
|
assert.NoError(t, json.Unmarshal(buf.Bytes(), &m))
|
||||||
|
assert.Equal(t, "1", m["a"])
|
||||||
|
assert.Equal(t, "2", m["b"])
|
||||||
|
assert.Equal(t, "3", m["c"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlert(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
Alert(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), "foo"), buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestError(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Error(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorf(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Errorf(context.Background(), "foo %s", "bar")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorv(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Errorv(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorw(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Errorw(context.Background(), "foo", Field("a", "b"))
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfo(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Info(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfof(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Infof(context.Background(), "foo %s", "bar")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfov(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Infov(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfow(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Infow(context.Background(), "foo", Field("a", "b"))
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebug(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Debug(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugf(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Debugf(context.Background(), "foo %s", "bar")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugv(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Debugv(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugw(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Debugw(context.Background(), "foo", Field("a", "b"))
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMust(t *testing.T) {
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
Must(nil)
|
||||||
|
})
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
MustSetup(LogConf{})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMisc(t *testing.T) {
|
||||||
|
SetLevel(logx.DebugLevel)
|
||||||
|
assert.NoError(t, SetUp(LogConf{}))
|
||||||
|
assert.NoError(t, Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSlow(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Slow(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSlowf(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Slowf(context.Background(), "foo %s", "bar")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSlowv(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Slowv(context.Background(), "foo")
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSloww(t *testing.T) {
|
||||||
|
var buf strings.Builder
|
||||||
|
writer := logx.NewWriter(&buf)
|
||||||
|
old := logx.Reset()
|
||||||
|
logx.SetWriter(writer)
|
||||||
|
defer logx.SetWriter(old)
|
||||||
|
|
||||||
|
file, line := getFileLine()
|
||||||
|
Sloww(context.Background(), "foo", Field("a", "b"))
|
||||||
|
assert.True(t, strings.Contains(buf.String(), fmt.Sprintf("%s:%d", file, line+1)), buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFileLine returns the immediate caller's base file name (path stripped)
// and line number, as reported by runtime.Caller.
func getFileLine() (string, int) {
	_, file, line, _ := runtime.Caller(1)

	// Strip the directory part; a '/' at index 0 alone is deliberately
	// not trimmed, matching the original backward scan that stopped at i > 0.
	short := file
	if idx := strings.LastIndexByte(file, '/'); idx > 0 {
		short = file[idx+1:]
	}

	return short, line
}
|
||||||
26
core/logx/color.go
Normal file
26
core/logx/color.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/zeromicro/go-zero/core/color"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithColor is a helper function to add color to a string, only in plain encoding.
|
||||||
|
func WithColor(text string, colour color.Color) string {
|
||||||
|
if atomic.LoadUint32(&encoding) == plainEncodingType {
|
||||||
|
return color.WithColor(text, colour)
|
||||||
|
}
|
||||||
|
|
||||||
|
return text
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithColorPadding is a helper function to add color to a string with leading and trailing spaces,
|
||||||
|
// only in plain encoding.
|
||||||
|
func WithColorPadding(text string, colour color.Color) string {
|
||||||
|
if atomic.LoadUint32(&encoding) == plainEncodingType {
|
||||||
|
return color.WithColorPadding(text, colour)
|
||||||
|
}
|
||||||
|
|
||||||
|
return text
|
||||||
|
}
|
||||||
33
core/logx/color_test.go
Normal file
33
core/logx/color_test.go
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/zeromicro/go-zero/core/color"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithColor(t *testing.T) {
|
||||||
|
old := atomic.SwapUint32(&encoding, plainEncodingType)
|
||||||
|
defer atomic.StoreUint32(&encoding, old)
|
||||||
|
|
||||||
|
output := WithColor("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, "hello", output)
|
||||||
|
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
|
output = WithColor("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, "hello", output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithColorPadding(t *testing.T) {
|
||||||
|
old := atomic.SwapUint32(&encoding, plainEncodingType)
|
||||||
|
defer atomic.StoreUint32(&encoding, old)
|
||||||
|
|
||||||
|
output := WithColorPadding("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, " hello ", output)
|
||||||
|
|
||||||
|
atomic.StoreUint32(&encoding, jsonEncodingType)
|
||||||
|
output = WithColorPadding("hello", color.BgBlue)
|
||||||
|
assert.Equal(t, "hello", output)
|
||||||
|
}
|
||||||
@@ -2,13 +2,44 @@ package logx
|
|||||||
|
|
||||||
// A LogConf is a logging config.
type LogConf struct {
	// ServiceName represents the service name.
	ServiceName string `json:",optional"`
	// Mode represents the logging mode, default is `console`.
	// console: log to console.
	// file: log to file.
	// volume: used in k8s, prepend the hostname to the log file name.
	Mode string `json:",default=console,options=[console,file,volume]"`
	// Encoding represents the encoding type, default is `json`.
	// json: json encoding.
	// plain: plain text encoding, typically used in development.
	Encoding string `json:",default=json,options=[json,plain]"`
	// TimeFormat represents the time format, default is `2006-01-02T15:04:05.000Z07:00`.
	TimeFormat string `json:",optional"`
	// Path represents the log file path, default is `logs`.
	Path string `json:",default=logs"`
	// Level represents the log level, default is `info`.
	Level string `json:",default=info,options=[debug,info,error,severe]"`
	// MaxContentLength represents the max content bytes, default is no limit.
	MaxContentLength uint32 `json:",optional"`
	// Compress represents whether to compress the log file, default is `false`.
	Compress bool `json:",optional"`
	// Stat represents whether to log statistics, default is `true`.
	Stat bool `json:",default=true"`
	// KeepDays represents how many days the log files will be kept. Default to keep all files.
	// Only take effect when Mode is `file` or `volume`, both work when Rotation is `daily` or `size`.
	KeepDays int `json:",optional"`
	// StackCooldownMillis represents the cooldown time for stack logging, default is 100ms.
	StackCooldownMillis int `json:",default=100"`
	// MaxBackups represents how many backup log files will be kept. 0 means all files will be kept forever.
	// Only take effect when RotationRuleType is `size`.
	// Even though `MaxBackups` sets 0, log files will still be removed
	// if the `KeepDays` limitation is reached.
	MaxBackups int `json:",default=0"`
	// MaxSize represents how much space the writing log file takes up. 0 means no limit. The unit is `MB`.
	// Only take effect when RotationRuleType is `size`.
	MaxSize int `json:",default=0"`
	// Rotation represents the type of log rotation rule. Default is `daily`.
	// daily: daily rotation.
	// size: size limited rotation.
	Rotation string `json:",default=daily,options=[daily,size]"`
}
|
||||||
|
|||||||
@@ -1,93 +0,0 @@
|
|||||||
package logx
|
|
||||||
|
|
||||||
import (
	"fmt"
	"io"
	"sync/atomic"
	"time"

	"github.com/zeromicro/go-zero/core/timex"
)
|
|
||||||
|
|
||||||
const durationCallerDepth = 3
|
|
||||||
|
|
||||||
type durationLogger logEntry
|
|
||||||
|
|
||||||
// WithDuration returns a Logger which logs the given duration.
|
|
||||||
func WithDuration(d time.Duration) Logger {
|
|
||||||
return &durationLogger{
|
|
||||||
Duration: timex.ReprOfDuration(d),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Error(v ...interface{}) {
|
|
||||||
if shallLog(ErrorLevel) {
|
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Errorf(format string, v ...interface{}) {
|
|
||||||
if shallLog(ErrorLevel) {
|
|
||||||
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Errorv(v interface{}) {
|
|
||||||
if shallLog(ErrorLevel) {
|
|
||||||
l.write(errorLog, levelError, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Info(v ...interface{}) {
|
|
||||||
if shallLog(InfoLevel) {
|
|
||||||
l.write(infoLog, levelInfo, fmt.Sprint(v...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Infof(format string, v ...interface{}) {
|
|
||||||
if shallLog(InfoLevel) {
|
|
||||||
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Infov(v interface{}) {
|
|
||||||
if shallLog(InfoLevel) {
|
|
||||||
l.write(infoLog, levelInfo, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Slow(v ...interface{}) {
|
|
||||||
if shallLog(ErrorLevel) {
|
|
||||||
l.write(slowLog, levelSlow, fmt.Sprint(v...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Slowf(format string, v ...interface{}) {
|
|
||||||
if shallLog(ErrorLevel) {
|
|
||||||
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) Slowv(v interface{}) {
|
|
||||||
if shallLog(ErrorLevel) {
|
|
||||||
l.write(slowLog, levelSlow, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) WithDuration(duration time.Duration) Logger {
|
|
||||||
l.Duration = timex.ReprOfDuration(duration)
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *durationLogger) write(writer io.Writer, level string, val interface{}) {
|
|
||||||
switch encoding {
|
|
||||||
case plainEncodingType:
|
|
||||||
writePlainAny(writer, level, val, l.Duration)
|
|
||||||
default:
|
|
||||||
outputJson(writer, &durationLogger{
|
|
||||||
Timestamp: getTimestamp(),
|
|
||||||
Level: level,
|
|
||||||
Content: val,
|
|
||||||
Duration: l.Duration,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,86 +0,0 @@
|
|||||||
package logx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestWithDurationError(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Error("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationErrorf(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Errorf("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationErrorv(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Errorv("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationInfo(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Info("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationInfoConsole(t *testing.T) {
|
|
||||||
old := encoding
|
|
||||||
encoding = plainEncodingType
|
|
||||||
defer func() {
|
|
||||||
encoding = old
|
|
||||||
}()
|
|
||||||
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Info("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "ms"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationInfof(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Infof("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationInfov(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Infov("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationSlow(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).Slow("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationSlowf(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).WithDuration(time.Hour).Slowf("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithDurationSlowv(t *testing.T) {
|
|
||||||
var builder strings.Builder
|
|
||||||
log.SetOutput(&builder)
|
|
||||||
WithDuration(time.Second).WithDuration(time.Hour).Slowv("foo")
|
|
||||||
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
|
|
||||||
}
|
|
||||||
48
core/logx/fields.go
Normal file
48
core/logx/fields.go
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// fieldsContextKey keys per-context log fields stored via context.WithValue.
	fieldsContextKey contextKey
	// globalFields holds a []LogField snapshot readable without locking.
	globalFields atomic.Value
	// globalFieldsLock serializes writers of globalFields.
	globalFieldsLock sync.Mutex
)

// contextKey is an unexported key type to avoid context key collisions.
type contextKey struct{}
|
||||||
|
|
||||||
|
// AddGlobalFields adds global fields.
|
||||||
|
func AddGlobalFields(fields ...LogField) {
|
||||||
|
globalFieldsLock.Lock()
|
||||||
|
defer globalFieldsLock.Unlock()
|
||||||
|
|
||||||
|
old := globalFields.Load()
|
||||||
|
if old == nil {
|
||||||
|
globalFields.Store(append([]LogField(nil), fields...))
|
||||||
|
} else {
|
||||||
|
globalFields.Store(append(old.([]LogField), fields...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContextWithFields returns a new context with the given fields.
|
||||||
|
func ContextWithFields(ctx context.Context, fields ...LogField) context.Context {
|
||||||
|
if val := ctx.Value(fieldsContextKey); val != nil {
|
||||||
|
if arr, ok := val.([]LogField); ok {
|
||||||
|
allFields := make([]LogField, 0, len(arr)+len(fields))
|
||||||
|
allFields = append(allFields, arr...)
|
||||||
|
allFields = append(allFields, fields...)
|
||||||
|
return context.WithValue(ctx, fieldsContextKey, allFields)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return context.WithValue(ctx, fieldsContextKey, fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFields returns a new logger with the given fields.
|
||||||
|
// deprecated: use ContextWithFields instead.
|
||||||
|
func WithFields(ctx context.Context, fields ...LogField) context.Context {
|
||||||
|
return ContextWithFields(ctx, fields...)
|
||||||
|
}
|
||||||
121
core/logx/fields_test.go
Normal file
121
core/logx/fields_test.go
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
package logx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAddGlobalFields(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := NewWriter(&buf)
|
||||||
|
old := Reset()
|
||||||
|
SetWriter(writer)
|
||||||
|
defer SetWriter(old)
|
||||||
|
|
||||||
|
Info("hello")
|
||||||
|
buf.Reset()
|
||||||
|
|
||||||
|
AddGlobalFields(Field("a", "1"), Field("b", "2"))
|
||||||
|
AddGlobalFields(Field("c", "3"))
|
||||||
|
Info("world")
|
||||||
|
var m map[string]any
|
||||||
|
assert.NoError(t, json.Unmarshal(buf.Bytes(), &m))
|
||||||
|
assert.Equal(t, "1", m["a"])
|
||||||
|
assert.Equal(t, "2", m["b"])
|
||||||
|
assert.Equal(t, "3", m["c"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestContextWithFields(t *testing.T) {
|
||||||
|
ctx := ContextWithFields(context.Background(), Field("a", 1), Field("b", 2))
|
||||||
|
vals := ctx.Value(fieldsContextKey)
|
||||||
|
assert.NotNil(t, vals)
|
||||||
|
fields, ok := vals.([]LogField)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.EqualValues(t, []LogField{Field("a", 1), Field("b", 2)}, fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithFields(t *testing.T) {
|
||||||
|
ctx := WithFields(context.Background(), Field("a", 1), Field("b", 2))
|
||||||
|
vals := ctx.Value(fieldsContextKey)
|
||||||
|
assert.NotNil(t, vals)
|
||||||
|
fields, ok := vals.([]LogField)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.EqualValues(t, []LogField{Field("a", 1), Field("b", 2)}, fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithFieldsAppend(t *testing.T) {
|
||||||
|
var dummyKey struct{}
|
||||||
|
ctx := context.WithValue(context.Background(), dummyKey, "dummy")
|
||||||
|
ctx = ContextWithFields(ctx, Field("a", 1), Field("b", 2))
|
||||||
|
ctx = ContextWithFields(ctx, Field("c", 3), Field("d", 4))
|
||||||
|
vals := ctx.Value(fieldsContextKey)
|
||||||
|
assert.NotNil(t, vals)
|
||||||
|
fields, ok := vals.([]LogField)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "dummy", ctx.Value(dummyKey))
|
||||||
|
assert.EqualValues(t, []LogField{
|
||||||
|
Field("a", 1),
|
||||||
|
Field("b", 2),
|
||||||
|
Field("c", 3),
|
||||||
|
Field("d", 4),
|
||||||
|
}, fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithFieldsAppendCopy(t *testing.T) {
|
||||||
|
const count = 10
|
||||||
|
ctx := context.Background()
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
ctx = ContextWithFields(ctx, Field(strconv.Itoa(i), 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
af := Field("foo", 1)
|
||||||
|
bf := Field("bar", 2)
|
||||||
|
ctxa := ContextWithFields(ctx, af)
|
||||||
|
ctxb := ContextWithFields(ctx, bf)
|
||||||
|
|
||||||
|
assert.EqualValues(t, af, ctxa.Value(fieldsContextKey).([]LogField)[count])
|
||||||
|
assert.EqualValues(t, bf, ctxb.Value(fieldsContextKey).([]LogField)[count])
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkAtomicValue(b *testing.B) {
|
||||||
|
b.ReportAllocs()
|
||||||
|
|
||||||
|
var container atomic.Value
|
||||||
|
vals := []LogField{
|
||||||
|
Field("a", "b"),
|
||||||
|
Field("c", "d"),
|
||||||
|
Field("e", "f"),
|
||||||
|
}
|
||||||
|
container.Store(&vals)
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
val := container.Load()
|
||||||
|
if val != nil {
|
||||||
|
_ = *val.(*[]LogField)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRWMutex(b *testing.B) {
|
||||||
|
b.ReportAllocs()
|
||||||
|
|
||||||
|
var lock sync.RWMutex
|
||||||
|
vals := []LogField{
|
||||||
|
Field("a", "b"),
|
||||||
|
Field("c", "d"),
|
||||||
|
Field("e", "f"),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
lock.RLock()
|
||||||
|
_ = vals
|
||||||
|
lock.RUnlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -13,14 +13,14 @@ func NewLessLogger(milliseconds int) *LessLogger {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Error logs v into error log or discard it if more than once in the given duration.
|
// Error logs v into error log or discard it if more than once in the given duration.
|
||||||
func (logger *LessLogger) Error(v ...interface{}) {
|
func (logger *LessLogger) Error(v ...any) {
|
||||||
logger.logOrDiscard(func() {
|
logger.logOrDiscard(func() {
|
||||||
Error(v...)
|
Error(v...)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errorf logs v with format into error log or discard it if more than once in the given duration.
|
// Errorf logs v with format into error log or discard it if more than once in the given duration.
|
||||||
func (logger *LessLogger) Errorf(format string, v ...interface{}) {
|
func (logger *LessLogger) Errorf(format string, v ...any) {
|
||||||
logger.logOrDiscard(func() {
|
logger.logOrDiscard(func() {
|
||||||
Errorf(format, v...)
|
Errorf(format, v...)
|
||||||
})
|
})
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user