mirror of
https://github.com/zeromicro/go-zero.git
synced 2026-05-12 01:10:00 +08:00
Compare commits
847 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6a8638fc85 | ||
|
|
837a9ffa03 | ||
|
|
d28cfe5f20 | ||
|
|
022c100dc9 | ||
|
|
426b09c356 | ||
|
|
40dc21e4cf | ||
|
|
9b114e3251 | ||
|
|
4c6234f108 | ||
|
|
3cdfcb05f1 | ||
|
|
9f5bfa0088 | ||
|
|
2d42c8fa00 | ||
|
|
10e7922597 | ||
|
|
6e34b55ba7 | ||
|
|
ed15ca04f4 | ||
|
|
295ec27e1b | ||
|
|
d1e702e8a3 | ||
|
|
d1bfb5ef61 | ||
|
|
e43357164c | ||
|
|
cd21c9fa74 | ||
|
|
cdd2fcbbc9 | ||
|
|
8d2db09d45 | ||
|
|
65905b914d | ||
|
|
80e3407be1 | ||
|
|
657d27213a | ||
|
|
8ac18a9422 | ||
|
|
d3ae9cfd49 | ||
|
|
d7f42161fd | ||
|
|
e03229cabe | ||
|
|
8403ed16ae | ||
|
|
d87d203c3b | ||
|
|
3ae6a882a7 | ||
|
|
41c980f00c | ||
|
|
f34d81ca2c | ||
|
|
004ee488a6 | ||
|
|
2e12cd2c99 | ||
|
|
2695c30886 | ||
|
|
c74fb988e0 | ||
|
|
e8a340c1c0 | ||
|
|
06e114e5a3 | ||
|
|
74ad681a66 | ||
|
|
e7bbc09093 | ||
|
|
1eb1450c43 | ||
|
|
9a724fe907 | ||
|
|
30e49f2939 | ||
|
|
a5407479a6 | ||
|
|
7fb5bab26b | ||
|
|
27249e021f | ||
|
|
d809795fec | ||
|
|
c9db9588b7 | ||
|
|
872c50b71a | ||
|
|
7c83155e4f | ||
|
|
358d86b8ae | ||
|
|
f4bb9f5635 | ||
|
|
5c6a3132eb | ||
|
|
2bd95aa007 | ||
|
|
e8376936d5 | ||
|
|
71c0288023 | ||
|
|
9e2f07a842 | ||
|
|
24fd34413f | ||
|
|
3f47251892 | ||
|
|
0b6bc69afa | ||
|
|
5b9bdc8d02 | ||
|
|
ded22e296e | ||
|
|
f0ed2370a3 | ||
|
|
6bf6cfdd01 | ||
|
|
5cc9eb0de4 | ||
|
|
f070d447ef | ||
|
|
f6d9e19ecb | ||
|
|
56807aabf6 | ||
|
|
861dcf2f36 | ||
|
|
c837dc21bb | ||
|
|
96a35ecf1a | ||
|
|
bdec5f2349 | ||
|
|
bc92b57bdb | ||
|
|
d8905b9e9e | ||
|
|
dec6309c55 | ||
|
|
10805577f5 | ||
|
|
a4d8286e36 | ||
|
|
84d2b64e7c | ||
|
|
6476da4a18 | ||
|
|
79eab0ea2f | ||
|
|
3b683fd498 | ||
|
|
d179b342b2 | ||
|
|
58874779e7 | ||
|
|
8829c31c0d | ||
|
|
b42f3fa047 | ||
|
|
9bdadf2381 | ||
|
|
20f665ede8 | ||
|
|
0325d8e92d | ||
|
|
2125977281 | ||
|
|
c26c187e11 | ||
|
|
4ef1859f0b | ||
|
|
407a6cbf9c | ||
|
|
76fc1ef460 | ||
|
|
423955c55f | ||
|
|
db95b3f0e3 | ||
|
|
4bee60eb7f | ||
|
|
7618139dad | ||
|
|
6fd08027ff | ||
|
|
b9e268aae8 | ||
|
|
4c1bb1148b | ||
|
|
50a6bbe6b9 | ||
|
|
dfb3cb510a | ||
|
|
519db812b4 | ||
|
|
3203f8e06b | ||
|
|
b71ac2042a | ||
|
|
d0f9e57022 | ||
|
|
aa68210cde | ||
|
|
280e837c9e | ||
|
|
f669e1226c | ||
|
|
cd15c19250 | ||
|
|
5b35fa17de | ||
|
|
9672298fa8 | ||
|
|
bf3ce16823 | ||
|
|
189721da16 | ||
|
|
a523ab1f93 | ||
|
|
7ea8b636d9 | ||
|
|
b2fea65faa | ||
|
|
a1fe8bf6cd | ||
|
|
67ee9e4391 | ||
|
|
9c1ee50497 | ||
|
|
7c842f22d0 | ||
|
|
14ec29991c | ||
|
|
c7f5aad83a | ||
|
|
e77747cff8 | ||
|
|
f2612db4b1 | ||
|
|
a21ff71373 | ||
|
|
fc04ad7854 | ||
|
|
fbf2eebc42 | ||
|
|
dc43430812 | ||
|
|
c6642bc2e6 | ||
|
|
bdca24dd3b | ||
|
|
00c5734021 | ||
|
|
33f87cf1f0 | ||
|
|
69935c1ba3 | ||
|
|
1fb356f328 | ||
|
|
0b0406f41a | ||
|
|
cc264dcf55 | ||
|
|
e024aebb66 | ||
|
|
f204729482 | ||
|
|
d20cf56a69 | ||
|
|
54d57c7d4b | ||
|
|
28a7c9d38f | ||
|
|
872e75e10d | ||
|
|
af1730079e | ||
|
|
04521e2d24 | ||
|
|
02adcccbf4 | ||
|
|
a74aaf1823 | ||
|
|
1eb2089c69 | ||
|
|
f7f3730e1a | ||
|
|
0ee7654407 | ||
|
|
16cc990fdd | ||
|
|
00061c2e5b | ||
|
|
6793f7a1de | ||
|
|
c8428a7f65 | ||
|
|
a5e1d0d0dc | ||
|
|
8270c7deed | ||
|
|
9f4a882a1b | ||
|
|
cb7b7cb72e | ||
|
|
603c93aa4a | ||
|
|
cb8d9d413a | ||
|
|
ff7443c6a7 | ||
|
|
b812e74d6f | ||
|
|
089cdaa75f | ||
|
|
476026e393 | ||
|
|
75952308f9 | ||
|
|
df0550d6dc | ||
|
|
e481b63b21 | ||
|
|
e47079f0f4 | ||
|
|
9b2a279948 | ||
|
|
db87fd3239 | ||
|
|
598fda0c97 | ||
|
|
b0e335e7b0 | ||
|
|
efdf475da4 | ||
|
|
22a1315136 | ||
|
|
5b22823018 | ||
|
|
9ccb997ed8 | ||
|
|
01c92a6bc5 | ||
|
|
c9a2a60e28 | ||
|
|
b0739d63c0 | ||
|
|
c22f84cb5f | ||
|
|
60450bab02 | ||
|
|
3e8cec5c78 | ||
|
|
74ee163761 | ||
|
|
ea4f680052 | ||
|
|
58cdba2c5d | ||
|
|
a2fbc14c70 | ||
|
|
158df8c270 | ||
|
|
30ec236a87 | ||
|
|
ac3653b3f9 | ||
|
|
8520db4fd9 | ||
|
|
14141fed62 | ||
|
|
5d86cc2f20 | ||
|
|
8a6e4b7580 | ||
|
|
453f949638 | ||
|
|
75a330184d | ||
|
|
546fcd8bab | ||
|
|
3022f93b6d | ||
|
|
8ffc392c66 | ||
|
|
ae7d85dadf | ||
|
|
e89268ac37 | ||
|
|
aaa3623404 | ||
|
|
8998f16054 | ||
|
|
94417be018 | ||
|
|
f300408fc0 | ||
|
|
aaa39e17a3 | ||
|
|
73906f996d | ||
|
|
73417f54db | ||
|
|
491213afb8 | ||
|
|
edf743cd72 | ||
|
|
78a88be787 | ||
|
|
9f6a574f97 | ||
|
|
ea01cc78f0 | ||
|
|
a87978568a | ||
|
|
14cecb9b31 | ||
|
|
0ce54100a4 | ||
|
|
d28ac35ff7 | ||
|
|
a5962f677f | ||
|
|
8478474f7f | ||
|
|
df5ae9507f | ||
|
|
faf4d7e3bb | ||
|
|
f64fe5eb5e | ||
|
|
97d889103a | ||
|
|
9a44310d00 | ||
|
|
06eeef2cf3 | ||
|
|
9adc7d4cb9 | ||
|
|
006f78c3d5 | ||
|
|
64a8e65f4a | ||
|
|
8fd1e76d29 | ||
|
|
0466af5e49 | ||
|
|
7405d7f506 | ||
|
|
afd9ff889e | ||
|
|
7e087de6e6 | ||
|
|
5aded99df5 | ||
|
|
08fb980ad2 | ||
|
|
b94d7aa532 | ||
|
|
ee630b8b57 | ||
|
|
bd82b7d8de | ||
|
|
3d729c77a6 | ||
|
|
e944b59bb3 | ||
|
|
54b5e3f4b2 | ||
|
|
b913229028 | ||
|
|
9963ffb1c1 | ||
|
|
8cb6490724 | ||
|
|
05e37ee20f | ||
|
|
d88da4cc88 | ||
|
|
425430f67c | ||
|
|
4e0d91f6c0 | ||
|
|
8584351b6d | ||
|
|
b19c5223a9 | ||
|
|
99a2d95433 | ||
|
|
9db222bf5b | ||
|
|
ac648d08cb | ||
|
|
6df7fa619c | ||
|
|
bbb4ce586f | ||
|
|
888551627c | ||
|
|
bd623aaac3 | ||
|
|
9e6c2ba2c0 | ||
|
|
c0db8d017d | ||
|
|
52b4f8ca91 | ||
|
|
4884a7b3c6 | ||
|
|
3c6951577d | ||
|
|
fcd15c9b17 | ||
|
|
155e6061cb | ||
|
|
dda7666097 | ||
|
|
c954568b61 | ||
|
|
c2acc43a52 | ||
|
|
1a1a6f5239 | ||
|
|
60c7edf8f8 | ||
|
|
7ad86a52f3 | ||
|
|
1e4e5a02b2 | ||
|
|
39540e21d2 | ||
|
|
b321622c95 | ||
|
|
a25cba5380 | ||
|
|
f01472c9ea | ||
|
|
af531cf264 | ||
|
|
c4b2cddef7 | ||
|
|
51de0d0620 | ||
|
|
dd393351cc | ||
|
|
655ae8034c | ||
|
|
d894b88c3e | ||
|
|
791e76bcf0 | ||
|
|
c566b5ff82 | ||
|
|
490241d639 | ||
|
|
f02711a9cb | ||
|
|
ad32f9de23 | ||
|
|
f309e9f80c | ||
|
|
2087ac1e89 | ||
|
|
e6ef1fca12 | ||
|
|
ef146cf5ba | ||
|
|
04b0f26182 | ||
|
|
acdaee0fb6 | ||
|
|
56ad4776d4 | ||
|
|
904d168f18 | ||
|
|
4bd4981bfb | ||
|
|
90562df826 | ||
|
|
497762ab47 | ||
|
|
6e4c98e52d | ||
|
|
b4bb5c0323 | ||
|
|
a58fac9000 | ||
|
|
d84e3d4b53 | ||
|
|
221f923fae | ||
|
|
bbb9126302 | ||
|
|
e7c9ef16fe | ||
|
|
8872d7cbd3 | ||
|
|
334ee4213f | ||
|
|
226513ed60 | ||
|
|
dac00d10c1 | ||
|
|
84d2b6f8f5 | ||
|
|
f98c9246b2 | ||
|
|
059027bc9d | ||
|
|
af68caeaf6 | ||
|
|
fdeacfc89f | ||
|
|
5b33dd59d9 | ||
|
|
1f92bfde6a | ||
|
|
0c094cb2d7 | ||
|
|
f238290dd3 | ||
|
|
c376ffc351 | ||
|
|
802549ac7c | ||
|
|
72580dee38 | ||
|
|
086113c843 | ||
|
|
d239952d2d | ||
|
|
7472d1e70b | ||
|
|
2446d8a668 | ||
|
|
f6894448bd | ||
|
|
425be6b4a1 | ||
|
|
457048bfac | ||
|
|
f14ab70035 | ||
|
|
8f1c88e07d | ||
|
|
9602494454 | ||
|
|
38abfb80ed | ||
|
|
87938bcc09 | ||
|
|
8ebf6750b9 | ||
|
|
6f92daae12 | ||
|
|
80e1c85b50 | ||
|
|
395a1db22f | ||
|
|
28009c4224 | ||
|
|
211f3050e9 | ||
|
|
03b5fd4a10 | ||
|
|
5e969cbef0 | ||
|
|
42883d0899 | ||
|
|
06f6dc9937 | ||
|
|
1789b12db2 | ||
|
|
c7f3e6119d | ||
|
|
54414db91d | ||
|
|
9b0625bb83 | ||
|
|
0dda05fd57 | ||
|
|
5b79ba2618 | ||
|
|
22a1fa649e | ||
|
|
745e76c335 | ||
|
|
852891dbd8 | ||
|
|
316195e912 | ||
|
|
8e889d694d | ||
|
|
ec6132b754 | ||
|
|
c282bb1d86 | ||
|
|
d04b54243d | ||
|
|
b88ba14597 | ||
|
|
7b3c3de35e | ||
|
|
abab7c2852 | ||
|
|
30f5ab0b99 | ||
|
|
8b273a075c | ||
|
|
76026fc211 | ||
|
|
04284e31cd | ||
|
|
c3b9c3c5ab | ||
|
|
a8b550e7ef | ||
|
|
cbfbebed00 | ||
|
|
2b07f22672 | ||
|
|
a784982030 | ||
|
|
ebec5aafab | ||
|
|
572b32729f | ||
|
|
43e712d86a | ||
|
|
4db20677f7 | ||
|
|
6887fb22de | ||
|
|
50fbdbcfd7 | ||
|
|
c77b8489d7 | ||
|
|
eca4ed2cc0 | ||
|
|
744c18b7cb | ||
|
|
8d6f6f933e | ||
|
|
37c3b9f5c1 | ||
|
|
1f1dcd16e6 | ||
|
|
3285436f75 | ||
|
|
7f49bd8a31 | ||
|
|
9cd2015661 | ||
|
|
cf3a1020b0 | ||
|
|
ee19fb736b | ||
|
|
b0ccfb8eb4 | ||
|
|
444e5a711f | ||
|
|
8774d72ddb | ||
|
|
e3fcdbf040 | ||
|
|
2854ca03b4 | ||
|
|
6c624a6ed0 | ||
|
|
57b73d8b49 | ||
|
|
a79cee12ee | ||
|
|
7a921f66e6 | ||
|
|
12e235efb0 | ||
|
|
01060cf16d | ||
|
|
0786862a35 | ||
|
|
efa43483b2 | ||
|
|
771371e051 | ||
|
|
2ee95f8981 | ||
|
|
5bc01e4bfd | ||
|
|
510e966982 | ||
|
|
10e3b8ac80 | ||
|
|
04059bbf5a | ||
|
|
d643007c79 | ||
|
|
fc43876cc5 | ||
|
|
a926cb514f | ||
|
|
25cab2f273 | ||
|
|
8d2e2753a2 | ||
|
|
cc4c50e3eb | ||
|
|
751072bdb0 | ||
|
|
e97e1f10db | ||
|
|
0bd2a0656c | ||
|
|
71a2b20301 | ||
|
|
8df7de94e3 | ||
|
|
bf21203297 | ||
|
|
ae98375194 | ||
|
|
82d1ccf376 | ||
|
|
bb6d49c17e | ||
|
|
ed735ec47c | ||
|
|
ba4bac3a03 | ||
|
|
08433d7e04 | ||
|
|
a3b525b50d | ||
|
|
097f6886f2 | ||
|
|
07a1549634 | ||
|
|
befca26c58 | ||
|
|
3556a2eef4 | ||
|
|
807765f77e | ||
|
|
e44584e549 | ||
|
|
acd48f0abb | ||
|
|
f919bc6713 | ||
|
|
a0030b8f45 | ||
|
|
a5f0cce1b1 | ||
|
|
4d13dda605 | ||
|
|
b56cc8e459 | ||
|
|
c435811479 | ||
|
|
c686c93fb5 | ||
|
|
da8f76e6bd | ||
|
|
99596a4149 | ||
|
|
ec2a9f2c57 | ||
|
|
fd73ced6dc | ||
|
|
5071736ab4 | ||
|
|
0d7f1d23b4 | ||
|
|
84ab11ac09 | ||
|
|
67804a6bb2 | ||
|
|
65ee877236 | ||
|
|
b060867009 | ||
|
|
4d53045c6b | ||
|
|
cecd4b1b75 | ||
|
|
7cd0463953 | ||
|
|
7a82cf80ce | ||
|
|
f997aee3ba | ||
|
|
88ec89bdbd | ||
|
|
7d1b43780a | ||
|
|
4b5c2de376 | ||
|
|
e5c560e8ba | ||
|
|
bed494d904 | ||
|
|
2dfecda465 | ||
|
|
3ebb1e0221 | ||
|
|
348184904c | ||
|
|
7a27fa50a1 | ||
|
|
8d4951c990 | ||
|
|
6e57f6c527 | ||
|
|
b9ac51b6c3 | ||
|
|
702e8d79ce | ||
|
|
95a9dabf8b | ||
|
|
bae66c49c2 | ||
|
|
e0afe0b4bb | ||
|
|
24fb29a356 | ||
|
|
71083b5e64 | ||
|
|
1174f17bd9 | ||
|
|
d6d8fc21d8 | ||
|
|
9592639cb4 | ||
|
|
abcb28e506 | ||
|
|
a92f65580c | ||
|
|
3819f67cf4 | ||
|
|
295c8d2934 | ||
|
|
88da8685dd | ||
|
|
c7831ac96d | ||
|
|
e898761762 | ||
|
|
13d1c5cd00 | ||
|
|
16bfb1b7be | ||
|
|
ef4d4968d6 | ||
|
|
7b4a5e3ec6 | ||
|
|
e6df21e0d2 | ||
|
|
0a2c2d1eca | ||
|
|
a5fb29a6f0 | ||
|
|
f8da301e57 | ||
|
|
cb9075b737 | ||
|
|
3f389a55c2 | ||
|
|
afbd565d87 | ||
|
|
d629acc2b7 | ||
|
|
f32c6a9b28 | ||
|
|
95aa65efb9 | ||
|
|
3806e66cf1 | ||
|
|
bd430baf52 | ||
|
|
48f4154ea8 | ||
|
|
2599e0d28d | ||
|
|
12327fa07d | ||
|
|
57079bf4a4 | ||
|
|
7f6eceb5a3 | ||
|
|
7d7cb836af | ||
|
|
f87d9d1dda | ||
|
|
856b5aadb1 | ||
|
|
f7d778e0ed | ||
|
|
88333ee77f | ||
|
|
e76f44a35b | ||
|
|
c9ec22d5f4 | ||
|
|
afffc1048b | ||
|
|
d0b76b1d9a | ||
|
|
b004b070d7 | ||
|
|
677d581bd1 | ||
|
|
b776468e69 | ||
|
|
4c9315e984 | ||
|
|
668a7011c4 | ||
|
|
cc07a1d69b | ||
|
|
7f99a3baa8 | ||
|
|
9504418462 | ||
|
|
b144a2335c | ||
|
|
7b9ed7a313 | ||
|
|
3d2e9fcb84 | ||
|
|
2b993424c1 | ||
|
|
5e87b33b23 | ||
|
|
9b7cc43dcb | ||
|
|
000b28cf84 | ||
|
|
9fd16cd278 | ||
|
|
b71429e16b | ||
|
|
a13b48c33e | ||
|
|
033525fea8 | ||
|
|
607fc3297a | ||
|
|
4287877b74 | ||
|
|
2b7545ce11 | ||
|
|
60925c1164 | ||
|
|
1c9e81aa28 | ||
|
|
db7dcaa120 | ||
|
|
099d44054d | ||
|
|
f5f873c6bd | ||
|
|
6dbd3eada9 | ||
|
|
cf2d20a211 | ||
|
|
91bfc093f4 | ||
|
|
cf33aae91d | ||
|
|
c9494c8bc7 | ||
|
|
1fd2ef9347 | ||
|
|
efffb40fa3 | ||
|
|
9c8f31cf83 | ||
|
|
96cb7af728 | ||
|
|
41964f9d52 | ||
|
|
fe0d0687f5 | ||
|
|
1c1e4bca86 | ||
|
|
1abe21aa2a | ||
|
|
cee170f3e9 | ||
|
|
907efd92c9 | ||
|
|
737cd4751a | ||
|
|
dfe6e88529 | ||
|
|
85a815bea0 | ||
|
|
aa3c391919 | ||
|
|
c9b0ac1ee4 | ||
|
|
33faab61a3 | ||
|
|
81bf122fa4 | ||
|
|
a14bd309a9 | ||
|
|
ea7e410145 | ||
|
|
e81358e7fa | ||
|
|
695ea69bfc | ||
|
|
d2ed14002c | ||
|
|
1d9c4a4c4b | ||
|
|
7e83895c6e | ||
|
|
dc0534573c | ||
|
|
fe3739b7f3 | ||
|
|
94645481b1 | ||
|
|
338caf9927 | ||
|
|
9cc979960f | ||
|
|
f904710811 | ||
|
|
8291eabc2c | ||
|
|
901fadb5d3 | ||
|
|
c824e9e118 | ||
|
|
6f49639f80 | ||
|
|
7d4a548d29 | ||
|
|
936dd67008 | ||
|
|
84cc41df42 | ||
|
|
da1a93e932 | ||
|
|
7e61555d42 | ||
|
|
7a134ec64d | ||
|
|
d123b00e73 | ||
|
|
20d53add46 | ||
|
|
a1b141d31a | ||
|
|
0a9c427443 | ||
|
|
c32759d735 | ||
|
|
fe855c52f1 | ||
|
|
3f8b080882 | ||
|
|
adc275872d | ||
|
|
be39133dba | ||
|
|
15a9ab1d18 | ||
|
|
7c354dcc38 | ||
|
|
3733b06f1b | ||
|
|
8115a0932e | ||
|
|
4df5eb760c | ||
|
|
4a639b853c | ||
|
|
1023425c1d | ||
|
|
360fbfd0fa | ||
|
|
09b7625f06 | ||
|
|
6db294b5cc | ||
|
|
305b6749fd | ||
|
|
10b855713d | ||
|
|
1cc0f071d9 | ||
|
|
02ce8f82c8 | ||
|
|
8a585afbf0 | ||
|
|
e356025cef | ||
|
|
14dee114dd | ||
|
|
637a94a189 | ||
|
|
173b347c90 | ||
|
|
6749c5b94a | ||
|
|
e66cca3710 | ||
|
|
f90c0aa98e | ||
|
|
f00b5416a3 | ||
|
|
f49694d6b6 | ||
|
|
d809bf2dca | ||
|
|
44ae5463bc | ||
|
|
40dbd722d7 | ||
|
|
709574133b | ||
|
|
cb1c593108 | ||
|
|
6ecf575c00 | ||
|
|
b8fcdd5460 | ||
|
|
ce42281568 | ||
|
|
40230d79e7 | ||
|
|
ba7851795b | ||
|
|
096fe3bc47 | ||
|
|
e37858295a | ||
|
|
5a4afb1518 | ||
|
|
63f1f39c40 | ||
|
|
481895d1e4 | ||
|
|
9e9ce3bf48 | ||
|
|
0ce654968d | ||
|
|
2703493541 | ||
|
|
d4240cd4b0 | ||
|
|
a22bcc84a3 | ||
|
|
93f430a449 | ||
|
|
d1b303fe7e | ||
|
|
dbca20e3df | ||
|
|
b3ead4d76c | ||
|
|
33a9db85c8 | ||
|
|
e7d46aa6e2 | ||
|
|
b282304054 | ||
|
|
0a36031d48 | ||
|
|
e5d7c3ab04 | ||
|
|
12c08bfd39 | ||
|
|
8f465fa439 | ||
|
|
8a470bb6ee | ||
|
|
9277ad77f7 | ||
|
|
a958400595 | ||
|
|
015716d1b5 | ||
|
|
54e9d01312 | ||
|
|
bc831b75dd | ||
|
|
ff112fdaee | ||
|
|
8d0f7dbb27 | ||
|
|
a5ce2c448e | ||
|
|
0dd8e27557 | ||
|
|
17a0908a84 | ||
|
|
9f9c24cce9 | ||
|
|
b628bc0086 | ||
|
|
be9c48da7f | ||
|
|
797a90ae7d | ||
|
|
92e60a5777 | ||
|
|
46995a4d7d | ||
|
|
5e6dcac734 | ||
|
|
3e7e466526 | ||
|
|
b6b8941a18 | ||
|
|
878fd14739 | ||
|
|
5e99f2b85d | ||
|
|
9c23399c33 | ||
|
|
86d3de4c89 | ||
|
|
dc17855367 | ||
|
|
1606a92c6e | ||
|
|
029fd3ea35 | ||
|
|
57299a7597 | ||
|
|
762af9dda2 | ||
|
|
eccfaba614 | ||
|
|
974c19d6d3 | ||
|
|
0f8140031a | ||
|
|
0b1ee79d3a | ||
|
|
26e16107ce | ||
|
|
1e5e9d63bd | ||
|
|
f994e1df1a | ||
|
|
b5dcadda78 | ||
|
|
df37597ac3 | ||
|
|
68335ada54 | ||
|
|
ecdae2477e | ||
|
|
a561884fcf | ||
|
|
a50bcb90a6 | ||
|
|
e6f8e0e8c3 | ||
|
|
598ff6d0fc | ||
|
|
9a57993e83 | ||
|
|
ee45b0a459 | ||
|
|
2896ef1a49 | ||
|
|
05df86436f | ||
|
|
fb22589cf5 | ||
|
|
a8fb010333 | ||
|
|
8cc09244a0 | ||
|
|
21e811887c | ||
|
|
7f0ec14704 | ||
|
|
d12e9fa2d7 | ||
|
|
ce5961a7d0 | ||
|
|
e1d942a799 | ||
|
|
754e631dc4 | ||
|
|
72aeac3fa9 | ||
|
|
1c3c8f4bbc | ||
|
|
17e6cfb7a9 | ||
|
|
0d151c17f8 | ||
|
|
52990550fb | ||
|
|
3a9b9ceace | ||
|
|
3128d63134 | ||
|
|
4408767981 | ||
|
|
ff7c14c6b6 | ||
|
|
520f4d7c1b | ||
|
|
0e674933f3 | ||
|
|
1d12f20ff6 | ||
|
|
2b815162f6 | ||
|
|
1602f6ce81 | ||
|
|
c5cd0d32d1 | ||
|
|
1cb17311dd | ||
|
|
e987eb60d3 | ||
|
|
99a863e8be | ||
|
|
5333fb93e5 | ||
|
|
cb13556461 | ||
|
|
561370d5c9 | ||
|
|
7c779d0433 | ||
|
|
6814c86fcd | ||
|
|
a1d2ea9d85 | ||
|
|
4dfbd66323 | ||
|
|
dbf556e7d2 | ||
|
|
c0d0e00803 | ||
|
|
b4aa89fc25 | ||
|
|
11dd3d75ec | ||
|
|
167422ac4f | ||
|
|
a74d73fb2e | ||
|
|
81a9ada2d9 | ||
|
|
55c9c3f3dd | ||
|
|
8dd93d59a0 | ||
|
|
3a4e1cbb33 | ||
|
|
d1129e3974 | ||
|
|
1e85f74fd8 | ||
|
|
33eb2936e8 | ||
|
|
b7a018b33a | ||
|
|
ea1c9aa250 | ||
|
|
fbad810cd1 | ||
|
|
6b15475ccd | ||
|
|
5c0c3ea467 | ||
|
|
89f3712347 | ||
|
|
af7acdd843 | ||
|
|
7ffa3349a9 | ||
|
|
f03862c378 | ||
|
|
fe3e70a60f | ||
|
|
36174ba5cc | ||
|
|
7b17b3604a | ||
|
|
eb40c2731d | ||
|
|
618bec5075 | ||
|
|
5821b7324e | ||
|
|
befdaab542 | ||
|
|
431be8ed9d | ||
|
|
3c688c319e | ||
|
|
59ffa75c00 | ||
|
|
09340e82a7 | ||
|
|
6c4a4be5d2 | ||
|
|
6e3d99e869 | ||
|
|
0f97b2019a | ||
|
|
0cf4ed46a1 | ||
|
|
3affe62ae4 | ||
|
|
0734bbcab3 | ||
|
|
f411178a4f | ||
|
|
72132ce399 | ||
|
|
db16115037 | ||
|
|
71bbf91a63 | ||
|
|
69ccc61cfe | ||
|
|
a94cf653f0 | ||
|
|
77e23ad65d | ||
|
|
38806e7237 | ||
|
|
a987d12237 | ||
|
|
33208e6ef6 | ||
|
|
5d8a3c07cd | ||
|
|
1c24e71568 | ||
|
|
229544f3ca | ||
|
|
c575fa7f95 | ||
|
|
fe2252184a | ||
|
|
1a8014c704 | ||
|
|
30e52707ae | ||
|
|
73b61e09ed | ||
|
|
9b8595a85e | ||
|
|
015e284515 | ||
|
|
456b395860 | ||
|
|
f3c367a323 | ||
|
|
a32028c4fb | ||
|
|
b4572fa064 | ||
|
|
ccbabf6f58 | ||
|
|
5989444227 | ||
|
|
dc286a03f5 | ||
|
|
b82c02ed16 | ||
|
|
59ba4ecc5b | ||
|
|
5e7b514ae2 | ||
|
|
2b1466e41e | ||
|
|
9c9f80518f | ||
|
|
25973d6b59 | ||
|
|
6237d01948 | ||
|
|
49316b113e | ||
|
|
6a673e8cb0 | ||
|
|
0efa28ddbd | ||
|
|
0b6a13fe84 | ||
|
|
11aa6668e8 | ||
|
|
267a283328 | ||
|
|
2d8366b30e | ||
|
|
db83843558 | ||
|
|
50565c9765 | ||
|
|
4c02a19a14 | ||
|
|
a1b990c5ec | ||
|
|
2607bb8863 | ||
|
|
5bf37535fe | ||
|
|
ed85775fd5 | ||
|
|
418f8f6666 | ||
|
|
22e75cdf78 | ||
|
|
e79c42add1 | ||
|
|
9e14820698 | ||
|
|
2ebb5b6b58 | ||
|
|
2673dbc6e1 | ||
|
|
d21d770b5b | ||
|
|
1252bd9cde | ||
|
|
054d9b5540 | ||
|
|
f03cfb0ff7 | ||
|
|
0214161bfc | ||
|
|
d4e38cb7f0 | ||
|
|
693a8b627a | ||
|
|
701208b6f4 | ||
|
|
b65fcc5512 | ||
|
|
3321ed3519 | ||
|
|
5e007c1f9f | ||
|
|
de2f8c06fb | ||
|
|
926d746df5 | ||
|
|
4b636cd293 | ||
|
|
4bdf5e4c90 | ||
|
|
721b7def7c | ||
|
|
f294090130 | ||
|
|
489980ea0f | ||
|
|
e12c8ae993 | ||
|
|
21aad62513 | ||
|
|
0b08aca554 | ||
|
|
6ef1b5e14c | ||
|
|
8745039877 | ||
|
|
9d9399ad10 | ||
|
|
e7dd04701c | ||
|
|
a3d7474ae0 |
3
.codecov.yml
Normal file
3
.codecov.yml
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
comment: false
|
||||||
|
ignore:
|
||||||
|
- "tools"
|
||||||
40
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
40
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
---
|
||||||
|
name: Bug report
|
||||||
|
about: Create a report to help us improve
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Describe the bug**
|
||||||
|
A clear and concise description of what the bug is.
|
||||||
|
|
||||||
|
**To Reproduce**
|
||||||
|
Steps to reproduce the behavior, if applicable:
|
||||||
|
|
||||||
|
1. The code is
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
2. The error is
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
**Expected behavior**
|
||||||
|
A clear and concise description of what you expected to happen.
|
||||||
|
|
||||||
|
**Screenshots**
|
||||||
|
If applicable, add screenshots to help explain your problem.
|
||||||
|
|
||||||
|
**Environments (please complete the following information):**
|
||||||
|
- OS: [e.g. Linux]
|
||||||
|
- go-zero version [e.g. 1.2.1]
|
||||||
|
- goctl version [e.g. 1.2.1, optional]
|
||||||
|
|
||||||
|
**More description**
|
||||||
|
Add any other context about the problem here.
|
||||||
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: Feature request
|
||||||
|
about: Suggest an idea for this project
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Is your feature request related to a problem? Please describe.**
|
||||||
|
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||||
|
|
||||||
|
**Describe the solution you'd like**
|
||||||
|
A clear and concise description of what you want to happen.
|
||||||
|
|
||||||
|
**Describe alternatives you've considered**
|
||||||
|
A clear and concise description of any alternative solutions or features you've considered.
|
||||||
|
|
||||||
|
**Additional context**
|
||||||
|
Add any other context or screenshots about the feature request here.
|
||||||
10
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
10
.github/ISSUE_TEMPLATE/question.md
vendored
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
---
|
||||||
|
name: Question
|
||||||
|
about: Ask a question on using go-zero or goctl
|
||||||
|
title: ''
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
67
.github/workflows/codeql-analysis.yml
vendored
Normal file
67
.github/workflows/codeql-analysis.yml
vendored
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# For most projects, this workflow file will not need changing; you simply need
|
||||||
|
# to commit it to your repository.
|
||||||
|
#
|
||||||
|
# You may wish to alter this file to override the set of languages analyzed,
|
||||||
|
# or to provide custom queries or build logic.
|
||||||
|
#
|
||||||
|
# ******** NOTE ********
|
||||||
|
# We have attempted to detect the languages in your repository. Please check
|
||||||
|
# the `language` matrix defined below to confirm you have the correct set of
|
||||||
|
# supported CodeQL languages.
|
||||||
|
#
|
||||||
|
name: "CodeQL"
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ master ]
|
||||||
|
pull_request:
|
||||||
|
# The branches below must be a subset of the branches above
|
||||||
|
branches: [ master ]
|
||||||
|
schedule:
|
||||||
|
- cron: '18 19 * * 6'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
analyze:
|
||||||
|
name: Analyze
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
language: [ 'go' ]
|
||||||
|
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
|
||||||
|
# Learn more:
|
||||||
|
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
# Initializes the CodeQL tools for scanning.
|
||||||
|
- name: Initialize CodeQL
|
||||||
|
uses: github/codeql-action/init@v1
|
||||||
|
with:
|
||||||
|
languages: ${{ matrix.language }}
|
||||||
|
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||||
|
# By default, queries listed here will override any specified in a config file.
|
||||||
|
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||||
|
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
||||||
|
|
||||||
|
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||||
|
# If this step fails, then you should remove it and run the build manually (see below)
|
||||||
|
- name: Autobuild
|
||||||
|
uses: github/codeql-action/autobuild@v1
|
||||||
|
|
||||||
|
# ℹ️ Command-line programs to run using the OS shell.
|
||||||
|
# 📚 https://git.io/JvXDl
|
||||||
|
|
||||||
|
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
||||||
|
# and modify them (or add more) to build your code if your project
|
||||||
|
# uses a compiled language
|
||||||
|
|
||||||
|
#- run: |
|
||||||
|
# make bootstrap
|
||||||
|
# make release
|
||||||
|
|
||||||
|
- name: Perform CodeQL Analysis
|
||||||
|
uses: github/codeql-action/analyze@v1
|
||||||
14
.github/workflows/go.yml
vendored
14
.github/workflows/go.yml
vendored
@@ -7,7 +7,6 @@ on:
|
|||||||
branches: [ master ]
|
branches: [ master ]
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
build:
|
build:
|
||||||
name: Build
|
name: Build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -16,7 +15,7 @@ jobs:
|
|||||||
- name: Set up Go 1.x
|
- name: Set up Go 1.x
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: ^1.13
|
go-version: ^1.14
|
||||||
id: go
|
id: go
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
@@ -26,5 +25,14 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
go get -v -t -d ./...
|
go get -v -t -d ./...
|
||||||
|
|
||||||
|
- name: Lint
|
||||||
|
run: |
|
||||||
|
go vet -stdmethods=false $(go list ./...)
|
||||||
|
go install mvdan.cc/gofumpt@latest
|
||||||
|
test -z "$(gofumpt -s -l -extra .)" || echo "Please run 'gofumpt -l -w -extra .'"
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: go test -v -race ./...
|
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||||
|
|
||||||
|
- name: Codecov
|
||||||
|
uses: codecov/codecov-action@v2
|
||||||
|
|||||||
19
.github/workflows/issues.yml
vendored
Normal file
19
.github/workflows/issues.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
name: Close inactive issues
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "30 1 * * *"
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
close-issues:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/stale@v3
|
||||||
|
with:
|
||||||
|
days-before-issue-stale: 30
|
||||||
|
days-before-issue-close: 14
|
||||||
|
stale-issue-label: "stale"
|
||||||
|
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
|
||||||
|
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
||||||
|
days-before-pr-stale: -1
|
||||||
|
days-before-pr-close: -1
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
19
.github/workflows/reviewdog.yml
vendored
Normal file
19
.github/workflows/reviewdog.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
name: reviewdog
|
||||||
|
on: [pull_request]
|
||||||
|
jobs:
|
||||||
|
staticcheck:
|
||||||
|
name: runner / staticcheck
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- uses: reviewdog/action-staticcheck@v1
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.github_token }}
|
||||||
|
# Change reviewdog reporter if you need [github-pr-check,github-check,github-pr-review].
|
||||||
|
reporter: github-pr-review
|
||||||
|
# Report all results.
|
||||||
|
filter_mode: nofilter
|
||||||
|
# Exit with 1 when it find at least one finding.
|
||||||
|
fail_on_error: true
|
||||||
|
# Set staticcheck flags
|
||||||
|
staticcheck_flags: -checks=inherit,-SA1019,-SA1029,-SA5008
|
||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -4,15 +4,16 @@
|
|||||||
# Unignore all with extensions
|
# Unignore all with extensions
|
||||||
!*.*
|
!*.*
|
||||||
!**/Dockerfile
|
!**/Dockerfile
|
||||||
|
!**/Makefile
|
||||||
|
|
||||||
# Unignore all dirs
|
# Unignore all dirs
|
||||||
!*/
|
!*/
|
||||||
!api
|
!api
|
||||||
|
|
||||||
|
# ignore
|
||||||
.idea
|
.idea
|
||||||
**/.DS_Store
|
**/.DS_Store
|
||||||
**/logs
|
**/logs
|
||||||
!Makefile
|
|
||||||
|
|
||||||
# gitlab ci
|
# gitlab ci
|
||||||
.cache
|
.cache
|
||||||
|
|||||||
@@ -1,18 +0,0 @@
|
|||||||
stages:
|
|
||||||
- analysis
|
|
||||||
|
|
||||||
variables:
|
|
||||||
GOPATH: '/runner-cache/zero'
|
|
||||||
GOCACHE: '/runner-cache/zero'
|
|
||||||
GOPROXY: 'https://goproxy.cn,direct'
|
|
||||||
|
|
||||||
analysis:
|
|
||||||
stage: analysis
|
|
||||||
image: golang
|
|
||||||
script:
|
|
||||||
- go version && go env
|
|
||||||
- go test -short $(go list ./...) | grep -v "no test"
|
|
||||||
only:
|
|
||||||
- merge_requests
|
|
||||||
tags:
|
|
||||||
- common
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
run:
|
|
||||||
# concurrency: 6
|
|
||||||
timeout: 5m
|
|
||||||
skip-dirs:
|
|
||||||
- core
|
|
||||||
- doc
|
|
||||||
- example
|
|
||||||
- rest
|
|
||||||
- rpcx
|
|
||||||
- tools
|
|
||||||
|
|
||||||
|
|
||||||
linters:
|
|
||||||
disable-all: true
|
|
||||||
enable:
|
|
||||||
- bodyclose
|
|
||||||
- deadcode
|
|
||||||
- errcheck
|
|
||||||
- gosimple
|
|
||||||
- govet
|
|
||||||
- ineffassign
|
|
||||||
- staticcheck
|
|
||||||
- structcheck
|
|
||||||
- typecheck
|
|
||||||
- unused
|
|
||||||
- varcheck
|
|
||||||
# - dupl
|
|
||||||
|
|
||||||
|
|
||||||
linters-settings:
|
|
||||||
|
|
||||||
issues:
|
|
||||||
exclude-rules:
|
|
||||||
- linters:
|
|
||||||
- staticcheck
|
|
||||||
text: 'SA1019: (baseresponse.BoolResponse|oldresponse.FormatBadRequestResponse|oldresponse.FormatResponse)|SA5008: unknown JSON option ("optional"|"default=|"range=|"options=)'
|
|
||||||
102
CONTRIBUTING.md
Normal file
102
CONTRIBUTING.md
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
# Contributing
|
||||||
|
|
||||||
|
Welcome to go-zero!
|
||||||
|
|
||||||
|
- [Before you get started](#before-you-get-started)
|
||||||
|
- [Code of Conduct](#code-of-conduct)
|
||||||
|
- [Community Expectations](#community-expectations)
|
||||||
|
- [Getting started](#getting-started)
|
||||||
|
- [Your First Contribution](#your-first-contribution)
|
||||||
|
- [Find something to work on](#find-something-to-work-on)
|
||||||
|
- [Find a good first topic](#find-a-good-first-topic)
|
||||||
|
- [Work on an Issue](#work-on-an-issue)
|
||||||
|
- [File an Issue](#file-an-issue)
|
||||||
|
- [Contributor Workflow](#contributor-workflow)
|
||||||
|
- [Creating Pull Requests](#creating-pull-requests)
|
||||||
|
- [Code Review](#code-review)
|
||||||
|
- [Testing](#testing)
|
||||||
|
|
||||||
|
# Before you get started
|
||||||
|
|
||||||
|
## Code of Conduct
|
||||||
|
|
||||||
|
Please make sure to read and observe our [Code of Conduct](/code-of-conduct.md).
|
||||||
|
|
||||||
|
## Community Expectations
|
||||||
|
|
||||||
|
go-zero is a community project driven by its community which strives to promote a healthy, friendly and productive environment.
|
||||||
|
go-zero is a web and rpc framework written in Go. It's born to ensure the stability of the busy sites with resilient design. Builtin goctl greatly improves the development productivity.
|
||||||
|
|
||||||
|
# Getting started
|
||||||
|
|
||||||
|
- Fork the repository on GitHub.
|
||||||
|
- Make your changes on your fork repository.
|
||||||
|
- Submit a PR.
|
||||||
|
|
||||||
|
|
||||||
|
# Your First Contribution
|
||||||
|
|
||||||
|
We will help you to contribute in different areas like filing issues, developing features, fixing critical bugs and
|
||||||
|
getting your work reviewed and merged.
|
||||||
|
|
||||||
|
If you have questions about the development process,
|
||||||
|
feel free to [file an issue](https://github.com/tal-tech/go-zero/issues/new/choose).
|
||||||
|
|
||||||
|
## Find something to work on
|
||||||
|
|
||||||
|
We are always in need of help, be it fixing documentation, reporting bugs or writing some code.
|
||||||
|
Look at places where you feel best coding practices aren't followed, code refactoring is needed or tests are missing.
|
||||||
|
Here is how you get started.
|
||||||
|
|
||||||
|
### Find a good first topic
|
||||||
|
|
||||||
|
[go-zero](https://github.com/tal-tech/go-zero) has beginner-friendly issues that provide a good first issue.
|
||||||
|
For example, [go-zero](https://github.com/tal-tech/go-zero) has
|
||||||
|
[help wanted](https://github.com/tal-tech/go-zero/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) and
|
||||||
|
[good first issue](https://github.com/tal-tech/go-zero/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
|
||||||
|
labels for issues that should not need deep knowledge of the system.
|
||||||
|
We can help new contributors who wish to work on such issues.
|
||||||
|
|
||||||
|
Another good way to contribute is to find a documentation improvement, such as a missing/broken link.
|
||||||
|
Please see [Contributing](#contributing) below for the workflow.
|
||||||
|
|
||||||
|
#### Work on an issue
|
||||||
|
|
||||||
|
When you are willing to take on an issue, just reply on the issue. The maintainer will assign it to you.
|
||||||
|
|
||||||
|
### File an Issue
|
||||||
|
|
||||||
|
While we encourage everyone to contribute code, it is also appreciated when someone reports an issue.
|
||||||
|
|
||||||
|
Please follow the prompted submission guidelines while opening an issue.
|
||||||
|
|
||||||
|
# Contributor Workflow
|
||||||
|
|
||||||
|
Please do not ever hesitate to ask a question or send a pull request.
|
||||||
|
|
||||||
|
This is a rough outline of what a contributor's workflow looks like:
|
||||||
|
|
||||||
|
- Create a topic branch from where to base the contribution. This is usually master.
|
||||||
|
- Make commits of logical units.
|
||||||
|
- Push changes in a topic branch to a personal fork of the repository.
|
||||||
|
- Submit a pull request to [go-zero](https://github.com/tal-tech/go-zero).
|
||||||
|
|
||||||
|
## Creating Pull Requests
|
||||||
|
|
||||||
|
Pull requests are often called simply "PR".
|
||||||
|
go-zero generally follows the standard [github pull request](https://help.github.com/articles/about-pull-requests/) process.
|
||||||
|
To submit a proposed change, please develop the code/fix and add new test cases.
|
||||||
|
After that, run these local verifications before submitting pull request to predict the pass or
|
||||||
|
fail of continuous integration.
|
||||||
|
|
||||||
|
* Format the code with `gofmt`
|
||||||
|
* Run the test with data race enabled `go test -race ./...`
|
||||||
|
|
||||||
|
## Code Review
|
||||||
|
|
||||||
|
To make it easier for your PR to receive reviews, consider the reviewers will need you to:
|
||||||
|
|
||||||
|
* follow [good coding guidelines](https://github.com/golang/go/wiki/CodeReviewComments).
|
||||||
|
* write [good commit messages](https://chris.beams.io/posts/git-commit/).
|
||||||
|
* break large changes into a logical series of smaller patches which individually make easily understandable changes, and in aggregate solve a broader issue.
|
||||||
|
|
||||||
22
ROADMAP.md
Normal file
22
ROADMAP.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# go-zero Roadmap
|
||||||
|
|
||||||
|
This document defines a high level roadmap for go-zero development and upcoming releases.
|
||||||
|
Community and contributor involvement is vital for successfully implementing all desired items for each release.
|
||||||
|
We hope that the items listed below will inspire further engagement from the community to keep go-zero progressing and shipping exciting and valuable features.
|
||||||
|
|
||||||
|
## 2021 Q2
|
||||||
|
- [x] Support service discovery through K8S client api
|
||||||
|
- [x] Log full sql statements for easier sql problem solving
|
||||||
|
|
||||||
|
## 2021 Q3
|
||||||
|
- [x] Support `goctl model pg` to support PostgreSQL code generation
|
||||||
|
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file
|
||||||
|
- [ ] Adapt builtin tracing mechanism to opentracing solutions
|
||||||
|
|
||||||
|
## 2021 Q4
|
||||||
|
- [ ] Add `httpx.Client` with governance, like circuit breaker etc.
|
||||||
|
- [ ] Support `goctl doctor` command to report potential issues for given service
|
||||||
|
- [ ] Support `context` in redis related methods for timeout and tracing
|
||||||
|
- [ ] Support `context` in sql related methods for timeout and tracing
|
||||||
|
- [ ] Support `context` in mongodb related methods for timeout and tracing
|
||||||
|
- [ ] Support TLS in redis connections
|
||||||
76
code-of-conduct.md
Normal file
76
code-of-conduct.md
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as
|
||||||
|
contributors and maintainers pledge to make participation in our project and
|
||||||
|
our community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||||
|
level of experience, education, socio-economic status, nationality, personal
|
||||||
|
appearance, race, religion, or sexual identity and orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment
|
||||||
|
include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||||
|
advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic
|
||||||
|
address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable
|
||||||
|
behavior and are expected to take appropriate and fair corrective action in
|
||||||
|
response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||||
|
permanently any contributor for other behaviors that they deem inappropriate,
|
||||||
|
threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies within all project spaces, and it also applies when
|
||||||
|
an individual is representing the project or its community in public spaces.
|
||||||
|
Examples of representing a project or community include using an official
|
||||||
|
project e-mail address, posting via an official social media account, or acting
|
||||||
|
as an appointed representative at an online or offline event. Representation of
|
||||||
|
a project may be further defined and clarified by project maintainers.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting the project team at [INSERT EMAIL ADDRESS]. All
|
||||||
|
complaints will be reviewed and investigated and will result in a response that
|
||||||
|
is deemed necessary and appropriate to the circumstances. The project team is
|
||||||
|
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||||
|
Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||||
|
faith may face temporary or permanent repercussions as determined by other
|
||||||
|
members of the project's leadership.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||||
|
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||||
|
|
||||||
|
[homepage]: https://www.contributor-covenant.org
|
||||||
|
|
||||||
|
For answers to common questions about this code of conduct, see
|
||||||
|
https://www.contributor-covenant.org/faq
|
||||||
@@ -13,15 +13,13 @@ const (
|
|||||||
// maps as k in the error rate table
|
// maps as k in the error rate table
|
||||||
maps = 14
|
maps = 14
|
||||||
setScript = `
|
setScript = `
|
||||||
local key = KEYS[1]
|
|
||||||
for _, offset in ipairs(ARGV) do
|
for _, offset in ipairs(ARGV) do
|
||||||
redis.call("setbit", key, offset, 1)
|
redis.call("setbit", KEYS[1], offset, 1)
|
||||||
end
|
end
|
||||||
`
|
`
|
||||||
testScript = `
|
testScript = `
|
||||||
local key = KEYS[1]
|
|
||||||
for _, offset in ipairs(ARGV) do
|
for _, offset in ipairs(ARGV) do
|
||||||
if tonumber(redis.call("getbit", key, offset)) == 0 then
|
if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
|
||||||
return false
|
return false
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@@ -29,44 +27,43 @@ return true
|
|||||||
`
|
`
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ErrTooLargeOffset indicates the offset is too large in bitset.
|
||||||
var ErrTooLargeOffset = errors.New("too large offset")
|
var ErrTooLargeOffset = errors.New("too large offset")
|
||||||
|
|
||||||
type (
|
type (
|
||||||
BitSetProvider interface {
|
// A Filter is a bloom filter.
|
||||||
|
Filter struct {
|
||||||
|
bits uint
|
||||||
|
bitSet bitSetProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
bitSetProvider interface {
|
||||||
check([]uint) (bool, error)
|
check([]uint) (bool, error)
|
||||||
set([]uint) error
|
set([]uint) error
|
||||||
}
|
}
|
||||||
|
|
||||||
BloomFilter struct {
|
|
||||||
bits uint
|
|
||||||
maps uint
|
|
||||||
bitSet BitSetProvider
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// New create a BloomFilter, store is the backed redis, key is the key for the bloom filter,
|
// New create a Filter, store is the backed redis, key is the key for the bloom filter,
|
||||||
// bits is how many bits will be used, maps is how many hashes for each addition.
|
// bits is how many bits will be used, maps is how many hashes for each addition.
|
||||||
// best practices:
|
// best practices:
|
||||||
// elements - means how many actual elements
|
// elements - means how many actual elements
|
||||||
// when maps = 14, formula: 0.7*(bits/maps), bits = 20*elements, the error rate is 0.000067 < 1e-4
|
// when maps = 14, formula: 0.7*(bits/maps), bits = 20*elements, the error rate is 0.000067 < 1e-4
|
||||||
// for detailed error rate table, see http://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
|
// for detailed error rate table, see http://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
|
||||||
func New(store *redis.Redis, key string, bits uint) *BloomFilter {
|
func New(store *redis.Redis, key string, bits uint) *Filter {
|
||||||
return &BloomFilter{
|
return &Filter{
|
||||||
bits: bits,
|
bits: bits,
|
||||||
bitSet: newRedisBitSet(store, key, bits),
|
bitSet: newRedisBitSet(store, key, bits),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *BloomFilter) Add(data []byte) error {
|
// Add adds data into f.
|
||||||
|
func (f *Filter) Add(data []byte) error {
|
||||||
locations := f.getLocations(data)
|
locations := f.getLocations(data)
|
||||||
err := f.bitSet.set(locations)
|
return f.bitSet.set(locations)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *BloomFilter) Exists(data []byte) (bool, error) {
|
// Exists checks if data is in f.
|
||||||
|
func (f *Filter) Exists(data []byte) (bool, error) {
|
||||||
locations := f.getLocations(data)
|
locations := f.getLocations(data)
|
||||||
isSet, err := f.bitSet.check(locations)
|
isSet, err := f.bitSet.check(locations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -79,7 +76,7 @@ func (f *BloomFilter) Exists(data []byte) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *BloomFilter) getLocations(data []byte) []uint {
|
func (f *Filter) getLocations(data []byte) []uint {
|
||||||
locations := make([]uint, maps)
|
locations := make([]uint, maps)
|
||||||
for i := uint(0); i < maps; i++ {
|
for i := uint(0); i < maps; i++ {
|
||||||
hashValue := hash.Hash(append(data, byte(i)))
|
hashValue := hash.Hash(append(data, byte(i)))
|
||||||
@@ -130,11 +127,12 @@ func (r *redisBitSet) check(offsets []uint) (bool, error) {
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if exists, ok := resp.(int64); !ok {
|
exists, ok := resp.(int64)
|
||||||
|
if !ok {
|
||||||
return false, nil
|
return false, nil
|
||||||
} else {
|
|
||||||
return exists == 1, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return exists == 1, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *redisBitSet) del() error {
|
func (r *redisBitSet) del() error {
|
||||||
@@ -155,7 +153,7 @@ func (r *redisBitSet) set(offsets []uint) error {
|
|||||||
_, err = r.store.Eval(setScript, []string{r.key}, args)
|
_, err = r.store.Eval(setScript, []string{r.key}, args)
|
||||||
if err == redis.Nil {
|
if err == redis.Nil {
|
||||||
return nil
|
return nil
|
||||||
} else {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,19 +3,15 @@ package bloom
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/alicebob/miniredis"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tal-tech/go-zero/core/stores/redis"
|
"github.com/tal-tech/go-zero/core/stores/redis/redistest"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRedisBitSet_New_Set_Test(t *testing.T) {
|
func TestRedisBitSet_New_Set_Test(t *testing.T) {
|
||||||
s, err := miniredis.Run()
|
store, clean, err := redistest.CreateRedis()
|
||||||
if err != nil {
|
assert.Nil(t, err)
|
||||||
t.Error("Miniredis could not start")
|
defer clean()
|
||||||
}
|
|
||||||
defer s.Close()
|
|
||||||
|
|
||||||
store := redis.NewRedis(s.Addr(), redis.NodeType)
|
|
||||||
bitSet := newRedisBitSet(store, "test_key", 1024)
|
bitSet := newRedisBitSet(store, "test_key", 1024)
|
||||||
isSetBefore, err := bitSet.check([]uint{0})
|
isSetBefore, err := bitSet.check([]uint{0})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -46,13 +42,10 @@ func TestRedisBitSet_New_Set_Test(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRedisBitSet_Add(t *testing.T) {
|
func TestRedisBitSet_Add(t *testing.T) {
|
||||||
s, err := miniredis.Run()
|
store, clean, err := redistest.CreateRedis()
|
||||||
if err != nil {
|
assert.Nil(t, err)
|
||||||
t.Error("Miniredis could not start")
|
defer clean()
|
||||||
}
|
|
||||||
defer s.Close()
|
|
||||||
|
|
||||||
store := redis.NewRedis(s.Addr(), redis.NodeType)
|
|
||||||
filter := New(store, "test_key", 64)
|
filter := New(store, "test_key", 64)
|
||||||
assert.Nil(t, filter.Add([]byte("hello")))
|
assert.Nil(t, filter.Add([]byte("hello")))
|
||||||
assert.Nil(t, filter.Add([]byte("world")))
|
assert.Nil(t, filter.Add([]byte("world")))
|
||||||
|
|||||||
@@ -5,17 +5,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/tal-tech/go-zero/core/mathx"
|
"github.com/tal-tech/go-zero/core/mathx"
|
||||||
"github.com/tal-tech/go-zero/core/proc"
|
"github.com/tal-tech/go-zero/core/proc"
|
||||||
"github.com/tal-tech/go-zero/core/stat"
|
"github.com/tal-tech/go-zero/core/stat"
|
||||||
"github.com/tal-tech/go-zero/core/stringx"
|
"github.com/tal-tech/go-zero/core/stringx"
|
||||||
)
|
"github.com/tal-tech/go-zero/core/timex"
|
||||||
|
|
||||||
const (
|
|
||||||
StateClosed State = iota
|
|
||||||
StateOpen
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -23,15 +18,16 @@ const (
|
|||||||
timeFormat = "15:04:05"
|
timeFormat = "15:04:05"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrServiceUnavailable is returned when the CB state is open
|
// ErrServiceUnavailable is returned when the Breaker state is open.
|
||||||
var ErrServiceUnavailable = errors.New("circuit breaker is open")
|
var ErrServiceUnavailable = errors.New("circuit breaker is open")
|
||||||
|
|
||||||
type (
|
type (
|
||||||
State = int32
|
// Acceptable is the func to check if the error can be accepted.
|
||||||
Acceptable func(err error) bool
|
Acceptable func(err error) bool
|
||||||
|
|
||||||
|
// A Breaker represents a circuit breaker.
|
||||||
Breaker interface {
|
Breaker interface {
|
||||||
// Name returns the name of the netflixBreaker.
|
// Name returns the name of the Breaker.
|
||||||
Name() string
|
Name() string
|
||||||
|
|
||||||
// Allow checks if the request is allowed.
|
// Allow checks if the request is allowed.
|
||||||
@@ -40,37 +36,41 @@ type (
|
|||||||
// If not allow, ErrServiceUnavailable will be returned.
|
// If not allow, ErrServiceUnavailable will be returned.
|
||||||
Allow() (Promise, error)
|
Allow() (Promise, error)
|
||||||
|
|
||||||
// Do runs the given request if the netflixBreaker accepts it.
|
// Do runs the given request if the Breaker accepts it.
|
||||||
// Do returns an error instantly if the netflixBreaker rejects the request.
|
// Do returns an error instantly if the Breaker rejects the request.
|
||||||
// If a panic occurs in the request, the netflixBreaker handles it as an error
|
// If a panic occurs in the request, the Breaker handles it as an error
|
||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
Do(req func() error) error
|
Do(req func() error) error
|
||||||
|
|
||||||
// DoWithAcceptable runs the given request if the netflixBreaker accepts it.
|
// DoWithAcceptable runs the given request if the Breaker accepts it.
|
||||||
// Do returns an error instantly if the netflixBreaker rejects the request.
|
// DoWithAcceptable returns an error instantly if the Breaker rejects the request.
|
||||||
// If a panic occurs in the request, the netflixBreaker handles it as an error
|
// If a panic occurs in the request, the Breaker handles it as an error
|
||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
// acceptable checks if it's a successful call, even if the err is not nil.
|
// acceptable checks if it's a successful call, even if the err is not nil.
|
||||||
DoWithAcceptable(req func() error, acceptable Acceptable) error
|
DoWithAcceptable(req func() error, acceptable Acceptable) error
|
||||||
|
|
||||||
// DoWithFallback runs the given request if the netflixBreaker accepts it.
|
// DoWithFallback runs the given request if the Breaker accepts it.
|
||||||
// DoWithFallback runs the fallback if the netflixBreaker rejects the request.
|
// DoWithFallback runs the fallback if the Breaker rejects the request.
|
||||||
// If a panic occurs in the request, the netflixBreaker handles it as an error
|
// If a panic occurs in the request, the Breaker handles it as an error
|
||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
DoWithFallback(req func() error, fallback func(err error) error) error
|
DoWithFallback(req func() error, fallback func(err error) error) error
|
||||||
|
|
||||||
// DoWithFallbackAcceptable runs the given request if the netflixBreaker accepts it.
|
// DoWithFallbackAcceptable runs the given request if the Breaker accepts it.
|
||||||
// DoWithFallback runs the fallback if the netflixBreaker rejects the request.
|
// DoWithFallbackAcceptable runs the fallback if the Breaker rejects the request.
|
||||||
// If a panic occurs in the request, the netflixBreaker handles it as an error
|
// If a panic occurs in the request, the Breaker handles it as an error
|
||||||
// and causes the same panic again.
|
// and causes the same panic again.
|
||||||
// acceptable checks if it's a successful call, even if the err is not nil.
|
// acceptable checks if it's a successful call, even if the err is not nil.
|
||||||
DoWithFallbackAcceptable(req func() error, fallback func(err error) error, acceptable Acceptable) error
|
DoWithFallbackAcceptable(req func() error, fallback func(err error) error, acceptable Acceptable) error
|
||||||
}
|
}
|
||||||
|
|
||||||
BreakerOption func(breaker *circuitBreaker)
|
// Option defines the method to customize a Breaker.
|
||||||
|
Option func(breaker *circuitBreaker)
|
||||||
|
|
||||||
|
// Promise interface defines the callbacks that returned by Breaker.Allow.
|
||||||
Promise interface {
|
Promise interface {
|
||||||
|
// Accept tells the Breaker that the call is successful.
|
||||||
Accept()
|
Accept()
|
||||||
|
// Reject tells the Breaker that the call is failed.
|
||||||
Reject(reason string)
|
Reject(reason string)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -95,7 +95,9 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewBreaker(opts ...BreakerOption) Breaker {
|
// NewBreaker returns a Breaker object.
|
||||||
|
// opts can be used to customize the Breaker.
|
||||||
|
func NewBreaker(opts ...Option) Breaker {
|
||||||
var b circuitBreaker
|
var b circuitBreaker
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
opt(&b)
|
opt(&b)
|
||||||
@@ -133,7 +135,8 @@ func (cb *circuitBreaker) Name() string {
|
|||||||
return cb.name
|
return cb.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func WithName(name string) BreakerOption {
|
// WithName returns a function to set the name of a Breaker.
|
||||||
|
func WithName(name string) Option {
|
||||||
return func(b *circuitBreaker) {
|
return func(b *circuitBreaker) {
|
||||||
b.name = name
|
b.name = name
|
||||||
}
|
}
|
||||||
@@ -195,23 +198,23 @@ type errorWindow struct {
|
|||||||
|
|
||||||
func (ew *errorWindow) add(reason string) {
|
func (ew *errorWindow) add(reason string) {
|
||||||
ew.lock.Lock()
|
ew.lock.Lock()
|
||||||
ew.reasons[ew.index] = fmt.Sprintf("%s %s", time.Now().Format(timeFormat), reason)
|
ew.reasons[ew.index] = fmt.Sprintf("%s %s", timex.Time().Format(timeFormat), reason)
|
||||||
ew.index = (ew.index + 1) % numHistoryReasons
|
ew.index = (ew.index + 1) % numHistoryReasons
|
||||||
ew.count = mathx.MinInt(ew.count+1, numHistoryReasons)
|
ew.count = mathx.MinInt(ew.count+1, numHistoryReasons)
|
||||||
ew.lock.Unlock()
|
ew.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ew *errorWindow) String() string {
|
func (ew *errorWindow) String() string {
|
||||||
var builder strings.Builder
|
var reasons []string
|
||||||
|
|
||||||
ew.lock.Lock()
|
ew.lock.Lock()
|
||||||
for i := ew.index + ew.count - 1; i >= ew.index; i-- {
|
// reverse order
|
||||||
builder.WriteString(ew.reasons[i%numHistoryReasons])
|
for i := ew.index - 1; i >= ew.index-ew.count; i-- {
|
||||||
builder.WriteByte('\n')
|
reasons = append(reasons, ew.reasons[(i+numHistoryReasons)%numHistoryReasons])
|
||||||
}
|
}
|
||||||
ew.lock.Unlock()
|
ew.lock.Unlock()
|
||||||
|
|
||||||
return builder.String()
|
return strings.Join(reasons, "\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
type promiseWithReason struct {
|
type promiseWithReason struct {
|
||||||
|
|||||||
@@ -2,7 +2,9 @@ package breaker
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -33,6 +35,84 @@ func TestLogReason(t *testing.T) {
|
|||||||
assert.Equal(t, numHistoryReasons, errs.count)
|
assert.Equal(t, numHistoryReasons, errs.count)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestErrorWindow(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
reasons []string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no error",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "one error",
|
||||||
|
reasons: []string{"foo"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "two errors",
|
||||||
|
reasons: []string{"foo", "bar"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "five errors",
|
||||||
|
reasons: []string{"first", "second", "third", "fourth", "fifth"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "six errors",
|
||||||
|
reasons: []string{"first", "second", "third", "fourth", "fifth", "sixth"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
var ew errorWindow
|
||||||
|
for _, reason := range test.reasons {
|
||||||
|
ew.add(reason)
|
||||||
|
}
|
||||||
|
var reasons []string
|
||||||
|
if len(test.reasons) > numHistoryReasons {
|
||||||
|
reasons = test.reasons[len(test.reasons)-numHistoryReasons:]
|
||||||
|
} else {
|
||||||
|
reasons = test.reasons
|
||||||
|
}
|
||||||
|
for _, reason := range reasons {
|
||||||
|
assert.True(t, strings.Contains(ew.String(), reason), fmt.Sprintf("actual: %s", ew.String()))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPromiseWithReason(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
reason string
|
||||||
|
expect string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "success",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "success",
|
||||||
|
reason: "fail",
|
||||||
|
expect: "fail",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
promise := promiseWithReason{
|
||||||
|
promise: new(mockedPromise),
|
||||||
|
errWin: new(errorWindow),
|
||||||
|
}
|
||||||
|
if len(test.reason) == 0 {
|
||||||
|
promise.Accept()
|
||||||
|
} else {
|
||||||
|
promise.Reject(test.reason)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.True(t, strings.Contains(promise.errWin.String(), test.expect))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkGoogleBreaker(b *testing.B) {
|
func BenchmarkGoogleBreaker(b *testing.B) {
|
||||||
br := NewBreaker()
|
br := NewBreaker()
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
@@ -41,3 +121,11 @@ func BenchmarkGoogleBreaker(b *testing.B) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type mockedPromise struct{}
|
||||||
|
|
||||||
|
func (m *mockedPromise) Accept() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockedPromise) Reject() {
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,24 +7,28 @@ var (
|
|||||||
breakers = make(map[string]Breaker)
|
breakers = make(map[string]Breaker)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Do calls Breaker.Do on the Breaker with given name.
|
||||||
func Do(name string, req func() error) error {
|
func Do(name string, req func() error) error {
|
||||||
return do(name, func(b Breaker) error {
|
return do(name, func(b Breaker) error {
|
||||||
return b.Do(req)
|
return b.Do(req)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoWithAcceptable calls Breaker.DoWithAcceptable on the Breaker with given name.
|
||||||
func DoWithAcceptable(name string, req func() error, acceptable Acceptable) error {
|
func DoWithAcceptable(name string, req func() error, acceptable Acceptable) error {
|
||||||
return do(name, func(b Breaker) error {
|
return do(name, func(b Breaker) error {
|
||||||
return b.DoWithAcceptable(req, acceptable)
|
return b.DoWithAcceptable(req, acceptable)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoWithFallback calls Breaker.DoWithFallback on the Breaker with given name.
|
||||||
func DoWithFallback(name string, req func() error, fallback func(err error) error) error {
|
func DoWithFallback(name string, req func() error, fallback func(err error) error) error {
|
||||||
return do(name, func(b Breaker) error {
|
return do(name, func(b Breaker) error {
|
||||||
return b.DoWithFallback(req, fallback)
|
return b.DoWithFallback(req, fallback)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoWithFallbackAcceptable calls Breaker.DoWithFallbackAcceptable on the Breaker with given name.
|
||||||
func DoWithFallbackAcceptable(name string, req func() error, fallback func(err error) error,
|
func DoWithFallbackAcceptable(name string, req func() error, fallback func(err error) error,
|
||||||
acceptable Acceptable) error {
|
acceptable Acceptable) error {
|
||||||
return do(name, func(b Breaker) error {
|
return do(name, func(b Breaker) error {
|
||||||
@@ -32,6 +36,7 @@ func DoWithFallbackAcceptable(name string, req func() error, fallback func(err e
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBreaker returns the Breaker with the given name.
|
||||||
func GetBreaker(name string) Breaker {
|
func GetBreaker(name string) Breaker {
|
||||||
lock.RLock()
|
lock.RLock()
|
||||||
b, ok := breakers[name]
|
b, ok := breakers[name]
|
||||||
@@ -41,36 +46,23 @@ func GetBreaker(name string) Breaker {
|
|||||||
}
|
}
|
||||||
|
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
defer lock.Unlock()
|
b, ok = breakers[name]
|
||||||
|
if !ok {
|
||||||
|
b = NewBreaker(WithName(name))
|
||||||
|
breakers[name] = b
|
||||||
|
}
|
||||||
|
lock.Unlock()
|
||||||
|
|
||||||
b = NewBreaker()
|
|
||||||
breakers[name] = b
|
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
func NoBreakFor(name string) {
|
// NoBreakerFor disables the circuit breaker for the given name.
|
||||||
|
func NoBreakerFor(name string) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
breakers[name] = newNoOpBreaker()
|
breakers[name] = newNoOpBreaker()
|
||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func do(name string, execute func(b Breaker) error) error {
|
func do(name string, execute func(b Breaker) error) error {
|
||||||
lock.RLock()
|
return execute(GetBreaker(name))
|
||||||
b, ok := breakers[name]
|
|
||||||
lock.RUnlock()
|
|
||||||
if ok {
|
|
||||||
return execute(b)
|
|
||||||
} else {
|
|
||||||
lock.Lock()
|
|
||||||
b, ok = breakers[name]
|
|
||||||
if ok {
|
|
||||||
lock.Unlock()
|
|
||||||
return execute(b)
|
|
||||||
} else {
|
|
||||||
b = NewBreaker(WithName(name))
|
|
||||||
breakers[name] = b
|
|
||||||
lock.Unlock()
|
|
||||||
return execute(b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -55,7 +55,7 @@ func TestBreakersDoWithAcceptable(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBreakersNoBreakerFor(t *testing.T) {
|
func TestBreakersNoBreakerFor(t *testing.T) {
|
||||||
NoBreakFor("any")
|
NoBreakerFor("any")
|
||||||
errDummy := errors.New("any")
|
errDummy := errors.New("any")
|
||||||
for i := 0; i < 10000; i++ {
|
for i := 0; i < 10000; i++ {
|
||||||
assert.Equal(t, errDummy, GetBreaker("any").Do(func() error {
|
assert.Equal(t, errDummy, GetBreaker("any").Do(func() error {
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package breaker
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
"math"
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/tal-tech/go-zero/core/collection"
|
"github.com/tal-tech/go-zero/core/collection"
|
||||||
@@ -21,7 +20,6 @@ const (
|
|||||||
// see Client-Side Throttling section in https://landing.google.com/sre/sre-book/chapters/handling-overload/
|
// see Client-Side Throttling section in https://landing.google.com/sre/sre-book/chapters/handling-overload/
|
||||||
type googleBreaker struct {
|
type googleBreaker struct {
|
||||||
k float64
|
k float64
|
||||||
state int32
|
|
||||||
stat *collection.RollingWindow
|
stat *collection.RollingWindow
|
||||||
proba *mathx.Proba
|
proba *mathx.Proba
|
||||||
}
|
}
|
||||||
@@ -32,7 +30,6 @@ func newGoogleBreaker() *googleBreaker {
|
|||||||
return &googleBreaker{
|
return &googleBreaker{
|
||||||
stat: st,
|
stat: st,
|
||||||
k: k,
|
k: k,
|
||||||
state: StateClosed,
|
|
||||||
proba: mathx.NewProba(),
|
proba: mathx.NewProba(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -43,15 +40,9 @@ func (b *googleBreaker) accept() error {
|
|||||||
// https://landing.google.com/sre/sre-book/chapters/handling-overload/#eq2101
|
// https://landing.google.com/sre/sre-book/chapters/handling-overload/#eq2101
|
||||||
dropRatio := math.Max(0, (float64(total-protection)-weightedAccepts)/float64(total+1))
|
dropRatio := math.Max(0, (float64(total-protection)-weightedAccepts)/float64(total+1))
|
||||||
if dropRatio <= 0 {
|
if dropRatio <= 0 {
|
||||||
if atomic.LoadInt32(&b.state) == StateOpen {
|
|
||||||
atomic.CompareAndSwapInt32(&b.state, StateOpen, StateClosed)
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if atomic.LoadInt32(&b.state) == StateClosed {
|
|
||||||
atomic.CompareAndSwapInt32(&b.state, StateClosed, StateOpen)
|
|
||||||
}
|
|
||||||
if b.proba.TrueOnProba(dropRatio) {
|
if b.proba.TrueOnProba(dropRatio) {
|
||||||
return ErrServiceUnavailable
|
return ErrServiceUnavailable
|
||||||
}
|
}
|
||||||
@@ -73,9 +64,9 @@ func (b *googleBreaker) doReq(req func() error, fallback func(err error) error,
|
|||||||
if err := b.accept(); err != nil {
|
if err := b.accept(); err != nil {
|
||||||
if fallback != nil {
|
if fallback != nil {
|
||||||
return fallback(err)
|
return fallback(err)
|
||||||
} else {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
@@ -103,7 +94,7 @@ func (b *googleBreaker) markFailure() {
|
|||||||
b.stat.Add(0)
|
b.stat.Add(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *googleBreaker) history() (accepts int64, total int64) {
|
func (b *googleBreaker) history() (accepts, total int64) {
|
||||||
b.stat.Reduce(func(b *collection.Bucket) {
|
b.stat.Reduce(func(b *collection.Bucket) {
|
||||||
accepts += int64(b.Sum)
|
accepts += int64(b.Sum)
|
||||||
total += b.Count
|
total += b.Count
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package breaker
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"math"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -27,7 +26,6 @@ func getGoogleBreaker() *googleBreaker {
|
|||||||
return &googleBreaker{
|
return &googleBreaker{
|
||||||
stat: st,
|
stat: st,
|
||||||
k: 5,
|
k: 5,
|
||||||
state: StateClosed,
|
|
||||||
proba: mathx.NewProba(),
|
proba: mathx.NewProba(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -158,7 +156,7 @@ func TestGoogleBreakerSelfProtection(t *testing.T) {
|
|||||||
t.Run("total request > 100, total < 2 * success", func(t *testing.T) {
|
t.Run("total request > 100, total < 2 * success", func(t *testing.T) {
|
||||||
b := getGoogleBreaker()
|
b := getGoogleBreaker()
|
||||||
size := rand.Intn(10000)
|
size := rand.Intn(10000)
|
||||||
accepts := int(math.Ceil(float64(size))) + 1
|
accepts := size + 1
|
||||||
markSuccess(b, accepts)
|
markSuccess(b, accepts)
|
||||||
markFailed(b, size-accepts)
|
markFailed(b, size-accepts)
|
||||||
assert.Nil(t, b.accept())
|
assert.Nil(t, b.accept())
|
||||||
|
|||||||
@@ -7,11 +7,13 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// EnterToContinue let stdin waiting for an enter key to continue.
|
||||||
func EnterToContinue() {
|
func EnterToContinue() {
|
||||||
fmt.Print("Press 'Enter' to continue...")
|
fmt.Print("Press 'Enter' to continue...")
|
||||||
bufio.NewReader(os.Stdin).ReadBytes('\n')
|
bufio.NewReader(os.Stdin).ReadBytes('\n')
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadLine shows prompt to stdout and read a line from stdin.
|
||||||
func ReadLine(prompt string) string {
|
func ReadLine(prompt string) string {
|
||||||
fmt.Print(prompt)
|
fmt.Print(prompt)
|
||||||
input, _ := bufio.NewReader(os.Stdin).ReadString('\n')
|
input, _ := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||||
|
|||||||
80
core/cmdline/input_test.go
Normal file
80
core/cmdline/input_test.go
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
package cmdline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/iox"
|
||||||
|
"github.com/tal-tech/go-zero/core/lang"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEnterToContinue(t *testing.T) {
|
||||||
|
restore, err := iox.RedirectInOut()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer restore()
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
fmt.Println()
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
EnterToContinue()
|
||||||
|
}()
|
||||||
|
|
||||||
|
wait := make(chan lang.PlaceholderType)
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(wait)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Error("timeout")
|
||||||
|
case <-wait:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadLine(t *testing.T) {
|
||||||
|
r, w, err := os.Pipe()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
ow := os.Stdout
|
||||||
|
os.Stdout = w
|
||||||
|
or := os.Stdin
|
||||||
|
os.Stdin = r
|
||||||
|
defer func() {
|
||||||
|
os.Stdin = or
|
||||||
|
os.Stdout = ow
|
||||||
|
}()
|
||||||
|
|
||||||
|
const message = "hello"
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
fmt.Println(message)
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
input := ReadLine("")
|
||||||
|
assert.Equal(t, message, input)
|
||||||
|
}()
|
||||||
|
|
||||||
|
wait := make(chan lang.PlaceholderType)
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(wait)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Error("timeout")
|
||||||
|
case <-wait:
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/logx"
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ErrPaddingSize indicates bad padding size.
|
||||||
var ErrPaddingSize = errors.New("padding size error")
|
var ErrPaddingSize = errors.New("padding size error")
|
||||||
|
|
||||||
type ecb struct {
|
type ecb struct {
|
||||||
@@ -26,6 +27,7 @@ func newECB(b cipher.Block) *ecb {
|
|||||||
|
|
||||||
type ecbEncrypter ecb
|
type ecbEncrypter ecb
|
||||||
|
|
||||||
|
// NewECBEncrypter returns an ECB encrypter.
|
||||||
func NewECBEncrypter(b cipher.Block) cipher.BlockMode {
|
func NewECBEncrypter(b cipher.Block) cipher.BlockMode {
|
||||||
return (*ecbEncrypter)(newECB(b))
|
return (*ecbEncrypter)(newECB(b))
|
||||||
}
|
}
|
||||||
@@ -52,6 +54,7 @@ func (x *ecbEncrypter) CryptBlocks(dst, src []byte) {
|
|||||||
|
|
||||||
type ecbDecrypter ecb
|
type ecbDecrypter ecb
|
||||||
|
|
||||||
|
// NewECBDecrypter returns an ECB decrypter.
|
||||||
func NewECBDecrypter(b cipher.Block) cipher.BlockMode {
|
func NewECBDecrypter(b cipher.Block) cipher.BlockMode {
|
||||||
return (*ecbDecrypter)(newECB(b))
|
return (*ecbDecrypter)(newECB(b))
|
||||||
}
|
}
|
||||||
@@ -78,6 +81,7 @@ func (x *ecbDecrypter) CryptBlocks(dst, src []byte) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EcbDecrypt decrypts src with the given key.
|
||||||
func EcbDecrypt(key, src []byte) ([]byte, error) {
|
func EcbDecrypt(key, src []byte) ([]byte, error) {
|
||||||
block, err := aes.NewCipher(key)
|
block, err := aes.NewCipher(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -92,6 +96,8 @@ func EcbDecrypt(key, src []byte) ([]byte, error) {
|
|||||||
return pkcs5Unpadding(decrypted, decrypter.BlockSize())
|
return pkcs5Unpadding(decrypted, decrypter.BlockSize())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EcbDecryptBase64 decrypts base64 encoded src with the given base64 encoded key.
|
||||||
|
// The returned string is also base64 encoded.
|
||||||
func EcbDecryptBase64(key, src string) (string, error) {
|
func EcbDecryptBase64(key, src string) (string, error) {
|
||||||
keyBytes, err := getKeyBytes(key)
|
keyBytes, err := getKeyBytes(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -111,6 +117,7 @@ func EcbDecryptBase64(key, src string) (string, error) {
|
|||||||
return base64.StdEncoding.EncodeToString(decryptedBytes), nil
|
return base64.StdEncoding.EncodeToString(decryptedBytes), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EcbEncrypt encrypts src with the given key.
|
||||||
func EcbEncrypt(key, src []byte) ([]byte, error) {
|
func EcbEncrypt(key, src []byte) ([]byte, error) {
|
||||||
block, err := aes.NewCipher(key)
|
block, err := aes.NewCipher(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -126,6 +133,8 @@ func EcbEncrypt(key, src []byte) ([]byte, error) {
|
|||||||
return crypted, nil
|
return crypted, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EcbEncryptBase64 encrypts base64 encoded src with the given base64 encoded key.
|
||||||
|
// The returned string is also base64 encoded.
|
||||||
func EcbEncryptBase64(key, src string) (string, error) {
|
func EcbEncryptBase64(key, src string) (string, error) {
|
||||||
keyBytes, err := getKeyBytes(key)
|
keyBytes, err := getKeyBytes(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -146,15 +155,16 @@ func EcbEncryptBase64(key, src string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getKeyBytes(key string) ([]byte, error) {
|
func getKeyBytes(key string) ([]byte, error) {
|
||||||
if len(key) > 32 {
|
if len(key) <= 32 {
|
||||||
if keyBytes, err := base64.StdEncoding.DecodeString(key); err != nil {
|
return []byte(key), nil
|
||||||
return nil, err
|
|
||||||
} else {
|
|
||||||
return keyBytes, nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return []byte(key), nil
|
keyBytes, err := base64.StdEncoding.DecodeString(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return keyBytes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func pkcs5Padding(ciphertext []byte, blockSize int) []byte {
|
func pkcs5Padding(ciphertext []byte, blockSize int) []byte {
|
||||||
|
|||||||
65
core/codec/aesecb_test.go
Normal file
65
core/codec/aesecb_test.go
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAesEcb(t *testing.T) {
|
||||||
|
var (
|
||||||
|
key = []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
|
||||||
|
val = []byte("hello")
|
||||||
|
badKey1 = []byte("aaaaaaaaa")
|
||||||
|
// more than 32 chars
|
||||||
|
badKey2 = []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
|
||||||
|
)
|
||||||
|
_, err := EcbEncrypt(badKey1, val)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
_, err = EcbEncrypt(badKey2, val)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
dst, err := EcbEncrypt(key, val)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
_, err = EcbDecrypt(badKey1, dst)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
_, err = EcbDecrypt(badKey2, dst)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
_, err = EcbDecrypt(key, val)
|
||||||
|
// not enough block, just nil
|
||||||
|
assert.Nil(t, err)
|
||||||
|
src, err := EcbDecrypt(key, dst)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, val, src)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAesEcbBase64(t *testing.T) {
|
||||||
|
const (
|
||||||
|
val = "hello"
|
||||||
|
badKey1 = "aaaaaaaaa"
|
||||||
|
// more than 32 chars
|
||||||
|
badKey2 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
||||||
|
)
|
||||||
|
key := []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
|
||||||
|
b64Key := base64.StdEncoding.EncodeToString(key)
|
||||||
|
b64Val := base64.StdEncoding.EncodeToString([]byte(val))
|
||||||
|
_, err := EcbEncryptBase64(badKey1, val)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
_, err = EcbEncryptBase64(badKey2, val)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
_, err = EcbEncryptBase64(b64Key, val)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
dst, err := EcbEncryptBase64(b64Key, b64Val)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
_, err = EcbDecryptBase64(badKey1, dst)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
_, err = EcbDecryptBase64(badKey2, dst)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
_, err = EcbDecryptBase64(b64Key, val)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
src, err := EcbDecryptBase64(b64Key, dst)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
b, err := base64.StdEncoding.DecodeString(src)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, val, string(b))
|
||||||
|
}
|
||||||
@@ -11,8 +11,11 @@ import (
|
|||||||
// 2048-bit MODP Group
|
// 2048-bit MODP Group
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrInvalidPriKey = errors.New("invalid private key")
|
// ErrInvalidPriKey indicates the invalid private key.
|
||||||
ErrInvalidPubKey = errors.New("invalid public key")
|
ErrInvalidPriKey = errors.New("invalid private key")
|
||||||
|
// ErrInvalidPubKey indicates the invalid public key.
|
||||||
|
ErrInvalidPubKey = errors.New("invalid public key")
|
||||||
|
// ErrPubKeyOutOfBound indicates the public key is out of bound.
|
||||||
ErrPubKeyOutOfBound = errors.New("public key out of bound")
|
ErrPubKeyOutOfBound = errors.New("public key out of bound")
|
||||||
|
|
||||||
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
|
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
|
||||||
@@ -20,11 +23,13 @@ var (
|
|||||||
zero = big.NewInt(0)
|
zero = big.NewInt(0)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DhKey defines the Diffie Hellman key.
|
||||||
type DhKey struct {
|
type DhKey struct {
|
||||||
PriKey *big.Int
|
PriKey *big.Int
|
||||||
PubKey *big.Int
|
PubKey *big.Int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ComputeKey returns a key from public key and private key.
|
||||||
func ComputeKey(pubKey, priKey *big.Int) (*big.Int, error) {
|
func ComputeKey(pubKey, priKey *big.Int) (*big.Int, error) {
|
||||||
if pubKey == nil {
|
if pubKey == nil {
|
||||||
return nil, ErrInvalidPubKey
|
return nil, ErrInvalidPubKey
|
||||||
@@ -41,6 +46,7 @@ func ComputeKey(pubKey, priKey *big.Int) (*big.Int, error) {
|
|||||||
return new(big.Int).Exp(pubKey, priKey, p), nil
|
return new(big.Int).Exp(pubKey, priKey, p), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GenerateKey returns a Diffie Hellman key.
|
||||||
func GenerateKey() (*DhKey, error) {
|
func GenerateKey() (*DhKey, error) {
|
||||||
var err error
|
var err error
|
||||||
var x *big.Int
|
var x *big.Int
|
||||||
@@ -63,10 +69,12 @@ func GenerateKey() (*DhKey, error) {
|
|||||||
return key, nil
|
return key, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewPublicKey returns a public key from the given bytes.
|
||||||
func NewPublicKey(bs []byte) *big.Int {
|
func NewPublicKey(bs []byte) *big.Int {
|
||||||
return new(big.Int).SetBytes(bs)
|
return new(big.Int).SetBytes(bs)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Bytes returns public key bytes.
|
||||||
func (k *DhKey) Bytes() []byte {
|
func (k *DhKey) Bytes() []byte {
|
||||||
if k.PubKey == nil {
|
if k.PubKey == nil {
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -71,3 +71,12 @@ func TestDiffieHellmanMiddleManAttack(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, string(src), string(decryptedSrc))
|
assert.Equal(t, string(src), string(decryptedSrc))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestKeyBytes(t *testing.T) {
|
||||||
|
var empty DhKey
|
||||||
|
assert.Equal(t, 0, len(empty.Bytes()))
|
||||||
|
|
||||||
|
key, err := GenerateKey()
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.True(t, len(key.Bytes()) > 0)
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,6 +6,9 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const unzipLimit = 100 * 1024 * 1024 // 100MB
|
||||||
|
|
||||||
|
// Gzip compresses bs.
|
||||||
func Gzip(bs []byte) []byte {
|
func Gzip(bs []byte) []byte {
|
||||||
var b bytes.Buffer
|
var b bytes.Buffer
|
||||||
|
|
||||||
@@ -16,6 +19,7 @@ func Gzip(bs []byte) []byte {
|
|||||||
return b.Bytes()
|
return b.Bytes()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Gunzip uncompresses bs.
|
||||||
func Gunzip(bs []byte) ([]byte, error) {
|
func Gunzip(bs []byte) ([]byte, error) {
|
||||||
r, err := gzip.NewReader(bytes.NewBuffer(bs))
|
r, err := gzip.NewReader(bytes.NewBuffer(bs))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -24,8 +28,7 @@ func Gunzip(bs []byte) ([]byte, error) {
|
|||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
var c bytes.Buffer
|
var c bytes.Buffer
|
||||||
_, err = io.Copy(&c, r)
|
if _, err = io.Copy(&c, io.LimitReader(r, unzipLimit)); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,12 +7,14 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Hmac returns HMAC bytes for body with the given key.
|
||||||
func Hmac(key []byte, body string) []byte {
|
func Hmac(key []byte, body string) []byte {
|
||||||
h := hmac.New(sha256.New, key)
|
h := hmac.New(sha256.New, key)
|
||||||
io.WriteString(h, body)
|
io.WriteString(h, body)
|
||||||
return h.Sum(nil)
|
return h.Sum(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HmacBase64 returns the base64 encoded string of HMAC for body with the given key.
|
||||||
func HmacBase64(key []byte, body string) string {
|
func HmacBase64(key []byte, body string) string {
|
||||||
return base64.StdEncoding.EncodeToString(Hmac(key, body))
|
return base64.StdEncoding.EncodeToString(Hmac(key, body))
|
||||||
}
|
}
|
||||||
|
|||||||
19
core/codec/hmac_test.go
Normal file
19
core/codec/hmac_test.go
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHmac(t *testing.T) {
|
||||||
|
ret := Hmac([]byte("foo"), "bar")
|
||||||
|
assert.Equal(t, "f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
|
||||||
|
fmt.Sprintf("%x", ret))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHmacBase64(t *testing.T) {
|
||||||
|
ret := HmacBase64([]byte("foo"), "bar")
|
||||||
|
assert.Equal(t, "+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc=", ret)
|
||||||
|
}
|
||||||
@@ -11,17 +11,22 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
// ErrPrivateKey indicates the invalid private key.
|
||||||
ErrPrivateKey = errors.New("private key error")
|
ErrPrivateKey = errors.New("private key error")
|
||||||
ErrPublicKey = errors.New("failed to parse PEM block containing the public key")
|
// ErrPublicKey indicates the invalid public key.
|
||||||
ErrNotRsaKey = errors.New("key type is not RSA")
|
ErrPublicKey = errors.New("failed to parse PEM block containing the public key")
|
||||||
|
// ErrNotRsaKey indicates the invalid RSA key.
|
||||||
|
ErrNotRsaKey = errors.New("key type is not RSA")
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// RsaDecrypter represents a RSA decrypter.
|
||||||
RsaDecrypter interface {
|
RsaDecrypter interface {
|
||||||
Decrypt(input []byte) ([]byte, error)
|
Decrypt(input []byte) ([]byte, error)
|
||||||
DecryptBase64(input string) ([]byte, error)
|
DecryptBase64(input string) ([]byte, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RsaEncrypter represents a RSA encrypter.
|
||||||
RsaEncrypter interface {
|
RsaEncrypter interface {
|
||||||
Encrypt(input []byte) ([]byte, error)
|
Encrypt(input []byte) ([]byte, error)
|
||||||
}
|
}
|
||||||
@@ -41,6 +46,7 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewRsaDecrypter returns a RsaDecrypter with the given file.
|
||||||
func NewRsaDecrypter(file string) (RsaDecrypter, error) {
|
func NewRsaDecrypter(file string) (RsaDecrypter, error) {
|
||||||
content, err := ioutil.ReadFile(file)
|
content, err := ioutil.ReadFile(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -84,6 +90,7 @@ func (r *rsaDecrypter) DecryptBase64(input string) ([]byte, error) {
|
|||||||
return r.Decrypt(base64Decoded)
|
return r.Decrypt(base64Decoded)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewRsaEncrypter returns a RsaEncrypter with the given key.
|
||||||
func NewRsaEncrypter(key []byte) (RsaEncrypter, error) {
|
func NewRsaEncrypter(key []byte) (RsaEncrypter, error) {
|
||||||
block, _ := pem.Decode(key)
|
block, _ := pem.Decode(key)
|
||||||
if block == nil {
|
if block == nil {
|
||||||
|
|||||||
58
core/codec/rsa_test.go
Normal file
58
core/codec/rsa_test.go
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/fs"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
priKey = `-----BEGIN RSA PRIVATE KEY-----
|
||||||
|
MIICXQIBAAKBgQC4TJk3onpqb2RYE3wwt23J9SHLFstHGSkUYFLe+nl1dEKHbD+/
|
||||||
|
Zt95L757J3xGTrwoTc7KCTxbrgn+stn0w52BNjj/kIE2ko4lbh/v8Fl14AyVR9ms
|
||||||
|
fKtKOnhe5FCT72mdtApr+qvzcC3q9hfXwkyQU32pv7q5UimZ205iKSBmgQIDAQAB
|
||||||
|
AoGAM5mWqGIAXj5z3MkP01/4CDxuyrrGDVD5FHBno3CDgyQa4Gmpa4B0/ywj671B
|
||||||
|
aTnwKmSmiiCN2qleuQYASixes2zY5fgTzt+7KNkl9JHsy7i606eH2eCKzsUa/s6u
|
||||||
|
WD8V3w/hGCQ9zYI18ihwyXlGHIgcRz/eeRh+nWcWVJzGOPUCQQD5nr6It/1yHb1p
|
||||||
|
C6l4fC4xXF19l4KxJjGu1xv/sOpSx0pOqBDEX3Mh//FU954392rUWDXV1/I65BPt
|
||||||
|
TLphdsu3AkEAvQJ2Qay/lffFj9FaUrvXuftJZ/Ypn0FpaSiUh3Ak3obBT6UvSZS0
|
||||||
|
bcYdCJCNHDtBOsWHnIN1x+BcWAPrdU7PhwJBAIQ0dUlH2S3VXnoCOTGc44I1Hzbj
|
||||||
|
Rc65IdsuBqA3fQN2lX5vOOIog3vgaFrOArg1jBkG1wx5IMvb/EnUN2pjVqUCQCza
|
||||||
|
KLXtCInOAlPemlCHwumfeAvznmzsWNdbieOZ+SXVVIpR6KbNYwOpv7oIk3Pfm9sW
|
||||||
|
hNffWlPUKhW42Gc+DIECQQDmk20YgBXwXWRM5DRPbhisIV088N5Z58K9DtFWkZsd
|
||||||
|
OBDT3dFcgZONtlmR1MqZO0pTh30lA4qovYj3Bx7A8i36
|
||||||
|
-----END RSA PRIVATE KEY-----`
|
||||||
|
pubKey = `-----BEGIN PUBLIC KEY-----
|
||||||
|
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4TJk3onpqb2RYE3wwt23J9SHL
|
||||||
|
FstHGSkUYFLe+nl1dEKHbD+/Zt95L757J3xGTrwoTc7KCTxbrgn+stn0w52BNjj/
|
||||||
|
kIE2ko4lbh/v8Fl14AyVR9msfKtKOnhe5FCT72mdtApr+qvzcC3q9hfXwkyQU32p
|
||||||
|
v7q5UimZ205iKSBmgQIDAQAB
|
||||||
|
-----END PUBLIC KEY-----`
|
||||||
|
testBody = `this is the content`
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCryption(t *testing.T) {
|
||||||
|
enc, err := NewRsaEncrypter([]byte(pubKey))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
ret, err := enc.Encrypt([]byte(testBody))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
file, err := fs.TempFilenameWithText(priKey)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
dec, err := NewRsaDecrypter(file)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
actual, err := dec.Decrypt(ret)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, testBody, string(actual))
|
||||||
|
|
||||||
|
actual, err = dec.DecryptBase64(base64.StdEncoding.EncodeToString(ret))
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, testBody, string(actual))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBadPubKey(t *testing.T) {
|
||||||
|
_, err := NewRsaEncrypter([]byte("foo"))
|
||||||
|
assert.Equal(t, ErrPublicKey, err)
|
||||||
|
}
|
||||||
@@ -23,28 +23,30 @@ const (
|
|||||||
var emptyLruCache = emptyLru{}
|
var emptyLruCache = emptyLru{}
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// CacheOption defines the method to customize a Cache.
|
||||||
CacheOption func(cache *Cache)
|
CacheOption func(cache *Cache)
|
||||||
|
|
||||||
|
// A Cache object is a in-memory cache.
|
||||||
Cache struct {
|
Cache struct {
|
||||||
name string
|
name string
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
data map[string]interface{}
|
data map[string]interface{}
|
||||||
evicts *list.List
|
|
||||||
expire time.Duration
|
expire time.Duration
|
||||||
timingWheel *TimingWheel
|
timingWheel *TimingWheel
|
||||||
lruCache lru
|
lruCache lru
|
||||||
barrier syncx.SharedCalls
|
barrier syncx.SingleFlight
|
||||||
unstableExpiry mathx.Unstable
|
unstableExpiry mathx.Unstable
|
||||||
stats *cacheStat
|
stats *cacheStat
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewCache returns a Cache with given expire.
|
||||||
func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
|
func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
|
||||||
cache := &Cache{
|
cache := &Cache{
|
||||||
data: make(map[string]interface{}),
|
data: make(map[string]interface{}),
|
||||||
expire: expire,
|
expire: expire,
|
||||||
lruCache: emptyLruCache,
|
lruCache: emptyLruCache,
|
||||||
barrier: syncx.NewSharedCalls(),
|
barrier: syncx.NewSingleFlight(),
|
||||||
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
unstableExpiry: mathx.NewUnstable(expiryDeviation),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -73,6 +75,7 @@ func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
|
|||||||
return cache, nil
|
return cache, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Del deletes the item with the given key from c.
|
||||||
func (c *Cache) Del(key string) {
|
func (c *Cache) Del(key string) {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
delete(c.data, key)
|
delete(c.data, key)
|
||||||
@@ -81,13 +84,9 @@ func (c *Cache) Del(key string) {
|
|||||||
c.timingWheel.RemoveTimer(key)
|
c.timingWheel.RemoveTimer(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get returns the item with the given key from c.
|
||||||
func (c *Cache) Get(key string) (interface{}, bool) {
|
func (c *Cache) Get(key string) (interface{}, bool) {
|
||||||
c.lock.Lock()
|
value, ok := c.doGet(key)
|
||||||
value, ok := c.data[key]
|
|
||||||
if ok {
|
|
||||||
c.lruCache.add(key)
|
|
||||||
}
|
|
||||||
c.lock.Unlock()
|
|
||||||
if ok {
|
if ok {
|
||||||
c.stats.IncrementHit()
|
c.stats.IncrementHit()
|
||||||
} else {
|
} else {
|
||||||
@@ -97,6 +96,7 @@ func (c *Cache) Get(key string) (interface{}, bool) {
|
|||||||
return value, ok
|
return value, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set sets value into c with key.
|
||||||
func (c *Cache) Set(key string, value interface{}) {
|
func (c *Cache) Set(key string, value interface{}) {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
_, ok := c.data[key]
|
_, ok := c.data[key]
|
||||||
@@ -112,13 +112,29 @@ func (c *Cache) Set(key string, value interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Take returns the item with the given key.
|
||||||
|
// If the item is in c, return it directly.
|
||||||
|
// If not, use fetch method to get the item, set into c and return it.
|
||||||
func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
|
func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
|
||||||
val, fresh, err := c.barrier.DoEx(key, func() (interface{}, error) {
|
if val, ok := c.doGet(key); ok {
|
||||||
|
c.stats.IncrementHit()
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var fresh bool
|
||||||
|
val, err := c.barrier.Do(key, func() (interface{}, error) {
|
||||||
|
// because O(1) on map search in memory, and fetch is an IO query
|
||||||
|
// so we do double check, cache might be taken by another call
|
||||||
|
if val, ok := c.doGet(key); ok {
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
v, e := fetch()
|
v, e := fetch()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return nil, e
|
return nil, e
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fresh = true
|
||||||
c.Set(key, v)
|
c.Set(key, v)
|
||||||
return v, nil
|
return v, nil
|
||||||
})
|
})
|
||||||
@@ -129,14 +145,25 @@ func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}
|
|||||||
if fresh {
|
if fresh {
|
||||||
c.stats.IncrementMiss()
|
c.stats.IncrementMiss()
|
||||||
return val, nil
|
return val, nil
|
||||||
} else {
|
|
||||||
// got the result from previous ongoing query
|
|
||||||
c.stats.IncrementHit()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// got the result from previous ongoing query
|
||||||
|
c.stats.IncrementHit()
|
||||||
return val, nil
|
return val, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Cache) doGet(key string) (interface{}, bool) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
value, ok := c.data[key]
|
||||||
|
if ok {
|
||||||
|
c.lruCache.add(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Cache) onEvict(key string) {
|
func (c *Cache) onEvict(key string) {
|
||||||
// already locked
|
// already locked
|
||||||
delete(c.data, key)
|
delete(c.data, key)
|
||||||
@@ -149,6 +176,7 @@ func (c *Cache) size() int {
|
|||||||
return len(c.data)
|
return len(c.data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithLimit customizes a Cache with items up to limit.
|
||||||
func WithLimit(limit int) CacheOption {
|
func WithLimit(limit int) CacheOption {
|
||||||
return func(cache *Cache) {
|
return func(cache *Cache) {
|
||||||
if limit > 0 {
|
if limit > 0 {
|
||||||
@@ -157,6 +185,7 @@ func WithLimit(limit int) CacheOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithName customizes a Cache with the given name.
|
||||||
func WithName(name string) CacheOption {
|
func WithName(name string) CacheOption {
|
||||||
return func(cache *Cache) {
|
return func(cache *Cache) {
|
||||||
cache.name = name
|
cache.name = name
|
||||||
@@ -258,18 +287,15 @@ func (cs *cacheStat) statLoop() {
|
|||||||
ticker := time.NewTicker(statInterval)
|
ticker := time.NewTicker(statInterval)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
for {
|
for range ticker.C {
|
||||||
select {
|
hit := atomic.SwapUint64(&cs.hit, 0)
|
||||||
case <-ticker.C:
|
miss := atomic.SwapUint64(&cs.miss, 0)
|
||||||
hit := atomic.SwapUint64(&cs.hit, 0)
|
total := hit + miss
|
||||||
miss := atomic.SwapUint64(&cs.miss, 0)
|
if total == 0 {
|
||||||
total := hit + miss
|
continue
|
||||||
if total == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
percent := 100 * float32(hit) / float32(total)
|
|
||||||
logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
|
|
||||||
cs.name, total, percent, cs.sizeCallback(), hit, miss)
|
|
||||||
}
|
}
|
||||||
|
percent := 100 * float32(hit) / float32(total)
|
||||||
|
logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
|
||||||
|
cs.name, total, percent, cs.sizeCallback(), hit, miss)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package collection
|
package collection
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@@ -10,6 +11,8 @@ import (
|
|||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var errDummy = errors.New("dummy")
|
||||||
|
|
||||||
func TestCacheSet(t *testing.T) {
|
func TestCacheSet(t *testing.T) {
|
||||||
cache, err := NewCache(time.Second*2, WithName("any"))
|
cache, err := NewCache(time.Second*2, WithName("any"))
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -63,6 +66,54 @@ func TestCacheTake(t *testing.T) {
|
|||||||
assert.Equal(t, int32(1), atomic.LoadInt32(&count))
|
assert.Equal(t, int32(1), atomic.LoadInt32(&count))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCacheTakeExists(t *testing.T) {
|
||||||
|
cache, err := NewCache(time.Second * 2)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
var count int32
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
cache.Set("first", "first element")
|
||||||
|
cache.Take("first", func() (interface{}, error) {
|
||||||
|
atomic.AddInt32(&count, 1)
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
return "first element", nil
|
||||||
|
})
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
assert.Equal(t, 1, cache.size())
|
||||||
|
assert.Equal(t, int32(0), atomic.LoadInt32(&count))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheTakeError(t *testing.T) {
|
||||||
|
cache, err := NewCache(time.Second * 2)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
|
||||||
|
var count int32
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
_, err := cache.Take("first", func() (interface{}, error) {
|
||||||
|
atomic.AddInt32(&count, 1)
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
return "", errDummy
|
||||||
|
})
|
||||||
|
assert.Equal(t, errDummy, err)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
assert.Equal(t, 0, cache.size())
|
||||||
|
assert.Equal(t, int32(1), atomic.LoadInt32(&count))
|
||||||
|
}
|
||||||
|
|
||||||
func TestCacheWithLruEvicts(t *testing.T) {
|
func TestCacheWithLruEvicts(t *testing.T) {
|
||||||
cache, err := NewCache(time.Minute, WithLimit(3))
|
cache, err := NewCache(time.Minute, WithLimit(3))
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
@@ -72,9 +123,9 @@ func TestCacheWithLruEvicts(t *testing.T) {
|
|||||||
cache.Set("third", "third element")
|
cache.Set("third", "third element")
|
||||||
cache.Set("fourth", "fourth element")
|
cache.Set("fourth", "fourth element")
|
||||||
|
|
||||||
value, ok := cache.Get("first")
|
_, ok := cache.Get("first")
|
||||||
assert.False(t, ok)
|
assert.False(t, ok)
|
||||||
value, ok = cache.Get("second")
|
value, ok := cache.Get("second")
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
assert.Equal(t, "second element", value)
|
assert.Equal(t, "second element", value)
|
||||||
value, ok = cache.Get("third")
|
value, ok = cache.Get("third")
|
||||||
@@ -94,9 +145,9 @@ func TestCacheWithLruEvicted(t *testing.T) {
|
|||||||
cache.Set("third", "third element")
|
cache.Set("third", "third element")
|
||||||
cache.Set("fourth", "fourth element")
|
cache.Set("fourth", "fourth element")
|
||||||
|
|
||||||
value, ok := cache.Get("first")
|
_, ok := cache.Get("first")
|
||||||
assert.False(t, ok)
|
assert.False(t, ok)
|
||||||
value, ok = cache.Get("second")
|
value, ok := cache.Get("second")
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
assert.Equal(t, "second element", value)
|
assert.Equal(t, "second element", value)
|
||||||
cache.Set("fifth", "fifth element")
|
cache.Set("fifth", "fifth element")
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package collection
|
|||||||
|
|
||||||
import "sync"
|
import "sync"
|
||||||
|
|
||||||
|
// A Queue is a FIFO queue.
|
||||||
type Queue struct {
|
type Queue struct {
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
elements []interface{}
|
elements []interface{}
|
||||||
@@ -11,6 +12,7 @@ type Queue struct {
|
|||||||
count int
|
count int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewQueue returns a Queue object.
|
||||||
func NewQueue(size int) *Queue {
|
func NewQueue(size int) *Queue {
|
||||||
return &Queue{
|
return &Queue{
|
||||||
elements: make([]interface{}, size),
|
elements: make([]interface{}, size),
|
||||||
@@ -18,6 +20,7 @@ func NewQueue(size int) *Queue {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Empty checks if q is empty.
|
||||||
func (q *Queue) Empty() bool {
|
func (q *Queue) Empty() bool {
|
||||||
q.lock.Lock()
|
q.lock.Lock()
|
||||||
empty := q.count == 0
|
empty := q.count == 0
|
||||||
@@ -26,6 +29,7 @@ func (q *Queue) Empty() bool {
|
|||||||
return empty
|
return empty
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Put puts element into q at the last position.
|
||||||
func (q *Queue) Put(element interface{}) {
|
func (q *Queue) Put(element interface{}) {
|
||||||
q.lock.Lock()
|
q.lock.Lock()
|
||||||
defer q.lock.Unlock()
|
defer q.lock.Unlock()
|
||||||
@@ -44,6 +48,7 @@ func (q *Queue) Put(element interface{}) {
|
|||||||
q.count++
|
q.count++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Take takes the first element out of q if not empty.
|
||||||
func (q *Queue) Take() (interface{}, bool) {
|
func (q *Queue) Take() (interface{}, bool) {
|
||||||
q.lock.Lock()
|
q.lock.Lock()
|
||||||
defer q.lock.Unlock()
|
defer q.lock.Unlock()
|
||||||
|
|||||||
@@ -1,22 +1,39 @@
|
|||||||
package collection
|
package collection
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
// A Ring can be used as fixed size ring.
|
||||||
type Ring struct {
|
type Ring struct {
|
||||||
elements []interface{}
|
elements []interface{}
|
||||||
index int
|
index int
|
||||||
|
lock sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewRing returns a Ring object with the given size n.
|
||||||
func NewRing(n int) *Ring {
|
func NewRing(n int) *Ring {
|
||||||
|
if n < 1 {
|
||||||
|
panic("n should be greater than 0")
|
||||||
|
}
|
||||||
|
|
||||||
return &Ring{
|
return &Ring{
|
||||||
elements: make([]interface{}, n),
|
elements: make([]interface{}, n),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add adds v into r.
|
||||||
func (r *Ring) Add(v interface{}) {
|
func (r *Ring) Add(v interface{}) {
|
||||||
|
r.lock.Lock()
|
||||||
|
defer r.lock.Unlock()
|
||||||
|
|
||||||
r.elements[r.index%len(r.elements)] = v
|
r.elements[r.index%len(r.elements)] = v
|
||||||
r.index++
|
r.index++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Take takes all items from r.
|
||||||
func (r *Ring) Take() []interface{} {
|
func (r *Ring) Take() []interface{} {
|
||||||
|
r.lock.Lock()
|
||||||
|
defer r.lock.Unlock()
|
||||||
|
|
||||||
var size int
|
var size int
|
||||||
var start int
|
var start int
|
||||||
if r.index > len(r.elements) {
|
if r.index > len(r.elements) {
|
||||||
|
|||||||
@@ -1,11 +1,18 @@
|
|||||||
package collection
|
package collection
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestNewRing(t *testing.T) {
|
||||||
|
assert.Panics(t, func() {
|
||||||
|
NewRing(0)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestRingLess(t *testing.T) {
|
func TestRingLess(t *testing.T) {
|
||||||
ring := NewRing(5)
|
ring := NewRing(5)
|
||||||
for i := 0; i < 3; i++ {
|
for i := 0; i < 3; i++ {
|
||||||
@@ -23,3 +30,30 @@ func TestRingMore(t *testing.T) {
|
|||||||
elements := ring.Take()
|
elements := ring.Take()
|
||||||
assert.ElementsMatch(t, []interface{}{6, 7, 8, 9, 10}, elements)
|
assert.ElementsMatch(t, []interface{}{6, 7, 8, 9, 10}, elements)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRingAdd(t *testing.T) {
|
||||||
|
ring := NewRing(5051)
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
for i := 1; i <= 100; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(i int) {
|
||||||
|
defer wg.Done()
|
||||||
|
for j := 1; j <= i; j++ {
|
||||||
|
ring.Add(i)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
assert.Equal(t, 5050, len(ring.Take()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRingAdd(b *testing.B) {
|
||||||
|
ring := NewRing(500)
|
||||||
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
|
for pb.Next() {
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ring.Add(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -8,8 +8,10 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// RollingWindowOption let callers customize the RollingWindow.
|
||||||
RollingWindowOption func(rollingWindow *RollingWindow)
|
RollingWindowOption func(rollingWindow *RollingWindow)
|
||||||
|
|
||||||
|
// RollingWindow defines a rolling window to calculate the events in buckets with time interval.
|
||||||
RollingWindow struct {
|
RollingWindow struct {
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
size int
|
size int
|
||||||
@@ -17,11 +19,17 @@ type (
|
|||||||
interval time.Duration
|
interval time.Duration
|
||||||
offset int
|
offset int
|
||||||
ignoreCurrent bool
|
ignoreCurrent bool
|
||||||
lastTime time.Duration
|
lastTime time.Duration // start time of the last bucket
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewRollingWindow returns a RollingWindow that with size buckets and time interval,
|
||||||
|
// use opts to customize the RollingWindow.
|
||||||
func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOption) *RollingWindow {
|
func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOption) *RollingWindow {
|
||||||
|
if size < 1 {
|
||||||
|
panic("size must be greater than 0")
|
||||||
|
}
|
||||||
|
|
||||||
w := &RollingWindow{
|
w := &RollingWindow{
|
||||||
size: size,
|
size: size,
|
||||||
win: newWindow(size),
|
win: newWindow(size),
|
||||||
@@ -34,6 +42,7 @@ func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOpt
|
|||||||
return w
|
return w
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add adds value to current bucket.
|
||||||
func (rw *RollingWindow) Add(v float64) {
|
func (rw *RollingWindow) Add(v float64) {
|
||||||
rw.lock.Lock()
|
rw.lock.Lock()
|
||||||
defer rw.lock.Unlock()
|
defer rw.lock.Unlock()
|
||||||
@@ -41,6 +50,7 @@ func (rw *RollingWindow) Add(v float64) {
|
|||||||
rw.win.add(rw.offset, v)
|
rw.win.add(rw.offset, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Reduce runs fn on all buckets, ignore current bucket if ignoreCurrent was set.
|
||||||
func (rw *RollingWindow) Reduce(fn func(b *Bucket)) {
|
func (rw *RollingWindow) Reduce(fn func(b *Bucket)) {
|
||||||
rw.lock.RLock()
|
rw.lock.RLock()
|
||||||
defer rw.lock.RUnlock()
|
defer rw.lock.RUnlock()
|
||||||
@@ -63,36 +73,30 @@ func (rw *RollingWindow) span() int {
|
|||||||
offset := int(timex.Since(rw.lastTime) / rw.interval)
|
offset := int(timex.Since(rw.lastTime) / rw.interval)
|
||||||
if 0 <= offset && offset < rw.size {
|
if 0 <= offset && offset < rw.size {
|
||||||
return offset
|
return offset
|
||||||
} else {
|
|
||||||
return rw.size
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return rw.size
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rw *RollingWindow) updateOffset() {
|
func (rw *RollingWindow) updateOffset() {
|
||||||
span := rw.span()
|
span := rw.span()
|
||||||
if span > 0 {
|
if span <= 0 {
|
||||||
offset := rw.offset
|
return
|
||||||
// reset expired buckets
|
|
||||||
start := offset + 1
|
|
||||||
steps := start + span
|
|
||||||
var remainder int
|
|
||||||
if steps > rw.size {
|
|
||||||
remainder = steps - rw.size
|
|
||||||
steps = rw.size
|
|
||||||
}
|
|
||||||
for i := start; i < steps; i++ {
|
|
||||||
rw.win.resetBucket(i)
|
|
||||||
offset = i
|
|
||||||
}
|
|
||||||
for i := 0; i < remainder; i++ {
|
|
||||||
rw.win.resetBucket(i)
|
|
||||||
offset = i
|
|
||||||
}
|
|
||||||
rw.offset = offset
|
|
||||||
rw.lastTime = timex.Now()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
offset := rw.offset
|
||||||
|
// reset expired buckets
|
||||||
|
for i := 0; i < span; i++ {
|
||||||
|
rw.win.resetBucket((offset + i + 1) % rw.size)
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.offset = (offset + span) % rw.size
|
||||||
|
now := timex.Now()
|
||||||
|
// align to interval time boundary
|
||||||
|
rw.lastTime = now - (now-rw.lastTime)%rw.interval
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Bucket defines the bucket that holds sum and num of additions.
|
||||||
type Bucket struct {
|
type Bucket struct {
|
||||||
Sum float64
|
Sum float64
|
||||||
Count int64
|
Count int64
|
||||||
@@ -114,9 +118,9 @@ type window struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func newWindow(size int) *window {
|
func newWindow(size int) *window {
|
||||||
var buckets []*Bucket
|
buckets := make([]*Bucket, size)
|
||||||
for i := 0; i < size; i++ {
|
for i := 0; i < size; i++ {
|
||||||
buckets = append(buckets, new(Bucket))
|
buckets[i] = new(Bucket)
|
||||||
}
|
}
|
||||||
return &window{
|
return &window{
|
||||||
buckets: buckets,
|
buckets: buckets,
|
||||||
@@ -130,14 +134,15 @@ func (w *window) add(offset int, v float64) {
|
|||||||
|
|
||||||
func (w *window) reduce(start, count int, fn func(b *Bucket)) {
|
func (w *window) reduce(start, count int, fn func(b *Bucket)) {
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
fn(w.buckets[(start+i)%len(w.buckets)])
|
fn(w.buckets[(start+i)%w.size])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *window) resetBucket(offset int) {
|
func (w *window) resetBucket(offset int) {
|
||||||
w.buckets[offset].reset()
|
w.buckets[offset%w.size].reset()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IgnoreCurrentBucket lets the Reduce call ignore current bucket.
|
||||||
func IgnoreCurrentBucket() RollingWindowOption {
|
func IgnoreCurrentBucket() RollingWindowOption {
|
||||||
return func(w *RollingWindow) {
|
return func(w *RollingWindow) {
|
||||||
w.ignoreCurrent = true
|
w.ignoreCurrent = true
|
||||||
|
|||||||
@@ -11,6 +11,13 @@ import (
|
|||||||
|
|
||||||
const duration = time.Millisecond * 50
|
const duration = time.Millisecond * 50
|
||||||
|
|
||||||
|
func TestNewRollingWindow(t *testing.T) {
|
||||||
|
assert.NotNil(t, NewRollingWindow(10, time.Second))
|
||||||
|
assert.Panics(t, func() {
|
||||||
|
NewRollingWindow(0, time.Second)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestRollingWindowAdd(t *testing.T) {
|
func TestRollingWindowAdd(t *testing.T) {
|
||||||
const size = 3
|
const size = 3
|
||||||
r := NewRollingWindow(size, duration)
|
r := NewRollingWindow(size, duration)
|
||||||
@@ -81,7 +88,7 @@ func TestRollingWindowReduce(t *testing.T) {
|
|||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
t.Run(stringx.Rand(), func(t *testing.T) {
|
t.Run(stringx.Rand(), func(t *testing.T) {
|
||||||
r := test.win
|
r := test.win
|
||||||
for x := 0; x < size; x = x + 1 {
|
for x := 0; x < size; x++ {
|
||||||
for i := 0; i <= x; i++ {
|
for i := 0; i <= x; i++ {
|
||||||
r.Add(float64(i))
|
r.Add(float64(i))
|
||||||
}
|
}
|
||||||
@@ -98,10 +105,41 @@ func TestRollingWindowReduce(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRollingWindowBucketTimeBoundary(t *testing.T) {
|
||||||
|
const size = 3
|
||||||
|
interval := time.Millisecond * 30
|
||||||
|
r := NewRollingWindow(size, interval)
|
||||||
|
listBuckets := func() []float64 {
|
||||||
|
var buckets []float64
|
||||||
|
r.Reduce(func(b *Bucket) {
|
||||||
|
buckets = append(buckets, b.Sum)
|
||||||
|
})
|
||||||
|
return buckets
|
||||||
|
}
|
||||||
|
assert.Equal(t, []float64{0, 0, 0}, listBuckets())
|
||||||
|
r.Add(1)
|
||||||
|
assert.Equal(t, []float64{0, 0, 1}, listBuckets())
|
||||||
|
time.Sleep(time.Millisecond * 45)
|
||||||
|
r.Add(2)
|
||||||
|
r.Add(3)
|
||||||
|
assert.Equal(t, []float64{0, 1, 5}, listBuckets())
|
||||||
|
// sleep time should be less than interval, and make the bucket change happen
|
||||||
|
time.Sleep(time.Millisecond * 20)
|
||||||
|
r.Add(4)
|
||||||
|
r.Add(5)
|
||||||
|
r.Add(6)
|
||||||
|
assert.Equal(t, []float64{1, 5, 15}, listBuckets())
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
r.Add(7)
|
||||||
|
r.Add(8)
|
||||||
|
r.Add(9)
|
||||||
|
assert.Equal(t, []float64{0, 0, 24}, listBuckets())
|
||||||
|
}
|
||||||
|
|
||||||
func TestRollingWindowDataRace(t *testing.T) {
|
func TestRollingWindowDataRace(t *testing.T) {
|
||||||
const size = 3
|
const size = 3
|
||||||
r := NewRollingWindow(size, duration)
|
r := NewRollingWindow(size, duration)
|
||||||
var stop = make(chan bool)
|
stop := make(chan bool)
|
||||||
go func() {
|
go func() {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ type SafeMap struct {
|
|||||||
dirtyNew map[interface{}]interface{}
|
dirtyNew map[interface{}]interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewSafeMap returns a SafeMap.
|
||||||
func NewSafeMap() *SafeMap {
|
func NewSafeMap() *SafeMap {
|
||||||
return &SafeMap{
|
return &SafeMap{
|
||||||
dirtyOld: make(map[interface{}]interface{}),
|
dirtyOld: make(map[interface{}]interface{}),
|
||||||
@@ -25,6 +26,7 @@ func NewSafeMap() *SafeMap {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Del deletes the value with the given key from m.
|
||||||
func (m *SafeMap) Del(key interface{}) {
|
func (m *SafeMap) Del(key interface{}) {
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
if _, ok := m.dirtyOld[key]; ok {
|
if _, ok := m.dirtyOld[key]; ok {
|
||||||
@@ -53,18 +55,20 @@ func (m *SafeMap) Del(key interface{}) {
|
|||||||
m.lock.Unlock()
|
m.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get gets the value with the given key from m.
|
||||||
func (m *SafeMap) Get(key interface{}) (interface{}, bool) {
|
func (m *SafeMap) Get(key interface{}) (interface{}, bool) {
|
||||||
m.lock.RLock()
|
m.lock.RLock()
|
||||||
defer m.lock.RUnlock()
|
defer m.lock.RUnlock()
|
||||||
|
|
||||||
if val, ok := m.dirtyOld[key]; ok {
|
if val, ok := m.dirtyOld[key]; ok {
|
||||||
return val, true
|
return val, true
|
||||||
} else {
|
|
||||||
val, ok := m.dirtyNew[key]
|
|
||||||
return val, ok
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
val, ok := m.dirtyNew[key]
|
||||||
|
return val, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set sets the value into m with the given key.
|
||||||
func (m *SafeMap) Set(key, value interface{}) {
|
func (m *SafeMap) Set(key, value interface{}) {
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
if m.deletionOld <= maxDeletion {
|
if m.deletionOld <= maxDeletion {
|
||||||
@@ -83,6 +87,7 @@ func (m *SafeMap) Set(key, value interface{}) {
|
|||||||
m.lock.Unlock()
|
m.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Size returns the size of m.
|
||||||
func (m *SafeMap) Size() int {
|
func (m *SafeMap) Size() int {
|
||||||
m.lock.RLock()
|
m.lock.RLock()
|
||||||
size := len(m.dirtyOld) + len(m.dirtyNew)
|
size := len(m.dirtyOld) + len(m.dirtyNew)
|
||||||
|
|||||||
@@ -15,11 +15,13 @@ const (
|
|||||||
stringType
|
stringType
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Set is not thread-safe, for concurrent use, make sure to use it with synchronization.
|
||||||
type Set struct {
|
type Set struct {
|
||||||
data map[interface{}]lang.PlaceholderType
|
data map[interface{}]lang.PlaceholderType
|
||||||
tp int
|
tp int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewSet returns a managed Set, can only put the values with the same type.
|
||||||
func NewSet() *Set {
|
func NewSet() *Set {
|
||||||
return &Set{
|
return &Set{
|
||||||
data: make(map[interface{}]lang.PlaceholderType),
|
data: make(map[interface{}]lang.PlaceholderType),
|
||||||
@@ -27,6 +29,7 @@ func NewSet() *Set {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewUnmanagedSet returns a unmanaged Set, which can put values with different types.
|
||||||
func NewUnmanagedSet() *Set {
|
func NewUnmanagedSet() *Set {
|
||||||
return &Set{
|
return &Set{
|
||||||
data: make(map[interface{}]lang.PlaceholderType),
|
data: make(map[interface{}]lang.PlaceholderType),
|
||||||
@@ -34,42 +37,49 @@ func NewUnmanagedSet() *Set {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add adds i into s.
|
||||||
func (s *Set) Add(i ...interface{}) {
|
func (s *Set) Add(i ...interface{}) {
|
||||||
for _, each := range i {
|
for _, each := range i {
|
||||||
s.add(each)
|
s.add(each)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddInt adds int values ii into s.
|
||||||
func (s *Set) AddInt(ii ...int) {
|
func (s *Set) AddInt(ii ...int) {
|
||||||
for _, each := range ii {
|
for _, each := range ii {
|
||||||
s.add(each)
|
s.add(each)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddInt64 adds int64 values ii into s.
|
||||||
func (s *Set) AddInt64(ii ...int64) {
|
func (s *Set) AddInt64(ii ...int64) {
|
||||||
for _, each := range ii {
|
for _, each := range ii {
|
||||||
s.add(each)
|
s.add(each)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddUint adds uint values ii into s.
|
||||||
func (s *Set) AddUint(ii ...uint) {
|
func (s *Set) AddUint(ii ...uint) {
|
||||||
for _, each := range ii {
|
for _, each := range ii {
|
||||||
s.add(each)
|
s.add(each)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddUint64 adds uint64 values ii into s.
|
||||||
func (s *Set) AddUint64(ii ...uint64) {
|
func (s *Set) AddUint64(ii ...uint64) {
|
||||||
for _, each := range ii {
|
for _, each := range ii {
|
||||||
s.add(each)
|
s.add(each)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddStr adds string values ss into s.
|
||||||
func (s *Set) AddStr(ss ...string) {
|
func (s *Set) AddStr(ss ...string) {
|
||||||
for _, each := range ss {
|
for _, each := range ss {
|
||||||
s.add(each)
|
s.add(each)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Contains checks if i is in s.
|
||||||
func (s *Set) Contains(i interface{}) bool {
|
func (s *Set) Contains(i interface{}) bool {
|
||||||
if len(s.data) == 0 {
|
if len(s.data) == 0 {
|
||||||
return false
|
return false
|
||||||
@@ -80,6 +90,7 @@ func (s *Set) Contains(i interface{}) bool {
|
|||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Keys returns the keys in s.
|
||||||
func (s *Set) Keys() []interface{} {
|
func (s *Set) Keys() []interface{} {
|
||||||
var keys []interface{}
|
var keys []interface{}
|
||||||
|
|
||||||
@@ -90,13 +101,12 @@ func (s *Set) Keys() []interface{} {
|
|||||||
return keys
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KeysInt returns the int keys in s.
|
||||||
func (s *Set) KeysInt() []int {
|
func (s *Set) KeysInt() []int {
|
||||||
var keys []int
|
var keys []int
|
||||||
|
|
||||||
for key := range s.data {
|
for key := range s.data {
|
||||||
if intKey, ok := key.(int); !ok {
|
if intKey, ok := key.(int); ok {
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
keys = append(keys, intKey)
|
keys = append(keys, intKey)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -104,13 +114,12 @@ func (s *Set) KeysInt() []int {
|
|||||||
return keys
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KeysInt64 returns int64 keys in s.
|
||||||
func (s *Set) KeysInt64() []int64 {
|
func (s *Set) KeysInt64() []int64 {
|
||||||
var keys []int64
|
var keys []int64
|
||||||
|
|
||||||
for key := range s.data {
|
for key := range s.data {
|
||||||
if intKey, ok := key.(int64); !ok {
|
if intKey, ok := key.(int64); ok {
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
keys = append(keys, intKey)
|
keys = append(keys, intKey)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -118,13 +127,12 @@ func (s *Set) KeysInt64() []int64 {
|
|||||||
return keys
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KeysUint returns uint keys in s.
|
||||||
func (s *Set) KeysUint() []uint {
|
func (s *Set) KeysUint() []uint {
|
||||||
var keys []uint
|
var keys []uint
|
||||||
|
|
||||||
for key := range s.data {
|
for key := range s.data {
|
||||||
if intKey, ok := key.(uint); !ok {
|
if intKey, ok := key.(uint); ok {
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
keys = append(keys, intKey)
|
keys = append(keys, intKey)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -132,13 +140,12 @@ func (s *Set) KeysUint() []uint {
|
|||||||
return keys
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KeysUint64 returns uint64 keys in s.
|
||||||
func (s *Set) KeysUint64() []uint64 {
|
func (s *Set) KeysUint64() []uint64 {
|
||||||
var keys []uint64
|
var keys []uint64
|
||||||
|
|
||||||
for key := range s.data {
|
for key := range s.data {
|
||||||
if intKey, ok := key.(uint64); !ok {
|
if intKey, ok := key.(uint64); ok {
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
keys = append(keys, intKey)
|
keys = append(keys, intKey)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -146,13 +153,12 @@ func (s *Set) KeysUint64() []uint64 {
|
|||||||
return keys
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KeysStr returns string keys in s.
|
||||||
func (s *Set) KeysStr() []string {
|
func (s *Set) KeysStr() []string {
|
||||||
var keys []string
|
var keys []string
|
||||||
|
|
||||||
for key := range s.data {
|
for key := range s.data {
|
||||||
if strKey, ok := key.(string); !ok {
|
if strKey, ok := key.(string); ok {
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
keys = append(keys, strKey)
|
keys = append(keys, strKey)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -160,11 +166,13 @@ func (s *Set) KeysStr() []string {
|
|||||||
return keys
|
return keys
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove removes i from s.
|
||||||
func (s *Set) Remove(i interface{}) {
|
func (s *Set) Remove(i interface{}) {
|
||||||
s.validate(i)
|
s.validate(i)
|
||||||
delete(s.data, i)
|
delete(s.data, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Count returns the number of items in s.
|
||||||
func (s *Set) Count() int {
|
func (s *Set) Count() int {
|
||||||
return len(s.data)
|
return len(s.data)
|
||||||
}
|
}
|
||||||
@@ -182,10 +190,7 @@ func (s *Set) add(i interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Set) setType(i interface{}) {
|
func (s *Set) setType(i interface{}) {
|
||||||
if s.tp != untyped {
|
// s.tp can only be untyped here
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch i.(type) {
|
switch i.(type) {
|
||||||
case int:
|
case int:
|
||||||
s.tp = intType
|
s.tp = intType
|
||||||
|
|||||||
@@ -5,8 +5,13 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
logx.Disable()
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkRawSet(b *testing.B) {
|
func BenchmarkRawSet(b *testing.B) {
|
||||||
m := make(map[interface{}]struct{})
|
m := make(map[interface{}]struct{})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
@@ -147,3 +152,51 @@ func TestCount(t *testing.T) {
|
|||||||
// then
|
// then
|
||||||
assert.Equal(t, set.Count(), 3)
|
assert.Equal(t, set.Count(), 3)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestKeysIntMismatch(t *testing.T) {
|
||||||
|
set := NewSet()
|
||||||
|
set.add(int64(1))
|
||||||
|
set.add(2)
|
||||||
|
vals := set.KeysInt()
|
||||||
|
assert.EqualValues(t, []int{2}, vals)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKeysInt64Mismatch(t *testing.T) {
|
||||||
|
set := NewSet()
|
||||||
|
set.add(1)
|
||||||
|
set.add(int64(2))
|
||||||
|
vals := set.KeysInt64()
|
||||||
|
assert.EqualValues(t, []int64{2}, vals)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKeysUintMismatch(t *testing.T) {
|
||||||
|
set := NewSet()
|
||||||
|
set.add(1)
|
||||||
|
set.add(uint(2))
|
||||||
|
vals := set.KeysUint()
|
||||||
|
assert.EqualValues(t, []uint{2}, vals)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKeysUint64Mismatch(t *testing.T) {
|
||||||
|
set := NewSet()
|
||||||
|
set.add(1)
|
||||||
|
set.add(uint64(2))
|
||||||
|
vals := set.KeysUint64()
|
||||||
|
assert.EqualValues(t, []uint64{2}, vals)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKeysStrMismatch(t *testing.T) {
|
||||||
|
set := NewSet()
|
||||||
|
set.add(1)
|
||||||
|
set.add("2")
|
||||||
|
vals := set.KeysStr()
|
||||||
|
assert.EqualValues(t, []string{"2"}, vals)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSetType(t *testing.T) {
|
||||||
|
set := NewUnmanagedSet()
|
||||||
|
set.add(1)
|
||||||
|
set.add("2")
|
||||||
|
vals := set.Keys()
|
||||||
|
assert.ElementsMatch(t, []interface{}{1, "2"}, vals)
|
||||||
|
}
|
||||||
|
|||||||
@@ -13,8 +13,10 @@ import (
|
|||||||
const drainWorkers = 8
|
const drainWorkers = 8
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// Execute defines the method to execute the task.
|
||||||
Execute func(key, value interface{})
|
Execute func(key, value interface{})
|
||||||
|
|
||||||
|
// A TimingWheel is a timing wheel object to schedule tasks.
|
||||||
TimingWheel struct {
|
TimingWheel struct {
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
ticker timex.Ticker
|
ticker timex.Ticker
|
||||||
@@ -54,6 +56,7 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewTimingWheel returns a TimingWheel.
|
||||||
func NewTimingWheel(interval time.Duration, numSlots int, execute Execute) (*TimingWheel, error) {
|
func NewTimingWheel(interval time.Duration, numSlots int, execute Execute) (*TimingWheel, error) {
|
||||||
if interval <= 0 || numSlots <= 0 || execute == nil {
|
if interval <= 0 || numSlots <= 0 || execute == nil {
|
||||||
return nil, fmt.Errorf("interval: %v, slots: %d, execute: %p", interval, numSlots, execute)
|
return nil, fmt.Errorf("interval: %v, slots: %d, execute: %p", interval, numSlots, execute)
|
||||||
@@ -85,10 +88,12 @@ func newTimingWheelWithClock(interval time.Duration, numSlots int, execute Execu
|
|||||||
return tw, nil
|
return tw, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Drain drains all items and executes them.
|
||||||
func (tw *TimingWheel) Drain(fn func(key, value interface{})) {
|
func (tw *TimingWheel) Drain(fn func(key, value interface{})) {
|
||||||
tw.drainChannel <- fn
|
tw.drainChannel <- fn
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MoveTimer moves the task with the given key to the given delay.
|
||||||
func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) {
|
func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) {
|
||||||
if delay <= 0 || key == nil {
|
if delay <= 0 || key == nil {
|
||||||
return
|
return
|
||||||
@@ -100,6 +105,7 @@ func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RemoveTimer removes the task with the given key.
|
||||||
func (tw *TimingWheel) RemoveTimer(key interface{}) {
|
func (tw *TimingWheel) RemoveTimer(key interface{}) {
|
||||||
if key == nil {
|
if key == nil {
|
||||||
return
|
return
|
||||||
@@ -108,6 +114,7 @@ func (tw *TimingWheel) RemoveTimer(key interface{}) {
|
|||||||
tw.removeChannel <- key
|
tw.removeChannel <- key
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTimer sets the task value with the given key to the delay.
|
||||||
func (tw *TimingWheel) SetTimer(key, value interface{}, delay time.Duration) {
|
func (tw *TimingWheel) SetTimer(key, value interface{}, delay time.Duration) {
|
||||||
if delay <= 0 || key == nil {
|
if delay <= 0 || key == nil {
|
||||||
return
|
return
|
||||||
@@ -122,6 +129,7 @@ func (tw *TimingWheel) SetTimer(key, value interface{}, delay time.Duration) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Stop stops tw.
|
||||||
func (tw *TimingWheel) Stop() {
|
func (tw *TimingWheel) Stop() {
|
||||||
close(tw.stopChannel)
|
close(tw.stopChannel)
|
||||||
}
|
}
|
||||||
@@ -143,7 +151,7 @@ func (tw *TimingWheel) drainAll(fn func(key, value interface{})) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tw *TimingWheel) getPositionAndCircle(d time.Duration) (pos int, circle int) {
|
func (tw *TimingWheel) getPositionAndCircle(d time.Duration) (pos, circle int) {
|
||||||
steps := int(d / tw.interval)
|
steps := int(d / tw.interval)
|
||||||
pos = (tw.tickedPos + steps) % tw.numSlots
|
pos = (tw.tickedPos + steps) % tw.numSlots
|
||||||
circle = (steps - 1) / tw.numSlots
|
circle = (steps - 1) / tw.numSlots
|
||||||
@@ -204,6 +212,7 @@ func (tw *TimingWheel) removeTask(key interface{}) {
|
|||||||
|
|
||||||
timer := val.(*positionEntry)
|
timer := val.(*positionEntry)
|
||||||
timer.item.removed = true
|
timer.item.removed = true
|
||||||
|
tw.timers.Del(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tw *TimingWheel) run() {
|
func (tw *TimingWheel) run() {
|
||||||
@@ -248,7 +257,6 @@ func (tw *TimingWheel) scanAndRunTasks(l *list.List) {
|
|||||||
if task.removed {
|
if task.removed {
|
||||||
next := e.Next()
|
next := e.Next()
|
||||||
l.Remove(e)
|
l.Remove(e)
|
||||||
tw.timers.Del(task.key)
|
|
||||||
e = next
|
e = next
|
||||||
continue
|
continue
|
||||||
} else if task.circle > 0 {
|
} else if task.circle > 0 {
|
||||||
@@ -301,6 +309,7 @@ func (tw *TimingWheel) setTask(task *timingEntry) {
|
|||||||
func (tw *TimingWheel) setTimerPosition(pos int, task *timingEntry) {
|
func (tw *TimingWheel) setTimerPosition(pos int, task *timingEntry) {
|
||||||
if val, ok := tw.timers.Get(task.key); ok {
|
if val, ok := tw.timers.Get(task.key); ok {
|
||||||
timer := val.(*positionEntry)
|
timer := val.(*positionEntry)
|
||||||
|
timer.item = task
|
||||||
timer.pos = pos
|
timer.pos = pos
|
||||||
} else {
|
} else {
|
||||||
tw.timers.Set(task.key, &positionEntry{
|
tw.timers.Set(task.key, &positionEntry{
|
||||||
|
|||||||
@@ -213,7 +213,10 @@ func TestTimingWheel_SetTimer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
t.Run(stringx.RandId(), func(t *testing.T) {
|
t.Run(stringx.RandId(), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
var count int32
|
var count int32
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tick := func() {
|
tick := func() {
|
||||||
@@ -291,7 +294,10 @@ func TestTimingWheel_SetAndMoveThenStart(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
t.Run(stringx.RandId(), func(t *testing.T) {
|
t.Run(stringx.RandId(), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
var count int32
|
var count int32
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tick := func() {
|
tick := func() {
|
||||||
@@ -376,7 +382,10 @@ func TestTimingWheel_SetAndMoveTwice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
t.Run(stringx.RandId(), func(t *testing.T) {
|
t.Run(stringx.RandId(), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
var count int32
|
var count int32
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tick := func() {
|
tick := func() {
|
||||||
@@ -454,7 +463,10 @@ func TestTimingWheel_ElapsedAndSet(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
t.Run(stringx.RandId(), func(t *testing.T) {
|
t.Run(stringx.RandId(), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
var count int32
|
var count int32
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tick := func() {
|
tick := func() {
|
||||||
@@ -542,7 +554,10 @@ func TestTimingWheel_ElapsedAndSetThenMove(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
t.Run(stringx.RandId(), func(t *testing.T) {
|
t.Run(stringx.RandId(), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
var count int32
|
var count int32
|
||||||
ticker := timex.NewFakeTicker()
|
ticker := timex.NewFakeTicker()
|
||||||
tick := func() {
|
tick := func() {
|
||||||
@@ -579,6 +594,31 @@ func TestTimingWheel_ElapsedAndSetThenMove(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMoveAndRemoveTask(t *testing.T) {
|
||||||
|
ticker := timex.NewFakeTicker()
|
||||||
|
tick := func(v int) {
|
||||||
|
for i := 0; i < v; i++ {
|
||||||
|
ticker.Tick()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var keys []int
|
||||||
|
tw, _ := newTimingWheelWithClock(testStep, 10, func(k, v interface{}) {
|
||||||
|
assert.Equal(t, "any", k)
|
||||||
|
assert.Equal(t, 3, v.(int))
|
||||||
|
keys = append(keys, v.(int))
|
||||||
|
ticker.Done()
|
||||||
|
}, ticker)
|
||||||
|
defer tw.Stop()
|
||||||
|
tw.SetTimer("any", 3, testStep*8)
|
||||||
|
tick(6)
|
||||||
|
tw.MoveTimer("any", testStep*7)
|
||||||
|
tick(3)
|
||||||
|
tw.RemoveTimer("any")
|
||||||
|
tick(30)
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
assert.Equal(t, 0, len(keys))
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkTimingWheel(b *testing.B) {
|
func BenchmarkTimingWheel(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
|
||||||
"github.com/tal-tech/go-zero/core/mapping"
|
"github.com/tal-tech/go-zero/core/mapping"
|
||||||
@@ -15,26 +16,43 @@ var loaders = map[string]func([]byte, interface{}) error{
|
|||||||
".yml": LoadConfigFromYamlBytes,
|
".yml": LoadConfigFromYamlBytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
func LoadConfig(file string, v interface{}) error {
|
// LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
|
||||||
if content, err := ioutil.ReadFile(file); err != nil {
|
func LoadConfig(file string, v interface{}, opts ...Option) error {
|
||||||
|
content, err := ioutil.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
} else if loader, ok := loaders[path.Ext(file)]; ok {
|
|
||||||
return loader(content, v)
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("unrecoginized file type: %s", file)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
loader, ok := loaders[path.Ext(file)]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unrecognized file type: %s", file)
|
||||||
|
}
|
||||||
|
|
||||||
|
var opt options
|
||||||
|
for _, o := range opts {
|
||||||
|
o(&opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.env {
|
||||||
|
return loader([]byte(os.ExpandEnv(string(content))), v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return loader(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LoadConfigFromJsonBytes loads config into v from content json bytes.
|
||||||
func LoadConfigFromJsonBytes(content []byte, v interface{}) error {
|
func LoadConfigFromJsonBytes(content []byte, v interface{}) error {
|
||||||
return mapping.UnmarshalJsonBytes(content, v)
|
return mapping.UnmarshalJsonBytes(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LoadConfigFromYamlBytes loads config into v from content yaml bytes.
|
||||||
func LoadConfigFromYamlBytes(content []byte, v interface{}) error {
|
func LoadConfigFromYamlBytes(content []byte, v interface{}) error {
|
||||||
return mapping.UnmarshalYamlBytes(content, v)
|
return mapping.UnmarshalYamlBytes(content, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func MustLoad(path string, v interface{}) {
|
// MustLoad loads config into v from path, exits on error.
|
||||||
if err := LoadConfig(path, v); err != nil {
|
func MustLoad(path string, v interface{}, opts ...Option) {
|
||||||
|
if err := LoadConfig(path, v, opts...); err != nil {
|
||||||
log.Fatalf("error: config file %s, %s", path, err.Error())
|
log.Fatalf("error: config file %s, %s", path, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
112
core/conf/config_test.go
Normal file
112
core/conf/config_test.go
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
package conf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/fs"
|
||||||
|
"github.com/tal-tech/go-zero/core/hash"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestLoadConfig_notExists(t *testing.T) {
|
||||||
|
assert.NotNil(t, LoadConfig("not_a_file", nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadConfig_notRecogFile(t *testing.T) {
|
||||||
|
filename, err := fs.TempFilenameWithText("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
assert.NotNil(t, LoadConfig(filename, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigJson(t *testing.T) {
|
||||||
|
tests := []string{
|
||||||
|
".json",
|
||||||
|
".yaml",
|
||||||
|
".yml",
|
||||||
|
}
|
||||||
|
text := `{
|
||||||
|
"a": "foo",
|
||||||
|
"b": 1,
|
||||||
|
"c": "${FOO}",
|
||||||
|
"d": "abcd!@#$112"
|
||||||
|
}`
|
||||||
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
|
t.Run(test, func(t *testing.T) {
|
||||||
|
os.Setenv("FOO", "2")
|
||||||
|
defer os.Unsetenv("FOO")
|
||||||
|
tmpfile, err := createTempFile(test, text)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(tmpfile)
|
||||||
|
|
||||||
|
var val struct {
|
||||||
|
A string `json:"a"`
|
||||||
|
B int `json:"b"`
|
||||||
|
C string `json:"c"`
|
||||||
|
D string `json:"d"`
|
||||||
|
}
|
||||||
|
MustLoad(tmpfile, &val)
|
||||||
|
assert.Equal(t, "foo", val.A)
|
||||||
|
assert.Equal(t, 1, val.B)
|
||||||
|
assert.Equal(t, "${FOO}", val.C)
|
||||||
|
assert.Equal(t, "abcd!@#$112", val.D)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigJsonEnv(t *testing.T) {
|
||||||
|
tests := []string{
|
||||||
|
".json",
|
||||||
|
".yaml",
|
||||||
|
".yml",
|
||||||
|
}
|
||||||
|
text := `{
|
||||||
|
"a": "foo",
|
||||||
|
"b": 1,
|
||||||
|
"c": "${FOO}",
|
||||||
|
"d": "abcd!@#$a12 3"
|
||||||
|
}`
|
||||||
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
|
t.Run(test, func(t *testing.T) {
|
||||||
|
os.Setenv("FOO", "2")
|
||||||
|
defer os.Unsetenv("FOO")
|
||||||
|
tmpfile, err := createTempFile(test, text)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(tmpfile)
|
||||||
|
|
||||||
|
var val struct {
|
||||||
|
A string `json:"a"`
|
||||||
|
B int `json:"b"`
|
||||||
|
C string `json:"c"`
|
||||||
|
D string `json:"d"`
|
||||||
|
}
|
||||||
|
MustLoad(tmpfile, &val, UseEnv())
|
||||||
|
assert.Equal(t, "foo", val.A)
|
||||||
|
assert.Equal(t, 1, val.B)
|
||||||
|
assert.Equal(t, "2", val.C)
|
||||||
|
assert.Equal(t, "abcd!@# 3", val.D)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTempFile(ext, text string) (string, error) {
|
||||||
|
tmpfile, err := ioutil.TempFile(os.TempDir(), hash.Md5Hex([]byte(text))+"*"+ext)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ioutil.WriteFile(tmpfile.Name(), []byte(text), os.ModeTemporary); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
filename := tmpfile.Name()
|
||||||
|
if err = tmpfile.Close(); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return filename, nil
|
||||||
|
}
|
||||||
17
core/conf/options.go
Normal file
17
core/conf/options.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
package conf
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Option defines the method to customize the config options.
|
||||||
|
Option func(opt *options)
|
||||||
|
|
||||||
|
options struct {
|
||||||
|
env bool
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// UseEnv customizes the config to use environment variables.
|
||||||
|
func UseEnv() Option {
|
||||||
|
return func(opt *options) {
|
||||||
|
opt.env = true
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,6 +2,7 @@ package conf
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -30,12 +31,17 @@ type mapBasedProperties struct {
|
|||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// Loads the properties into a properties configuration instance. May return the
|
// LoadProperties loads the properties into a properties configuration instance.
|
||||||
// configuration itself along with an error that indicates if there was a problem loading the configuration.
|
// Returns an error that indicates if there was a problem loading the configuration.
|
||||||
func LoadProperties(filename string) (Properties, error) {
|
func LoadProperties(filename string, opts ...Option) (Properties, error) {
|
||||||
lines, err := iox.ReadTextLines(filename, iox.WithoutBlank(), iox.OmitWithPrefix("#"))
|
lines, err := iox.ReadTextLines(filename, iox.WithoutBlank(), iox.OmitWithPrefix("#"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var opt options
|
||||||
|
for _, o := range opts {
|
||||||
|
o(&opt)
|
||||||
}
|
}
|
||||||
|
|
||||||
raw := make(map[string]string)
|
raw := make(map[string]string)
|
||||||
@@ -50,7 +56,11 @@ func LoadProperties(filename string) (Properties, error) {
|
|||||||
|
|
||||||
key := strings.TrimSpace(pair[0])
|
key := strings.TrimSpace(pair[0])
|
||||||
value := strings.TrimSpace(pair[1])
|
value := strings.TrimSpace(pair[1])
|
||||||
raw[key] = value
|
if opt.env {
|
||||||
|
raw[key] = os.ExpandEnv(value)
|
||||||
|
} else {
|
||||||
|
raw[key] = value
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &mapBasedProperties{
|
return &mapBasedProperties{
|
||||||
@@ -87,7 +97,7 @@ func (config *mapBasedProperties) SetInt(key string, value int) {
|
|||||||
config.lock.Unlock()
|
config.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dumps the configuration internal map into a string.
|
// ToString dumps the configuration internal map into a string.
|
||||||
func (config *mapBasedProperties) ToString() string {
|
func (config *mapBasedProperties) ToString() string {
|
||||||
config.lock.RLock()
|
config.lock.RLock()
|
||||||
ret := fmt.Sprintf("%s", config.properties)
|
ret := fmt.Sprintf("%s", config.properties)
|
||||||
@@ -96,12 +106,12 @@ func (config *mapBasedProperties) ToString() string {
|
|||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the error message.
|
// Error returns the error message.
|
||||||
func (configError *PropertyError) Error() string {
|
func (configError *PropertyError) Error() string {
|
||||||
return configError.message
|
return configError.message
|
||||||
}
|
}
|
||||||
|
|
||||||
// Builds a new properties configuration structure
|
// NewProperties builds a new properties configuration structure.
|
||||||
func NewProperties() Properties {
|
func NewProperties() Properties {
|
||||||
return &mapBasedProperties{
|
return &mapBasedProperties{
|
||||||
properties: make(map[string]string),
|
properties: make(map[string]string),
|
||||||
|
|||||||
@@ -24,6 +24,53 @@ func TestProperties(t *testing.T) {
|
|||||||
assert.Equal(t, "test", props.GetString("app.name"))
|
assert.Equal(t, "test", props.GetString("app.name"))
|
||||||
assert.Equal(t, "app", props.GetString("app.program"))
|
assert.Equal(t, "app", props.GetString("app.program"))
|
||||||
assert.Equal(t, 5, props.GetInt("app.threads"))
|
assert.Equal(t, 5, props.GetInt("app.threads"))
|
||||||
|
|
||||||
|
val := props.ToString()
|
||||||
|
assert.Contains(t, val, "app.name")
|
||||||
|
assert.Contains(t, val, "app.program")
|
||||||
|
assert.Contains(t, val, "app.threads")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPropertiesEnv(t *testing.T) {
|
||||||
|
text := `app.name = test
|
||||||
|
|
||||||
|
app.program=app
|
||||||
|
|
||||||
|
app.env1 = ${FOO}
|
||||||
|
app.env2 = $none
|
||||||
|
|
||||||
|
# this is comment
|
||||||
|
app.threads = 5`
|
||||||
|
tmpfile, err := fs.TempFilenameWithText(text)
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(tmpfile)
|
||||||
|
|
||||||
|
os.Setenv("FOO", "2")
|
||||||
|
defer os.Unsetenv("FOO")
|
||||||
|
|
||||||
|
props, err := LoadProperties(tmpfile, UseEnv())
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, "test", props.GetString("app.name"))
|
||||||
|
assert.Equal(t, "app", props.GetString("app.program"))
|
||||||
|
assert.Equal(t, 5, props.GetInt("app.threads"))
|
||||||
|
assert.Equal(t, "2", props.GetString("app.env1"))
|
||||||
|
assert.Equal(t, "", props.GetString("app.env2"))
|
||||||
|
|
||||||
|
val := props.ToString()
|
||||||
|
assert.Contains(t, val, "app.name")
|
||||||
|
assert.Contains(t, val, "app.program")
|
||||||
|
assert.Contains(t, val, "app.threads")
|
||||||
|
assert.Contains(t, val, "app.env1")
|
||||||
|
assert.Contains(t, val, "app.env2")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadProperties_badContent(t *testing.T) {
|
||||||
|
filename, err := fs.TempFilenameWithText("hello")
|
||||||
|
assert.Nil(t, err)
|
||||||
|
defer os.Remove(filename)
|
||||||
|
_, err = LoadProperties(filename)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
assert.True(t, len(err.Error()) > 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSetString(t *testing.T) {
|
func TestSetString(t *testing.T) {
|
||||||
@@ -41,3 +88,8 @@ func TestSetInt(t *testing.T) {
|
|||||||
props.SetInt(key, value)
|
props.SetInt(key, value)
|
||||||
assert.Equal(t, value, props.GetInt(key))
|
assert.Equal(t, value, props.GetInt(key))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLoadBadFile(t *testing.T) {
|
||||||
|
_, err := LoadProperties("nosuchfile")
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
package contextx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ShrinkDeadline(ctx context.Context, timeout time.Duration) (context.Context, func()) {
|
|
||||||
if deadline, ok := ctx.Deadline(); ok {
|
|
||||||
leftTime := time.Until(deadline)
|
|
||||||
if leftTime < timeout {
|
|
||||||
timeout = leftTime
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return context.WithDeadline(ctx, time.Now().Add(timeout))
|
|
||||||
}
|
|
||||||
@@ -1,27 +0,0 @@
|
|||||||
package contextx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestShrinkDeadlineLess(t *testing.T) {
|
|
||||||
deadline := time.Now().Add(time.Second)
|
|
||||||
ctx, _ := context.WithDeadline(context.Background(), deadline)
|
|
||||||
ctx, _ = ShrinkDeadline(ctx, time.Minute)
|
|
||||||
dl, ok := ctx.Deadline()
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, deadline, dl)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestShrinkDeadlineMore(t *testing.T) {
|
|
||||||
deadline := time.Now().Add(time.Minute)
|
|
||||||
ctx, _ := context.WithDeadline(context.Background(), deadline)
|
|
||||||
ctx, _ = ShrinkDeadline(ctx, time.Second)
|
|
||||||
dl, ok := ctx.Deadline()
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.True(t, dl.Before(deadline))
|
|
||||||
}
|
|
||||||
@@ -19,6 +19,7 @@ func (cv contextValuer) Value(key string) (interface{}, bool) {
|
|||||||
return v, v != nil
|
return v, v != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For unmarshals ctx into v.
|
||||||
func For(ctx context.Context, v interface{}) error {
|
func For(ctx context.Context, v interface{}) error {
|
||||||
return unmarshaler.UnmarshalValuer(contextValuer{
|
return unmarshaler.UnmarshalValuer(contextValuer{
|
||||||
Context: ctx,
|
Context: ctx,
|
||||||
|
|||||||
@@ -47,9 +47,11 @@ func TestUnmarshalContextWithMissing(t *testing.T) {
|
|||||||
Name string `ctx:"name"`
|
Name string `ctx:"name"`
|
||||||
Age int `ctx:"age"`
|
Age int `ctx:"age"`
|
||||||
}
|
}
|
||||||
|
type name string
|
||||||
|
const PersonNameKey name = "name"
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
ctx = context.WithValue(ctx, "name", "kevin")
|
ctx = context.WithValue(ctx, PersonNameKey, "kevin")
|
||||||
|
|
||||||
var person Person
|
var person Person
|
||||||
err := For(ctx, &person)
|
err := For(ctx, &person)
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ func (valueOnlyContext) Err() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValueOnlyFrom takes all values from the given ctx, without deadline and error control.
|
||||||
func ValueOnlyFrom(ctx context.Context) context.Context {
|
func ValueOnlyFrom(ctx context.Context) context.Context {
|
||||||
return valueOnlyContext{
|
return valueOnlyContext{
|
||||||
Context: ctx,
|
Context: ctx,
|
||||||
|
|||||||
@@ -9,10 +9,13 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestContextCancel(t *testing.T) {
|
func TestContextCancel(t *testing.T) {
|
||||||
c := context.WithValue(context.Background(), "key", "value")
|
type key string
|
||||||
|
var nameKey key = "name"
|
||||||
|
c := context.WithValue(context.Background(), nameKey, "value")
|
||||||
c1, cancel := context.WithCancel(c)
|
c1, cancel := context.WithCancel(c)
|
||||||
o := ValueOnlyFrom(c1)
|
o := ValueOnlyFrom(c1)
|
||||||
c2, _ := context.WithCancel(o)
|
c2, cancel2 := context.WithCancel(o)
|
||||||
|
defer cancel2()
|
||||||
contexts := []context.Context{c1, c2}
|
contexts := []context.Context{c1, c2}
|
||||||
|
|
||||||
for _, c := range contexts {
|
for _, c := range contexts {
|
||||||
@@ -34,8 +37,9 @@ func TestContextCancel(t *testing.T) {
|
|||||||
assert.NotEqual(t, context.Canceled, c2.Err())
|
assert.NotEqual(t, context.Canceled, c2.Err())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConextDeadline(t *testing.T) {
|
func TestContextDeadline(t *testing.T) {
|
||||||
c, _ := context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
|
c, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
|
||||||
|
cancel()
|
||||||
o := ValueOnlyFrom(c)
|
o := ValueOnlyFrom(c)
|
||||||
select {
|
select {
|
||||||
case <-time.After(100 * time.Millisecond):
|
case <-time.After(100 * time.Millisecond):
|
||||||
@@ -43,9 +47,11 @@ func TestConextDeadline(t *testing.T) {
|
|||||||
t.Fatal("ValueOnlyContext: context should not have timed out")
|
t.Fatal("ValueOnlyContext: context should not have timed out")
|
||||||
}
|
}
|
||||||
|
|
||||||
c, _ = context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
|
c, cancel = context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
|
||||||
|
cancel()
|
||||||
o = ValueOnlyFrom(c)
|
o = ValueOnlyFrom(c)
|
||||||
c, _ = context.WithDeadline(o, time.Now().Add(20*time.Millisecond))
|
c, cancel = context.WithDeadline(o, time.Now().Add(20*time.Millisecond))
|
||||||
|
defer cancel()
|
||||||
select {
|
select {
|
||||||
case <-time.After(100 * time.Millisecond):
|
case <-time.After(100 * time.Millisecond):
|
||||||
t.Fatal("ValueOnlyContext+Deadline: context should have timed out")
|
t.Fatal("ValueOnlyContext+Deadline: context should have timed out")
|
||||||
|
|||||||
@@ -8,12 +8,13 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
indexOfKey = iota
|
_ = iota
|
||||||
indexOfId
|
indexOfId
|
||||||
)
|
)
|
||||||
|
|
||||||
const timeToLive int64 = 10
|
const timeToLive int64 = 10
|
||||||
|
|
||||||
|
// TimeToLive is seconds to live in etcd.
|
||||||
var TimeToLive = timeToLive
|
var TimeToLive = timeToLive
|
||||||
|
|
||||||
func extract(etcdKey string, index int) (string, bool) {
|
func extract(etcdKey string, index int) (string, bool) {
|
||||||
|
|||||||
@@ -28,6 +28,9 @@ func TestExtract(t *testing.T) {
|
|||||||
|
|
||||||
_, ok = extract("any", -1)
|
_, ok = extract("any", -1)
|
||||||
assert.False(t, ok)
|
assert.False(t, ok)
|
||||||
|
|
||||||
|
_, ok = extract("any", 10)
|
||||||
|
assert.False(t, ok)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMakeKey(t *testing.T) {
|
func TestMakeKey(t *testing.T) {
|
||||||
|
|||||||
@@ -2,11 +2,13 @@ package discov
|
|||||||
|
|
||||||
import "errors"
|
import "errors"
|
||||||
|
|
||||||
|
// EtcdConf is the config item with the given key on etcd.
|
||||||
type EtcdConf struct {
|
type EtcdConf struct {
|
||||||
Hosts []string
|
Hosts []string
|
||||||
Key string
|
Key string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Validate validates c.
|
||||||
func (c EtcdConf) Validate() error {
|
func (c EtcdConf) Validate() error {
|
||||||
if len(c.Hosts) == 0 {
|
if len(c.Hosts) == 0 {
|
||||||
return errors.New("empty etcd hosts")
|
return errors.New("empty etcd hosts")
|
||||||
|
|||||||
@@ -1,47 +0,0 @@
|
|||||||
package discov
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/tal-tech/go-zero/core/discov/internal"
|
|
||||||
"github.com/tal-tech/go-zero/core/lang"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
Facade struct {
|
|
||||||
endpoints []string
|
|
||||||
registry *internal.Registry
|
|
||||||
}
|
|
||||||
|
|
||||||
FacadeListener interface {
|
|
||||||
OnAdd(key, val string)
|
|
||||||
OnDelete(key string)
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewFacade(endpoints []string) Facade {
|
|
||||||
return Facade{
|
|
||||||
endpoints: endpoints,
|
|
||||||
registry: internal.GetRegistry(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f Facade) Client() internal.EtcdClient {
|
|
||||||
conn, err := f.registry.GetConn(f.endpoints)
|
|
||||||
lang.Must(err)
|
|
||||||
return conn
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f Facade) Monitor(key string, l FacadeListener) {
|
|
||||||
f.registry.Monitor(f.endpoints, key, listenerAdapter{l})
|
|
||||||
}
|
|
||||||
|
|
||||||
type listenerAdapter struct {
|
|
||||||
l FacadeListener
|
|
||||||
}
|
|
||||||
|
|
||||||
func (la listenerAdapter) OnAdd(kv internal.KV) {
|
|
||||||
la.l.OnAdd(kv.Key, kv.Val)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (la listenerAdapter) OnDelete(kv internal.KV) {
|
|
||||||
la.l.OnDelete(kv.Key)
|
|
||||||
}
|
|
||||||
@@ -1,13 +1,15 @@
|
|||||||
//go:generate mockgen -package internal -destination etcdclient_mock.go -source etcdclient.go EtcdClient
|
//go:generate mockgen -package internal -destination etcdclient_mock.go -source etcdclient.go EtcdClient
|
||||||
|
|
||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"go.etcd.io/etcd/clientv3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// EtcdClient interface represents an etcd client.
|
||||||
type EtcdClient interface {
|
type EtcdClient interface {
|
||||||
ActiveConnection() *grpc.ClientConn
|
ActiveConnection() *grpc.ClientConn
|
||||||
Close() error
|
Close() error
|
||||||
|
|||||||
@@ -6,10 +6,11 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
context "context"
|
context "context"
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
clientv3 "go.etcd.io/etcd/clientv3"
|
|
||||||
grpc "google.golang.org/grpc"
|
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
|
grpc "google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockEtcdClient is a mock of EtcdClient interface
|
// MockEtcdClient is a mock of EtcdClient interface
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
//go:generate mockgen -package internal -destination listener_mock.go -source listener.go Listener
|
|
||||||
package internal
|
package internal
|
||||||
|
|
||||||
|
// Listener interface wraps the OnUpdate method.
|
||||||
type Listener interface {
|
type Listener interface {
|
||||||
OnUpdate(keys []string, values []string, newKey string)
|
OnUpdate(keys, values []string, newKey string)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,45 +0,0 @@
|
|||||||
// Code generated by MockGen. DO NOT EDIT.
|
|
||||||
// Source: listener.go
|
|
||||||
|
|
||||||
// Package internal is a generated GoMock package.
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
reflect "reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MockListener is a mock of Listener interface
|
|
||||||
type MockListener struct {
|
|
||||||
ctrl *gomock.Controller
|
|
||||||
recorder *MockListenerMockRecorder
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockListenerMockRecorder is the mock recorder for MockListener
|
|
||||||
type MockListenerMockRecorder struct {
|
|
||||||
mock *MockListener
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMockListener creates a new mock instance
|
|
||||||
func NewMockListener(ctrl *gomock.Controller) *MockListener {
|
|
||||||
mock := &MockListener{ctrl: ctrl}
|
|
||||||
mock.recorder = &MockListenerMockRecorder{mock}
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
|
||||||
func (m *MockListener) EXPECT() *MockListenerMockRecorder {
|
|
||||||
return m.recorder
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnUpdate mocks base method
|
|
||||||
func (m *MockListener) OnUpdate(keys, values []string, newKey string) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
m.ctrl.Call(m, "OnUpdate", keys, values, newKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnUpdate indicates an expected call of OnUpdate
|
|
||||||
func (mr *MockListenerMockRecorder) OnUpdate(keys, values, newKey interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnUpdate", reflect.TypeOf((*MockListener)(nil).OnUpdate), keys, values, newKey)
|
|
||||||
}
|
|
||||||
@@ -14,23 +14,35 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/logx"
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
"github.com/tal-tech/go-zero/core/syncx"
|
"github.com/tal-tech/go-zero/core/syncx"
|
||||||
"github.com/tal-tech/go-zero/core/threading"
|
"github.com/tal-tech/go-zero/core/threading"
|
||||||
"go.etcd.io/etcd/clientv3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
registryInstance = Registry{
|
registry = Registry{
|
||||||
clusters: make(map[string]*cluster),
|
clusters: make(map[string]*cluster),
|
||||||
}
|
}
|
||||||
connManager = syncx.NewResourceManager()
|
connManager = syncx.NewResourceManager()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// A Registry is a registry that manages the etcd client connections.
|
||||||
type Registry struct {
|
type Registry struct {
|
||||||
clusters map[string]*cluster
|
clusters map[string]*cluster
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetRegistry returns a global Registry.
|
||||||
func GetRegistry() *Registry {
|
func GetRegistry() *Registry {
|
||||||
return ®istryInstance
|
return ®istry
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConn returns an etcd client connection associated with given endpoints.
|
||||||
|
func (r *Registry) GetConn(endpoints []string) (EtcdClient, error) {
|
||||||
|
return r.getCluster(endpoints).getClient()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Monitor monitors the key on given etcd endpoints, notify with the given UpdateListener.
|
||||||
|
func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) error {
|
||||||
|
return r.getCluster(endpoints).monitor(key, l)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Registry) getCluster(endpoints []string) *cluster {
|
func (r *Registry) getCluster(endpoints []string) *cluster {
|
||||||
@@ -46,14 +58,6 @@ func (r *Registry) getCluster(endpoints []string) *cluster {
|
|||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Registry) GetConn(endpoints []string) (EtcdClient, error) {
|
|
||||||
return r.getCluster(endpoints).getClient()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) error {
|
|
||||||
return r.getCluster(endpoints).monitor(key, l)
|
|
||||||
}
|
|
||||||
|
|
||||||
type cluster struct {
|
type cluster struct {
|
||||||
endpoints []string
|
endpoints []string
|
||||||
key string
|
key string
|
||||||
@@ -256,26 +260,34 @@ func (c *cluster) reload(cli EtcdClient) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) watch(cli EtcdClient, key string) {
|
func (c *cluster) watch(cli EtcdClient, key string) {
|
||||||
|
for {
|
||||||
|
if c.watchStream(cli, key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cluster) watchStream(cli EtcdClient, key string) bool {
|
||||||
rch := cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix())
|
rch := cli.Watch(clientv3.WithRequireLeader(c.context(cli)), makeKeyPrefix(key), clientv3.WithPrefix())
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case wresp, ok := <-rch:
|
case wresp, ok := <-rch:
|
||||||
if !ok {
|
if !ok {
|
||||||
logx.Error("etcd monitor chan has been closed")
|
logx.Error("etcd monitor chan has been closed")
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
if wresp.Canceled {
|
if wresp.Canceled {
|
||||||
logx.Error("etcd monitor chan has been canceled")
|
logx.Errorf("etcd monitor chan has been canceled, error: %v", wresp.Err())
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
if wresp.Err() != nil {
|
if wresp.Err() != nil {
|
||||||
logx.Error(fmt.Sprintf("etcd monitor chan error: %v", wresp.Err()))
|
logx.Error(fmt.Sprintf("etcd monitor chan error: %v", wresp.Err()))
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
c.handleWatchEvents(key, wresp.Events)
|
c.handleWatchEvents(key, wresp.Events)
|
||||||
case <-c.done:
|
case <-c.done:
|
||||||
return
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -288,6 +300,7 @@ func (c *cluster) watchConnState(cli EtcdClient) {
|
|||||||
watcher.watch(cli.ActiveConnection())
|
watcher.watch(cli.ActiveConnection())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DialClient dials an etcd cluster with given endpoints.
|
||||||
func DialClient(endpoints []string) (EtcdClient, error) {
|
func DialClient(endpoints []string) (EtcdClient, error) {
|
||||||
return clientv3.New(clientv3.Config{
|
return clientv3.New(clientv3.Config{
|
||||||
Endpoints: endpoints,
|
Endpoints: endpoints,
|
||||||
|
|||||||
@@ -8,10 +8,11 @@ import (
|
|||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tal-tech/go-zero/core/contextx"
|
"github.com/tal-tech/go-zero/core/contextx"
|
||||||
|
"github.com/tal-tech/go-zero/core/lang"
|
||||||
"github.com/tal-tech/go-zero/core/logx"
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
"github.com/tal-tech/go-zero/core/stringx"
|
"github.com/tal-tech/go-zero/core/stringx"
|
||||||
"go.etcd.io/etcd/clientv3"
|
"go.etcd.io/etcd/api/v3/mvccpb"
|
||||||
"go.etcd.io/etcd/mvcc/mvccpb"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
var mockLock sync.Mutex
|
var mockLock sync.Mutex
|
||||||
@@ -202,11 +203,13 @@ func TestClusterWatch_RespFailures(t *testing.T) {
|
|||||||
restore := setMockClient(cli)
|
restore := setMockClient(cli)
|
||||||
defer restore()
|
defer restore()
|
||||||
ch := make(chan clientv3.WatchResponse)
|
ch := make(chan clientv3.WatchResponse)
|
||||||
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch)
|
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch).AnyTimes()
|
||||||
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
||||||
c := new(cluster)
|
c := new(cluster)
|
||||||
|
c.done = make(chan lang.PlaceholderType)
|
||||||
go func() {
|
go func() {
|
||||||
ch <- resp
|
ch <- resp
|
||||||
|
close(c.done)
|
||||||
}()
|
}()
|
||||||
c.watch(cli, "any")
|
c.watch(cli, "any")
|
||||||
})
|
})
|
||||||
@@ -220,11 +223,13 @@ func TestClusterWatch_CloseChan(t *testing.T) {
|
|||||||
restore := setMockClient(cli)
|
restore := setMockClient(cli)
|
||||||
defer restore()
|
defer restore()
|
||||||
ch := make(chan clientv3.WatchResponse)
|
ch := make(chan clientv3.WatchResponse)
|
||||||
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch)
|
cli.EXPECT().Watch(gomock.Any(), "any/", gomock.Any()).Return(ch).AnyTimes()
|
||||||
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
cli.EXPECT().Ctx().Return(context.Background()).AnyTimes()
|
||||||
c := new(cluster)
|
c := new(cluster)
|
||||||
|
c.done = make(chan lang.PlaceholderType)
|
||||||
go func() {
|
go func() {
|
||||||
close(ch)
|
close(ch)
|
||||||
|
close(c.done)
|
||||||
}()
|
}()
|
||||||
c.watch(cli, "any")
|
c.watch(cli, "any")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
//go:generate mockgen -package internal -destination statewatcher_mock.go -source statewatcher.go etcdConn
|
//go:generate mockgen -package internal -destination statewatcher_mock.go -source statewatcher.go etcdConn
|
||||||
|
|
||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -18,7 +19,8 @@ type (
|
|||||||
disconnected bool
|
disconnected bool
|
||||||
currentState connectivity.State
|
currentState connectivity.State
|
||||||
listeners []func()
|
listeners []func()
|
||||||
lock sync.Mutex
|
// lock only guards listeners, because only listens can be accessed by other goroutines.
|
||||||
|
lock sync.Mutex
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -32,27 +34,33 @@ func (sw *stateWatcher) addListener(l func()) {
|
|||||||
sw.lock.Unlock()
|
sw.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (sw *stateWatcher) notifyListeners() {
|
||||||
|
sw.lock.Lock()
|
||||||
|
defer sw.lock.Unlock()
|
||||||
|
|
||||||
|
for _, l := range sw.listeners {
|
||||||
|
l()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *stateWatcher) updateState(conn etcdConn) {
|
||||||
|
sw.currentState = conn.GetState()
|
||||||
|
switch sw.currentState {
|
||||||
|
case connectivity.TransientFailure, connectivity.Shutdown:
|
||||||
|
sw.disconnected = true
|
||||||
|
case connectivity.Ready:
|
||||||
|
if sw.disconnected {
|
||||||
|
sw.disconnected = false
|
||||||
|
sw.notifyListeners()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (sw *stateWatcher) watch(conn etcdConn) {
|
func (sw *stateWatcher) watch(conn etcdConn) {
|
||||||
sw.currentState = conn.GetState()
|
sw.currentState = conn.GetState()
|
||||||
for {
|
for {
|
||||||
if conn.WaitForStateChange(context.Background(), sw.currentState) {
|
if conn.WaitForStateChange(context.Background(), sw.currentState) {
|
||||||
newState := conn.GetState()
|
sw.updateState(conn)
|
||||||
sw.lock.Lock()
|
|
||||||
sw.currentState = newState
|
|
||||||
|
|
||||||
switch newState {
|
|
||||||
case connectivity.TransientFailure, connectivity.Shutdown:
|
|
||||||
sw.disconnected = true
|
|
||||||
case connectivity.Ready:
|
|
||||||
if sw.disconnected {
|
|
||||||
sw.disconnected = false
|
|
||||||
for _, l := range sw.listeners {
|
|
||||||
l()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sw.lock.Unlock()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,9 +6,10 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
context "context"
|
context "context"
|
||||||
|
reflect "reflect"
|
||||||
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "github.com/golang/mock/gomock"
|
||||||
connectivity "google.golang.org/grpc/connectivity"
|
connectivity "google.golang.org/grpc/connectivity"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MocketcdConn is a mock of etcdConn interface
|
// MocketcdConn is a mock of etcdConn interface
|
||||||
|
|||||||
@@ -1,12 +1,15 @@
|
|||||||
//go:generate mockgen -package internal -destination updatelistener_mock.go -source updatelistener.go UpdateListener
|
//go:generate mockgen -package internal -destination updatelistener_mock.go -source updatelistener.go UpdateListener
|
||||||
|
|
||||||
package internal
|
package internal
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// A KV is used to store an etcd entry with key and value.
|
||||||
KV struct {
|
KV struct {
|
||||||
Key string
|
Key string
|
||||||
Val string
|
Val string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateListener wraps the OnAdd and OnDelete methods.
|
||||||
UpdateListener interface {
|
UpdateListener interface {
|
||||||
OnAdd(kv KV)
|
OnAdd(kv KV)
|
||||||
OnDelete(kv KV)
|
OnDelete(kv KV)
|
||||||
|
|||||||
@@ -5,8 +5,9 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockUpdateListener is a mock of UpdateListener interface
|
// MockUpdateListener is a mock of UpdateListener interface
|
||||||
|
|||||||
@@ -3,17 +3,22 @@ package internal
|
|||||||
import "time"
|
import "time"
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
// Delimiter is a separator that separates the etcd path.
|
||||||
|
Delimiter = '/'
|
||||||
|
|
||||||
autoSyncInterval = time.Minute
|
autoSyncInterval = time.Minute
|
||||||
coolDownInterval = time.Second
|
coolDownInterval = time.Second
|
||||||
dialTimeout = 5 * time.Second
|
dialTimeout = 5 * time.Second
|
||||||
dialKeepAliveTime = 5 * time.Second
|
dialKeepAliveTime = 5 * time.Second
|
||||||
requestTimeout = 3 * time.Second
|
requestTimeout = 3 * time.Second
|
||||||
Delimiter = '/'
|
|
||||||
endpointsSeparator = ","
|
endpointsSeparator = ","
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
DialTimeout = dialTimeout
|
// DialTimeout is the dial timeout.
|
||||||
|
DialTimeout = dialTimeout
|
||||||
|
// RequestTimeout is the request timeout.
|
||||||
RequestTimeout = requestTimeout
|
RequestTimeout = requestTimeout
|
||||||
NewClient = DialClient
|
// NewClient is used to create etcd clients.
|
||||||
|
NewClient = DialClient
|
||||||
)
|
)
|
||||||
|
|||||||
64
core/discov/kubernetes/etcd-statefulset.yaml
Normal file
64
core/discov/kubernetes/etcd-statefulset.yaml
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
apiVersion: apps/v1
|
||||||
|
kind: StatefulSet
|
||||||
|
metadata:
|
||||||
|
name: "etcd"
|
||||||
|
namespace: discov
|
||||||
|
labels:
|
||||||
|
app: "etcd"
|
||||||
|
spec:
|
||||||
|
serviceName: "etcd"
|
||||||
|
replicas: 5
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
name: "etcd"
|
||||||
|
labels:
|
||||||
|
app: "etcd"
|
||||||
|
spec:
|
||||||
|
volumes:
|
||||||
|
- name: etcd-pvc
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: etcd-pvc
|
||||||
|
containers:
|
||||||
|
- name: "etcd"
|
||||||
|
image: quay.io/coreos/etcd:latest
|
||||||
|
ports:
|
||||||
|
- containerPort: 2379
|
||||||
|
name: client
|
||||||
|
- containerPort: 2380
|
||||||
|
name: peer
|
||||||
|
env:
|
||||||
|
- name: CLUSTER_SIZE
|
||||||
|
value: "5"
|
||||||
|
- name: SET_NAME
|
||||||
|
value: "etcd"
|
||||||
|
- name: VOLNAME
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
apiVersion: v1
|
||||||
|
fieldPath: metadata.name
|
||||||
|
volumeMounts:
|
||||||
|
- name: etcd-pvc
|
||||||
|
mountPath: /var/lib/etcd
|
||||||
|
subPathExpr: $(VOLNAME) # data mounted respectively in each pod
|
||||||
|
command:
|
||||||
|
- "/bin/sh"
|
||||||
|
- "-ecx"
|
||||||
|
- |
|
||||||
|
|
||||||
|
chmod 700 /var/lib/etcd
|
||||||
|
|
||||||
|
IP=$(hostname -i)
|
||||||
|
PEERS=""
|
||||||
|
for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do
|
||||||
|
PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
|
||||||
|
done
|
||||||
|
exec etcd --name ${HOSTNAME} \
|
||||||
|
--listen-peer-urls http://0.0.0.0:2380 \
|
||||||
|
--listen-client-urls http://0.0.0.0:2379 \
|
||||||
|
--advertise-client-urls http://${HOSTNAME}.${SET_NAME}.discov:2379 \
|
||||||
|
--initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
|
||||||
|
--initial-cluster ${PEERS} \
|
||||||
|
--initial-cluster-state new \
|
||||||
|
--logger zap \
|
||||||
|
--data-dir /var/lib/etcd \
|
||||||
|
--auto-compaction-retention 1
|
||||||
@@ -1,16 +1,16 @@
|
|||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
name: discov
|
name: etcd
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: discov-port
|
- name: etcd-port
|
||||||
port: 2379
|
port: 2379
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
targetPort: 2379
|
targetPort: 2379
|
||||||
selector:
|
selector:
|
||||||
app: discov
|
app: etcd
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -18,30 +18,31 @@ apiVersion: v1
|
|||||||
kind: Pod
|
kind: Pod
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
app: discov
|
app: etcd
|
||||||
discov_node: discov0
|
etcd_node: etcd0
|
||||||
name: discov0
|
name: etcd0
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- command:
|
- command:
|
||||||
- /usr/local/bin/etcd
|
- /usr/local/bin/etcd
|
||||||
- --name
|
- --name
|
||||||
- discov0
|
- etcd0
|
||||||
- --initial-advertise-peer-urls
|
- --initial-advertise-peer-urls
|
||||||
- http://discov0:2380
|
- http://etcd0:2380
|
||||||
- --listen-peer-urls
|
- --listen-peer-urls
|
||||||
- http://0.0.0.0:2380
|
- http://0.0.0.0:2380
|
||||||
- --listen-client-urls
|
- --listen-client-urls
|
||||||
- http://0.0.0.0:2379
|
- http://0.0.0.0:2379
|
||||||
- --advertise-client-urls
|
- --advertise-client-urls
|
||||||
- http://discov0:2379
|
- http://etcd0.discov:2379
|
||||||
- --initial-cluster
|
- --initial-cluster
|
||||||
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
|
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
|
||||||
- --initial-cluster-state
|
- --initial-cluster-state
|
||||||
- new
|
- new
|
||||||
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
|
- --auto-compaction-retention=1
|
||||||
name: discov0
|
image: quay.io/coreos/etcd:latest
|
||||||
|
name: etcd0
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 2379
|
- containerPort: 2379
|
||||||
name: client
|
name: client
|
||||||
@@ -49,8 +50,6 @@ spec:
|
|||||||
- containerPort: 2380
|
- containerPort: 2380
|
||||||
name: server
|
name: server
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
imagePullSecrets:
|
|
||||||
- name: aliyun
|
|
||||||
affinity:
|
affinity:
|
||||||
podAntiAffinity:
|
podAntiAffinity:
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
@@ -59,7 +58,7 @@ spec:
|
|||||||
- key: app
|
- key: app
|
||||||
operator: In
|
operator: In
|
||||||
values:
|
values:
|
||||||
- discov
|
- etcd
|
||||||
topologyKey: "kubernetes.io/hostname"
|
topologyKey: "kubernetes.io/hostname"
|
||||||
restartPolicy: Always
|
restartPolicy: Always
|
||||||
|
|
||||||
@@ -69,9 +68,9 @@ apiVersion: v1
|
|||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
discov_node: discov0
|
etcd_node: etcd0
|
||||||
name: discov0
|
name: etcd0
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: client
|
- name: client
|
||||||
@@ -83,7 +82,7 @@ spec:
|
|||||||
protocol: TCP
|
protocol: TCP
|
||||||
targetPort: 2380
|
targetPort: 2380
|
||||||
selector:
|
selector:
|
||||||
discov_node: discov0
|
etcd_node: etcd0
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -91,30 +90,31 @@ apiVersion: v1
|
|||||||
kind: Pod
|
kind: Pod
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
app: discov
|
app: etcd
|
||||||
discov_node: discov1
|
etcd_node: etcd1
|
||||||
name: discov1
|
name: etcd1
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- command:
|
- command:
|
||||||
- /usr/local/bin/etcd
|
- /usr/local/bin/etcd
|
||||||
- --name
|
- --name
|
||||||
- discov1
|
- etcd1
|
||||||
- --initial-advertise-peer-urls
|
- --initial-advertise-peer-urls
|
||||||
- http://discov1:2380
|
- http://etcd1:2380
|
||||||
- --listen-peer-urls
|
- --listen-peer-urls
|
||||||
- http://0.0.0.0:2380
|
- http://0.0.0.0:2380
|
||||||
- --listen-client-urls
|
- --listen-client-urls
|
||||||
- http://0.0.0.0:2379
|
- http://0.0.0.0:2379
|
||||||
- --advertise-client-urls
|
- --advertise-client-urls
|
||||||
- http://discov1:2379
|
- http://etcd1.discov:2379
|
||||||
- --initial-cluster
|
- --initial-cluster
|
||||||
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
|
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
|
||||||
- --initial-cluster-state
|
- --initial-cluster-state
|
||||||
- new
|
- new
|
||||||
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
|
- --auto-compaction-retention=1
|
||||||
name: discov1
|
image: quay.io/coreos/etcd:latest
|
||||||
|
name: etcd1
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 2379
|
- containerPort: 2379
|
||||||
name: client
|
name: client
|
||||||
@@ -122,8 +122,6 @@ spec:
|
|||||||
- containerPort: 2380
|
- containerPort: 2380
|
||||||
name: server
|
name: server
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
imagePullSecrets:
|
|
||||||
- name: aliyun
|
|
||||||
affinity:
|
affinity:
|
||||||
podAntiAffinity:
|
podAntiAffinity:
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
@@ -132,7 +130,7 @@ spec:
|
|||||||
- key: app
|
- key: app
|
||||||
operator: In
|
operator: In
|
||||||
values:
|
values:
|
||||||
- discov
|
- etcd
|
||||||
topologyKey: "kubernetes.io/hostname"
|
topologyKey: "kubernetes.io/hostname"
|
||||||
restartPolicy: Always
|
restartPolicy: Always
|
||||||
|
|
||||||
@@ -142,9 +140,9 @@ apiVersion: v1
|
|||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
discov_node: discov1
|
etcd_node: etcd1
|
||||||
name: discov1
|
name: etcd1
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: client
|
- name: client
|
||||||
@@ -156,7 +154,7 @@ spec:
|
|||||||
protocol: TCP
|
protocol: TCP
|
||||||
targetPort: 2380
|
targetPort: 2380
|
||||||
selector:
|
selector:
|
||||||
discov_node: discov1
|
etcd_node: etcd1
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -164,30 +162,31 @@ apiVersion: v1
|
|||||||
kind: Pod
|
kind: Pod
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
app: discov
|
app: etcd
|
||||||
discov_node: discov2
|
etcd_node: etcd2
|
||||||
name: discov2
|
name: etcd2
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- command:
|
- command:
|
||||||
- /usr/local/bin/etcd
|
- /usr/local/bin/etcd
|
||||||
- --name
|
- --name
|
||||||
- discov2
|
- etcd2
|
||||||
- --initial-advertise-peer-urls
|
- --initial-advertise-peer-urls
|
||||||
- http://discov2:2380
|
- http://etcd2:2380
|
||||||
- --listen-peer-urls
|
- --listen-peer-urls
|
||||||
- http://0.0.0.0:2380
|
- http://0.0.0.0:2380
|
||||||
- --listen-client-urls
|
- --listen-client-urls
|
||||||
- http://0.0.0.0:2379
|
- http://0.0.0.0:2379
|
||||||
- --advertise-client-urls
|
- --advertise-client-urls
|
||||||
- http://discov2:2379
|
- http://etcd2.discov:2379
|
||||||
- --initial-cluster
|
- --initial-cluster
|
||||||
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
|
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
|
||||||
- --initial-cluster-state
|
- --initial-cluster-state
|
||||||
- new
|
- new
|
||||||
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
|
- --auto-compaction-retention=1
|
||||||
name: discov2
|
image: quay.io/coreos/etcd:latest
|
||||||
|
name: etcd2
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 2379
|
- containerPort: 2379
|
||||||
name: client
|
name: client
|
||||||
@@ -195,8 +194,6 @@ spec:
|
|||||||
- containerPort: 2380
|
- containerPort: 2380
|
||||||
name: server
|
name: server
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
imagePullSecrets:
|
|
||||||
- name: aliyun
|
|
||||||
affinity:
|
affinity:
|
||||||
podAntiAffinity:
|
podAntiAffinity:
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
@@ -205,7 +202,7 @@ spec:
|
|||||||
- key: app
|
- key: app
|
||||||
operator: In
|
operator: In
|
||||||
values:
|
values:
|
||||||
- discov
|
- etcd
|
||||||
topologyKey: "kubernetes.io/hostname"
|
topologyKey: "kubernetes.io/hostname"
|
||||||
restartPolicy: Always
|
restartPolicy: Always
|
||||||
|
|
||||||
@@ -215,9 +212,9 @@ apiVersion: v1
|
|||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
discov_node: discov2
|
etcd_node: etcd2
|
||||||
name: discov2
|
name: etcd2
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: client
|
- name: client
|
||||||
@@ -229,7 +226,7 @@ spec:
|
|||||||
protocol: TCP
|
protocol: TCP
|
||||||
targetPort: 2380
|
targetPort: 2380
|
||||||
selector:
|
selector:
|
||||||
discov_node: discov2
|
etcd_node: etcd2
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -237,30 +234,31 @@ apiVersion: v1
|
|||||||
kind: Pod
|
kind: Pod
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
app: discov
|
app: etcd
|
||||||
discov_node: discov3
|
etcd_node: etcd3
|
||||||
name: discov3
|
name: etcd3
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- command:
|
- command:
|
||||||
- /usr/local/bin/etcd
|
- /usr/local/bin/etcd
|
||||||
- --name
|
- --name
|
||||||
- discov3
|
- etcd3
|
||||||
- --initial-advertise-peer-urls
|
- --initial-advertise-peer-urls
|
||||||
- http://discov3:2380
|
- http://etcd3:2380
|
||||||
- --listen-peer-urls
|
- --listen-peer-urls
|
||||||
- http://0.0.0.0:2380
|
- http://0.0.0.0:2380
|
||||||
- --listen-client-urls
|
- --listen-client-urls
|
||||||
- http://0.0.0.0:2379
|
- http://0.0.0.0:2379
|
||||||
- --advertise-client-urls
|
- --advertise-client-urls
|
||||||
- http://discov3:2379
|
- http://etcd3.discov:2379
|
||||||
- --initial-cluster
|
- --initial-cluster
|
||||||
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
|
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
|
||||||
- --initial-cluster-state
|
- --initial-cluster-state
|
||||||
- new
|
- new
|
||||||
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
|
- --auto-compaction-retention=1
|
||||||
name: discov3
|
image: quay.io/coreos/etcd:latest
|
||||||
|
name: etcd3
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 2379
|
- containerPort: 2379
|
||||||
name: client
|
name: client
|
||||||
@@ -268,8 +266,6 @@ spec:
|
|||||||
- containerPort: 2380
|
- containerPort: 2380
|
||||||
name: server
|
name: server
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
imagePullSecrets:
|
|
||||||
- name: aliyun
|
|
||||||
affinity:
|
affinity:
|
||||||
podAntiAffinity:
|
podAntiAffinity:
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
@@ -278,7 +274,7 @@ spec:
|
|||||||
- key: app
|
- key: app
|
||||||
operator: In
|
operator: In
|
||||||
values:
|
values:
|
||||||
- discov
|
- etcd
|
||||||
topologyKey: "kubernetes.io/hostname"
|
topologyKey: "kubernetes.io/hostname"
|
||||||
restartPolicy: Always
|
restartPolicy: Always
|
||||||
|
|
||||||
@@ -288,9 +284,9 @@ apiVersion: v1
|
|||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
discov_node: discov3
|
etcd_node: etcd3
|
||||||
name: discov3
|
name: etcd3
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: client
|
- name: client
|
||||||
@@ -302,7 +298,7 @@ spec:
|
|||||||
protocol: TCP
|
protocol: TCP
|
||||||
targetPort: 2380
|
targetPort: 2380
|
||||||
selector:
|
selector:
|
||||||
discov_node: discov3
|
etcd_node: etcd3
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -310,30 +306,31 @@ apiVersion: v1
|
|||||||
kind: Pod
|
kind: Pod
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
app: discov
|
app: etcd
|
||||||
discov_node: discov4
|
etcd_node: etcd4
|
||||||
name: discov4
|
name: etcd4
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- command:
|
- command:
|
||||||
- /usr/local/bin/etcd
|
- /usr/local/bin/etcd
|
||||||
- --name
|
- --name
|
||||||
- discov4
|
- etcd4
|
||||||
- --initial-advertise-peer-urls
|
- --initial-advertise-peer-urls
|
||||||
- http://discov4:2380
|
- http://etcd4:2380
|
||||||
- --listen-peer-urls
|
- --listen-peer-urls
|
||||||
- http://0.0.0.0:2380
|
- http://0.0.0.0:2380
|
||||||
- --listen-client-urls
|
- --listen-client-urls
|
||||||
- http://0.0.0.0:2379
|
- http://0.0.0.0:2379
|
||||||
- --advertise-client-urls
|
- --advertise-client-urls
|
||||||
- http://discov4:2379
|
- http://etcd4.discov:2379
|
||||||
- --initial-cluster
|
- --initial-cluster
|
||||||
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
|
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
|
||||||
- --initial-cluster-state
|
- --initial-cluster-state
|
||||||
- new
|
- new
|
||||||
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
|
- --auto-compaction-retention=1
|
||||||
name: discov4
|
image: quay.io/coreos/etcd:latest
|
||||||
|
name: etcd4
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 2379
|
- containerPort: 2379
|
||||||
name: client
|
name: client
|
||||||
@@ -341,8 +338,6 @@ spec:
|
|||||||
- containerPort: 2380
|
- containerPort: 2380
|
||||||
name: server
|
name: server
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
imagePullSecrets:
|
|
||||||
- name: aliyun
|
|
||||||
affinity:
|
affinity:
|
||||||
podAntiAffinity:
|
podAntiAffinity:
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
@@ -351,7 +346,7 @@ spec:
|
|||||||
- key: app
|
- key: app
|
||||||
operator: In
|
operator: In
|
||||||
values:
|
values:
|
||||||
- discov
|
- etcd
|
||||||
topologyKey: "kubernetes.io/hostname"
|
topologyKey: "kubernetes.io/hostname"
|
||||||
restartPolicy: Always
|
restartPolicy: Always
|
||||||
|
|
||||||
@@ -361,9 +356,9 @@ apiVersion: v1
|
|||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
discov_node: discov4
|
etcd_node: etcd4
|
||||||
name: discov4
|
name: etcd4
|
||||||
namespace: discovery
|
namespace: discov
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: client
|
- name: client
|
||||||
@@ -375,4 +370,4 @@ spec:
|
|||||||
protocol: TCP
|
protocol: TCP
|
||||||
targetPort: 2380
|
targetPort: 2380
|
||||||
selector:
|
selector:
|
||||||
discov_node: discov4
|
etcd_node: etcd4
|
||||||
@@ -7,12 +7,14 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/proc"
|
"github.com/tal-tech/go-zero/core/proc"
|
||||||
"github.com/tal-tech/go-zero/core/syncx"
|
"github.com/tal-tech/go-zero/core/syncx"
|
||||||
"github.com/tal-tech/go-zero/core/threading"
|
"github.com/tal-tech/go-zero/core/threading"
|
||||||
"go.etcd.io/etcd/clientv3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// PublisherOption defines the method to customize a Publisher.
|
||||||
PublisherOption func(client *Publisher)
|
PublisherOption func(client *Publisher)
|
||||||
|
|
||||||
|
// A Publisher can be used to publish the value to an etcd cluster on the given key.
|
||||||
Publisher struct {
|
Publisher struct {
|
||||||
endpoints []string
|
endpoints []string
|
||||||
key string
|
key string
|
||||||
@@ -26,6 +28,10 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewPublisher returns a Publisher.
|
||||||
|
// endpoints is the hosts of the etcd cluster.
|
||||||
|
// key:value are a pair to be published.
|
||||||
|
// opts are used to customize the Publisher.
|
||||||
func NewPublisher(endpoints []string, key, value string, opts ...PublisherOption) *Publisher {
|
func NewPublisher(endpoints []string, key, value string, opts ...PublisherOption) *Publisher {
|
||||||
publisher := &Publisher{
|
publisher := &Publisher{
|
||||||
endpoints: endpoints,
|
endpoints: endpoints,
|
||||||
@@ -43,6 +49,7 @@ func NewPublisher(endpoints []string, key, value string, opts ...PublisherOption
|
|||||||
return publisher
|
return publisher
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KeepAlive keeps key:value alive.
|
||||||
func (p *Publisher) KeepAlive() error {
|
func (p *Publisher) KeepAlive() error {
|
||||||
cli, err := internal.GetRegistry().GetConn(p.endpoints)
|
cli, err := internal.GetRegistry().GetConn(p.endpoints)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -61,14 +68,17 @@ func (p *Publisher) KeepAlive() error {
|
|||||||
return p.keepAliveAsync(cli)
|
return p.keepAliveAsync(cli)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Pause pauses the renewing of key:value.
|
||||||
func (p *Publisher) Pause() {
|
func (p *Publisher) Pause() {
|
||||||
p.pauseChan <- lang.Placeholder
|
p.pauseChan <- lang.Placeholder
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Resume resumes the renewing of key:value.
|
||||||
func (p *Publisher) Resume() {
|
func (p *Publisher) Resume() {
|
||||||
p.resumeChan <- lang.Placeholder
|
p.resumeChan <- lang.Placeholder
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Stop stops the renewing and revokes the registration.
|
||||||
func (p *Publisher) Stop() {
|
func (p *Publisher) Stop() {
|
||||||
p.quit.Close()
|
p.quit.Close()
|
||||||
}
|
}
|
||||||
@@ -135,6 +145,7 @@ func (p *Publisher) revoke(cli internal.EtcdClient) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithId customizes a Publisher with the id.
|
||||||
func WithId(id int64) PublisherOption {
|
func WithId(id int64) PublisherOption {
|
||||||
return func(publisher *Publisher) {
|
return func(publisher *Publisher) {
|
||||||
publisher.id = id
|
publisher.id = id
|
||||||
|
|||||||
@@ -4,12 +4,14 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tal-tech/go-zero/core/discov/internal"
|
"github.com/tal-tech/go-zero/core/discov/internal"
|
||||||
|
"github.com/tal-tech/go-zero/core/lang"
|
||||||
"github.com/tal-tech/go-zero/core/logx"
|
"github.com/tal-tech/go-zero/core/logx"
|
||||||
"go.etcd.io/etcd/clientv3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -111,6 +113,10 @@ func TestPublisher_keepAliveAsyncQuit(t *testing.T) {
|
|||||||
defer ctrl.Finish()
|
defer ctrl.Finish()
|
||||||
const id clientv3.LeaseID = 1
|
const id clientv3.LeaseID = 1
|
||||||
cli := internal.NewMockEtcdClient(ctrl)
|
cli := internal.NewMockEtcdClient(ctrl)
|
||||||
|
cli.EXPECT().ActiveConnection()
|
||||||
|
cli.EXPECT().Close()
|
||||||
|
defer cli.Close()
|
||||||
|
cli.ActiveConnection()
|
||||||
restore := setMockClient(cli)
|
restore := setMockClient(cli)
|
||||||
defer restore()
|
defer restore()
|
||||||
cli.EXPECT().Ctx().AnyTimes()
|
cli.EXPECT().Ctx().AnyTimes()
|
||||||
@@ -148,3 +154,16 @@ func TestPublisher_keepAliveAsyncPause(t *testing.T) {
|
|||||||
pub.Pause()
|
pub.Pause()
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPublisher_Resume(t *testing.T) {
|
||||||
|
publisher := new(Publisher)
|
||||||
|
publisher.resumeChan = make(chan lang.PlaceholderType)
|
||||||
|
go func() {
|
||||||
|
publisher.Resume()
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
time.Sleep(time.Minute)
|
||||||
|
t.Fail()
|
||||||
|
}()
|
||||||
|
<-publisher.resumeChan
|
||||||
|
}
|
||||||
|
|||||||
@@ -13,13 +13,19 @@ type (
|
|||||||
exclusive bool
|
exclusive bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SubOption defines the method to customize a Subscriber.
|
||||||
SubOption func(opts *subOptions)
|
SubOption func(opts *subOptions)
|
||||||
|
|
||||||
|
// A Subscriber is used to subscribe the given key on a etcd cluster.
|
||||||
Subscriber struct {
|
Subscriber struct {
|
||||||
items *container
|
items *container
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewSubscriber returns a Subscriber.
|
||||||
|
// endpoints is the hosts of the etcd cluster.
|
||||||
|
// key is the key to subscribe.
|
||||||
|
// opts are used to customize the Subscriber.
|
||||||
func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscriber, error) {
|
func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscriber, error) {
|
||||||
var subOpts subOptions
|
var subOpts subOptions
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
@@ -36,15 +42,17 @@ func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscrib
|
|||||||
return sub, nil
|
return sub, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddListener adds listener to s.
|
||||||
func (s *Subscriber) AddListener(listener func()) {
|
func (s *Subscriber) AddListener(listener func()) {
|
||||||
s.items.addListener(listener)
|
s.items.addListener(listener)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Values returns all the subscription values.
|
||||||
func (s *Subscriber) Values() []string {
|
func (s *Subscriber) Values() []string {
|
||||||
return s.items.getValues()
|
return s.items.getValues()
|
||||||
}
|
}
|
||||||
|
|
||||||
// exclusive means that key value can only be 1:1,
|
// Exclusive means that key value can only be 1:1,
|
||||||
// which means later added value will remove the keys associated with the same value previously.
|
// which means later added value will remove the keys associated with the same value previously.
|
||||||
func Exclusive() SubOption {
|
func Exclusive() SubOption {
|
||||||
return func(opts *subOptions) {
|
return func(opts *subOptions) {
|
||||||
@@ -100,9 +108,9 @@ func (c *container) addKv(key, value string) ([]string, bool) {
|
|||||||
|
|
||||||
if early {
|
if early {
|
||||||
return previous, true
|
return previous, true
|
||||||
} else {
|
|
||||||
return nil, false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *container) addListener(listener func()) {
|
func (c *container) addListener(listener func()) {
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package discov
|
package discov
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -198,3 +199,18 @@ func TestContainer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSubscriber(t *testing.T) {
|
||||||
|
var opt subOptions
|
||||||
|
Exclusive()(&opt)
|
||||||
|
|
||||||
|
sub := new(Subscriber)
|
||||||
|
sub.items = newContainer(opt.exclusive)
|
||||||
|
var count int32
|
||||||
|
sub.AddListener(func() {
|
||||||
|
atomic.AddInt32(&count, 1)
|
||||||
|
})
|
||||||
|
sub.items.notifyChange()
|
||||||
|
assert.Empty(t, sub.Values())
|
||||||
|
assert.Equal(t, int32(1), atomic.LoadInt32(&count))
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,21 +1,23 @@
|
|||||||
package errorx
|
package errorx
|
||||||
|
|
||||||
import "sync"
|
import "sync/atomic"
|
||||||
|
|
||||||
|
// AtomicError defines an atomic error.
|
||||||
type AtomicError struct {
|
type AtomicError struct {
|
||||||
err error
|
err atomic.Value // error
|
||||||
lock sync.Mutex
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set sets the error.
|
||||||
func (ae *AtomicError) Set(err error) {
|
func (ae *AtomicError) Set(err error) {
|
||||||
ae.lock.Lock()
|
if err != nil {
|
||||||
ae.err = err
|
ae.err.Store(err)
|
||||||
ae.lock.Unlock()
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Load returns the error.
|
||||||
func (ae *AtomicError) Load() error {
|
func (ae *AtomicError) Load() error {
|
||||||
ae.lock.Lock()
|
if v := ae.err.Load(); v != nil {
|
||||||
err := ae.err
|
return v.(error)
|
||||||
ae.lock.Unlock()
|
}
|
||||||
return err
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ package errorx
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -15,7 +17,66 @@ func TestAtomicError(t *testing.T) {
|
|||||||
assert.Equal(t, errDummy, err.Load())
|
assert.Equal(t, errDummy, err.Load())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAtomicErrorSetNil(t *testing.T) {
|
||||||
|
var (
|
||||||
|
errNil error
|
||||||
|
err AtomicError
|
||||||
|
)
|
||||||
|
err.Set(errNil)
|
||||||
|
assert.Equal(t, errNil, err.Load())
|
||||||
|
}
|
||||||
|
|
||||||
func TestAtomicErrorNil(t *testing.T) {
|
func TestAtomicErrorNil(t *testing.T) {
|
||||||
var err AtomicError
|
var err AtomicError
|
||||||
assert.Nil(t, err.Load())
|
assert.Nil(t, err.Load())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func BenchmarkAtomicError(b *testing.B) {
|
||||||
|
var aerr AtomicError
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
|
||||||
|
b.Run("Load", func(b *testing.B) {
|
||||||
|
var done uint32
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
if atomic.LoadUint32(&done) != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
aerr.Set(errDummy)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
_ = aerr.Load()
|
||||||
|
}
|
||||||
|
b.StopTimer()
|
||||||
|
atomic.StoreUint32(&done, 1)
|
||||||
|
wg.Wait()
|
||||||
|
})
|
||||||
|
b.Run("Set", func(b *testing.B) {
|
||||||
|
var done uint32
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
if atomic.LoadUint32(&done) != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
_ = aerr.Load()
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
aerr.Set(errDummy)
|
||||||
|
}
|
||||||
|
b.StopTimer()
|
||||||
|
atomic.StoreUint32(&done, 1)
|
||||||
|
wg.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package errorx
|
|||||||
import "bytes"
|
import "bytes"
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// A BatchError is an error that can hold multiple errors.
|
||||||
BatchError struct {
|
BatchError struct {
|
||||||
errs errorArray
|
errs errorArray
|
||||||
}
|
}
|
||||||
@@ -10,12 +11,14 @@ type (
|
|||||||
errorArray []error
|
errorArray []error
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Add adds err to be.
|
||||||
func (be *BatchError) Add(err error) {
|
func (be *BatchError) Add(err error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
be.errs = append(be.errs, err)
|
be.errs = append(be.errs, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Err returns an error that represents all errors.
|
||||||
func (be *BatchError) Err() error {
|
func (be *BatchError) Err() error {
|
||||||
switch len(be.errs) {
|
switch len(be.errs) {
|
||||||
case 0:
|
case 0:
|
||||||
@@ -27,10 +30,12 @@ func (be *BatchError) Err() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NotNil checks if any error inside.
|
||||||
func (be *BatchError) NotNil() bool {
|
func (be *BatchError) NotNil() bool {
|
||||||
return len(be.errs) > 0
|
return len(be.errs) > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Error returns a string that represents inside errors.
|
||||||
func (ea errorArray) Error() string {
|
func (ea errorArray) Error() string {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
|||||||
12
core/errorx/callchain.go
Normal file
12
core/errorx/callchain.go
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
package errorx
|
||||||
|
|
||||||
|
// Chain runs funs one by one until an error occurred.
|
||||||
|
func Chain(fns ...func() error) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
27
core/errorx/callchain_test.go
Normal file
27
core/errorx/callchain_test.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package errorx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestChain(t *testing.T) {
|
||||||
|
errDummy := errors.New("dummy")
|
||||||
|
assert.Nil(t, Chain(func() error {
|
||||||
|
return nil
|
||||||
|
}, func() error {
|
||||||
|
return nil
|
||||||
|
}))
|
||||||
|
assert.Equal(t, errDummy, Chain(func() error {
|
||||||
|
return errDummy
|
||||||
|
}, func() error {
|
||||||
|
return nil
|
||||||
|
}))
|
||||||
|
assert.Equal(t, errDummy, Chain(func() error {
|
||||||
|
return nil
|
||||||
|
}, func() error {
|
||||||
|
return errDummy
|
||||||
|
}))
|
||||||
|
}
|
||||||
@@ -5,8 +5,12 @@ import "time"
|
|||||||
const defaultBulkTasks = 1000
|
const defaultBulkTasks = 1000
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// BulkOption defines the method to customize a BulkExecutor.
|
||||||
BulkOption func(options *bulkOptions)
|
BulkOption func(options *bulkOptions)
|
||||||
|
|
||||||
|
// A BulkExecutor is an executor that can execute tasks on either requirement meets:
|
||||||
|
// 1. up to given size of tasks
|
||||||
|
// 2. flush interval time elapsed
|
||||||
BulkExecutor struct {
|
BulkExecutor struct {
|
||||||
executor *PeriodicalExecutor
|
executor *PeriodicalExecutor
|
||||||
container *bulkContainer
|
container *bulkContainer
|
||||||
@@ -18,6 +22,7 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewBulkExecutor returns a BulkExecutor.
|
||||||
func NewBulkExecutor(execute Execute, opts ...BulkOption) *BulkExecutor {
|
func NewBulkExecutor(execute Execute, opts ...BulkOption) *BulkExecutor {
|
||||||
options := newBulkOptions()
|
options := newBulkOptions()
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
@@ -36,25 +41,30 @@ func NewBulkExecutor(execute Execute, opts ...BulkOption) *BulkExecutor {
|
|||||||
return executor
|
return executor
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add adds task into be.
|
||||||
func (be *BulkExecutor) Add(task interface{}) error {
|
func (be *BulkExecutor) Add(task interface{}) error {
|
||||||
be.executor.Add(task)
|
be.executor.Add(task)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Flush forces be to flush and execute tasks.
|
||||||
func (be *BulkExecutor) Flush() {
|
func (be *BulkExecutor) Flush() {
|
||||||
be.executor.Flush()
|
be.executor.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wait waits be to done with the task execution.
|
||||||
func (be *BulkExecutor) Wait() {
|
func (be *BulkExecutor) Wait() {
|
||||||
be.executor.Wait()
|
be.executor.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithBulkTasks customizes a BulkExecutor with given tasks limit.
|
||||||
func WithBulkTasks(tasks int) BulkOption {
|
func WithBulkTasks(tasks int) BulkOption {
|
||||||
return func(options *bulkOptions) {
|
return func(options *bulkOptions) {
|
||||||
options.cachedTasks = tasks
|
options.cachedTasks = tasks
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithBulkInterval customizes a BulkExecutor with given flush interval.
|
||||||
func WithBulkInterval(duration time.Duration) BulkOption {
|
func WithBulkInterval(duration time.Duration) BulkOption {
|
||||||
return func(options *bulkOptions) {
|
return func(options *bulkOptions) {
|
||||||
options.flushInterval = duration
|
options.flushInterval = duration
|
||||||
|
|||||||
@@ -12,14 +12,14 @@ func TestBulkExecutor(t *testing.T) {
|
|||||||
var values []int
|
var values []int
|
||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
|
|
||||||
exeutor := NewBulkExecutor(func(items []interface{}) {
|
executor := NewBulkExecutor(func(items []interface{}) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
values = append(values, len(items))
|
values = append(values, len(items))
|
||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
}, WithBulkTasks(10), WithBulkInterval(time.Minute))
|
}, WithBulkTasks(10), WithBulkInterval(time.Minute))
|
||||||
|
|
||||||
for i := 0; i < 50; i++ {
|
for i := 0; i < 50; i++ {
|
||||||
exeutor.Add(1)
|
executor.Add(1)
|
||||||
time.Sleep(time.Millisecond)
|
time.Sleep(time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,13 +40,13 @@ func TestBulkExecutorFlushInterval(t *testing.T) {
|
|||||||
var wait sync.WaitGroup
|
var wait sync.WaitGroup
|
||||||
|
|
||||||
wait.Add(1)
|
wait.Add(1)
|
||||||
exeutor := NewBulkExecutor(func(items []interface{}) {
|
executor := NewBulkExecutor(func(items []interface{}) {
|
||||||
assert.Equal(t, size, len(items))
|
assert.Equal(t, size, len(items))
|
||||||
wait.Done()
|
wait.Done()
|
||||||
}, WithBulkTasks(caches), WithBulkInterval(time.Millisecond*100))
|
}, WithBulkTasks(caches), WithBulkInterval(time.Millisecond*100))
|
||||||
|
|
||||||
for i := 0; i < size; i++ {
|
for i := 0; i < size; i++ {
|
||||||
exeutor.Add(1)
|
executor.Add(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
wait.Wait()
|
wait.Wait()
|
||||||
@@ -86,9 +86,7 @@ func TestBuldExecutorFlushSlowTasks(t *testing.T) {
|
|||||||
time.Sleep(time.Millisecond * 100)
|
time.Sleep(time.Millisecond * 100)
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
for _, i := range tasks {
|
result = append(result, tasks...)
|
||||||
result = append(result, i)
|
|
||||||
}
|
|
||||||
}, WithBulkTasks(1000))
|
}, WithBulkTasks(1000))
|
||||||
for i := 0; i < total; i++ {
|
for i := 0; i < total; i++ {
|
||||||
assert.Nil(t, exec.Add(i))
|
assert.Nil(t, exec.Add(i))
|
||||||
|
|||||||
@@ -5,8 +5,12 @@ import "time"
|
|||||||
const defaultChunkSize = 1024 * 1024 // 1M
|
const defaultChunkSize = 1024 * 1024 // 1M
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// ChunkOption defines the method to customize a ChunkExecutor.
|
||||||
ChunkOption func(options *chunkOptions)
|
ChunkOption func(options *chunkOptions)
|
||||||
|
|
||||||
|
// A ChunkExecutor is an executor to execute tasks when either requirement meets:
|
||||||
|
// 1. up to given chunk size
|
||||||
|
// 2. flush interval elapsed
|
||||||
ChunkExecutor struct {
|
ChunkExecutor struct {
|
||||||
executor *PeriodicalExecutor
|
executor *PeriodicalExecutor
|
||||||
container *chunkContainer
|
container *chunkContainer
|
||||||
@@ -18,6 +22,7 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewChunkExecutor returns a ChunkExecutor.
|
||||||
func NewChunkExecutor(execute Execute, opts ...ChunkOption) *ChunkExecutor {
|
func NewChunkExecutor(execute Execute, opts ...ChunkOption) *ChunkExecutor {
|
||||||
options := newChunkOptions()
|
options := newChunkOptions()
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
@@ -36,6 +41,7 @@ func NewChunkExecutor(execute Execute, opts ...ChunkOption) *ChunkExecutor {
|
|||||||
return executor
|
return executor
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add adds task with given chunk size into ce.
|
||||||
func (ce *ChunkExecutor) Add(task interface{}, size int) error {
|
func (ce *ChunkExecutor) Add(task interface{}, size int) error {
|
||||||
ce.executor.Add(chunk{
|
ce.executor.Add(chunk{
|
||||||
val: task,
|
val: task,
|
||||||
@@ -44,20 +50,24 @@ func (ce *ChunkExecutor) Add(task interface{}, size int) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Flush forces ce to flush and execute tasks.
|
||||||
func (ce *ChunkExecutor) Flush() {
|
func (ce *ChunkExecutor) Flush() {
|
||||||
ce.executor.Flush()
|
ce.executor.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wait waits the execution to be done.
|
||||||
func (ce *ChunkExecutor) Wait() {
|
func (ce *ChunkExecutor) Wait() {
|
||||||
ce.executor.Wait()
|
ce.executor.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithChunkBytes customizes a ChunkExecutor with the given chunk size.
|
||||||
func WithChunkBytes(size int) ChunkOption {
|
func WithChunkBytes(size int) ChunkOption {
|
||||||
return func(options *chunkOptions) {
|
return func(options *chunkOptions) {
|
||||||
options.chunkSize = size
|
options.chunkSize = size
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithFlushInterval customizes a ChunkExecutor with the given flush interval.
|
||||||
func WithFlushInterval(duration time.Duration) ChunkOption {
|
func WithFlushInterval(duration time.Duration) ChunkOption {
|
||||||
return func(options *chunkOptions) {
|
return func(options *chunkOptions) {
|
||||||
options.flushInterval = duration
|
options.flushInterval = duration
|
||||||
|
|||||||
@@ -12,14 +12,14 @@ func TestChunkExecutor(t *testing.T) {
|
|||||||
var values []int
|
var values []int
|
||||||
var lock sync.Mutex
|
var lock sync.Mutex
|
||||||
|
|
||||||
exeutor := NewChunkExecutor(func(items []interface{}) {
|
executor := NewChunkExecutor(func(items []interface{}) {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
values = append(values, len(items))
|
values = append(values, len(items))
|
||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
}, WithChunkBytes(10), WithFlushInterval(time.Minute))
|
}, WithChunkBytes(10), WithFlushInterval(time.Minute))
|
||||||
|
|
||||||
for i := 0; i < 50; i++ {
|
for i := 0; i < 50; i++ {
|
||||||
exeutor.Add(1, 1)
|
executor.Add(1, 1)
|
||||||
time.Sleep(time.Millisecond)
|
time.Sleep(time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,13 +40,13 @@ func TestChunkExecutorFlushInterval(t *testing.T) {
|
|||||||
var wait sync.WaitGroup
|
var wait sync.WaitGroup
|
||||||
|
|
||||||
wait.Add(1)
|
wait.Add(1)
|
||||||
exeutor := NewChunkExecutor(func(items []interface{}) {
|
executor := NewChunkExecutor(func(items []interface{}) {
|
||||||
assert.Equal(t, size, len(items))
|
assert.Equal(t, size, len(items))
|
||||||
wait.Done()
|
wait.Done()
|
||||||
}, WithChunkBytes(caches), WithFlushInterval(time.Millisecond*100))
|
}, WithChunkBytes(caches), WithFlushInterval(time.Millisecond*100))
|
||||||
|
|
||||||
for i := 0; i < size; i++ {
|
for i := 0; i < size; i++ {
|
||||||
exeutor.Add(1, 1)
|
executor.Add(1, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
wait.Wait()
|
wait.Wait()
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/threading"
|
"github.com/tal-tech/go-zero/core/threading"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// A DelayExecutor delays a tasks on given delay interval.
|
||||||
type DelayExecutor struct {
|
type DelayExecutor struct {
|
||||||
fn func()
|
fn func()
|
||||||
delay time.Duration
|
delay time.Duration
|
||||||
@@ -14,6 +15,7 @@ type DelayExecutor struct {
|
|||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewDelayExecutor returns a DelayExecutor with given fn and delay.
|
||||||
func NewDelayExecutor(fn func(), delay time.Duration) *DelayExecutor {
|
func NewDelayExecutor(fn func(), delay time.Duration) *DelayExecutor {
|
||||||
return &DelayExecutor{
|
return &DelayExecutor{
|
||||||
fn: fn,
|
fn: fn,
|
||||||
@@ -21,6 +23,7 @@ func NewDelayExecutor(fn func(), delay time.Duration) *DelayExecutor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Trigger triggers the task to be executed after given delay, safe to trigger more than once.
|
||||||
func (de *DelayExecutor) Trigger() {
|
func (de *DelayExecutor) Trigger() {
|
||||||
de.lock.Lock()
|
de.lock.Lock()
|
||||||
defer de.lock.Unlock()
|
defer de.lock.Unlock()
|
||||||
|
|||||||
@@ -7,11 +7,13 @@ import (
|
|||||||
"github.com/tal-tech/go-zero/core/timex"
|
"github.com/tal-tech/go-zero/core/timex"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// A LessExecutor is an executor to limit execution once within given time interval.
|
||||||
type LessExecutor struct {
|
type LessExecutor struct {
|
||||||
threshold time.Duration
|
threshold time.Duration
|
||||||
lastTime *syncx.AtomicDuration
|
lastTime *syncx.AtomicDuration
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewLessExecutor returns a LessExecutor with given threshold as time interval.
|
||||||
func NewLessExecutor(threshold time.Duration) *LessExecutor {
|
func NewLessExecutor(threshold time.Duration) *LessExecutor {
|
||||||
return &LessExecutor{
|
return &LessExecutor{
|
||||||
threshold: threshold,
|
threshold: threshold,
|
||||||
@@ -19,6 +21,8 @@ func NewLessExecutor(threshold time.Duration) *LessExecutor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoOrDiscard executes or discards the task depends on if
|
||||||
|
// another task was executed within the time interval.
|
||||||
func (le *LessExecutor) DoOrDiscard(execute func()) bool {
|
func (le *LessExecutor) DoOrDiscard(execute func()) bool {
|
||||||
now := timex.Now()
|
now := timex.Now()
|
||||||
lastTime := le.lastTime.Load()
|
lastTime := le.lastTime.Load()
|
||||||
|
|||||||
@@ -3,8 +3,10 @@ package executors
|
|||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/tal-tech/go-zero/core/lang"
|
||||||
"github.com/tal-tech/go-zero/core/proc"
|
"github.com/tal-tech/go-zero/core/proc"
|
||||||
"github.com/tal-tech/go-zero/core/syncx"
|
"github.com/tal-tech/go-zero/core/syncx"
|
||||||
"github.com/tal-tech/go-zero/core/threading"
|
"github.com/tal-tech/go-zero/core/threading"
|
||||||
@@ -14,7 +16,7 @@ import (
|
|||||||
const idleRound = 10
|
const idleRound = 10
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// A type that satisfies executors.TaskContainer can be used as the underlying
|
// TaskContainer interface defines a type that can be used as the underlying
|
||||||
// container that used to do periodical executions.
|
// container that used to do periodical executions.
|
||||||
TaskContainer interface {
|
TaskContainer interface {
|
||||||
// AddTask adds the task into the container.
|
// AddTask adds the task into the container.
|
||||||
@@ -26,27 +28,32 @@ type (
|
|||||||
RemoveAll() interface{}
|
RemoveAll() interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A PeriodicalExecutor is an executor that periodically execute tasks.
|
||||||
PeriodicalExecutor struct {
|
PeriodicalExecutor struct {
|
||||||
commander chan interface{}
|
commander chan interface{}
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
container TaskContainer
|
container TaskContainer
|
||||||
waitGroup sync.WaitGroup
|
waitGroup sync.WaitGroup
|
||||||
// avoid race condition on waitGroup when calling wg.Add/Done/Wait(...)
|
// avoid race condition on waitGroup when calling wg.Add/Done/Wait(...)
|
||||||
wgBarrier syncx.Barrier
|
wgBarrier syncx.Barrier
|
||||||
guarded bool
|
confirmChan chan lang.PlaceholderType
|
||||||
newTicker func(duration time.Duration) timex.Ticker
|
inflight int32
|
||||||
lock sync.Mutex
|
guarded bool
|
||||||
|
newTicker func(duration time.Duration) timex.Ticker
|
||||||
|
lock sync.Mutex
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewPeriodicalExecutor returns a PeriodicalExecutor with given interval and container.
|
||||||
func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *PeriodicalExecutor {
|
func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *PeriodicalExecutor {
|
||||||
executor := &PeriodicalExecutor{
|
executor := &PeriodicalExecutor{
|
||||||
// buffer 1 to let the caller go quickly
|
// buffer 1 to let the caller go quickly
|
||||||
commander: make(chan interface{}, 1),
|
commander: make(chan interface{}, 1),
|
||||||
interval: interval,
|
interval: interval,
|
||||||
container: container,
|
container: container,
|
||||||
|
confirmChan: make(chan lang.PlaceholderType),
|
||||||
newTicker: func(d time.Duration) timex.Ticker {
|
newTicker: func(d time.Duration) timex.Ticker {
|
||||||
return timex.NewTicker(interval)
|
return timex.NewTicker(d)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
proc.AddShutdownListener(func() {
|
proc.AddShutdownListener(func() {
|
||||||
@@ -56,13 +63,17 @@ func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *Per
|
|||||||
return executor
|
return executor
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add adds tasks into pe.
|
||||||
func (pe *PeriodicalExecutor) Add(task interface{}) {
|
func (pe *PeriodicalExecutor) Add(task interface{}) {
|
||||||
if vals, ok := pe.addAndCheck(task); ok {
|
if vals, ok := pe.addAndCheck(task); ok {
|
||||||
pe.commander <- vals
|
pe.commander <- vals
|
||||||
|
<-pe.confirmChan
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Flush forces pe to execute tasks.
|
||||||
func (pe *PeriodicalExecutor) Flush() bool {
|
func (pe *PeriodicalExecutor) Flush() bool {
|
||||||
|
pe.enterExecution()
|
||||||
return pe.executeTasks(func() interface{} {
|
return pe.executeTasks(func() interface{} {
|
||||||
pe.lock.Lock()
|
pe.lock.Lock()
|
||||||
defer pe.lock.Unlock()
|
defer pe.lock.Unlock()
|
||||||
@@ -70,13 +81,16 @@ func (pe *PeriodicalExecutor) Flush() bool {
|
|||||||
}())
|
}())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Sync lets caller to run fn thread-safe with pe, especially for the underlying container.
|
||||||
func (pe *PeriodicalExecutor) Sync(fn func()) {
|
func (pe *PeriodicalExecutor) Sync(fn func()) {
|
||||||
pe.lock.Lock()
|
pe.lock.Lock()
|
||||||
defer pe.lock.Unlock()
|
defer pe.lock.Unlock()
|
||||||
fn()
|
fn()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wait waits the execution to be done.
|
||||||
func (pe *PeriodicalExecutor) Wait() {
|
func (pe *PeriodicalExecutor) Wait() {
|
||||||
|
pe.Flush()
|
||||||
pe.wgBarrier.Guard(func() {
|
pe.wgBarrier.Guard(func() {
|
||||||
pe.waitGroup.Wait()
|
pe.waitGroup.Wait()
|
||||||
})
|
})
|
||||||
@@ -85,18 +99,16 @@ func (pe *PeriodicalExecutor) Wait() {
|
|||||||
func (pe *PeriodicalExecutor) addAndCheck(task interface{}) (interface{}, bool) {
|
func (pe *PeriodicalExecutor) addAndCheck(task interface{}) (interface{}, bool) {
|
||||||
pe.lock.Lock()
|
pe.lock.Lock()
|
||||||
defer func() {
|
defer func() {
|
||||||
var start bool
|
|
||||||
if !pe.guarded {
|
if !pe.guarded {
|
||||||
pe.guarded = true
|
pe.guarded = true
|
||||||
start = true
|
// defer to unlock quickly
|
||||||
|
defer pe.backgroundFlush()
|
||||||
}
|
}
|
||||||
pe.lock.Unlock()
|
pe.lock.Unlock()
|
||||||
if start {
|
|
||||||
pe.backgroundFlush()
|
|
||||||
}
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if pe.container.AddTask(task) {
|
if pe.container.AddTask(task) {
|
||||||
|
atomic.AddInt32(&pe.inflight, 1)
|
||||||
return pe.container.RemoveAll(), true
|
return pe.container.RemoveAll(), true
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -105,6 +117,9 @@ func (pe *PeriodicalExecutor) addAndCheck(task interface{}) (interface{}, bool)
|
|||||||
|
|
||||||
func (pe *PeriodicalExecutor) backgroundFlush() {
|
func (pe *PeriodicalExecutor) backgroundFlush() {
|
||||||
threading.GoSafe(func() {
|
threading.GoSafe(func() {
|
||||||
|
// flush before quit goroutine to avoid missing tasks
|
||||||
|
defer pe.Flush()
|
||||||
|
|
||||||
ticker := pe.newTicker(pe.interval)
|
ticker := pe.newTicker(pe.interval)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
@@ -114,6 +129,9 @@ func (pe *PeriodicalExecutor) backgroundFlush() {
|
|||||||
select {
|
select {
|
||||||
case vals := <-pe.commander:
|
case vals := <-pe.commander:
|
||||||
commanded = true
|
commanded = true
|
||||||
|
atomic.AddInt32(&pe.inflight, -1)
|
||||||
|
pe.enterExecution()
|
||||||
|
pe.confirmChan <- lang.Placeholder
|
||||||
pe.executeTasks(vals)
|
pe.executeTasks(vals)
|
||||||
last = timex.Now()
|
last = timex.Now()
|
||||||
case <-ticker.Chan():
|
case <-ticker.Chan():
|
||||||
@@ -121,13 +139,7 @@ func (pe *PeriodicalExecutor) backgroundFlush() {
|
|||||||
commanded = false
|
commanded = false
|
||||||
} else if pe.Flush() {
|
} else if pe.Flush() {
|
||||||
last = timex.Now()
|
last = timex.Now()
|
||||||
} else if timex.Since(last) > pe.interval*idleRound {
|
} else if pe.shallQuit(last) {
|
||||||
pe.lock.Lock()
|
|
||||||
pe.guarded = false
|
|
||||||
pe.lock.Unlock()
|
|
||||||
|
|
||||||
// flush again to avoid missing tasks
|
|
||||||
pe.Flush()
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -135,13 +147,18 @@ func (pe *PeriodicalExecutor) backgroundFlush() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pe *PeriodicalExecutor) executeTasks(tasks interface{}) bool {
|
func (pe *PeriodicalExecutor) doneExecution() {
|
||||||
|
pe.waitGroup.Done()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pe *PeriodicalExecutor) enterExecution() {
|
||||||
pe.wgBarrier.Guard(func() {
|
pe.wgBarrier.Guard(func() {
|
||||||
pe.waitGroup.Add(1)
|
pe.waitGroup.Add(1)
|
||||||
})
|
})
|
||||||
defer pe.wgBarrier.Guard(func() {
|
}
|
||||||
pe.waitGroup.Done()
|
|
||||||
})
|
func (pe *PeriodicalExecutor) executeTasks(tasks interface{}) bool {
|
||||||
|
defer pe.doneExecution()
|
||||||
|
|
||||||
ok := pe.hasTasks(tasks)
|
ok := pe.hasTasks(tasks)
|
||||||
if ok {
|
if ok {
|
||||||
@@ -165,3 +182,19 @@ func (pe *PeriodicalExecutor) hasTasks(tasks interface{}) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (pe *PeriodicalExecutor) shallQuit(last time.Duration) (stop bool) {
|
||||||
|
if timex.Since(last) <= pe.interval*idleRound {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// checking pe.inflight and setting pe.guarded should be locked together
|
||||||
|
pe.lock.Lock()
|
||||||
|
if atomic.LoadInt32(&pe.inflight) == 0 {
|
||||||
|
pe.guarded = false
|
||||||
|
stop = true
|
||||||
|
}
|
||||||
|
pe.lock.Unlock()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|||||||
@@ -106,6 +106,60 @@ func TestPeriodicalExecutor_Bulk(t *testing.T) {
|
|||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPeriodicalExecutor_Wait(t *testing.T) {
|
||||||
|
var lock sync.Mutex
|
||||||
|
executer := NewBulkExecutor(func(tasks []interface{}) {
|
||||||
|
lock.Lock()
|
||||||
|
defer lock.Unlock()
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
}, WithBulkTasks(1), WithBulkInterval(time.Second))
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
executer.Add(1)
|
||||||
|
}
|
||||||
|
executer.Flush()
|
||||||
|
executer.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPeriodicalExecutor_WaitFast(t *testing.T) {
|
||||||
|
const total = 3
|
||||||
|
var cnt int
|
||||||
|
var lock sync.Mutex
|
||||||
|
executer := NewBulkExecutor(func(tasks []interface{}) {
|
||||||
|
defer func() {
|
||||||
|
cnt++
|
||||||
|
}()
|
||||||
|
lock.Lock()
|
||||||
|
defer lock.Unlock()
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
}, WithBulkTasks(1), WithBulkInterval(10*time.Millisecond))
|
||||||
|
for i := 0; i < total; i++ {
|
||||||
|
executer.Add(2)
|
||||||
|
}
|
||||||
|
executer.Flush()
|
||||||
|
executer.Wait()
|
||||||
|
assert.Equal(t, total, cnt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPeriodicalExecutor_Deadlock(t *testing.T) {
|
||||||
|
executor := NewBulkExecutor(func(tasks []interface{}) {
|
||||||
|
}, WithBulkTasks(1), WithBulkInterval(time.Millisecond))
|
||||||
|
for i := 0; i < 1e5; i++ {
|
||||||
|
executor.Add(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPeriodicalExecutor_hasTasks(t *testing.T) {
|
||||||
|
ticker := timex.NewFakeTicker()
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, nil))
|
||||||
|
exec.newTicker = func(d time.Duration) timex.Ticker {
|
||||||
|
return ticker
|
||||||
|
}
|
||||||
|
assert.False(t, exec.hasTasks(nil))
|
||||||
|
assert.True(t, exec.hasTasks(1))
|
||||||
|
}
|
||||||
|
|
||||||
// go test -benchtime 10s -bench .
|
// go test -benchtime 10s -bench .
|
||||||
func BenchmarkExecutor(b *testing.B) {
|
func BenchmarkExecutor(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
|||||||
@@ -4,4 +4,5 @@ import "time"
|
|||||||
|
|
||||||
const defaultFlushInterval = time.Second
|
const defaultFlushInterval = time.Second
|
||||||
|
|
||||||
|
// Execute defines the method to execute tasks.
|
||||||
type Execute func(tasks []interface{})
|
type Execute func(tasks []interface{})
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
|
|
||||||
const bufSize = 1024
|
const bufSize = 1024
|
||||||
|
|
||||||
|
// FirstLine returns the first line of the file.
|
||||||
func FirstLine(filename string) (string, error) {
|
func FirstLine(filename string) (string, error) {
|
||||||
file, err := os.Open(filename)
|
file, err := os.Open(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -17,6 +18,7 @@ func FirstLine(filename string) (string, error) {
|
|||||||
return firstLine(file)
|
return firstLine(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LastLine returns the last line of the file.
|
||||||
func LastLine(filename string) (string, error) {
|
func LastLine(filename string) (string, error) {
|
||||||
file, err := os.Open(filename)
|
file, err := os.Open(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -69,11 +71,11 @@ func lastLine(filename string, file *os.File) (string, error) {
|
|||||||
|
|
||||||
if buf[n-1] == '\n' {
|
if buf[n-1] == '\n' {
|
||||||
buf = buf[:n-1]
|
buf = buf[:n-1]
|
||||||
n -= 1
|
n--
|
||||||
} else {
|
} else {
|
||||||
buf = buf[:n]
|
buf = buf[:n]
|
||||||
}
|
}
|
||||||
for n -= 1; n >= 0; n-- {
|
for n--; n >= 0; n-- {
|
||||||
if buf[n] == '\n' {
|
if buf[n] == '\n' {
|
||||||
return string(append(buf[n+1:], last...)), nil
|
return string(append(buf[n+1:], last...)), nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,9 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/tal-tech/go-zero/core/fs"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tal-tech/go-zero/core/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -15,34 +14,34 @@ const (
|
|||||||
text = `first line
|
text = `first line
|
||||||
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
||||||
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
||||||
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculus sed magna.
|
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculous sed magna.
|
||||||
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
||||||
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
||||||
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
||||||
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
||||||
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculus sed magna.
|
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculous sed magna.
|
||||||
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
||||||
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
||||||
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
||||||
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
||||||
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculus sed magna.
|
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculous sed magna.
|
||||||
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
||||||
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
||||||
` + longLine
|
` + longLine
|
||||||
textWithLastNewline = `first line
|
textWithLastNewline = `first line
|
||||||
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
||||||
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
||||||
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculus sed magna.
|
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculous sed magna.
|
||||||
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
||||||
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
||||||
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
||||||
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
||||||
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculus sed magna.
|
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculous sed magna.
|
||||||
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
||||||
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
||||||
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
Cum sociis natoque penatibus et magnis dis parturient. Phasellus laoreet lorem vel dolor tempus vehicula. Vivamus sagittis lacus vel augue laoreet rutrum faucibus. Integer legentibus erat a ante historiarum dapibus.
|
||||||
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
Quisque ut dolor gravida, placerat libero vel, euismod. Quam temere in vitiis, legem sancimus haerentia. Qui ipsorum lingua Celtae, nostra Galli appellantur. Quis aute iure reprehenderit in voluptate velit esse. Fabio vel iudice vincam, sunt in culpa qui officia. Cras mattis iudicium purus sit amet fermentum.
|
||||||
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculus sed magna.
|
Quo usque tandem abutere, Catilina, patientia nostra? Gallia est omnis divisa in partes tres, quarum. Quam diu etiam furor iste tuus nos eludet? Quid securi etiam tamquam eu fugiat nulla pariatur. Curabitur blandit tempus ardua ridiculous sed magna.
|
||||||
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
Magna pars studiorum, prodita quaerimus. Cum ceteris in veneratione tui montes, nascetur mus. Morbi odio eros, volutpat ut pharetra vitae, lobortis sed nibh. Plura mihi bona sunt, inclinet, amari petere vellent. Idque Caesaris facere voluntate liceret: sese habere. Tu quoque, Brute, fili mi, nihil timor populi, nihil!
|
||||||
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
Tityre, tu patulae recubans sub tegmine fagi dolor. Inmensae subtilitatis, obscuris et malesuada fames. Quae vero auctorem tractata ab fiducia dicuntur.
|
||||||
` + longLine + "\n"
|
` + longLine + "\n"
|
||||||
|
|||||||
@@ -5,12 +5,15 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// OffsetRange represents a content block of a file.
|
||||||
type OffsetRange struct {
|
type OffsetRange struct {
|
||||||
File string
|
File string
|
||||||
Start int64
|
Start int64
|
||||||
Stop int64
|
Stop int64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SplitLineChunks splits file into chunks.
|
||||||
|
// The whole line are guaranteed to be split in the same chunk.
|
||||||
func SplitLineChunks(filename string, chunks int) ([]OffsetRange, error) {
|
func SplitLineChunks(filename string, chunks int) ([]OffsetRange, error) {
|
||||||
info, err := os.Stat(filename)
|
info, err := os.Stat(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -3,8 +3,11 @@ package filex
|
|||||||
import "gopkg.in/cheggaaa/pb.v1"
|
import "gopkg.in/cheggaaa/pb.v1"
|
||||||
|
|
||||||
type (
|
type (
|
||||||
|
// A Scanner is used to read lines.
|
||||||
Scanner interface {
|
Scanner interface {
|
||||||
|
// Scan checks if has remaining to read.
|
||||||
Scan() bool
|
Scan() bool
|
||||||
|
// Text returns next line.
|
||||||
Text() string
|
Text() string
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -14,6 +17,7 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewProgressScanner returns a Scanner with progress indicator.
|
||||||
func NewProgressScanner(scanner Scanner, bar *pb.ProgressBar) Scanner {
|
func NewProgressScanner(scanner Scanner, bar *pb.ProgressBar) Scanner {
|
||||||
return &progressScanner{
|
return &progressScanner{
|
||||||
Scanner: scanner,
|
Scanner: scanner,
|
||||||
|
|||||||
@@ -5,12 +5,14 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// A RangeReader is used to read a range of content from a file.
|
||||||
type RangeReader struct {
|
type RangeReader struct {
|
||||||
file *os.File
|
file *os.File
|
||||||
start int64
|
start int64
|
||||||
stop int64
|
stop int64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewRangeReader returns a RangeReader, which will read the range of content from file.
|
||||||
func NewRangeReader(file *os.File, start, stop int64) *RangeReader {
|
func NewRangeReader(file *os.File, start, stop int64) *RangeReader {
|
||||||
return &RangeReader{
|
return &RangeReader{
|
||||||
file: file,
|
file: file,
|
||||||
@@ -19,6 +21,7 @@ func NewRangeReader(file *os.File, start, stop int64) *RangeReader {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Read reads the range of content into p.
|
||||||
func (rr *RangeReader) Read(p []byte) (n int, err error) {
|
func (rr *RangeReader) Read(p []byte) (n int, err error) {
|
||||||
stat, err := rr.file.Stat()
|
stat, err := rr.file.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build windows
|
||||||
// +build windows
|
// +build windows
|
||||||
|
|
||||||
package fs
|
package fs
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux || darwin
|
||||||
// +build linux darwin
|
// +build linux darwin
|
||||||
|
|
||||||
package fs
|
package fs
|
||||||
@@ -7,6 +8,7 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// CloseOnExec makes sure closing the file on process forking.
|
||||||
func CloseOnExec(file *os.File) {
|
func CloseOnExec(file *os.File) {
|
||||||
if file != nil {
|
if file != nil {
|
||||||
syscall.CloseOnExec(int(file.Fd()))
|
syscall.CloseOnExec(int(file.Fd()))
|
||||||
|
|||||||
357
core/fx/fn.go
357
core/fx/fn.go
@@ -1,357 +0,0 @@
|
|||||||
package fx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/tal-tech/go-zero/core/collection"
|
|
||||||
"github.com/tal-tech/go-zero/core/lang"
|
|
||||||
"github.com/tal-tech/go-zero/core/threading"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// defaultWorkers is the number of concurrent workers used when the caller
	// does not customize it via WithWorkers.
	defaultWorkers = 16
	// minWorkers is the lower bound enforced by WithWorkers.
	minWorkers = 1
)

type (
	// rxOptions holds the concurrency settings applied to stream operations.
	rxOptions struct {
		unlimitedWorkers bool
		workers          int
	}

	// FilterFunc defines the method to filter a Stream.
	FilterFunc func(item interface{}) bool
	// ForAllFunc defines the method to handle all elements in a Stream.
	ForAllFunc func(pipe <-chan interface{})
	// ForEachFunc defines the method to handle each element in a Stream.
	ForEachFunc func(item interface{})
	// GenerateFunc defines the method to send elements into a Stream.
	GenerateFunc func(source chan<- interface{})
	// KeyFunc defines the method to generate keys for the elements in a Stream.
	KeyFunc func(item interface{}) interface{}
	// LessFunc defines the method to compare two elements in a Stream.
	LessFunc func(a, b interface{}) bool
	// MapFunc defines the method to map each element to another object in a Stream.
	MapFunc func(item interface{}) interface{}
	// Option defines the method to customize a Stream's concurrency options.
	Option func(opts *rxOptions)
	// ParallelFunc defines the method to handle elements concurrently.
	ParallelFunc func(item interface{})
	// ReduceFunc defines the method to reduce all the elements in a Stream.
	ReduceFunc func(pipe <-chan interface{}) (interface{}, error)
	// WalkFunc defines the method to walk through the elements in a Stream.
	WalkFunc func(item interface{}, pipe chan<- interface{})

	// A Stream is a lazy sequence of elements flowing through a channel.
	Stream struct {
		source <-chan interface{}
	}
)
|
|
||||||
|
|
||||||
// From constructs a Stream from the given GenerateFunc.
|
|
||||||
func From(generate GenerateFunc) Stream {
|
|
||||||
source := make(chan interface{})
|
|
||||||
|
|
||||||
threading.GoSafe(func() {
|
|
||||||
defer close(source)
|
|
||||||
generate(source)
|
|
||||||
})
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Just converts the given arbitary items to a Stream.
|
|
||||||
func Just(items ...interface{}) Stream {
|
|
||||||
source := make(chan interface{}, len(items))
|
|
||||||
for _, item := range items {
|
|
||||||
source <- item
|
|
||||||
}
|
|
||||||
close(source)
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Range converts the given channel to a Stream.
|
|
||||||
func Range(source <-chan interface{}) Stream {
|
|
||||||
return Stream{
|
|
||||||
source: source,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Buffer buffers the items into a queue with size n.
|
|
||||||
func (p Stream) Buffer(n int) Stream {
|
|
||||||
if n < 0 {
|
|
||||||
n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
source := make(chan interface{}, n)
|
|
||||||
go func() {
|
|
||||||
for item := range p.source {
|
|
||||||
source <- item
|
|
||||||
}
|
|
||||||
close(source)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Distinct removes the duplicated items base on the given KeyFunc.
|
|
||||||
func (p Stream) Distinct(fn KeyFunc) Stream {
|
|
||||||
source := make(chan interface{})
|
|
||||||
|
|
||||||
threading.GoSafe(func() {
|
|
||||||
defer close(source)
|
|
||||||
|
|
||||||
keys := make(map[interface{}]lang.PlaceholderType)
|
|
||||||
for item := range p.source {
|
|
||||||
key := fn(item)
|
|
||||||
if _, ok := keys[key]; !ok {
|
|
||||||
source <- item
|
|
||||||
keys[key] = lang.Placeholder
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Done waits all upstreaming operations to be done.
|
|
||||||
func (p Stream) Done() {
|
|
||||||
for range p.source {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter filters the items by the given FilterFunc.
|
|
||||||
func (p Stream) Filter(fn FilterFunc, opts ...Option) Stream {
|
|
||||||
return p.Walk(func(item interface{}, pipe chan<- interface{}) {
|
|
||||||
if fn(item) {
|
|
||||||
pipe <- item
|
|
||||||
}
|
|
||||||
}, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForAll handles the streaming elements from the source and no later streams.
|
|
||||||
func (p Stream) ForAll(fn ForAllFunc) {
|
|
||||||
fn(p.source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForEach seals the Stream with the ForEachFunc on each item, no successive operations.
|
|
||||||
func (p Stream) ForEach(fn ForEachFunc) {
|
|
||||||
for item := range p.source {
|
|
||||||
fn(item)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Group groups the elements into different groups based on their keys.
|
|
||||||
func (p Stream) Group(fn KeyFunc) Stream {
|
|
||||||
groups := make(map[interface{}][]interface{})
|
|
||||||
for item := range p.source {
|
|
||||||
key := fn(item)
|
|
||||||
groups[key] = append(groups[key], item)
|
|
||||||
}
|
|
||||||
|
|
||||||
source := make(chan interface{})
|
|
||||||
go func() {
|
|
||||||
for _, group := range groups {
|
|
||||||
source <- group
|
|
||||||
}
|
|
||||||
close(source)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Stream) Head(n int64) Stream {
|
|
||||||
source := make(chan interface{})
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for item := range p.source {
|
|
||||||
n--
|
|
||||||
if n >= 0 {
|
|
||||||
source <- item
|
|
||||||
}
|
|
||||||
if n == 0 {
|
|
||||||
// let successive method go ASAP even we have more items to skip
|
|
||||||
// why we don't just break the loop, because if break,
|
|
||||||
// this former goroutine will block forever, which will cause goroutine leak.
|
|
||||||
close(source)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if n > 0 {
|
|
||||||
close(source)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maps converts each item to another corresponding item, which means it's a 1:1 model.
|
|
||||||
func (p Stream) Map(fn MapFunc, opts ...Option) Stream {
|
|
||||||
return p.Walk(func(item interface{}, pipe chan<- interface{}) {
|
|
||||||
pipe <- fn(item)
|
|
||||||
}, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges all the items into a slice and generates a new stream.
|
|
||||||
func (p Stream) Merge() Stream {
|
|
||||||
var items []interface{}
|
|
||||||
for item := range p.source {
|
|
||||||
items = append(items, item)
|
|
||||||
}
|
|
||||||
|
|
||||||
source := make(chan interface{}, 1)
|
|
||||||
source <- items
|
|
||||||
close(source)
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parallel applies the given ParallenFunc to each item concurrently with given number of workers.
|
|
||||||
func (p Stream) Parallel(fn ParallelFunc, opts ...Option) {
|
|
||||||
p.Walk(func(item interface{}, pipe chan<- interface{}) {
|
|
||||||
fn(item)
|
|
||||||
}, opts...).Done()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reduce is a utility method to let the caller deal with the underlying channel.
|
|
||||||
func (p Stream) Reduce(fn ReduceFunc) (interface{}, error) {
|
|
||||||
return fn(p.source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reverse reverses the elements in the stream.
|
|
||||||
func (p Stream) Reverse() Stream {
|
|
||||||
var items []interface{}
|
|
||||||
for item := range p.source {
|
|
||||||
items = append(items, item)
|
|
||||||
}
|
|
||||||
// reverse, official method
|
|
||||||
for i := len(items)/2 - 1; i >= 0; i-- {
|
|
||||||
opp := len(items) - 1 - i
|
|
||||||
items[i], items[opp] = items[opp], items[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
return Just(items...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort sorts the items from the underlying source.
|
|
||||||
func (p Stream) Sort(less LessFunc) Stream {
|
|
||||||
var items []interface{}
|
|
||||||
for item := range p.source {
|
|
||||||
items = append(items, item)
|
|
||||||
}
|
|
||||||
sort.Slice(items, func(i, j int) bool {
|
|
||||||
return less(items[i], items[j])
|
|
||||||
})
|
|
||||||
|
|
||||||
return Just(items...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Stream) Tail(n int64) Stream {
|
|
||||||
source := make(chan interface{})
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
ring := collection.NewRing(int(n))
|
|
||||||
for item := range p.source {
|
|
||||||
ring.Add(item)
|
|
||||||
}
|
|
||||||
for _, item := range ring.Take() {
|
|
||||||
source <- item
|
|
||||||
}
|
|
||||||
close(source)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return Range(source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk lets the callers handle each item, the caller may write zero, one or more items base on the given item.
|
|
||||||
func (p Stream) Walk(fn WalkFunc, opts ...Option) Stream {
|
|
||||||
option := buildOptions(opts...)
|
|
||||||
if option.unlimitedWorkers {
|
|
||||||
return p.walkUnlimited(fn, option)
|
|
||||||
} else {
|
|
||||||
return p.walkLimited(fn, option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// walkLimited fans fn out over the source with at most option.workers
// concurrent goroutines, writing results into the returned stream.
func (p Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
	pipe := make(chan interface{}, option.workers)

	go func() {
		var wg sync.WaitGroup
		// pool is a counting semaphore bounding the in-flight workers
		pool := make(chan lang.PlaceholderType, option.workers)

		for {
			// acquire a worker slot before pulling the next item
			pool <- lang.Placeholder
			item, ok := <-p.source
			if !ok {
				// source drained: release the slot acquired above and stop
				<-pool
				break
			}

			wg.Add(1)
			// better to safely run caller defined method
			threading.GoSafe(func() {
				defer func() {
					wg.Done()
					// release the worker slot once fn has finished
					<-pool
				}()

				fn(item, pipe)
			})
		}

		// wait for all workers, then signal downstream that the stream ended
		wg.Wait()
		close(pipe)
	}()

	return Range(pipe)
}
|
|
||||||
|
|
||||||
func (p Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream {
|
|
||||||
pipe := make(chan interface{}, defaultWorkers)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
for {
|
|
||||||
item, ok := <-p.source
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Add(1)
|
|
||||||
// better to safely run caller defined method
|
|
||||||
threading.GoSafe(func() {
|
|
||||||
defer wg.Done()
|
|
||||||
fn(item, pipe)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
close(pipe)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return Range(pipe)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnlimitedWorkers lets the caller to use as many workers as the tasks.
|
|
||||||
func UnlimitedWorkers() Option {
|
|
||||||
return func(opts *rxOptions) {
|
|
||||||
opts.unlimitedWorkers = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithWorkers lets the caller to customize the concurrent workers.
|
|
||||||
func WithWorkers(workers int) Option {
|
|
||||||
return func(opts *rxOptions) {
|
|
||||||
if workers < minWorkers {
|
|
||||||
opts.workers = minWorkers
|
|
||||||
} else {
|
|
||||||
opts.workers = workers
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildOptions(opts ...Option) *rxOptions {
|
|
||||||
options := newOptions()
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(options)
|
|
||||||
}
|
|
||||||
|
|
||||||
return options
|
|
||||||
}
|
|
||||||
|
|
||||||
func newOptions() *rxOptions {
|
|
||||||
return &rxOptions{
|
|
||||||
workers: defaultWorkers,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user