feat: kubekey gitops

Signed-off-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
joyceliu 2024-01-04 14:45:50 +08:00
parent 2f4c2fa795
commit 2a676185e2
157 changed files with 15703 additions and 0 deletions

41
.gitignore vendored Normal file
View File

@ -0,0 +1,41 @@
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
*.tmp
bin
hack/tools/bin
# Test binary, build with `go test -c`
*.test
# E2E test templates
test/e2e/data/infrastructure-kubekey/v1beta1/cluster-template*.yaml
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# IntelliJ
.idea/
*.iml
# Vscode files
.vscode
# rbac and manager config for example provider
manager_image_patch.yaml-e
manager_pull_policy.yaml-e
# Sample config files auto-generated by kubebuilder
config/samples
# test results
_artifacts
# Used during parts of the build process. Files _should_ get cleaned up automatically.
# This is also a good location for any temporary manifests used during development
tmp
# Used by current object
/example/test/

121
CONTRIBUTORS.md Normal file
View File

@ -0,0 +1,121 @@
### Sincere gratitude goes to the following people for their contributions to Pipeline
Contributions of any kind are welcome! Thanks goes to these wonderful contributors, they made our project grow fast.
<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tbody>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/pixiake"><img src="https://avatars0.githubusercontent.com/u/22290449?v=4?s=100" width="100px;" alt="pixiake"/><br /><sub><b>pixiake</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=pixiake" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=pixiake" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Forest-L"><img src="https://avatars2.githubusercontent.com/u/50984129?v=4?s=100" width="100px;" alt="Forest"/><br /><sub><b>Forest</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=Forest-L" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=Forest-L" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://kubesphere.io/"><img src="https://avatars2.githubusercontent.com/u/28859385?v=4?s=100" width="100px;" alt="rayzhou2017"/><br /><sub><b>rayzhou2017</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=rayzhou2017" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=rayzhou2017" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://www.chenshaowen.com/"><img src="https://avatars2.githubusercontent.com/u/43693241?v=4?s=100" width="100px;" alt="shaowenchen"/><br /><sub><b>shaowenchen</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=shaowenchen" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=shaowenchen" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://surenpi.com/"><img src="https://avatars1.githubusercontent.com/u/1450685?v=4?s=100" width="100px;" alt="Zhao Xiaojie"/><br /><sub><b>Zhao Xiaojie</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=LinuxSuRen" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=LinuxSuRen" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/zackzhangkai"><img src="https://avatars1.githubusercontent.com/u/20178386?v=4?s=100" width="100px;" alt="Zack Zhang"/><br /><sub><b>Zack Zhang</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=zackzhangkai" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://akhilerm.com/"><img src="https://avatars1.githubusercontent.com/u/7610845?v=4?s=100" width="100px;" alt="Akhil Mohan"/><br /><sub><b>Akhil Mohan</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=akhilerm" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/FeynmanZhou"><img src="https://avatars3.githubusercontent.com/u/40452856?v=4?s=100" width="100px;" alt="pengfei"/><br /><sub><b>pengfei</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=FeynmanZhou" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/min-zh"><img src="https://avatars1.githubusercontent.com/u/35321102?v=4?s=100" width="100px;" alt="min zhang"/><br /><sub><b>min zhang</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=min-zh" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=min-zh" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/zgldh"><img src="https://avatars1.githubusercontent.com/u/312404?v=4?s=100" width="100px;" alt="zgldh"/><br /><sub><b>zgldh</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=zgldh" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/xrjk"><img src="https://avatars0.githubusercontent.com/u/16330256?v=4?s=100" width="100px;" alt="xrjk"/><br /><sub><b>xrjk</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=xrjk" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/stoneshi-yunify"><img src="https://avatars2.githubusercontent.com/u/70880165?v=4?s=100" width="100px;" alt="yonghongshi"/><br /><sub><b>yonghongshi</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=stoneshi-yunify" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/shenhonglei"><img src="https://avatars2.githubusercontent.com/u/20896372?v=4?s=100" width="100px;" alt="Honglei"/><br /><sub><b>Honglei</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=shenhonglei" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/liucy1983"><img src="https://avatars2.githubusercontent.com/u/2360302?v=4?s=100" width="100px;" alt="liucy1983"/><br /><sub><b>liucy1983</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=liucy1983" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/lilien1010"><img src="https://avatars1.githubusercontent.com/u/3814966?v=4?s=100" width="100px;" alt="Lien"/><br /><sub><b>Lien</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=lilien1010" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/klj890"><img src="https://avatars3.githubusercontent.com/u/19380605?v=4?s=100" width="100px;" alt="Tony Wang"/><br /><sub><b>Tony Wang</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=klj890" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/hlwanghl"><img src="https://avatars3.githubusercontent.com/u/4861515?v=4?s=100" width="100px;" alt="Hongliang Wang"/><br /><sub><b>Hongliang Wang</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=hlwanghl" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://fafucoder.github.io/"><img src="https://avatars0.githubusercontent.com/u/16442491?v=4?s=100" width="100px;" alt="dawn"/><br /><sub><b>dawn</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=fafucoder" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/duanjiong"><img src="https://avatars1.githubusercontent.com/u/3678855?v=4?s=100" width="100px;" alt="Duan Jiong"/><br /><sub><b>Duan Jiong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=duanjiong" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/calvinyv"><img src="https://avatars3.githubusercontent.com/u/28883416?v=4?s=100" width="100px;" alt="calvinyv"/><br /><sub><b>calvinyv</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=calvinyv" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/benjaminhuo"><img src="https://avatars2.githubusercontent.com/u/18525465?v=4?s=100" width="100px;" alt="Benjamin Huo"/><br /><sub><b>Benjamin Huo</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=benjaminhuo" title="Documentation">📖</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Sherlock113"><img src="https://avatars2.githubusercontent.com/u/65327072?v=4?s=100" width="100px;" alt="Sherlock113"/><br /><sub><b>Sherlock113</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=Sherlock113" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Fuchange"><img src="https://avatars1.githubusercontent.com/u/31716848?v=4?s=100" width="100px;" alt="fu_changjie"/><br /><sub><b>fu_changjie</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=Fuchange" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/yuswift"><img src="https://avatars1.githubusercontent.com/u/37265389?v=4?s=100" width="100px;" alt="yuswift"/><br /><sub><b>yuswift</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=yuswift" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/ruiyaoOps"><img src="https://avatars.githubusercontent.com/u/35256376?v=4?s=100" width="100px;" alt="ruiyaoOps"/><br /><sub><b>ruiyaoOps</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=ruiyaoOps" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://www.luxingmin.com"><img src="https://avatars.githubusercontent.com/u/1918195?v=4?s=100" width="100px;" alt="LXM"/><br /><sub><b>LXM</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=lxm" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/sbhnet"><img src="https://avatars.githubusercontent.com/u/2368131?v=4?s=100" width="100px;" alt="sbhnet"/><br /><sub><b>sbhnet</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=sbhnet" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/misteruly"><img src="https://avatars.githubusercontent.com/u/31399968?v=4?s=100" width="100px;" alt="misteruly"/><br /><sub><b>misteruly</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=misteruly" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://johnniang.me"><img src="https://avatars.githubusercontent.com/u/16865714?v=4?s=100" width="100px;" alt="John Niang"/><br /><sub><b>John Niang</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=JohnNiang" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://alimy.me"><img src="https://avatars.githubusercontent.com/u/10525842?v=4?s=100" width="100px;" alt="Michael Li"/><br /><sub><b>Michael Li</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=alimy" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/duguhaotian"><img src="https://avatars.githubusercontent.com/u/3174621?v=4?s=100" width="100px;" alt="独孤昊天"/><br /><sub><b>独孤昊天</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=duguhaotian" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/lshmouse"><img src="https://avatars.githubusercontent.com/u/118687?v=4?s=100" width="100px;" alt="Liu Shaohui"/><br /><sub><b>Liu Shaohui</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=lshmouse" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/24sama"><img src="https://avatars.githubusercontent.com/u/43993589?v=4?s=100" width="100px;" alt="Leo Li"/><br /><sub><b>Leo Li</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=24sama" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/RolandMa1986"><img src="https://avatars.githubusercontent.com/u/1720333?v=4?s=100" width="100px;" alt="Roland"/><br /><sub><b>Roland</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=RolandMa1986" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://ops.m114.org"><img src="https://avatars.githubusercontent.com/u/2347587?v=4?s=100" width="100px;" alt="Vinson Zou"/><br /><sub><b>Vinson Zou</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=vinsonzou" title="Documentation">📖</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/tagGeeY"><img src="https://avatars.githubusercontent.com/u/35259969?v=4?s=100" width="100px;" alt="tag_gee_y"/><br /><sub><b>tag_gee_y</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=tagGeeY" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/liulangwa"><img src="https://avatars.githubusercontent.com/u/25916792?v=4?s=100" width="100px;" alt="codebee"/><br /><sub><b>codebee</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=liulangwa" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/TheApeMachine"><img src="https://avatars.githubusercontent.com/u/9572060?v=4?s=100" width="100px;" alt="Daniel Owen van Dommelen"/><br /><sub><b>Daniel Owen van Dommelen</b></sub></a><br /><a href="#ideas-TheApeMachine" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Naidile-P-N"><img src="https://avatars.githubusercontent.com/u/29476402?v=4?s=100" width="100px;" alt="Naidile P N"/><br /><sub><b>Naidile P N</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=Naidile-P-N" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/haiker2011"><img src="https://avatars.githubusercontent.com/u/8073429?v=4?s=100" width="100px;" alt="Haiker Sun"/><br /><sub><b>Haiker Sun</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=haiker2011" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/yj-cloud"><img src="https://avatars.githubusercontent.com/u/19648473?v=4?s=100" width="100px;" alt="Jing Yu"/><br /><sub><b>Jing Yu</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=yj-cloud" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/chaunceyjiang"><img src="https://avatars.githubusercontent.com/u/17962021?v=4?s=100" width="100px;" alt="Chauncey"/><br /><sub><b>Chauncey</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=chaunceyjiang" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/tanguofu"><img src="https://avatars.githubusercontent.com/u/87045830?v=4?s=100" width="100px;" alt="Tan Guofu"/><br /><sub><b>Tan Guofu</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=tanguofu" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/lvillis"><img src="https://avatars.githubusercontent.com/u/56720445?v=4?s=100" width="100px;" alt="lvillis"/><br /><sub><b>lvillis</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=lvillis" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/vincenthe11"><img src="https://avatars.githubusercontent.com/u/8400716?v=4?s=100" width="100px;" alt="Vincent He"/><br /><sub><b>Vincent He</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=vincenthe11" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://laminar.fun/"><img src="https://avatars.githubusercontent.com/u/2360535?v=4?s=100" width="100px;" alt="laminar"/><br /><sub><b>laminar</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=tpiperatgod" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/cumirror"><img src="https://avatars.githubusercontent.com/u/2455429?v=4?s=100" width="100px;" alt="tongjin"/><br /><sub><b>tongjin</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=cumirror" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://k8s.li"><img src="https://avatars.githubusercontent.com/u/42566386?v=4?s=100" width="100px;" alt="Reimu"/><br /><sub><b>Reimu</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=muzi502" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://bandism.net/"><img src="https://avatars.githubusercontent.com/u/22633385?v=4?s=100" width="100px;" alt="Ikko Ashimine"/><br /><sub><b>Ikko Ashimine</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=eltociear" title="Documentation">📖</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://yeya24.github.io/"><img src="https://avatars.githubusercontent.com/u/25150124?v=4?s=100" width="100px;" alt="Ben Ye"/><br /><sub><b>Ben Ye</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=yeya24" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/yinheli"><img src="https://avatars.githubusercontent.com/u/235094?v=4?s=100" width="100px;" alt="yinheli"/><br /><sub><b>yinheli</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=yinheli" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/hellocn9"><img src="https://avatars.githubusercontent.com/u/102210430?v=4?s=100" width="100px;" alt="hellocn9"/><br /><sub><b>hellocn9</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=hellocn9" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/brandan-schmitz"><img src="https://avatars.githubusercontent.com/u/6267549?v=4?s=100" width="100px;" alt="Brandan Schmitz"/><br /><sub><b>Brandan Schmitz</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=brandan-schmitz" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/yjqg6666"><img src="https://avatars.githubusercontent.com/u/1879641?v=4?s=100" width="100px;" alt="yjqg6666"/><br /><sub><b>yjqg6666</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=yjqg6666" title="Documentation">📖</a> <a href="https://github.com/kubesphere/kubekey/commits?author=yjqg6666" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/zaunist"><img src="https://avatars.githubusercontent.com/u/38528079?v=4?s=100" width="100px;" alt="失眠是真滴难受"/><br /><sub><b>失眠是真滴难受</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=zaunist" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/mangoGoForward"><img src="https://avatars.githubusercontent.com/u/35127166?v=4?s=100" width="100px;" alt="mango"/><br /><sub><b>mango</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/pulls?q=is%3Apr+reviewed-by%3AmangoGoForward" title="Reviewed Pull Requests">👀</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/wenwutang1"><img src="https://avatars.githubusercontent.com/u/45817987?v=4?s=100" width="100px;" alt="wenwutang"/><br /><sub><b>wenwutang</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wenwutang1" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://kuops.com"><img src="https://avatars.githubusercontent.com/u/18283256?v=4?s=100" width="100px;" alt="Shiny Hou"/><br /><sub><b>Shiny Hou</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=kuops" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/zhouqiu0103"><img src="https://avatars.githubusercontent.com/u/108912268?v=4?s=100" width="100px;" alt="zhouqiu0103"/><br /><sub><b>zhouqiu0103</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=zhouqiu0103" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/77yu77"><img src="https://avatars.githubusercontent.com/u/73932296?v=4?s=100" width="100px;" alt="77yu77"/><br /><sub><b>77yu77</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=77yu77" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/hzhhong"><img src="https://avatars.githubusercontent.com/u/83079531?v=4?s=100" width="100px;" alt="hzhhong"/><br /><sub><b>hzhhong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=hzhhong" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/arugal"><img src="https://avatars.githubusercontent.com/u/26432832?v=4?s=100" width="100px;" alt="zhang-wei"/><br /><sub><b>zhang-wei</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=arugal" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://twitter.com/xds2000"><img src="https://avatars.githubusercontent.com/u/37678?v=4?s=100" width="100px;" alt="Deshi Xiao"/><br /><sub><b>Deshi Xiao</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=xiaods" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=xiaods" title="Documentation">📖</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://besscroft.com"><img src="https://avatars.githubusercontent.com/u/33775809?v=4?s=100" width="100px;" alt="besscroft"/><br /><sub><b>besscroft</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=besscroft" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/zhangzhiqiangcs"><img src="https://avatars.githubusercontent.com/u/8319897?v=4?s=100" width="100px;" alt="张志强"/><br /><sub><b>张志强</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=zhangzhiqiangcs" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/lwabish"><img src="https://avatars.githubusercontent.com/u/7044019?v=4?s=100" width="100px;" alt="lwabish"/><br /><sub><b>lwabish</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=lwabish" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=lwabish" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/qyz87"><img src="https://avatars.githubusercontent.com/u/36068894?v=4?s=100" width="100px;" alt="qyz87"/><br /><sub><b>qyz87</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=qyz87" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/fangzhengjin"><img src="https://avatars.githubusercontent.com/u/12680972?v=4?s=100" width="100px;" alt="ZhengJin Fang"/><br /><sub><b>ZhengJin Fang</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=fangzhengjin" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="http://lhr.wiki"><img src="https://avatars.githubusercontent.com/u/6327311?v=4?s=100" width="100px;" alt="Eric_Lian"/><br /><sub><b>Eric_Lian</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=ExerciseBook" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/nicognaW"><img src="https://avatars.githubusercontent.com/u/66731869?v=4?s=100" width="100px;" alt="nicognaw"/><br /><sub><b>nicognaw</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=nicognaW" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/deqingLv"><img src="https://avatars.githubusercontent.com/u/6064297?v=4?s=100" width="100px;" alt="吕德庆"/><br /><sub><b>吕德庆</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=deqingLv" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/littleplus"><img src="https://avatars.githubusercontent.com/u/11694750?v=4?s=100" width="100px;" alt="littleplus"/><br /><sub><b>littleplus</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=littleplus" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://www.linkedin.com/in/%D0%BA%D0%BE%D0%BD%D1%81%D1%82%D0%B0%D0%BD%D1%82%D0%B8%D0%BD-%D0%B0%D0%BA%D0%B0%D0%BA%D0%B8%D0%B5%D0%B2-13130b1b4/"><img src="https://avatars.githubusercontent.com/u/82488489?v=4?s=100" width="100px;" alt="Konstantin"/><br /><sub><b>Konstantin</b></sub></a><br /><a href="#ideas-Nello-Angelo" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://kiragoo.github.io"><img src="https://avatars.githubusercontent.com/u/7400711?v=4?s=100" width="100px;" alt="kiragoo"/><br /><sub><b>kiragoo</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=kiragoo" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/jojotong"><img src="https://avatars.githubusercontent.com/u/100849526?v=4?s=100" width="100px;" alt="jojotong"/><br /><sub><b>jojotong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=jojotong" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/littleBlackHouse"><img src="https://avatars.githubusercontent.com/u/54946465?v=4?s=100" width="100px;" alt="littleBlackHouse"/><br /><sub><b>littleBlackHouse</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=littleBlackHouse" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/testwill"><img src="https://avatars.githubusercontent.com/u/8717479?v=4?s=100" width="100px;" alt="guangwu"/><br /><sub><b>guangwu</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Code">💻</a> <a href="https://github.com/kubesphere/kubekey/commits?author=testwill" title="Documentation">📖</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/wongearl"><img src="https://avatars.githubusercontent.com/u/36498442?v=4?s=100" width="100px;" alt="wongearl"/><br /><sub><b>wongearl</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wongearl" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/wenwenxiong"><img src="https://avatars.githubusercontent.com/u/10548812?v=4?s=100" width="100px;" alt="wenwenxiong"/><br /><sub><b>wenwenxiong</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=wenwenxiong" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://baimeow.cn/"><img src="https://avatars.githubusercontent.com/u/38121125?v=4?s=100" width="100px;" alt="柏喵Sakura"/><br /><sub><b>柏喵Sakura</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=BaiMeow" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://dashen.tech"><img src="https://avatars.githubusercontent.com/u/15921519?v=4?s=100" width="100px;" alt="cui fliter"/><br /><sub><b>cui fliter</b></sub></a><br /><a href="https://github.com/kubesphere/kubekey/commits?author=cuishuang" title="Documentation">📖</a></td>
</tr>
</tbody>
</table>
<!-- markdownlint-restore -->
<!-- prettier-ignore-end -->
<!-- ALL-CONTRIBUTORS-LIST:END -->

61
Dockerfile Normal file
View File

@ -0,0 +1,61 @@
# Build architecture
ARG ARCH
ARG builder_image
# Download dependencies
FROM alpine:3.19.0 as base_os_context
ENV OUTDIR=/out
RUN mkdir -p ${OUTDIR}/usr/local/bin/
WORKDIR /tmp
RUN apk add --no-cache ca-certificates
# Build the manager binary
FROM ${builder_image} as builder
# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy
ARG goproxy=https://goproxy.cn,direct
ENV GOPROXY=$goproxy
WORKDIR /workspace
COPY go.mod go.mod
COPY go.sum go.sum
# Cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN --mount=type=cache,target=/go/pkg/mod \
go mod download
# Copy the go source
COPY ./ ./
# Cache the go build into the Go compiler cache folder so we benefit from compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go build -o controller-manager cmd/controller-manager/controller_manager.go
# Build
ARG ARCH
ARG LDFLAGS
# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
go build -o controller-manager cmd/controller-manager/controller_manager.go
FROM --platform=${ARCH} alpine:3.19.0
WORKDIR /
RUN mkdir -p /var/lib/kubekey/rootfs
COPY --from=base_os_context /out/ /
COPY --from=builder /workspace/controller-manager /usr/local/bin
ENTRYPOINT ["sh"]

201
LICENSE Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018-2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

627
Makefile Normal file
View File

@ -0,0 +1,627 @@
# Ensure Make is run with bash shell as some syntax below is bash-specific
SHELL:=/usr/bin/env bash
.DEFAULT_GOAL:=help
#
# Go.
#
GO_VERSION ?= 1.20
GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)
# Use GOPROXY environment variable if set
GOPROXY := $(shell go env GOPROXY)
ifeq ($(GOPROXY),)
GOPROXY := https://goproxy.cn,direct
endif
export GOPROXY
# Active module mode, as we use go modules to manage dependencies
export GO111MODULE=on
# This option is for running docker manifest command
export DOCKER_CLI_EXPERIMENTAL := enabled
#
# Directories.
#
# Full directory of where the Makefile resides
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
EXP_DIR := exp
BIN_DIR := bin
TEST_DIR := test
TOOLS_DIR := hack/tools
TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR))
E2E_FRAMEWORK_DIR := $(TEST_DIR)/framework
GO_INSTALL := ./scripts/go_install.sh
export PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH)
#
# Binaries.
#
# Note: Need to use abspath so we can invoke these from subdirectories
KUSTOMIZE_VER := v4.5.2
KUSTOMIZE_BIN := kustomize
KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER))
KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4
SETUP_ENVTEST_VER := v0.0.0-20211110210527-619e6b92dab9
SETUP_ENVTEST_BIN := setup-envtest
SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
CONTROLLER_GEN_VER := v0.13.0
CONTROLLER_GEN_BIN := controller-gen
CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen
GOTESTSUM_VER := v1.6.4
GOTESTSUM_BIN := gotestsum
GOTESTSUM := $(abspath $(TOOLS_BIN_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER))
GOTESTSUM_PKG := gotest.tools/gotestsum
HADOLINT_VER := v2.10.0
HADOLINT_FAILURE_THRESHOLD = warning
GOLANGCI_LINT_BIN := golangci-lint
GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN))
# Define Docker related variables. Releases should modify and double check these vars.
REGISTRY ?= docker.io/kubespheredev
PROD_REGISTRY ?= docker.io/kubesphere
# capkk
CAPKK_IMAGE_NAME ?= capkk-controller
CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME)
# bootstrap
K3S_BOOTSTRAP_IMAGE_NAME ?= k3s-bootstrap-controller
K3S_BOOTSTRAP_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME)
# control plane
K3S_CONTROL_PLANE_IMAGE_NAME ?= k3s-control-plane-controller
K3S_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME)
# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
TAG ?= dev
ARCH ?= $(shell go env GOARCH)
ALL_ARCH = amd64 arm arm64 ppc64le s390x
# Allow overriding the imagePullPolicy
PULL_POLICY ?= Always
# Hosts running SELinux need :z added to volume mounts
SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0)
ifeq ($(SELINUX_ENABLED),1)
DOCKER_VOL_OPTS?=:z
endif
# Set build time variables including version details
LDFLAGS := $(shell hack/version.sh)
# Set kk build tags
BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
.PHONY: all
all: test managers
.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf " \033[36m%-45s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf " \033[36m%-45s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
## --------------------------------------
## Generate / Manifests
## --------------------------------------
##@ generate:
ALL_GENERATE_MODULES = capkk k3s-bootstrap k3s-control-plane
.PHONY: generate
generate: ## Run all generate-manifests-*, generate-go-deepcopy-* targets
$(MAKE) generate-modules generate-manifests generate-go-deepcopy
.PHONY: generate-manifests
generate-manifests: ## Run all generate-manifest-* targets
$(MAKE) $(addprefix generate-manifests-,$(ALL_GENERATE_MODULES))
.PHONY: generate-manifests-capkk
generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
$(MAKE) clean-generated-yaml SRC_DIRS="./config/crd/bases"
$(CONTROLLER_GEN) \
paths=./api/... \
paths=./controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./config/crd/bases \
output:webhook:dir=./config/webhook \
webhook
.PHONY: generate-manifests-k3s-bootstrap
generate-manifests-k3s-bootstrap: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
$(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/k3s/config/crd/bases"
$(CONTROLLER_GEN) \
paths=./bootstrap/k3s/api/... \
paths=./bootstrap/k3s/controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./bootstrap/k3s/config/crd/bases \
output:rbac:dir=./bootstrap/k3s/config/rbac \
output:webhook:dir=./bootstrap/k3s/config/webhook \
webhook
.PHONY: generate-manifests-k3s-control-plane
generate-manifests-k3s-control-plane: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
$(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/k3s/config/crd/bases"
$(CONTROLLER_GEN) \
paths=./controlplane/k3s/api/... \
paths=./controlplane/k3s/controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./controlplane/k3s/config/crd/bases \
output:rbac:dir=./controlplane/k3s/config/rbac \
output:webhook:dir=./controlplane/k3s/config/webhook \
webhook
.PHONY: generate-go-deepcopy
generate-go-deepcopy: ## Run all generate-go-deepcopy-* targets
$(MAKE) $(addprefix generate-go-deepcopy-,$(ALL_GENERATE_MODULES))
.PHONY: generate-go-deepcopy-capkk
generate-go-deepcopy-capkk: $(CONTROLLER_GEN) ## Generate deepcopy go code for capkk
	$(MAKE) clean-generated-deepcopy SRC_DIRS="./api"
	$(CONTROLLER_GEN) \
		object:headerFile=./hack/boilerplate.go.txt \
		paths=./api/...
.PHONY: generate-go-deepcopy-k3s-bootstrap
generate-go-deepcopy-k3s-bootstrap: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-bootstrap
	$(MAKE) clean-generated-deepcopy SRC_DIRS="./bootstrap/k3s/api"
	$(CONTROLLER_GEN) \
		object:headerFile=./hack/boilerplate.go.txt \
		paths=./bootstrap/k3s/api/...
.PHONY: generate-go-deepcopy-k3s-control-plane
generate-go-deepcopy-k3s-control-plane: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-control-plane
	$(MAKE) clean-generated-deepcopy SRC_DIRS="./controlplane/k3s/api"
	$(CONTROLLER_GEN) \
		object:headerFile=./hack/boilerplate.go.txt \
		paths=./controlplane/k3s/api/...
.PHONY: generate-modules
generate-modules: ## Run go mod tidy to ensure modules are up to date
go mod tidy
## --------------------------------------
## Lint / Verify
## --------------------------------------
##@ lint and verify:
.PHONY: lint
lint: $(GOLANGCI_LINT) ## Lint the codebase
$(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
cd $(TEST_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
cd $(TOOLS_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
./scripts/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD)
.PHONY: lint-dockerfiles
lint-dockerfiles:
./scripts/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD)
.PHONY: verify
verify: $(addprefix verify-,$(ALL_VERIFY_CHECKS)) lint-dockerfiles ## Run all verify-* targets
.PHONY: verify-modules
verify-modules: generate-modules ## Verify go modules are up to date
@if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(TEST_DIR)/go.mod $(TEST_DIR)/go.sum); then \
git diff; \
echo "go module files are out of date"; exit 1; \
fi
@if (find . -name 'go.mod' | xargs -n1 grep -q -i 'k8s.io/client-go.*+incompatible'); then \
find . -name "go.mod" -exec grep -i 'k8s.io/client-go.*+incompatible' {} \; -print; \
echo "go module contains an incompatible client-go version"; exit 1; \
fi
.PHONY: verify-gen
verify-gen: generate ## Verify go generated files are up to date
@if !(git diff --quiet HEAD); then \
git diff; \
echo "generated files are out of date, run make generate"; exit 1; \
fi
## --------------------------------------
## Binaries
## --------------------------------------
##@ build:
.PHONY: kk
kk:
CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/kk github.com/kubesphere/kubekey/v3/cmd/kk;
ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane
.PHONY: managers
managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
.PHONY: manager-capkk
manager-capkk: ## Build the capkk manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey/v3
.PHONY: manager-k3s-bootstrap
manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/v3/bootstrap/k3s
.PHONY: manager-k3s-control-plane
manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/v3/controlplane/k3s
.PHONY: docker-pull-prerequisites
docker-pull-prerequisites:
docker pull docker.io/docker/dockerfile:1.4
docker pull $(GO_CONTAINER_IMAGE)
.PHONY: docker-build-all
docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images for all architectures
docker-build-%:
$(MAKE) ARCH=$* docker-build
ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane
.PHONY: docker-build
docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers
$(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD))
.PHONY: docker-build-capkk
docker-build-capkk: ## Build the docker image for capkk
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-build-k3s-bootstrap
docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-build-k3s-control-plane
docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-build-e2e
docker-build-e2e: ## Build the docker image for capkk
$(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e
## --------------------------------------
## Deployment
## --------------------------------------
##@ deployment
ifndef ignore-not-found
ignore-not-found = false
endif
.PHONY: install
install: generate $(KUSTOMIZE) ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
.PHONY: uninstall
uninstall: generate $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
.PHONY: deploy
deploy: generate $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
	$(MAKE) set-manifest-image \
		MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME)-$(ARCH) MANIFEST_TAG=$(TAG) \
		TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
	$(KUSTOMIZE) build config/default | kubectl apply -f -
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
## --------------------------------------
## Testing
## --------------------------------------
##@ test:
ARTIFACTS ?= ${ROOT_DIR}/_artifacts
ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available
KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
else
KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
endif
.PHONY: test
test: $(SETUP_ENVTEST) ## Run unit and integration tests
KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS)
.PHONY: test-verbose
test-verbose: ## Run unit and integration tests with verbose flag
$(MAKE) test TEST_ARGS="$(TEST_ARGS) -v"
.PHONY: test-junit
test-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests and generate a junit report
set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.exitcode) | tee $(ARTIFACTS)/junit.stdout
$(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.xml --raw-command cat $(ARTIFACTS)/junit.stdout
exit $$(cat $(ARTIFACTS)/junit.exitcode)
.PHONY: test-cover
test-cover: ## Run unit and integration tests and generate a coverage report
$(MAKE) test TEST_ARGS="$(TEST_ARGS) -coverprofile=out/coverage.out"
go tool cover -func=out/coverage.out -o out/coverage.txt
go tool cover -html=out/coverage.out -o out/coverage.html
.PHONY: test-e2e
test-e2e: ## Run e2e tests
$(MAKE) -C $(TEST_DIR)/e2e run
.PHONY: test-e2e-k3s
test-e2e-k3s: ## Run e2e tests
$(MAKE) -C $(TEST_DIR)/e2e run-k3s
## --------------------------------------
## Release
## --------------------------------------
##@ release:
## latest git tag for the commit, e.g., v0.3.10
RELEASE_TAG ?= $(shell git describe --abbrev=0 2>/dev/null)
ifneq (,$(findstring -,$(RELEASE_TAG)))
PRE_RELEASE=true
endif
# the previous release tag, e.g., v0.3.9, excluding pre-release tags
PREVIOUS_TAG ?= $(shell git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$$" | sort -V | grep -B1 $(RELEASE_TAG) | head -n 1 2>/dev/null)
RELEASE_DIR := out
$(RELEASE_DIR):
mkdir -p $(RELEASE_DIR)/
.PHONY: release
release: clean-release ## Build and push container images using the latest git tag for the commit
@if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi
@if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
git checkout "${RELEASE_TAG}"
## Build binaries first.
GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries
# Set the manifest image to the production bucket.
$(MAKE) manifest-modification REGISTRY=$(PROD_REGISTRY)
## Build the manifests
$(MAKE) release-manifests
## Build the templates
$(MAKE) release-templates
## Clean the git artifacts modified in the release process
$(MAKE) clean-release-git
.PHONY: release-binaries
release-binaries: ## Build the binaries to publish with a release
	RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-binary
	RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-archive
	RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-binary
	RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-archive
	RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
	RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-archive
	RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
	RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-archive
release-binary: $(RELEASE_DIR)
docker run \
--rm \
-e CGO_ENABLED=0 \
-e GOOS=$(GOOS) \
-e GOARCH=$(GOARCH) \
-e GOPROXY=$(GOPROXY) \
-v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
-w /workspace \
golang:$(GO_VERSION) \
go build -a -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS) -extldflags '-static'" \
-o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY)) $(RELEASE_BINARY)
release-archive: $(RELEASE_DIR)
tar -czf $(RELEASE_DIR)/kubekey-$(RELEASE_TAG)-$(GOOS)-$(GOARCH).tar.gz -C $(RELEASE_DIR)/ $(notdir $(RELEASE_BINARY))
rm -rf $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))
.PHONY: manifest-modification
manifest-modification: # Set the manifest images to the staging/production bucket.
$(MAKE) set-manifest-image \
MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-image \
MANIFEST_IMG=$(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-image \
MANIFEST_IMG=$(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
$(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml"
$(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml"
.PHONY: release-manifests
release-manifests: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the manifests to publish with a release
# Build capkk-components.
$(KUSTOMIZE) build config/default > $(RELEASE_DIR)/infrastructure-components.yaml
# Build bootstrap-components.
$(KUSTOMIZE) build bootstrap/k3s/config/default > $(RELEASE_DIR)/bootstrap-components.yaml
# Build control-plane-components.
$(KUSTOMIZE) build controlplane/k3s/config/default > $(RELEASE_DIR)/control-plane-components.yaml
# Add metadata to the release artifacts
cp metadata.yaml $(RELEASE_DIR)/metadata.yaml
.PHONY: release-templates
release-templates: $(RELEASE_DIR) ## Generate release templates
cp templates/cluster-template*.yaml $(RELEASE_DIR)/
.PHONY: release-prod
release-prod: ## Build and push container images to the prod
REGISTRY=$(PROD_REGISTRY) TAG=$(RELEASE_TAG) $(MAKE) docker-build-all docker-push-all
## --------------------------------------
## Docker
## --------------------------------------
.PHONY: docker-push-all
docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests
	$(MAKE) docker-push-manifest-capkk
	$(MAKE) docker-push-manifest-k3s-bootstrap
	$(MAKE) docker-push-manifest-k3s-control-plane
# Pattern rule: push the images for a single architecture ($*).
docker-push-%:
	$(MAKE) ARCH=$* docker-push
.PHONY: docker-push
docker-push: ## Push the docker images
	docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
	docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
	docker push $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-push-manifest-capkk
docker-push-manifest-capkk: ## Push the multiarch manifest for the capkk docker images
	## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
	docker manifest create --amend $(CAPKK_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPKK_CONTROLLER_IMG)\-&:$(TAG)~g")
	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPKK_CONTROLLER_IMG}:${TAG} ${CAPKK_CONTROLLER_IMG}-$${arch}:${TAG}; done
	docker manifest push --purge $(CAPKK_CONTROLLER_IMG):$(TAG)
.PHONY: docker-push-manifest-k3s-bootstrap
docker-push-manifest-k3s-bootstrap: ## Push the multiarch manifest for the k3s bootstrap docker images
	## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
	docker manifest create --amend $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${K3S_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
	docker manifest push --purge $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG)
.PHONY: docker-push-manifest-k3s-control-plane
docker-push-manifest-k3s-control-plane: ## Push the multiarch manifest for the k3s control plane docker images
	## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
	docker manifest create --amend $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
	docker manifest push --purge $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG)
.PHONY: set-manifest-pull-policy
# Rewrite imagePullPolicy in $(TARGET_RESOURCE) to $(PULL_POLICY).
set-manifest-pull-policy:
	$(info Updating kustomize pull policy file for manager resources)
	sed -i'' -e 's@imagePullPolicy: .*@imagePullPolicy: '"$(PULL_POLICY)"'@' $(TARGET_RESOURCE)
.PHONY: set-manifest-image
# Rewrite the manager image in $(TARGET_RESOURCE) to $(MANIFEST_IMG):$(MANIFEST_TAG).
set-manifest-image:
	$(info Updating kustomize image patch file for manager resource)
	sed -i'' -e 's@image: .*@image: '"${MANIFEST_IMG}:$(MANIFEST_TAG)"'@' $(TARGET_RESOURCE)
## --------------------------------------
## Cleanup / Verification
## --------------------------------------
##@ clean:
.PHONY: clean
clean: ## Remove all generated files
	$(MAKE) clean-bin
.PHONY: clean-bin
clean-bin: ## Remove all generated binaries
	rm -rf $(BIN_DIR)
	rm -rf $(TOOLS_BIN_DIR)
.PHONY: clean-release
clean-release: ## Remove the release folder
	rm -rf $(RELEASE_DIR)
.PHONY: clean-release-git
clean-release-git: ## Restores the git files usually modified during a release
	git restore ./*manager_image_patch.yaml ./*manager_pull_policy.yaml
.PHONY: clean-generated-yaml
clean-generated-yaml: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
	# SRC_DIRS is comma-separated; setting IFS makes the for-loop split on ','.
	(IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name '*.yaml' -exec rm -f {} \;; done)
.PHONY: clean-generated-deepcopy
clean-generated-deepcopy: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
	(IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name 'zz_generated.deepcopy*' -exec rm -f {} \;; done)
## --------------------------------------
## Hack / Tools
## --------------------------------------
##@ hack/tools:
# The *_BIN targets are phony aliases that resolve to the versioned tool
# binaries built below into $(TOOLS_BIN_DIR).
.PHONY: $(CONTROLLER_GEN_BIN)
$(CONTROLLER_GEN_BIN): $(CONTROLLER_GEN) ## Build a local copy of controller-gen.
.PHONY: $(GOTESTSUM_BIN)
$(GOTESTSUM_BIN): $(GOTESTSUM) ## Build a local copy of gotestsum.
.PHONY: $(KUSTOMIZE_BIN)
$(KUSTOMIZE_BIN): $(KUSTOMIZE) ## Build a local copy of kustomize.
.PHONY: $(SETUP_ENVTEST_BIN)
$(SETUP_ENVTEST_BIN): $(SETUP_ENVTEST) ## Build a local copy of setup-envtest.
.PHONY: $(GOLANGCI_LINT_BIN)
$(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint
$(CONTROLLER_GEN): # Build controller-gen from tools folder.
	GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
$(GOTESTSUM): # Build gotestsum from tools folder.
	GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER)
$(KUSTOMIZE): # Build kustomize from tools folder.
	CGO_ENABLED=0 GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER)
$(SETUP_ENVTEST): # Build setup-envtest from tools folder.
	GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER)
$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder.
	# The version is scraped from the CI workflow so local lint matches CI.
	hack/ensure-golangci-lint.sh \
		-b $(TOOLS_BIN_DIR) \
		$(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
# build the artifact of repository iso
# Target architecture and output directory for the repository ISO images.
ISO_ARCH ?= amd64
ISO_OUTPUT_DIR ?= ./output
ISO_BUILD_WORKDIR := hack/gen-repository-iso
# One dockerfile.<os> per supported OS lives in $(ISO_BUILD_WORKDIR).
ISO_OS_NAMES := centos7 debian9 debian10 ubuntu1604 ubuntu1804 ubuntu2004 ubuntu2204
ISO_BUILD_NAMES := $(addprefix build-iso-,$(ISO_OS_NAMES))
.PHONY: build-iso-all
build-iso-all: $(ISO_BUILD_NAMES) ## Build the repository ISO for every supported OS.
.PHONY: $(ISO_BUILD_NAMES)
$(ISO_BUILD_NAMES):
	# BUG FIX: `@export DOCKER_BUILDKIT=1` on its own recipe line had no effect,
	# because each recipe line runs in a separate shell. Set the variable on the
	# same command line as the build instead.
	DOCKER_BUILDKIT=1 docker build \
		--platform linux/$(ISO_ARCH) \
		--build-arg TARGETARCH=$(ISO_ARCH) \
		-o type=local,dest=$(ISO_OUTPUT_DIR) \
		-f $(ISO_BUILD_WORKDIR)/dockerfile.$(subst build-iso-,,$@) \
		$(ISO_BUILD_WORKDIR)
# Declared .PHONY so a file or directory named "go-releaser-test" can never
# shadow the target.
.PHONY: go-releaser-test
go-releaser-test: ## Run goreleaser locally as a dry run (no publishing).
	goreleaser release --rm-dist --skip-publish --snapshot
.PHONY: generate-go-deepcopy-kubekey
generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object
	$(MAKE) clean-generated-deepcopy SRC_DIRS="./pkg/apis/"
	# BUG FIX: the last argument previously ended with a trailing `\`, which
	# continued the recipe onto the next line of the Makefile and spliced the
	# following `.PHONY:` declaration into the controller-gen command line.
	$(CONTROLLER_GEN) \
		object:headerFile=./hack/boilerplate.go.txt \
		paths=./pkg/apis/...
.PHONY: generate-manifests-kubekey
generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
	# CRDs are emitted directly into the Helm chart's crds/ directory.
	$(CONTROLLER_GEN) \
		paths=./pkg/apis/... \
		crd \
		output:crd:dir=./config/helm/crds/
# Declared .PHONY so a file or directory named "helm-package" can never shadow
# the target.
.PHONY: helm-package
helm-package: ## Package the Helm chart under config/helm into ./bin.
	helm package config/helm -d ./bin
.PHONY: docker-build-operator
# NOTE(review): despite the "build" name, `--push` makes this target push the
# image as well — confirm that is intended.
docker-build-operator: ## Build the docker image for operator
	DOCKER_BUILDKIT=1 docker build --push --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg LDFLAGS="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG):$(TAG)
# Format all import, `goimports` is required.
goimports: ## Format all import, `goimports` is required.
	@hack/update-goimports.sh

18
OWNERS Normal file
View File

@ -0,0 +1,18 @@
approvers:
- pixiake
- 24sama
- rayzhou2017
- littleBlackHouse
reviewers:
- pixiake
- rayzhou2017
- zryfish
- benjaminhuo
- calvinyv
- FeynmanZhou
- huanggze
- wansir
- LinuxSuRen
- 24sama
- littleBlackHouse

8
README.md Normal file
View File

@ -0,0 +1,8 @@
# 背景
当前kubekey中如果要添加命令或修改命令都需要提交代码并重新发版。扩展性较差。
1. 任务与框架分离优势目的更方便扩展借鉴ansible的playbook设计
2. 支持gitops可通过git方式管理自动化任务
3. 支持connector扩展
4. 支持云原生方式自动化批量任务管理
# 示例

View File

@ -0,0 +1,82 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"flag"
"strings"
"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"
)
// ControllerManagerServerOptions holds the command-line options for the
// kubekey controller-manager server.
type ControllerManagerServerOptions struct {
	// Enable gops or not.
	GOPSEnabled bool
	// WorkDir is the baseDir which command find any resource (project etc.)
	WorkDir string
	// Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
	Debug bool
	// ControllerGates is the list of controller gates to enable or disable controller.
	// '*' means "all enabled by default controllers"
	// 'foo' means "enable 'foo'"
	// '-foo' means "disable 'foo'"
	// first item for a particular name wins.
	// e.g. '-foo,foo' means "disable foo", 'foo,-foo' means "enable foo"
	// * has the lowest priority.
	// e.g. *,-foo, means "disable 'foo'"
	ControllerGates []string
	// MaxConcurrentReconciles is the maximum number of concurrent reconciles per controller.
	MaxConcurrentReconciles int
	// LeaderElection enables leader election for the controller-manager.
	LeaderElection bool
}
// NewControllerManagerServerOptions returns options pre-populated with the
// defaults: workdir /var/lib/kubekey, all controllers enabled, one
// concurrent reconcile per controller.
func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
	o := &ControllerManagerServerOptions{}
	o.WorkDir = "/var/lib/kubekey"
	o.ControllerGates = []string{"*"}
	o.MaxConcurrentReconciles = 1
	return o
}
// Flags returns the flag sets of the controller-manager server, grouped by
// category (generic, klog, controller-manager).
func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets {
	fss := cliflag.NamedFlagSets{}
	gfs := fss.FlagSet("generic")
	gfs.BoolVar(&o.GOPSEnabled, "gops", o.GOPSEnabled, "Whether to enable gops or not. When enabled this option, "+
		"controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
	// BUG FIX: the help text claimed "Default current dir", but the default set
	// in NewControllerManagerServerOptions is /var/lib/kubekey.
	gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey.")
	gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
	// Re-register the klog flags (with '_' replaced by '-') so they show up in
	// this command's grouped help output.
	kfs := fss.FlagSet("klog")
	local := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(local)
	local.VisitAll(func(fl *flag.Flag) {
		fl.Name = strings.ReplaceAll(fl.Name, "_", "-")
		kfs.AddGoFlag(fl)
	})
	cfs := fss.FlagSet("controller-manager")
	cfs.StringSliceVar(&o.ControllerGates, "controllers", o.ControllerGates, "The list of controller gates to enable or disable controller. "+
		"'*' means \"all enabled by default controllers\"")
	cfs.IntVar(&o.MaxConcurrentReconciles, "max-concurrent-reconciles", o.MaxConcurrentReconciles, "The number of maximum concurrent reconciles for controller.")
	cfs.BoolVar(&o.LeaderElection, "leader-election", o.LeaderElection, "Whether to enable leader election for controller-manager.")
	return fss
}
// Complete fills in any unset option fields before they are used.
// Currently a no-op; kept to follow the common options Complete/Flags pattern.
func (o *ControllerManagerServerOptions) Complete(cmd *cobra.Command, args []string) {
	// do nothing
}

View File

@ -0,0 +1,73 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"io/fs"
"os"
"github.com/google/gops/agent"
"github.com/spf13/cobra"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"github.com/kubesphere/kubekey/v4/cmd/controller-manager/app/options"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/manager"
)
// NewControllerManagerCommand creates the root *cobra.Command for the kubekey
// controller-manager binary, wiring option flags and the RunE entry point.
func NewControllerManagerCommand() *cobra.Command {
	o := options.NewControllerManagerServerOptions()
	cmd := &cobra.Command{
		Use:   "controller-manager",
		Short: "kubekey controller manager",
		RunE: func(cmd *cobra.Command, args []string) error {
			if o.GOPSEnabled {
				// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
				// Bind to a random port on address 127.0.0.1
				if err := agent.Listen(agent.Options{}); err != nil {
					return err
				}
			}
			o.Complete(cmd, args)
			// create workdir directory, if not exists
			_const.SetWorkDir(o.WorkDir)
			if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
				if err := os.MkdirAll(o.WorkDir, fs.ModePerm); err != nil {
					return err
				}
			}
			return run(signals.SetupSignalHandler(), o)
		},
	}
	// Named "flags" (not "fs") so the io/fs package import is not shadowed.
	flags := cmd.Flags()
	for _, f := range o.Flags().FlagSets {
		flags.AddFlagSet(f)
	}
	return cmd
}
// run builds a controller manager from the server options and blocks until
// the context is cancelled.
func run(ctx context.Context, o *options.ControllerManagerServerOptions) error {
	opts := manager.ControllerManagerOptions{
		ControllerGates:         o.ControllerGates,
		MaxConcurrentReconciles: o.MaxConcurrentReconciles,
		LeaderElection:          o.LeaderElection,
	}
	return manager.NewControllerManager(opts).Run(ctx)
}

View File

@ -0,0 +1,31 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
"k8s.io/component-base/cli"
"github.com/kubesphere/kubekey/v4/cmd/controller-manager/app"
)
func main() {
command := app.NewControllerManagerCommand()
code := cli.Run(command)
os.Exit(code)
}

View File

@ -0,0 +1,92 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"github.com/google/uuid"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
cliflag "k8s.io/component-base/cli/flag"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
// PreCheckOptions holds the command-line options for the "precheck" sub-command.
type PreCheckOptions struct {
	// Playbook which to execute.
	Playbook string
	// InventoryFile is the path of the host list file (supports *.ini).
	InventoryFile string
	// ConfigFile is the path of config file
	ConfigFile string
	// WorkDir is the baseDir which command find any resource (project etc.)
	WorkDir string
	// Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
	Debug bool
}
// NewPreCheckOption returns precheck options populated with the default
// workdir (/var/lib/kubekey).
func NewPreCheckOption() *PreCheckOptions {
	return &PreCheckOptions{
		WorkDir: "/var/lib/kubekey",
	}
}
// Flags returns the named flag sets for the precheck command.
func (o *PreCheckOptions) Flags() cliflag.NamedFlagSets {
	fss := cliflag.NamedFlagSets{}
	gfs := fss.FlagSet("generic")
	// BUG FIX: the help text claimed "Default current dir", but the default set
	// in NewPreCheckOption is /var/lib/kubekey.
	gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey.")
	gfs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "the config file path. support *.yaml ")
	gfs.StringVar(&o.InventoryFile, "inventory", o.InventoryFile, "the host list file path. support *.ini")
	gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
	return fss
}
// Complete builds a precheck Pipeline object from the parsed flags and args.
// args must contain exactly one element: the playbook to execute.
func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kubekeyv1.Pipeline, error) {
	// complete playbook. now only support one playbook
	if len(args) != 1 {
		return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
	}
	o.Playbook = args[0]
	kk := &kubekeyv1.Pipeline{
		TypeMeta: metav1.TypeMeta{
			Kind: "Pipeline",
			// BUG FIX: was "kubekey.kubesphere.io/v1alpha1", which disagrees
			// with the imported v1 API package and the CRDs (served version v1).
			APIVersion: "kubekey.kubesphere.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:              fmt.Sprintf("precheck-%s", rand.String(6)),
			Namespace:         metav1.NamespaceDefault,
			UID:               types.UID(uuid.NewString()),
			CreationTimestamp: metav1.Now(),
			Annotations: map[string]string{
				kubekeyv1.BuiltinsProjectAnnotation: "",
			},
		},
		Spec: kubekeyv1.PipelineSpec{
			Playbook: o.Playbook,
			Debug:    o.Debug,
		},
	}
	return kk, nil
}

147
cmd/kk/app/options/run.go Normal file
View File

@ -0,0 +1,147 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"flag"
"fmt"
"strings"
"github.com/google/uuid"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
// KubekeyRunOptions holds the command-line options for the "run" sub-command.
type KubekeyRunOptions struct {
	// Enable gops or not.
	GOPSEnabled bool
	// WorkDir is the baseDir which command find any resource (project etc.)
	WorkDir string
	// Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
	Debug bool
	// ProjectAddr is the storage for executable packages (in Ansible format).
	// When starting with http or https, it will be obtained from a Git repository.
	// When starting with file path, it will be obtained from the local path.
	ProjectAddr string
	// ProjectName is the name of project. it will store to project dir use this name.
	// If empty generate from ProjectAddr
	ProjectName string
	// ProjectBranch is the git branch of the git Addr.
	ProjectBranch string
	// ProjectTag is the git tag of the git Addr.
	ProjectTag string
	// ProjectInsecureSkipTLS skip tls or not when git addr is https.
	ProjectInsecureSkipTLS bool
	// ProjectToken is the token used to authenticate against a private project repository.
	ProjectToken string
	// Playbook which to execute.
	Playbook string
	// InventoryFile is the path of the host list file (supports *.ini).
	InventoryFile string
	// ConfigFile is the path of config file
	ConfigFile string
	// Tags is the tags of playbook which to execute
	Tags []string
	// SkipTags is the tags of playbook which skip execute
	SkipTags []string
}
// NewKubeKeyRunOptions returns run options populated with the default
// workdir (/var/lib/kubekey).
func NewKubeKeyRunOptions() *KubekeyRunOptions {
	return &KubekeyRunOptions{
		WorkDir: "/var/lib/kubekey",
	}
}
// Flags returns the named flag sets for the run command, grouped by category
// (generic, klog, project, tags).
func (o *KubekeyRunOptions) Flags() cliflag.NamedFlagSets {
	fss := cliflag.NamedFlagSets{}
	gfs := fss.FlagSet("generic")
	gfs.BoolVar(&o.GOPSEnabled, "gops", o.GOPSEnabled, "Whether to enable gops or not. When enabled this option, "+
		"controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
	// BUG FIX: the help text claimed "Default current dir", but the default set
	// in NewKubeKeyRunOptions is /var/lib/kubekey.
	gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey.")
	gfs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "the config file path. support *.yaml ")
	gfs.StringVar(&o.InventoryFile, "inventory", o.InventoryFile, "the host list file path. support *.ini")
	gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
	// Re-register the klog flags (with '_' replaced by '-') so they show up in
	// this command's grouped help output.
	kfs := fss.FlagSet("klog")
	local := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(local)
	local.VisitAll(func(fl *flag.Flag) {
		fl.Name = strings.ReplaceAll(fl.Name, "_", "-")
		kfs.AddGoFlag(fl)
	})
	gitfs := fss.FlagSet("project")
	gitfs.StringVar(&o.ProjectAddr, "project-addr", o.ProjectAddr, "the storage for executable packages (in Ansible format)."+
		" When starting with http or https, it will be obtained from a Git repository."+
		"When starting with file path, it will be obtained from the local path.")
	// FIX: ProjectName existed in the struct but had no flag, so it could never
	// be set from the command line. Added as a backward-compatible new flag.
	gitfs.StringVar(&o.ProjectName, "project-name", o.ProjectName, "the name of the project. If empty it is generated from project-addr")
	gitfs.StringVar(&o.ProjectBranch, "project-branch", o.ProjectBranch, "the git branch of the remote Addr")
	gitfs.StringVar(&o.ProjectTag, "project-tag", o.ProjectTag, "the git tag of the remote Addr")
	gitfs.BoolVar(&o.ProjectInsecureSkipTLS, "project-insecure-skip-tls", o.ProjectInsecureSkipTLS, "skip tls or not when git addr is https.")
	gitfs.StringVar(&o.ProjectToken, "project-token", o.ProjectToken, "the token for private project.")
	tfs := fss.FlagSet("tags")
	tfs.StringArrayVar(&o.Tags, "tags", o.Tags, "the tags of playbook which to execute")
	// NOTE(review): "skip_tags" keeps the underscore (ansible-style name);
	// renaming it to "skip-tags" would break existing callers — confirm before changing.
	tfs.StringArrayVar(&o.SkipTags, "skip_tags", o.SkipTags, "the tags of playbook which skip execute")
	return fss
}
// Complete builds a Pipeline object for the run command from the parsed flags
// and args. args must contain exactly one element: the playbook to execute.
func (o *KubekeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kubekeyv1.Pipeline, error) {
	// complete playbook. now only support one playbook
	if len(args) != 1 {
		return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
	}
	o.Playbook = args[0]
	kk := &kubekeyv1.Pipeline{
		TypeMeta: metav1.TypeMeta{
			Kind: "Pipeline",
			// BUG FIX: was "kubekey.kubesphere.io/v1alpha1", which disagrees
			// with the imported v1 API package and the CRDs (served version v1).
			APIVersion: "kubekey.kubesphere.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:              fmt.Sprintf("run-command-%s", rand.String(6)),
			Namespace:         metav1.NamespaceDefault,
			UID:               types.UID(uuid.NewString()),
			CreationTimestamp: metav1.Now(),
			Annotations:       map[string]string{},
		},
		Spec: kubekeyv1.PipelineSpec{
			Project: kubekeyv1.PipelineProject{
				Addr:            o.ProjectAddr,
				Name:            o.ProjectName,
				Branch:          o.ProjectBranch,
				Tag:             o.ProjectTag,
				InsecureSkipTLS: o.ProjectInsecureSkipTLS,
				Token:           o.ProjectToken,
			},
			Playbook: o.Playbook,
			Tags:     o.Tags,
			SkipTags: o.SkipTags,
			Debug:    o.Debug,
		},
	}
	return kk, nil
}

58
cmd/kk/app/precheck.go Normal file
View File

@ -0,0 +1,58 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"io/fs"
"os"
"github.com/spf13/cobra"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
// newPreCheckCommand returns the "precheck" sub-command, which runs the
// builtin playbooks/precheck.yaml playbook against the configured inventory.
func newPreCheckCommand() *cobra.Command {
	o := options.NewPreCheckOption()
	cmd := &cobra.Command{
		Use:   "precheck",
		Short: "kk precheck for cluster",
		RunE: func(cmd *cobra.Command, args []string) error {
			// The playbook is fixed here; any user-supplied args are ignored.
			kk, err := o.Complete(cmd, []string{"playbooks/precheck.yaml"})
			if err != nil {
				return err
			}
			// set workdir
			_const.SetWorkDir(o.WorkDir)
			// create workdir directory,if not exists
			if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
				if err := os.MkdirAll(o.WorkDir, fs.ModePerm); err != nil {
					return err
				}
			}
			return run(signals.SetupSignalHandler(), kk, o.ConfigFile, o.InventoryFile)
		},
	}
	// Register all option flag sets on the command.
	flags := cmd.Flags()
	for _, f := range o.Flags().FlagSets {
		flags.AddFlagSet(f)
	}
	return cmd
}

106
cmd/kk/app/profiling.go Normal file
View File

@ -0,0 +1,106 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"github.com/spf13/pflag"
)
var (
profileName string
profileOutput string
)
// addProfilingFlags registers the --profile and --profile-output flags that
// control the pprof profile collected by initProfiling/flushProfiling.
func addProfilingFlags(flags *pflag.FlagSet) {
	flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
	flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
}
// initProfiling starts the profile collection selected by --profile.
// For "cpu" it opens --profile-output and begins a CPU profile; "block" and
// "mutex" enable their runtime sampling; other names are validated against
// pprof.Lookup. It also installs an interrupt handler that flushes the
// profile before exiting.
func initProfiling() error {
	var (
		f   *os.File
		err error
	)
	switch profileName {
	case "none":
		return nil
	case "cpu":
		f, err = os.Create(profileOutput)
		if err != nil {
			return err
		}
		if err = pprof.StartCPUProfile(f); err != nil {
			// BUG FIX: do not leak the output file when the profile cannot start.
			f.Close()
			return err
		}
	// Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
	// output anything. We choose to sample all events.
	case "block":
		runtime.SetBlockProfileRate(1)
	case "mutex":
		runtime.SetMutexProfileFraction(1)
	default:
		// Check the profile name is valid.
		if profile := pprof.Lookup(profileName); profile == nil {
			return fmt.Errorf("unknown profile '%s'", profileName)
		}
	}
	// If the command is interrupted before the end (ctrl-c), flush the
	// profiling files
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		<-c
		// f is nil unless a CPU profile was started; (*os.File).Close on a nil
		// receiver is safe and just returns ErrInvalid, which we ignore here.
		f.Close()
		flushProfiling()
		os.Exit(0)
	}()
	return nil
}
// flushProfiling stops or writes out the active profile selected by
// --profile. For "cpu" the data was streamed by initProfiling and only needs
// StopCPUProfile; lookup-based profiles ("heap", "goroutine", ...) are
// written to a freshly created --profile-output file.
func flushProfiling() error {
	switch profileName {
	case "none":
		return nil
	case "cpu":
		pprof.StopCPUProfile()
	case "heap":
		// Force a GC first so the heap profile reflects live objects.
		runtime.GC()
		fallthrough
	default:
		profile := pprof.Lookup(profileName)
		if profile == nil {
			return nil
		}
		f, err := os.Create(profileOutput)
		if err != nil {
			return err
		}
		defer f.Close()
		// BUG FIX: the WriteTo error was silently discarded, so a failed write
		// produced a truncated/empty profile with no indication.
		if err := profile.WriteTo(f, 0); err != nil {
			return err
		}
	}
	return nil
}

122
cmd/kk/app/run.go Normal file
View File

@ -0,0 +1,122 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"io/fs"
"os"
"github.com/google/gops/agent"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"sigs.k8s.io/yaml"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/manager"
)
// newRunCommand returns the "run" sub-command, which executes a playbook
// given on the command line against the configured inventory and config.
func newRunCommand() *cobra.Command {
	o := options.NewKubeKeyRunOptions()
	cmd := &cobra.Command{
		Use:   "run [playbook]",
		Short: "run a playbook",
		RunE: func(cmd *cobra.Command, args []string) error {
			if o.GOPSEnabled {
				// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
				// Bind to a random port on address 127.0.0.1
				if err := agent.Listen(agent.Options{}); err != nil {
					return err
				}
			}
			kk, err := o.Complete(cmd, args)
			if err != nil {
				return err
			}
			// set workdir
			_const.SetWorkDir(o.WorkDir)
			// create workdir directory, if not exists
			if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
				if err := os.MkdirAll(o.WorkDir, fs.ModePerm); err != nil {
					return err
				}
			}
			// convert option to kubekeyv1.Pipeline
			return run(signals.SetupSignalHandler(), kk, o.ConfigFile, o.InventoryFile)
		},
	}
	// Named "flags" (not "fs") so the io/fs package import is not shadowed.
	flags := cmd.Flags()
	for _, f := range o.Flags().FlagSets {
		flags.AddFlagSet(f)
	}
	return cmd
}
// run converts configFile and inventoryFile into Config/Inventory API
// objects, attaches object references to them on the Pipeline, and executes
// the pipeline with the command manager until ctx is cancelled.
func run(ctx context.Context, kk *kubekeyv1.Pipeline, configFile string, inventoryFile string) error {
	// convert configFile
	config := &kubekeyv1.Config{}
	cdata, err := os.ReadFile(configFile)
	if err != nil {
		klog.Errorf("read config file error %v", err)
		return err
	}
	if err := yaml.Unmarshal(cdata, config); err != nil {
		klog.Errorf("unmarshal config file error %v", err)
		return err
	}
	if config.Namespace == "" {
		config.Namespace = corev1.NamespaceDefault
	}
	kk.Spec.ConfigRef = &corev1.ObjectReference{
		Kind:            config.Kind,
		Namespace:       config.Namespace,
		Name:            config.Name,
		UID:             config.UID,
		APIVersion:      config.APIVersion,
		ResourceVersion: config.ResourceVersion,
	}
	// convert inventoryFile
	inventory := &kubekeyv1.Inventory{}
	idata, err := os.ReadFile(inventoryFile)
	if err != nil {
		// BUG FIX: this read error was previously never checked, so a missing
		// or unreadable inventory file silently produced an empty Inventory.
		klog.Errorf("read inventory file error %v", err)
		return err
	}
	if err := yaml.Unmarshal(idata, inventory); err != nil {
		klog.Errorf("unmarshal inventory file error %v", err)
		return err
	}
	if inventory.Namespace == "" {
		inventory.Namespace = corev1.NamespaceDefault
	}
	kk.Spec.InventoryRef = &corev1.ObjectReference{
		Kind:            inventory.Kind,
		Namespace:       inventory.Namespace,
		Name:            inventory.Name,
		UID:             inventory.UID,
		APIVersion:      inventory.APIVersion,
		ResourceVersion: inventory.ResourceVersion,
	}
	return manager.NewCommandManager(manager.CommandManagerOptions{
		Pipeline:  kk,
		Config:    config,
		Inventory: inventory,
	}).Run(ctx)
}

44
cmd/kk/app/server.go Normal file
View File

@ -0,0 +1,44 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/spf13/cobra"
)
// NewKubeKeyCommand assembles the root "kk" command, attaching the profiling
// flags/hooks and all sub-commands.
func NewKubeKeyCommand() *cobra.Command {
	root := &cobra.Command{
		Use:  "kk",
		Long: "kubekey is a daemon that execute command in a node",
		PersistentPreRunE: func(*cobra.Command, []string) error {
			return initProfiling()
		},
		PersistentPostRunE: func(*cobra.Command, []string) error {
			return flushProfiling()
		},
	}
	addProfilingFlags(root.PersistentFlags())
	root.AddCommand(newRunCommand())
	root.AddCommand(newVersionCommand())
	// internal command
	root.AddCommand(newPreCheckCommand())
	return root
}

33
cmd/kk/app/version.go Normal file
View File

@ -0,0 +1,33 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/version"
)
// newVersionCommand returns the "version" sub-command, which prints the build
// version information of this binary.
func newVersionCommand() *cobra.Command {
	return &cobra.Command{
		Use: "version",
		// BUG FIX: the short help said "KubeSphere controller-manager" but this
		// command belongs to the kk binary.
		Short: "Print the version of kk",
		Run: func(cmd *cobra.Command, args []string) {
			cmd.Println(version.Get())
		},
	}
}

31
cmd/kk/kubekey.go Normal file
View File

@ -0,0 +1,31 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
"k8s.io/component-base/cli"
"github.com/kubesphere/kubekey/v4/cmd/kk/app"
)
func main() {
command := app.NewKubeKeyCommand()
code := cli.Run(command)
os.Exit(code)
}

15
config/helm/Chart.yaml Normal file
View File

@ -0,0 +1,15 @@
apiVersion: v2
name: kubekey
description: A Helm chart for kubekey
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "v4.0.0"

View File

@ -0,0 +1,38 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
name: configs.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
names:
kind: Config
listKind: ConfigList
plural: configs
singular: config
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
type: object
x-kubernetes-preserve-unknown-fields: true
type: object
served: true
storage: true

View File

@ -0,0 +1,66 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
name: inventories.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
names:
kind: Inventory
listKind: InventoryList
plural: inventories
singular: inventory
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
groups:
additionalProperties:
properties:
groups:
items:
type: string
type: array
hosts:
items:
type: string
type: array
vars:
type: object
x-kubernetes-preserve-unknown-fields: true
type: object
          description: Groups of nodes. A group may contain the same node more than once.
type: object
hosts:
additionalProperties:
type: object
x-kubernetes-preserve-unknown-fields: true
description: Hosts is all nodes
type: object
vars:
          description: 'Vars for all hosts. The priority for vars is: host vars
            > group vars > inventory vars'
type: object
x-kubernetes-preserve-unknown-fields: true
type: object
type: object
served: true
storage: true

View File

@ -0,0 +1,225 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
name: pipelines.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
names:
kind: Pipeline
listKind: PipelineList
plural: pipelines
singular: pipeline
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.playbook
name: Playbook
type: string
- jsonPath: .status.phase
name: Phase
type: string
- jsonPath: .status.taskResult.total
name: Total
type: integer
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
configRef:
description: ConfigRef is the global variable configuration for playbook
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
x-kubernetes-map-type: atomic
debug:
description: Debug mode, after a successful execution of Pipeline,
will retain runtime data, which includes task execution status and
parameters.
type: boolean
inventoryRef:
description: InventoryRef is the node configuration for playbook
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
x-kubernetes-map-type: atomic
playbook:
description: Playbook which to execute.
type: string
project:
description: Project is storage for executable packages
properties:
addr:
description: Addr is the storage for executable packages (in Ansible
file format). When starting with http or https, it will be obtained
from a Git repository. When starting with file path, it will
be obtained from the local path.
type: string
branch:
description: Branch is the git branch of the git Addr.
type: string
insecureSkipTLS:
description: InsecureSkipTLS skip tls or not when git addr is
https.
type: boolean
name:
                description: Name is the name of the project.
type: string
tag:
                description: Tag is the git tag of the git Addr.
type: string
token:
description: Token of Authorization for http request
type: string
type: object
skipTags:
description: SkipTags is the tags of playbook which skip execute
items:
type: string
type: array
tags:
description: Tags is the tags of playbook which to execute
items:
type: string
type: array
required:
- playbook
type: object
status:
properties:
failedDetail:
description: FailedDetail will record the failed tasks.
items:
properties:
                hosts:
                  description: Hosts is the per-host result of the failed task.
items:
properties:
host:
description: Host name of failed task.
type: string
stdErr:
description: StdErr of failed task.
type: string
stdout:
description: Stdout of failed task.
type: string
type: object
type: array
task:
description: Task name of failed task.
type: string
type: object
type: array
phase:
description: Phase of pipeline.
type: string
            reason:
              description: Reason is the failure reason of the pipeline.
type: string
taskResult:
description: TaskResult total related tasks execute result.
properties:
failed:
description: Failed number of tasks.
type: integer
ignored:
description: Ignored number of tasks.
type: integer
skipped:
description: Skipped number of tasks.
type: integer
success:
description: Success number of tasks.
type: integer
total:
description: Total number of tasks.
type: integer
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@ -0,0 +1,43 @@
{{/*
Common labels
*/}}
{{- define "common.labels" -}}
helm.sh/chart: {{ include "common.chart" . }}
{{ include "common.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "common.selectorLabels" -}}
app.kubernetes.io/name: {{ .Chart.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "common.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "common.image" -}}
{{- $registryName := .Values.operator.image.registry -}}
{{- $repositoryName := .Values.operator.image.repository -}}
{{- $separator := ":" -}}
{{- $termination := .Values.operator.image.tag | toString -}}
{{- if .Values.operator.image.digest }}
{{- $separator = "@" -}}
{{- $termination = .Values.operator.image.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else }}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,13 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Renders a value that contains template.
Usage:
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "common.tplvalues.render" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,70 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: {{ include "common.labels" . | nindent 4 }}
app: kk-operator
name: kk-operator
namespace: {{ .Release.Namespace }}
spec:
strategy:
rollingUpdate:
maxSurge: 0
type: RollingUpdate
progressDeadlineSeconds: 600
replicas: {{ .Values.operator.replicaCount }}
revisionHistoryLimit: 10
selector:
matchLabels:
app: kk-operator
template:
metadata:
labels: {{ include "common.labels" . | nindent 8 }}
app: kk-operator
spec:
serviceAccountName: {{ .Values.serviceAccount.name }}
{{- if .Values.operator.pullSecrets }}
imagePullSecrets: {{ .Values.operator.pullSecrets }}
{{- end }}
{{- if .Values.operator.nodeSelector }}
nodeSelector: {{ .Values.operator.nodeSelector }}
{{- end }}
{{- if .Values.operator.affinity }}
affinity: {{ .Values.operator.affinity }}
{{- end }}
{{- if .Values.operator.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.operator.tolerations "context" .) | nindent 8 }}
{{- end }}
dnsPolicy: {{ .Values.operator.dnsPolicy }}
restartPolicy: {{ .Values.operator.restartPolicy }}
schedulerName: {{ .Values.operator.schedulerName }}
terminationGracePeriodSeconds: {{ .Values.operator.terminationGracePeriodSeconds }}
containers:
- name: ks-controller-manager
image: {{ template "common.image" . }}
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
{{- if .Values.operator.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.operator.command "context" $) | nindent 12 }}
{{- end }}
env:
{{- if .Values.operator.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.operator.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.operator.resources }}
resources: {{- toYaml .Values.operator.resources | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /etc/localtime
name: host-time
readOnly: true
{{- if .Values.operator.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.operator.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
volumes:
- hostPath:
path: /etc/localtime
type: ""
name: host-time
{{- if .Values.operator.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.operator.extraVolumes "context" $) | nindent 8 }}
{{- end }}

View File

@ -0,0 +1,36 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Values.role }}
namespace: {{ .Release.Namespace }}
labels: {{- include "common.labels" . | nindent 4 }}
rules:
- apiGroups:
- kubekey.kubesphere.io
resources:
- configs
- inventories
verbs:
- get
- list
- watch
- apiGroups:
- kubekey.kubesphere.io
resources:
- pipelines
- pipelines/status
verbs:
- "*"
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- "*"
- apiGroups:
- ""
resources:
- events
verbs:
- "*"

View File

@ -0,0 +1,27 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccount.name }}
namespace: {{ .Release.Namespace }}
labels: {{- include "common.labels" . | nindent 4}}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.serviceAccount.name }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Values.role }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.name }}
namespace: {{ .Release.Namespace }}

84
config/helm/values.yaml Normal file
View File

@ -0,0 +1,84 @@
## @section Common parameters
##
# the ClusterRole name which the operator pod needs
role: "kk-operator"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: "kk-operator"
operator:
# tolerations of operator pod
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 60
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 60
# affinity of operator pod
affinity: { }
# nodeSelector of operator pod
nodeSelector: { }
# dnsPolicy of operator pod
dnsPolicy: Default
# restartPolicy of operator pod
restartPolicy: Always
# schedulerName of operator pod
schedulerName: default-scheduler
# terminationGracePeriodSeconds of operator pod
terminationGracePeriodSeconds: 30
# replica of operator deployment
replicaCount: 1
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
pullSecrets: []
image:
registry: ""
repository: kubesphere/kubekey-operator
tag: ""
digest: ""
pullPolicy: IfNotPresent
##
## @param resources.limits The resources limits for the haproxy containers
## @param resources.requests The requested resources for the haproxy containers
##
resources:
limits:
cpu: 1
memory: 1000Mi
requests:
cpu: 30m
memory: 50Mi
## @param command Override default container command (useful when using custom images)
##
command:
- controller-manager
- --logtostderr=true
- --leader-election=true
- --controllers=*
## @param extraEnvVars Array with extra environment variables to add to haproxy nodes
##
extraEnvVars: []
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the haproxy container(s)
##
extraVolumeMounts: []
## @param extraVolumes Optionally specify extra list of additional volumes for the haproxy pod(s)
##
extraVolumes: []

23
example/Makefile Normal file
View File

@ -0,0 +1,23 @@
BaseDir := $(CURDIR)/..
playbooks := bootstrap-os.yaml
.PHONY: build
build:
go build -o $(BaseDir)/example -gcflags all=-N github.com/kubesphere/kubekey/v4/cmd/kk
.PHONY: run-playbook
run-playbook: build
@for pb in $(playbooks); do \
$(BaseDir)/example/kk run --work-dir=$(BaseDir)/example/test \
--project-addr=git@github.com:littleBlackHouse/kse-installer.git \
--project-branch=demo --inventory=$(BaseDir)/example/inventory.yaml \
--config=$(BaseDir)/example/config.yaml \
--debug playbooks/$$pb;\
done
.PHONY: precheck
precheck: build
$(BaseDir)/example/kk precheck --work-dir=$(BaseDir)/example/test \
--inventory=$(BaseDir)/example/inventory.yaml \
--config=$(BaseDir)/example/config.yaml

19
example/config.yaml Normal file
View File

@ -0,0 +1,19 @@
apiVersion: kubekey.kubesphere.io/v1
kind: Config
metadata:
name: example
spec:
etcd_deployment_type: external
supported_os_distributions: [ ubuntu ]
kube_network_plugin: flannel
kube_version: 1.23.15
kube_version_min_required: 1.19.10
download_run_once: true
  minimal_master_memory_mb: 10 # MB
  minimal_node_memory_mb: 10 # MB
kube_network_node_prefix: 24
container_manager: containerd
containerd_version: v1.7.0
containerd_min_version_required: v1.6.0
kube_external_ca_mode: true
cilium_deploy_additionally: true

26
example/inventory.yaml Normal file
View File

@ -0,0 +1,26 @@
apiVersion: kubekey.kubesphere.io/v1
kind: Inventory
metadata:
name: example
spec:
hosts:
kk:
ssh_host: xxx
groups:
k8s_cluster:
groups:
- kube_control_plane
- kube_node
kube_control_plane:
hosts:
- kk
kube_node:
hosts:
- kk
etcd:
hosts:
- kk
vars:
ssh_port: xxx
ssh_user: xxx
ssh_password: xxx

18
example/pipeline.yaml Normal file
View File

@ -0,0 +1,18 @@
apiVersion: kubekey.kubesphere.io/v1
kind: Pipeline
metadata:
name: precheck-example
annotations:
"kubekey.kubesphere.io/builtins-repo": ""
spec:
playbook: playbooks/precheck.yaml
inventoryRef:
apiVersion: kubekey.kubesphere.io/v1
kind: Inventory
name: example
namespace: default
configRef:
apiVersion: kubekey.kubesphere.io/v1
kind: Config
name: example
namespace: default

7
exp/README.md Normal file
View File

@ -0,0 +1,7 @@
# Experimental
⚠️ This package holds experimental code and API types. ⚠️
## Compatibility notice
This package does not adhere to any compatibility guarantees. Some portions may eventually be promoted out of this package and considered stable/GA, while others may be removed entirely.

94
go.mod Normal file
View File

@ -0,0 +1,94 @@
module github.com/kubesphere/kubekey/v4
go 1.20
require (
github.com/evanphx/json-patch v5.7.0+incompatible
github.com/flosch/pongo2/v6 v6.0.0
github.com/go-git/go-git/v5 v5.11.0
github.com/google/gops v0.3.28
github.com/google/uuid v1.5.0
github.com/pkg/sftp v1.13.6
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.4
golang.org/x/crypto v0.17.0
golang.org/x/time v0.5.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.0
k8s.io/apimachinery v0.29.0
k8s.io/client-go v0.29.0
k8s.io/component-base v0.29.0
k8s.io/klog/v2 v2.110.1
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
sigs.k8s.io/controller-runtime v0.16.3
sigs.k8s.io/yaml v1.4.0
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.1 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-openapi/jsonpointer v0.20.2 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.22.7 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.18.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/sergi/go-diff v1.3.1 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.15.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.16.1 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/protobuf v1.32.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.29.0 // indirect
k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)

311
go.sum Normal file
View File

@ -0,0 +1,311 @@
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE=
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.1 h1:S+9bSbua1z3FgCnV0KKOSSZ3mDthb5NyEPL5gEpCvyk=
github.com/emicklei/go-restful/v3 v3.11.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/flosch/pongo2/v6 v6.0.0 h1:lsGru8IAzHgIAw6H2m4PCyleO58I40ow6apih0WprMU=
github.com/flosch/pongo2/v6 v6.0.0/go.mod h1:CuDpFm47R0uGGE7z13/tTlt1Y6zdxvr2RLT5LJhsHEU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8=
github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4=
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM=
golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A=
k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA=
k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0=
k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc=
k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o=
k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis=
k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8=
k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38=
k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8 h1:yHNkNuLjht7iq95pO9QmbjOWCguvn8mDe3lT78nqPkw=
k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 h1:+XYBQU3ZKUu60H6fEnkitTTabGoKfIG8zczhZBENu9o=
k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw=
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4=
sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

113
hack/auto-update-version.py Executable file
View File

@ -0,0 +1,113 @@
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2022 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import re
import json
from natsort import natsorted
import collections
# GitHub REST API endpoint used for all release queries.
GITHUB_BASE_URL = "https://api.github.com"
# Upstream project whose releases are tracked.
ORG = "kubernetes"
REPO = "kubernetes"
# Number of releases fetched per API request page.
PER_PAGE = 15
# CPU architectures and Kubernetes binaries we maintain checksums for.
ARCH_LIST = ["amd64", "arm64"]
K8S_COMPONENTS = ["kubeadm", "kubelet", "kubectl"]
def get_releases(org, repo, per_page=30):
    """Fetch the releases list for a GitHub repository.

    Args:
        org: GitHub organization name.
        repo: Repository name.
        per_page: Number of releases to request per page.

    Returns:
        The decoded JSON list of releases, or None when the request fails
        (a diagnostic is printed in that case).
    """
    url = "{}/repos/{}/{}/releases?per_page={}".format(
        GITHUB_BASE_URL, org, repo, per_page)
    try:
        # Timeout so a hung connection cannot stall the whole job; the
        # original bare `except:` also hid every error including KeyboardInterrupt.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
    except requests.RequestException as e:
        print("fetch {}/{} releases failed: {}".format(org, repo, e))
        return None
    return response.json()
def get_new_kubernetes_version(current_version):
    """Return stable release tags (vX.Y.Z) not yet present in current_version.

    Args:
        current_version: components mapping; the set of known versions is
            taken from the keys of current_version['kubeadm']['amd64'].

    Returns:
        List of new tag strings; empty when nothing is new or the release
        list could not be fetched.
    """
    new_versions = []
    kubernetes_release = get_releases(org=ORG, repo=REPO, per_page=PER_PAGE)
    if not kubernetes_release:
        # Network failure (get_releases returned None) or empty list.
        return new_versions
    known = current_version['kubeadm']['amd64']
    for release in kubernetes_release:
        tag = release['tag_name']
        # Escape the dots: the original pattern used bare '.', which matches
        # any character and would accept tags like "v1x28x0".
        if re.search(r"^v[0-9]+\.[0-9]+\.[0-9]+$", tag) and tag not in known:
            new_versions.append(tag)
    return new_versions
def fetch_kubernetes_sha256(versions):
    """Download sha256 checksums for every component/arch of the given versions.

    Args:
        versions: Iterable of Kubernetes version tags (e.g. "v1.28.0").

    Returns:
        Dict keyed "<binary>-<arch>-<version>" mapping to the checksum text.
        Downloads that fail or return a non-200 status are skipped so one
        missing artifact does not abort the whole update.
    """
    new_sha256 = {}
    for version in versions:
        for binary in K8S_COMPONENTS:
            for arch in ARCH_LIST:
                url = ("https://storage.googleapis.com/kubernetes-release/"
                       "release/{}/bin/linux/{}/{}.sha256").format(version, arch, binary)
                try:
                    # Timeout so one stuck download cannot hang the run forever.
                    response = requests.get(url, timeout=30)
                except requests.RequestException as e:
                    print("fetch {} failed: {}".format(url, e))
                    continue
                if response.status_code == 200:
                    new_sha256["{}-{}-{}".format(binary, arch, version)] = response.text
    return new_sha256
def version_sort(data):
    """Return a copy of ``data`` as an OrderedDict keyed in natural version order.

    Natural sorting places e.g. "v1.9.0" before "v1.10.0", which plain
    lexicographic sorting would not.
    """
    return collections.OrderedDict(
        (version, data[version]) for version in natsorted(data)
    )
def main():
    # Load the currently supported component versions.
    # get current support versions
    with open("version/components.json", "r") as f:
        data = json.load(f)
    # get new kubernetes versions
    new_versions = get_new_kubernetes_version(current_version=data)
    if len(new_versions) > 0:
        # fetch new kubernetes sha256
        new_sha256 = fetch_kubernetes_sha256(new_versions)
        if new_sha256:
            for k, v in new_sha256.items():
                # Keys look like "<binary>-<arch>-<version>",
                # e.g. "kubeadm-amd64-v1.28.0".
                info = k.split('-')
                data[info[0]][info[1]][info[2]] = v
            # Re-sort every component/arch table so versions stay in
            # natural order after the inserts above.
            for binary in K8S_COMPONENTS:
                for arch in ARCH_LIST:
                    data[binary][arch] = version_sort(data[binary][arch])
            print(new_versions)
            # update components.json
            with open("version/components.json", 'w') as f:
                json.dump(data, f, indent=4, ensure_ascii=False)
            # set new version to tmp file so later CI steps can read them
            with open("version.tmp", 'w') as f:
                f.write("\n".join(new_versions))
# Entry point when invoked as a script (not on import).
if __name__ == '__main__':
    main()

15
hack/boilerplate.go.txt Normal file
View File

@ -0,0 +1,15 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

422
hack/ensure-golangci-lint.sh Executable file
View File

@ -0,0 +1,422 @@
#!/usr/bin/env bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This script is copied from from https://raw.githubusercontent.com/golangci/golangci-lint/main/install.sh.
set -e

# usage prints the installer's help text and exits with status 2.
usage() {
  this=$1
  cat <<EOF
$this: download go binaries for golangci/golangci-lint
Usage: $this [-b] bindir [-d] [tag]
-b sets bindir or installation directory, Defaults to ./bin
-d turns on debug logging
[tag] is a tag from
https://github.com/golangci/golangci-lint/releases
If tag is missing, then the latest will be used.
Generated by godownloader
https://github.com/goreleaser/godownloader
EOF
  exit 2
}

# parse_args populates the globals BINDIR and TAG from the command line:
#   -b <dir>  install directory (default ./bin, overridable via $BINDIR)
#   -d        enable debug-level logging
#   -x        enable shell tracing
#   [tag]     release tag; empty means "latest"
parse_args() {
  #BINDIR is ./bin unless set be ENV
  # over-ridden by flag below
  BINDIR=${BINDIR:-./bin}
  while getopts "b:dh?x" arg; do
    case "$arg" in
      b) BINDIR="$OPTARG" ;;
      d) log_set_priority 10 ;;
      h | \?) usage "$0" ;;
      x) set -x ;;
    esac
  done
  shift $((OPTIND - 1))
  TAG=$1
}
# this function wraps all the destructive operations
# if a curl|bash cuts off the end of the script due to
# network, either nothing will happen or will syntax error
# out preventing half-done work
#
# execute downloads the release tarball and checksum file, verifies the
# sha256, unpacks, and installs each binary into BINDIR.
execute() {
  tmpdir=$(mktemp -d)
  log_debug "downloading files into ${tmpdir}"
  http_download "${tmpdir}/${TARBALL}" "${TARBALL_URL}"
  http_download "${tmpdir}/${CHECKSUM}" "${CHECKSUM_URL}"
  # Abort before installing anything if the tarball fails verification.
  hash_sha256_verify "${tmpdir}/${TARBALL}" "${tmpdir}/${CHECKSUM}"
  srcdir="${tmpdir}/${NAME}"
  rm -rf "${srcdir}"
  (cd "${tmpdir}" && untar "${TARBALL}")
  test ! -d "${BINDIR}" && install -d "${BINDIR}"
  for binexe in $BINARIES; do
    # Windows binaries carry an .exe suffix inside the archive.
    if [ "$OS" = "windows" ]; then
      binexe="${binexe}.exe"
    fi
    install "${srcdir}/${binexe}" "${BINDIR}/"
    log_info "installed ${BINDIR}/${binexe}"
  done
  # Clean up the scratch directory on success.
  rm -rf "${tmpdir}"
}
# get_binaries sets BINARIES for the current PLATFORM (OS/arch pair) or
# aborts when the platform is unsupported. Every supported platform ships
# the same single binary, so the arms are grouped with pattern alternation.
get_binaries() {
  case "$PLATFORM" in
    darwin/amd64 | darwin/arm64 | darwin/armv6 | darwin/armv7 | \
    darwin/mips64 | darwin/mips64le | darwin/ppc64le | darwin/s390x | \
    freebsd/386 | freebsd/amd64 | freebsd/armv6 | freebsd/armv7 | \
    freebsd/mips64 | freebsd/mips64le | freebsd/ppc64le | freebsd/s390x | \
    linux/386 | linux/amd64 | linux/arm64 | linux/armv6 | linux/armv7 | \
    linux/mips64 | linux/mips64le | linux/ppc64le | linux/s390x | \
    windows/386 | windows/amd64 | windows/arm64 | windows/armv6 | windows/armv7 | \
    windows/mips64 | windows/mips64le | windows/ppc64le | windows/s390x)
      BINARIES="golangci-lint"
      ;;
    *)
      log_crit "platform $PLATFORM is not supported. Make sure this script is up-to-date and file request at https://github.com/${PREFIX}/issues/new"
      exit 1
      ;;
  esac
}
# tag_to_version resolves TAG (empty means "latest") against the project's
# GitHub releases and derives VERSION as TAG without any leading "v".
tag_to_version() {
  if [ -z "${TAG}" ]; then
    log_info "checking GitHub for latest tag"
  else
    log_info "checking GitHub for tag '${TAG}'"
  fi
  # "&& true" keeps set -e from aborting here; emptiness is checked below.
  REALTAG=$(github_release "$OWNER/$REPO" "${TAG}") && true
  if test -z "$REALTAG"; then
    log_crit "unable to find '${TAG}' - use 'latest' or see https://github.com/${PREFIX}/releases for details"
    exit 1
  fi
  # if version starts with 'v', remove it
  TAG="$REALTAG"
  VERSION=${TAG#v}
}
# adjust_format switches the archive format per OS (zip on Windows).
adjust_format() {
  # change format (tar.gz or zip) based on OS
  case ${OS} in
    windows) FORMAT=zip ;;
  esac
  true
}
# adjust_os is a hook for OS-specific archive-name tweaks (no-op here).
adjust_os() {
  # adjust archive name based on OS
  true
}
# adjust_arch is a hook for arch-specific archive-name tweaks (no-op here).
adjust_arch() {
  # adjust archive name based on ARCH
  true
}
cat /dev/null <<EOF
------------------------------------------------------------------------
https://github.com/client9/shlib - portable posix shell functions
Public domain - http://unlicense.org
https://github.com/client9/shlib/blob/master/LICENSE.md
but credit (and pull requests) appreciated.
------------------------------------------------------------------------
EOF
# is_command reports whether $1 is an executable command on PATH.
is_command() {
  command -v "$1" >/dev/null
}
# echoerr prints its arguments to stderr.
echoerr() {
  echo "$@" 1>&2
}
# log_prefix is the tag prepended to log lines (redefined later to $PREFIX).
log_prefix() {
  echo "$0"
}
# Current log-priority threshold, syslog-style (6 = info).
_logp=6
# log_set_priority sets the logging threshold.
log_set_priority() {
  _logp="$1"
}
# log_priority: with no argument prints the threshold; with one argument
# succeeds iff that level is at or below the threshold (i.e. loggable).
log_priority() {
  if test -z "$1"; then
    echo "$_logp"
    return
  fi
  [ "$1" -le "$_logp" ]
}
# log_tag maps a syslog level number to its name.
log_tag() {
  case $1 in
    0) echo "emerg" ;;
    1) echo "alert" ;;
    2) echo "crit" ;;
    3) echo "err" ;;
    4) echo "warning" ;;
    5) echo "notice" ;;
    6) echo "info" ;;
    7) echo "debug" ;;
    *) echo "$1" ;;
  esac
}
# Leveled log helpers; each is silent when above the current threshold.
log_debug() {
  log_priority 7 || return 0
  echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
}
log_info() {
  log_priority 6 || return 0
  echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
}
log_err() {
  log_priority 3 || return 0
  echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
}
log_crit() {
  log_priority 2 || return 0
  echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
}
# uname_os prints the host OS normalized to a GOOS-style value
# (cygwin/mingw/msys variants all map to "windows").
uname_os() {
  os=$(uname -s | tr '[:upper:]' '[:lower:]')
  case "$os" in
    cygwin_nt*) os="windows" ;;
    mingw*) os="windows" ;;
    msys_nt*) os="windows" ;;
  esac
  echo "$os"
}
# uname_arch prints the host architecture normalized to a GOARCH-style value.
uname_arch() {
  arch=$(uname -m)
  case $arch in
    x86_64) arch="amd64" ;;
    x86) arch="386" ;;
    i686) arch="386" ;;
    i386) arch="386" ;;
    aarch64) arch="arm64" ;;
    armv5*) arch="armv5" ;;
    armv6*) arch="armv6" ;;
    armv7*) arch="armv7" ;;
  esac
  echo ${arch}
}
# uname_os_check validates that uname_os produced a known GOOS value.
uname_os_check() {
  os=$(uname_os)
  case "$os" in
    darwin) return 0 ;;
    dragonfly) return 0 ;;
    freebsd) return 0 ;;
    linux) return 0 ;;
    android) return 0 ;;
    nacl) return 0 ;;
    netbsd) return 0 ;;
    openbsd) return 0 ;;
    plan9) return 0 ;;
    solaris) return 0 ;;
    windows) return 0 ;;
  esac
  log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
  return 1
}
# uname_arch_check validates that uname_arch produced a known GOARCH value.
uname_arch_check() {
  arch=$(uname_arch)
  case "$arch" in
    386) return 0 ;;
    amd64) return 0 ;;
    arm64) return 0 ;;
    armv5) return 0 ;;
    armv6) return 0 ;;
    armv7) return 0 ;;
    ppc64) return 0 ;;
    ppc64le) return 0 ;;
    mips) return 0 ;;
    mipsle) return 0 ;;
    mips64) return 0 ;;
    mips64le) return 0 ;;
    s390x) return 0 ;;
    amd64p32) return 0 ;;
  esac
  log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
  return 1
}
# untar extracts archive $1 into the current directory, dispatching on its
# extension (.tar.gz/.tgz, .tar, .zip); unknown formats are an error.
untar() {
  tarball=$1
  case "${tarball}" in
    *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;;
    *.tar) tar --no-same-owner -xf "${tarball}" ;;
    *.zip) unzip "${tarball}" ;;
    *)
      log_err "untar unknown archive format for ${tarball}"
      return 1
      ;;
  esac
}
# http_download_curl fetches $2 into file $1 (optional request header $3)
# via curl; fails unless the final HTTP status is exactly 200.
http_download_curl() {
  local_file=$1
  source_url=$2
  header=$3
  if [ -z "$header" ]; then
    code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
  else
    code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
  fi
  if [ "$code" != "200" ]; then
    log_debug "http_download_curl received HTTP status $code"
    return 1
  fi
  return 0
}
# http_download_wget fetches $2 into file $1 (optional request header $3)
# via wget; relies on wget's own exit status.
http_download_wget() {
  local_file=$1
  source_url=$2
  header=$3
  if [ -z "$header" ]; then
    wget -q -O "$local_file" "$source_url"
  else
    wget -q --header "$header" -O "$local_file" "$source_url"
  fi
}
# http_download dispatches to curl or wget, whichever is installed.
http_download() {
  log_debug "http_download $2"
  if is_command curl; then
    http_download_curl "$@"
    return
  elif is_command wget; then
    http_download_wget "$@"
    return
  fi
  log_crit "http_download unable to find wget or curl"
  return 1
}
# http_copy downloads URL $1 (with request header $2) to a temp file and
# prints the response body to stdout.
http_copy() {
  tmp=$(mktemp)
  http_download "${tmp}" "$1" "$2" || return 1
  body=$(cat "$tmp")
  rm -f "${tmp}"
  echo "$body"
}
# github_release resolves a release for "owner/repo" ($1); $2 is a tag or
# empty for "latest". Prints the concrete tag_name extracted from GitHub's
# JSON release-redirect response.
github_release() {
  owner_repo=$1
  version=$2
  test -z "$version" && version="latest"
  giturl="https://github.com/${owner_repo}/releases/${version}"
  json=$(http_copy "$giturl" "Accept:application/json")
  test -z "$json" && return 1
  # Crude JSON scrape: take everything between '"tag_name":"' and the next '"'.
  version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
  test -z "$version" && return 1
  echo "$version"
}
# hash_sha256 prints the SHA-256 digest of file $1 (default: stdin), using
# the first available tool among gsha256sum, sha256sum, shasum and openssl.
hash_sha256() {
  TARGET=${1:-/dev/stdin}
  if is_command gsha256sum; then
    hash=$(gsha256sum "$TARGET") || return 1
    echo "$hash" | cut -d ' ' -f 1
  elif is_command sha256sum; then
    hash=$(sha256sum "$TARGET") || return 1
    echo "$hash" | cut -d ' ' -f 1
  elif is_command shasum; then
    hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
    echo "$hash" | cut -d ' ' -f 1
  elif is_command openssl; then
    # BUGFIX: upstream ran "openssl -dst openssl dgst -sha256" (an invalid
    # command line) and then selected cut field "a" (not a number). Use a
    # valid dgst invocation; its output is "SHA256(<file>)= <hash>", so the
    # digest is the second space-separated field.
    hash=$(openssl dgst -sha256 "$TARGET") || return 1
    echo "$hash" | cut -d ' ' -f 2
  else
    log_crit "hash_sha256 unable to find command to compute sha-256 hash"
    return 1
  fi
}
# hash_sha256_verify checks that the sha256 of file $1 matches the entry for
# its basename in checksum file $2 (sha256sum format: "<hash>  <name>").
hash_sha256_verify() {
  TARGET=$1
  checksums=$2
  if [ -z "$checksums" ]; then
    log_err "hash_sha256_verify checksum file not specified in arg2"
    return 1
  fi
  BASENAME=${TARGET##*/}
  # Expected hash: first field of the checksum line naming this file.
  want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
  if [ -z "$want" ]; then
    log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
    return 1
  fi
  got=$(hash_sha256 "$TARGET")
  if [ "$want" != "$got" ]; then
    log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
    return 1
  fi
}
cat /dev/null <<EOF
------------------------------------------------------------------------
End of functions from https://github.com/client9/shlib
------------------------------------------------------------------------
EOF
# ---- main driver: configure project constants, detect platform, resolve
# ---- the release tag, then download and install via execute().
PROJECT_NAME="golangci-lint"
OWNER=golangci
REPO="golangci-lint"
BINARY=golangci-lint
FORMAT=tar.gz
OS=$(uname_os)
ARCH=$(uname_arch)
PREFIX="$OWNER/$REPO"
# use in logging routines
log_prefix() {
  echo "$PREFIX"
}
PLATFORM="${OS}/${ARCH}"
GITHUB_DOWNLOAD=https://github.com/${OWNER}/${REPO}/releases/download
uname_os_check "$OS"
uname_arch_check "$ARCH"
parse_args "$@"
get_binaries
tag_to_version
adjust_format
adjust_os
adjust_arch
log_info "found version: ${VERSION} for ${TAG}/${OS}/${ARCH}"
# Compose the download URLs for the release tarball and its checksums file.
NAME=${BINARY}-${VERSION}-${OS}-${ARCH}
TARBALL=${NAME}.${FORMAT}
TARBALL_URL=${GITHUB_DOWNLOAD}/${TAG}/${TARBALL}
CHECKSUM=${PROJECT_NAME}-${VERSION}-checksums.txt
CHECKSUM_URL=${GITHUB_DOWNLOAD}/${TAG}/${CHECKSUM}
execute

45
hack/fetch-kubernetes-hash.sh Executable file
View File

@ -0,0 +1,45 @@
#!/bin/bash
# Fetch the published sha256 digests for kubeadm/kubelet/kubectl across a
# range of Kubernetes patch releases and both arches, then save them as
# nested JSON (.<app>.<arch>.<version> = <sha256>) in kubernetes-hashes.json.
# Highest patch number to include for each minor release line.
v22_patch_max=15
v23_patch_max=13
v24_patch_max=7
v25_patch_max=3
versions=()
# append_k8s_version appends "<prefix>0" .. "<prefix><max>" to versions.
append_k8s_version() {
  prefix=$1
  max=$2
  for i in $(seq 0 "$max");
  do
    versions+=("${prefix}${i}")
  done
}
append_k8s_version "v1.22." $v22_patch_max
append_k8s_version "v1.23." $v23_patch_max
append_k8s_version "v1.24." $v24_patch_max
append_k8s_version "v1.25." $v25_patch_max
#versions=("v1.22.12" "v1.23.9" "v1.24.3")
arches=("amd64" "arm64")
apps=("kubeadm" "kubelet" "kubectl")
json="{}"
for app in "${apps[@]}";
do
  for arch in "${arches[@]}"
  do
    echo "${app}@${arch}"
    for ver in "${versions[@]}"
    do
      # Each .sha256 file on the release bucket contains just the digest.
      url="https://storage.googleapis.com/kubernetes-release/release/${ver}/bin/linux/${arch}/${app}.sha256"
      hash=$(wget --quiet -O - "$url")
      echo "\"${ver}\": \"${hash}\","
      # Merge this digest into the accumulated JSON object with jq.
      json=$(echo "$json" | jq ".${app}.${arch} += {\"${ver}\":\"${hash}\"}")
    done
  done
done
file="kubernetes-hashes.json"
echo "$json" | jq --indent 4 > "${file}" && echo -e "\n\nThe hash info have saved to file ${file}.\n\n"

View File

@ -0,0 +1,21 @@
# Build an offline RPM repository ISO for AlmaLinux 9.0 (packages selected
# from packages.yaml), exporting only the resulting .iso via a scratch stage.
FROM almalinux:9.0 as almalinux90
ARG TARGETARCH
ARG BUILD_TOOLS="dnf-plugins-core createrepo mkisofs epel-release"
ARG DIR=almalinux-9.0-${TARGETARCH}-rpms
# yq path expressions selecting which package lists to download.
ARG PKGS=.common[],.rpms[],.almalinux[],.almalinux90[]
RUN dnf install -q -y ${BUILD_TOOLS} \
    && dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \
    && dnf makecache
# NOTE(review): WORKDIR is relative ("package" -> /package); sibling images
# use an absolute /package — confirm this is intentional.
WORKDIR package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
# ceph-common is filtered out of the list for this distro.
RUN yq eval ${PKGS} packages.yaml | sed '/^ceph-common$/d' > packages.list
RUN sort -u packages.list | xargs dnf download --resolve --alldeps --downloaddir=${DIR} \
    && createrepo -d ${DIR} \
    && mkisofs -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=almalinux90 /package/*.iso /

View File

@ -0,0 +1,22 @@
# Build an offline RPM repository ISO for CentOS 7, exporting only the
# resulting .iso via a scratch stage.
FROM centos:7 as centos7
ARG TARGETARCH
ENV OS=centos
ENV OS_VERSION=7
ARG BUILD_TOOLS="yum-utils createrepo mkisofs epel-release"
ARG DIR=${OS}${OS_VERSION}-${TARGETARCH}-rpms
RUN yum install -q -y ${BUILD_TOOLS} \
    && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \
    && yum makecache
# NOTE(review): WORKDIR is relative ("package" -> /package); sibling images
# use an absolute /package — confirm this is intentional.
WORKDIR package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
# Select the common + rpm + centos + centos7 package lists from packages.yaml.
RUN yq eval ".common[],.rpms[],.${OS}[],.${OS}${OS_VERSION}[]" packages.yaml > packages.list
# repotrack pulls each package with its full dependency closure.
RUN sort -u packages.list | xargs repotrack -p ${DIR} \
    && createrepo -d ${DIR} \
    && mkisofs -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=centos7 /package/*.iso /

View File

@ -0,0 +1,38 @@
# Build an offline .deb repository ISO for Debian 10 (buster), exporting only
# the resulting .iso via a scratch stage.
FROM debian:10 as debian10
ARG TARGETARCH
ARG OS_RELEASE=buster
ARG OS_VERSION=10
ARG DIR=debian-10-${TARGETARCH}-debs
ARG PKGS=.common[],.debs[],.debian[],.debian10[]
ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage dirmngr"
ENV DEBIAN_FRONTEND=noninteractive
# dump system package list
RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
# The GlusterFS repo only publishes amd64 packages, so it is added conditionally.
RUN ARCH=$(dpkg --print-architecture) \
    && apt update -qq \
    && apt install -y --no-install-recommends $BUILD_TOOLS \
    && if [ "$TARGETARCH" = "amd64" ]; then \
         curl -fsSL https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | apt-key add - ; \
         echo deb https://download.gluster.org/pub/gluster/glusterfs/7/LATEST/Debian/${OS_VERSION}/amd64/apt ${OS_RELEASE} main > /etc/apt/sources.list.d/gluster.list ; \
       fi \
    && curl -fsSL "https://download.docker.com/linux/debian/gpg" | apt-key add -qq - \
    && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/debian ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list \
    && apt update -qq
WORKDIR /package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
# Resolve every selected package to its download URL without installing.
RUN yq eval "${PKGS}" packages.yaml >> packages.list \
    && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
RUN mkdir -p ${DIR} \
    && wget -q -x -P ${DIR} -i packages.urls \
    && cd ${DIR} \
    && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
RUN genisoimage -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=debian10 /package/*.iso /

View File

@ -0,0 +1,41 @@
# Build an offline .deb repository ISO for Debian 11 (bullseye), exporting
# only the resulting .iso via a scratch stage.
FROM debian:11.6 as debian11
ARG TARGETARCH
ARG OS_RELEASE=bullseye
ARG OS_VERSION=11
ARG DIR=debian-11-${TARGETARCH}-debs
ARG PKGS=.common[],.debs[],.debian[],.debian11[]
ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage dirmngr"
ENV DEBIAN_FRONTEND=noninteractive
# dump system package list
RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
# The GlusterFS repo only publishes amd64 packages, so it is added conditionally.
RUN ARCH=$(dpkg --print-architecture) \
    && apt update -qq \
    && apt install -y --no-install-recommends $BUILD_TOOLS \
    && if [ "$TARGETARCH" = "amd64" ]; then \
         curl -fsSL https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | apt-key add - ; \
         echo deb https://download.gluster.org/pub/gluster/glusterfs/7/LATEST/Debian/${OS_VERSION}/amd64/apt ${OS_RELEASE} main > /etc/apt/sources.list.d/gluster.list ; \
       fi \
    && curl -fsSL "https://download.docker.com/linux/debian/gpg" | apt-key add -qq - \
    && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/debian ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list \
    && apt update -qq \
    && apt upgrade -y -qq
WORKDIR /package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.30.8 /usr/bin/yq /usr/bin/yq
# Resolve every selected package to its download URL without installing.
RUN yq eval "${PKGS}" packages.yaml >> packages.list \
    && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
RUN cat packages.urls
RUN mkdir -p ${DIR} \
    && wget -q -x -P ${DIR} -i packages.urls \
    && cd ${DIR} \
    && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
RUN genisoimage -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=debian11 /package/*.iso /

View File

@ -0,0 +1,33 @@
# Build an offline .deb repository ISO for Ubuntu 16.04 (xenial), exporting
# only the resulting .iso via a scratch stage.
FROM ubuntu:16.04 as ubuntu1604
ARG TARGETARCH
ARG OS_RELEASE=xenial
ARG DIR=ubuntu-16.04-${TARGETARCH}-debs
ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu1604[]
ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
ENV DEBIAN_FRONTEND=noninteractive
# dump system package list
RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
RUN apt update -qq \
    && apt install -y --no-install-recommends $BUILD_TOOLS \
    && add-apt-repository ppa:gluster/glusterfs-7 -y \
    && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
    && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
    && apt update -qq
WORKDIR /package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
# Resolve every selected package to its download URL without installing.
RUN yq eval "${PKGS}" packages.yaml >> packages.list \
    && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
RUN mkdir -p ${DIR} \
    && wget -q -x -P ${DIR} -i packages.urls \
    && cd ${DIR} \
    && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
RUN genisoimage -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=ubuntu1604 /package/*.iso /

View File

@ -0,0 +1,34 @@
# Build an offline .deb repository ISO for Ubuntu 18.04 (bionic), exporting
# only the resulting .iso via a scratch stage.
FROM ubuntu:18.04 as ubuntu1804
ARG TARGETARCH
ARG OS_RELEASE=bionic
ARG DIR=ubuntu-18.04-${TARGETARCH}-debs
ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu1804[]
ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
ENV DEBIAN_FRONTEND=noninteractive
# dump system package list
RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
RUN apt update -qq \
    && apt install -y --no-install-recommends $BUILD_TOOLS \
    && add-apt-repository ppa:gluster/glusterfs-7 -y \
    && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
    && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
    && apt update -qq
WORKDIR /package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
# Resolve every selected package (plus the current system selection) to its
# download URL without installing.
RUN yq eval "${PKGS}" packages.yaml >> packages.list \
    && dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 >> packages.list \
    && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
RUN mkdir -p ${DIR} \
    && wget -q -x -P ${DIR} -i packages.urls \
    && cd ${DIR} \
    && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
RUN genisoimage -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=ubuntu1804 /package/*.iso /

View File

@ -0,0 +1,33 @@
# Build an offline .deb repository ISO for Ubuntu 20.04 (focal), exporting
# only the resulting .iso via a scratch stage.
FROM ubuntu:20.04 as ubuntu2004
ARG TARGETARCH
ARG OS_RELEASE=focal
ARG DIR=ubuntu-20.04-${TARGETARCH}-debs
ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu2004[]
ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
ENV DEBIAN_FRONTEND=noninteractive
# dump system package list
RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
RUN apt update -qq \
    && apt install -y --no-install-recommends $BUILD_TOOLS \
    && add-apt-repository ppa:gluster/glusterfs-7 -y \
    && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
    && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
    && apt update -qq
WORKDIR /package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
# Resolve every selected package to its download URL without installing.
RUN yq eval "${PKGS}" packages.yaml >> packages.list \
    && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
RUN mkdir -p ${DIR} \
    && wget -q -x -P ${DIR} -i packages.urls \
    && cd ${DIR} \
    && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
RUN genisoimage -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=ubuntu2004 /package/*.iso /

View File

@ -0,0 +1,33 @@
# Build an offline .deb repository ISO for Ubuntu 22.04 (jammy), exporting
# only the resulting .iso via a scratch stage. Note: no GlusterFS PPA here
# (the ppa:gluster/glusterfs-7 line is intentionally commented out).
FROM ubuntu:22.04 as ubuntu2204
ARG TARGETARCH
ARG OS_RELEASE=jammy
ARG DIR=ubuntu-22.04-${TARGETARCH}-debs
ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu2204[]
ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
ENV DEBIAN_FRONTEND=noninteractive
# dump system package list
RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
RUN apt update -qq \
    && apt install -y --no-install-recommends $BUILD_TOOLS \
    #&& add-apt-repository ppa:gluster/glusterfs-7 -y \
    && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
    && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
    && apt update -qq
WORKDIR /package
COPY packages.yaml .
COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
# Resolve every selected package to its download URL without installing.
RUN yq eval "${PKGS}" packages.yaml >> packages.list \
    && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
RUN mkdir -p ${DIR} \
    && wget -q -x -P ${DIR} -i packages.urls \
    && cd ${DIR} \
    && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
RUN genisoimage -r -o ${DIR}.iso ${DIR}
FROM scratch
COPY --from=ubuntu2204 /package/*.iso /

View File

@ -0,0 +1,7 @@
#! /bin/sh
# Download each package named in $PACKAGES plus its first-level dependency
# closure (Depends/Recommends/Suggests) into the current directory.
# Download failures are appended to errors.txt rather than aborting the loop.
for p in ${PACKAGES} ; do
    echo "\n Download $p ... \n"
    sudo apt-get download $p 2>>errors.txt
    # Resolve direct deps via apt-cache, strip the field label, and fetch each.
    for i in $(apt-cache depends $p | grep -E 'Depends|Recommends|Suggests' | cut -d ':' -f 2,3 | sed -e s/' '/''/); do sudo apt-get download $i 2>>errors.txt; done
done

View File

@ -0,0 +1,88 @@
---
# Package lists consumed by the offline-repo Dockerfiles via yq path
# expressions (e.g. ".common[],.rpms[],.centos[],.centos7[]").
# common: installed on every supported distro.
common:
  - curl
  - ceph-common
  - net-tools
  - lvm2
  - telnet
  - tcpdump
  - socat
  - openssl
  - chrony
  - conntrack
  # NOTE(review): "curl" appears twice in this section (see first entry);
  # harmless because consumers run `sort -u`, but one entry is redundant.
  - curl
  - ipvsadm
  - ipset
  - psmisc
  - bash-completion
  - ebtables
  - haproxy
  - keepalived
# rpms: extras for RPM-based distros.
rpms:
  - nfs-utils
  - yum-utils
  - bind-utils
  - glusterfs-fuse
  - lz4
  - nss
  - nss-sysinit
  - nss-tools
  - conntrack-tools
# debs: extras for Debian-family distros.
debs:
  - apt-transport-https
  - ca-certificates
  - dnsutils
  - git
  - glusterfs-client
  - gnupg-agent
  - nfs-common
  - openssh-server
  - software-properties-common
  - sudo
# Per-distro sections below pin the container-runtime versions.
centos:
  - containerd.io
centos7:
  - libselinux-python
  - docker-ce-20.10.8
  - docker-ce-cli-20.10.8
debian:
  - containerd.io
debian10:
  - docker-ce=5:20.10.8~3-0~debian-buster
  - docker-ce-cli=5:20.10.8~3-0~debian-buster
debian11:
  - docker-ce=5:20.10.8~3-0~debian-bullseye
  - docker-ce-cli=5:20.10.8~3-0~debian-bullseye
ubuntu:
  - containerd.io
ubuntu1604:
  - docker-ce=5:20.10.8~3-0~ubuntu-xenial
  - docker-ce-cli=5:20.10.8~3-0~ubuntu-xenial
ubuntu1804:
  - docker-ce=5:20.10.8~3-0~ubuntu-bionic
  - docker-ce-cli=5:20.10.8~3-0~ubuntu-bionic
ubuntu2004:
  - docker-ce=5:20.10.8~3-0~ubuntu-focal
  - docker-ce-cli=5:20.10.8~3-0~ubuntu-focal
# The minimum version of docker-ce on ubuntu 2204 is 20.10.13
ubuntu2204:
  - docker-ce=5:20.10.13~3-0~ubuntu-jammy
  - docker-ce-cli=5:20.10.13~3-0~ubuntu-jammy
almalinux:
  - containerd.io
  - docker-compose-plugin
almalinux90:
  - docker-ce-20.10.17
  - docker-ce-cli-20.10.17

64
hack/lib/golang.sh Executable file
View File

@ -0,0 +1,64 @@
#!/usr/bin/env bash
# This is a modified version of Kubernetes
# Go import path used when composing the -ldflags version variables below.
# NOTE(review): still points at kubesphere.io/kubesphere — confirm this is
# the intended module path for this repository.
KUBE_GO_PACKAGE=kubesphere.io/kubesphere
# Ensure the go tool exists and is a viable version.
kube::golang::verify_go_version() {
  if [[ -z "$(command -v go)" ]]; then
    kube::log::usage_from_stdin <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    return 2
  fi
  local go_version
  IFS=" " read -ra go_version <<< "$(go version)"
  local minimum_go_version
  minimum_go_version=go1.20
  # Sort the installed and minimum versions; if the minimum does not sort
  # first the toolchain is too old ("devel" builds are always accepted).
  if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then
    kube::log::usage_from_stdin <<EOF
Detected go version: ${go_version[*]}.
Kubernetes requires ${minimum_go_version} or greater.
Please install ${minimum_go_version} or later.
EOF
    return 2
  fi
}
# Prints the value that needs to be passed to the -ldflags parameter of go build
# in order to set the Kubernetes based on the git tree status.
# IMPORTANT: if you update any of these, also update the lists in
# pkg/version/def.bzl and hack/print-workspace-status.sh.
# Reads KUBE_GIT_COMMIT / KUBE_GIT_TREE_STATE / KUBE_GIT_VERSION /
# KUBE_GIT_MAJOR / KUBE_GIT_MINOR (populated by get_version_vars).
kube::version::ldflags() {
  kube::version::get_version_vars
  local -a ldflags
  # add_ldflag appends one -X flag setting pkg/version.<key>=<val>.
  function add_ldflag() {
    local key=${1}
    local val=${2}
    # If you update these, also update the list component-base/version/def.bzl.
    ldflags+=(
      "-X '${KUBE_GO_PACKAGE}/pkg/version.${key}=${val}'"
    )
  }
  # SOURCE_DATE_EPOCH, when set, makes builds reproducible.
  add_ldflag "buildDate" "$(date ${SOURCE_DATE_EPOCH:+"--date=@${SOURCE_DATE_EPOCH}"} -u +'%Y-%m-%dT%H:%M:%SZ')"
  if [[ -n ${KUBE_GIT_COMMIT-} ]]; then
    add_ldflag "gitCommit" "${KUBE_GIT_COMMIT}"
    add_ldflag "gitTreeState" "${KUBE_GIT_TREE_STATE}"
  fi
  if [[ -n ${KUBE_GIT_VERSION-} ]]; then
    add_ldflag "gitVersion" "${KUBE_GIT_VERSION}"
  fi
  if [[ -n ${KUBE_GIT_MAJOR-} && -n ${KUBE_GIT_MINOR-} ]]; then
    add_ldflag "gitMajor" "${KUBE_GIT_MAJOR}"
    add_ldflag "gitMinor" "${KUBE_GIT_MINOR}"
  fi
  # The -ldflags parameter takes a single string, so join the output.
  echo "${ldflags[*]-}"
}

111
hack/lib/init.sh Executable file
View File

@ -0,0 +1,111 @@
#!/usr/bin/env bash
# This script is modified version of Kubernetes script
# Common initialization: strict mode, output-path globals, and helper libs.
set -o errexit
set -o nounset
set -o pipefail
export GO111MODULE=auto
# The root of the build/dist directory
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
KUBE_OUTPUT_SUBPATH="${KUBE_OUTPUT_SUBPATH:-_output/local}"
KUBE_OUTPUT="${KUBE_ROOT}/${KUBE_OUTPUT_SUBPATH}"
KUBE_OUTPUT_BINPATH="${KUBE_OUTPUT}/bin"
export THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin"
# Load the helper libraries, installing the ERR-trap stack dumper as early
# as possible so later sourcing failures produce a trace.
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/hack/lib/logging.sh"
kube::log::install_errexit
source "${KUBE_ROOT}/hack/lib/golang.sh"
# Host-specific bin dir; kube::util::host_platform is defined in util.sh
# (not visible in this chunk).
KUBE_OUTPUT_HOSTBIN="${KUBE_OUTPUT_BINPATH}/$(kube::util::host_platform)"
export KUBE_OUTPUT_HOSTBIN
# This emulates "readlink -f" which is not available on MacOS X.
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
#   X=$(readlink -f $1 2>&1)
#   Y=$(kube::readlinkdashf $1 2>&1)
#   if [ "$X" != "$Y" ]; then
#     echo readlinkdashf $1: expected "$X", got "$Y"
#   fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistant
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
#
# Prints the fully resolved path of $1; directories resolve via pwd -P,
# symlinked files via readlink.
function kube::readlinkdashf {
  # run in a subshell for simpler 'cd'
  (
    if [[ -d "${1}" ]]; then # This also catch symlinks to dirs.
      cd "${1}"
      pwd -P
    else
      cd "$(dirname "${1}")"
      local f
      f=$(basename "${1}")
      if [[ -L "${f}" ]]; then
        readlink "${f}"
      else
        echo "$(pwd -P)/${f}"
      fi
    fi
  )
}
# This emulates "realpath" which is not available on MacOS X
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
#   X=$(realpath $1 2>&1)
#   Y=$(kube::realpath $1 2>&1)
#   if [ "$X" != "$Y" ]; then
#     echo realpath $1: expected "$X", got "$Y"
#   fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistant
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
#
# Like kube::readlinkdashf but fails (returns 1) when $1 does not exist.
kube::realpath() {
  if [[ ! -e "${1}" ]]; then
    echo "${1}: No such file or directory" >&2
    return 1
  fi
  kube::readlinkdashf "${1}"
}

171
hack/lib/logging.sh Executable file
View File

@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Controls verbosity of the script output and logging.
KUBE_VERBOSE="${KUBE_VERBOSE:-5}"
# Handler for when we exit automatically on an error.
# Borrowed from https://gist.github.com/ahendrix/7030300
# Dumps the bash call tree, then delegates to kube::log::error_exit.
kube::log::errexit() {
  local err="${PIPESTATUS[*]}"
  # If the shell we are in doesn't have errexit set (common in subshells) then
  # don't dump stacks.
  set +o | grep -qe "-o errexit" || return
  set +o xtrace
  local code="${1:-1}"
  # Print out the stack trace described by $function_stack
  if [ ${#FUNCNAME[@]} -gt 2 ]
  then
    kube::log::error "Call tree:"
    for ((i=1;i<${#FUNCNAME[@]}-1;i++))
    do
      kube::log::error " ${i}: ${BASH_SOURCE[${i}+1]}:${BASH_LINENO[${i}]} ${FUNCNAME[${i}]}(...)"
    done
  fi
  kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status ${err}" "${1:-1}" 1
}
# Installs the ERR trap that routes failures through kube::log::errexit.
kube::log::install_errexit() {
  # trap ERR to provide an error handler whenever a command exits nonzero this
  # is a more verbose version of set -o errexit
  trap 'kube::log::errexit' ERR
  # setting errtrace allows our ERR trap handler to be propagated to functions,
  # expansions and subshells
  set -o errtrace
}
# Print out the stack trace
#
# Args:
#   $1 The number of stack frames to skip when printing.
kube::log::stack() {
  local stack_skip=${1:-0}
  # Skip this function's own frame as well.
  stack_skip=$((stack_skip + 1))
  if [[ ${#FUNCNAME[@]} -gt ${stack_skip} ]]; then
    echo "Call stack:" >&2
    local i
    for ((i=1 ; i <= ${#FUNCNAME[@]} - stack_skip ; i++))
    do
      local frame_no=$((i - 1 + stack_skip))
      local source_file=${BASH_SOURCE[${frame_no}]}
      local source_lineno=${BASH_LINENO[$((frame_no - 1))]}
      local funcname=${FUNCNAME[${frame_no}]}
      echo " ${i}: ${source_file}:${source_lineno} ${funcname}(...)" >&2
    done
  fi
}
# Log an error and exit.
# Args:
#   $1 Message to log with the error
#   $2 The error code to return
#   $3 The number of stack frames to skip when printing.
kube::log::error_exit() {
  local message="${1:-}"
  local code="${2:-1}"
  local stack_skip="${3:-0}"
  stack_skip=$((stack_skip + 1))
  # Only print location/stack details at verbosity >= 4.
  if [[ ${KUBE_VERBOSE} -ge 4 ]]; then
    local source_file=${BASH_SOURCE[${stack_skip}]}
    local source_line=${BASH_LINENO[$((stack_skip - 1))]}
    echo "!!! Error in ${source_file}:${source_line}" >&2
    [[ -z ${1-} ]] || {
      echo " ${1}" >&2
    }
    kube::log::stack ${stack_skip}
    echo "Exiting with status ${code}" >&2
  fi
  exit "${code}"
}
# Log an error but keep going. Don't dump the stack or exit.
# $1 is prefixed with a timestamp; remaining args print as indented lines.
kube::log::error() {
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "!!! ${timestamp} ${1-}" >&2
  shift
  for message; do
    echo " ${message}" >&2
  done
}
# Print an usage message to stderr. The arguments are printed directly.
kube::log::usage() {
  echo >&2
  local message
  for message; do
    echo "${message}" >&2
  done
  echo >&2
}
# Reads usage lines from stdin (e.g. a heredoc) and prints via kube::log::usage.
kube::log::usage_from_stdin() {
  local messages=()
  while read -r line; do
    messages+=("${line}")
  done
  kube::log::usage "${messages[@]}"
}
# Print out some info that isn't a top level status line.
# Suppressed when the requested level V exceeds KUBE_VERBOSE.
kube::log::info() {
  local V="${V:-0}"
  # BUGFIX: "[[ a < b ]]" compares lexicographically, so e.g. KUBE_VERBOSE=10
  # sorted before V=9 and messages were wrongly suppressed. Compare numerically.
  if [[ ${KUBE_VERBOSE} -lt ${V} ]]; then
    return
  fi
  for message; do
    echo "${message}"
  done
}
# Just like kube::log::info, but no \n, so you can make a progress bar
kube::log::progress() {
  for message; do
    echo -e -n "${message}"
  done
}
# Reads info lines from stdin (e.g. a heredoc) and prints via kube::log::info.
kube::log::info_from_stdin() {
  local messages=()
  while read -r line; do
    messages+=("${line}")
  done
  kube::log::info "${messages[@]}"
}
# Print a status line. Formatted to show up in a stream of output.
# Suppressed when the requested level V exceeds KUBE_VERBOSE.
kube::log::status() {
  local V="${V:-0}"
  # BUGFIX: "[[ a < b ]]" compares lexicographically, so e.g. KUBE_VERBOSE=10
  # sorted before V=9 and messages were wrongly suppressed. Compare numerically.
  if [[ ${KUBE_VERBOSE} -lt ${V} ]]; then
    return
  fi
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "+++ ${timestamp} ${1}"
  shift
  for message; do
    echo " ${message}"
  done
}

765
hack/lib/util.sh Executable file
View File

@ -0,0 +1,765 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Mark a variable as intentionally consumed from another sourcing context.
# Quiets shellcheck's "unused variable" warning and documents intent.
function kube::util::sourced_variable {
  # Deliberate no-op; the call site itself is the documentation.
  :
}
# Emit the current local time as YYYYMMDD-HHMMSS, which sorts
# lexicographically in chronological order.
kube::util::sortable_date() {
  date +"%Y%m%d-%H%M%S"
}
# arguments: target, item1, item2, item3, ...
# returns 0 if target equals one of the given items, 1 otherwise.
kube::util::array_contains() {
  local needle="$1"
  shift
  local candidate
  for candidate in "$@"; do
    [[ "${candidate}" == "${needle}" ]] && return 0
  done
  return 1
}
# Poll a URL until it answers, or until attempts are exhausted.
# Args:
#   $1 url     - URL to probe
#   $2 prefix  - label used in log lines (optional)
#   $3 wait    - seconds to sleep between attempts (default 1)
#   $4 times   - number of attempts (default 30)
#   $5 maxtime - per-request curl timeout in seconds (default 1)
# Returns 0 as soon as curl succeeds, 1 after all attempts fail.
kube::util::wait_for_url() {
  local url=$1
  local prefix=${2:-}
  local wait=${3:-1}
  local times=${4:-30}
  local maxtime=${5:-1}
  # Hard requirement: bail out of the whole script if curl is missing.
  command -v curl >/dev/null || {
    kube::log::usage "curl must be installed"
    exit 1
  }
  local i
  for i in $(seq 1 "${times}"); do
    local out
    # -g: no URL globbing, -k: allow insecure TLS, -f: fail on HTTP errors, -s: silent.
    if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then
      kube::log::status "On try ${i}, ${prefix}: ${out}"
      return 0
    fi
    sleep "${wait}"
  done
  kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
  return 1
}
# Example:  kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# Prepends a command to the existing trap for each named signal,
# preserving whatever handler was already installed.
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
  local trap_add_cmd
  trap_add_cmd=$1
  shift
  for trap_add_name in "$@"; do
    local existing_cmd
    local new_cmd
    # Grab the currently defined trap commands for this trap
    # (trap -p prints "trap -- 'cmd' NAME"; the awk pulls out 'cmd').
    existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')
    if [[ -z "${existing_cmd}" ]]; then
      new_cmd="${trap_add_cmd}"
    else
      # The new command runs first, then the pre-existing chain.
      new_cmd="${trap_add_cmd};${existing_cmd}"
    fi
    # Assign the test. Disable the shellcheck warning telling that trap
    # commands should be single quoted to avoid evaluating them at this
    # point instead evaluating them at run time. The logic of adding new
    # commands to a single trap requires them to be evaluated right away.
    # shellcheck disable=SC2064
    trap "${new_cmd}" "${trap_add_name}"
  done
}
# Opposite of kube::util::ensure-temp-dir(): delete the session temp dir.
kube::util::cleanup-temp-dir() {
  rm -fr "${KUBE_TEMP}"
}
# Create a temp dir that'll be deleted at the end of this bash session.
# Idempotent: reuses KUBE_TEMP when one was already created.
#
# Vars set:
#   KUBE_TEMP
kube::util::ensure-temp-dir() {
  if [[ -z ${KUBE_TEMP-} ]]; then
    # Plain `mktemp -d` works on GNU; the -t form is the BSD/macOS fallback.
    KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
    # Clean up automatically when the shell exits.
    kube::util::trap_add kube::util::cleanup-temp-dir EXIT
  fi
}
# Print the normalized host OS name (linux or darwin); exit 1 otherwise.
kube::util::host_os() {
  local host_os
  case "$(uname -s)" in
    Linux)  host_os=linux ;;
    Darwin) host_os=darwin ;;
    *)
      kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
      exit 1
      ;;
  esac
  echo "${host_os}"
}
# Print the normalized host architecture; exit 1 for unsupported machines.
# Pattern order matters: the *_64 patterns must win before their 32-bit
# prefixes (i?86*, arm*) are tried.
kube::util::host_arch() {
  local host_arch
  case "$(uname -m)" in
    x86_64* | i?86_64* | amd64*)
      host_arch=amd64 ;;
    aarch64* | arm64*)
      host_arch=arm64 ;;
    arm*)
      host_arch=arm ;;
    i?86*)
      host_arch=x86 ;;
    s390x*)
      host_arch=s390x ;;
    ppc64le*)
      host_arch=ppc64le ;;
    *)
      kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
      exit 1
      ;;
  esac
  echo "${host_arch}"
}
# This figures out the host platform without relying on golang. We need this as
# we don't want a golang install to be a prerequisite to building yet we need
# this info to figure out where the final binaries are placed.
kube::util::host_platform() {
  local os arch
  os="$(kube::util::host_os)"
  arch="$(kube::util::host_arch)"
  echo "${os}/${arch}"
}
# looks for $1 in well-known output locations for the platform ($2)
# $KUBE_ROOT must be set
# Prints the most recently modified match, or nothing when none exists.
kube::util::find-binary-for-platform() {
  local -r lookfor="$1"
  local -r platform="$2"
  local locations=(
    "${KUBE_ROOT}/_output/bin/${lookfor}"
    "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/platforms/${platform}/${lookfor}"
  )
  # Also search for binary in bazel build tree.
  # The bazel go rules place some binaries in subtrees like
  # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
  # the platform name is matched in the path.
  while IFS=$'\n' read -r location; do
    locations+=("$location");
  done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
    \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true)
  # List most recently-updated location.
  local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
  echo -n "${bin}"
}
# looks for $1 in well-known output locations for the host platform
# $KUBE_ROOT must be set
kube::util::find-binary() {
  local -r lookfor="$1"
  kube::util::find-binary-for-platform "${lookfor}" "$(kube::util::host_platform)"
}
# Run all known doc generators (today gendocs and genman for kubectl)
# $1 is the directory to put those generated documents
kube::util::gen-docs() {
  local dest="$1"
  # Find binary
  gendocs=$(kube::util::find-binary "gendocs")
  genkubedocs=$(kube::util::find-binary "genkubedocs")
  genman=$(kube::util::find-binary "genman")
  genyaml=$(kube::util::find-binary "genyaml")
  genfeddocs=$(kube::util::find-binary "genfeddocs")
  # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at
  # least from k/k tree), remove it completely.
  kube::util::sourced_variable "${genfeddocs}"
  # Each generator writes its docs into a dedicated subtree under ${dest}.
  mkdir -p "${dest}/docs/user-guide/kubectl/"
  "${gendocs}" "${dest}/docs/user-guide/kubectl/"
  mkdir -p "${dest}/docs/admin/"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
  "${genkubedocs}" "${dest}/docs/admin/" "kubelet"
  "${genkubedocs}" "${dest}/docs/admin/" "kubeadm"
  mkdir -p "${dest}/docs/man/man1/"
  "${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
  "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
  "${genman}" "${dest}/docs/man/man1/" "kube-proxy"
  "${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
  "${genman}" "${dest}/docs/man/man1/" "kubelet"
  "${genman}" "${dest}/docs/man/man1/" "kubectl"
  "${genman}" "${dest}/docs/man/man1/" "kubeadm"
  mkdir -p "${dest}/docs/yaml/kubectl/"
  "${genyaml}" "${dest}/docs/yaml/kubectl/"
  # create the list of generated files so remove-gen-docs can delete them later
  pushd "${dest}" > /dev/null || return 1
  touch docs/.generated_docs
  find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
  popd > /dev/null || return 1
}
# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
# must be set. Reads the manifest written by kube::util::gen-docs.
kube::util::remove-gen-docs() {
  if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
    # remove all of the old docs; we don't want to check them in.
    while read -r file; do
      rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
    done <"${KUBE_ROOT}/docs/.generated_docs"
    # The docs/.generated_docs file lists itself, so we don't need to explicitly
    # delete it.
  fi
}
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
# * default behavior for only a group: experimental -> apis/experimental
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
# * Very special handling for when both group and version are "": / -> api
#
# $KUBE_ROOT must be set.
kube::util::group-version-to-pkg-path() {
  local group_version="$1"
  # Prefer a vendored k8s.io/api package when one exists for this
  # group/version (with any ".k8s.io" suffix stripped from the group).
  while IFS=$'\n' read -r api; do
    if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then
      echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
      return
    fi
  done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort)
  # "v1" is the API GroupVersion
  if [[ "${group_version}" == "v1" ]]; then
    echo "vendor/k8s.io/api/core/v1"
    return
  fi
  # Special cases first.
  # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
  # moving the results to pkg/apis/api.
  case "${group_version}" in
    # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
    __internal)
      echo "pkg/apis/core"
      ;;
    meta/v1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
      ;;
    meta/v1beta1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
      ;;
    *.k8s.io)
      echo "pkg/apis/${group_version%.*k8s.io}"
      ;;
    *.k8s.io/*)
      echo "pkg/apis/${group_version/.*k8s.io/}"
      ;;
    *)
      echo "pkg/apis/${group_version%__internal}"
      ;;
  esac
}
# Takes a group/version and returns the swagger-spec file name:
#   extensions/v1beta1 -> extensions_v1beta1
# The legacy core group "v1" maps to itself.
kube::util::gv-to-swagger-name() {
  local gv="$1"
  if [[ "${gv}" == "v1" ]]; then
    echo "v1"
  else
    echo "${gv%/*}_${gv#*/}"
  fi
}
# Returns the name of the upstream remote repository name for the local git
# repo, e.g. "upstream" or "origin". Picks the first fetch remote whose URL
# points at the kubernetes/kubernetes repository.
kube::util::git_upstream_remote_name() {
  git remote -v | grep fetch |\
    grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
    head -n 1 | awk '{print $1}'
}
# Exits script if working directory is dirty. If it's run interactively in the terminal
# the user can commit changes in a second terminal. This script will wait.
kube::util::ensure_clean_working_dir() {
  while ! git diff HEAD --exit-code &>/dev/null; do
    echo -e "\nUnexpected dirty working directory:\n"
    # With a tty we show a short status and loop; without one we dump the
    # full diff and abort, since nobody can intervene.
    if tty -s; then
        git status -s
    else
        git diff -a # be more verbose in log files without tty
        exit 1
    fi | sed 's/^/  /'
    echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
    read -r
  done 1>&2
}
# Find the base commit using:
# $PULL_BASE_SHA if set (from Prow)
# current ref from the remote upstream branch
kube::util::base_ref() {
  local -r git_branch=$1
  if [[ -n ${PULL_BASE_SHA:-} ]]; then
    echo "${PULL_BASE_SHA}"
    return
  fi
  # NOTE(review): full_branch is not declared local, so it leaks into the
  # caller's scope — confirm nothing depends on that before changing it.
  full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
  # make sure the branch is valid, otherwise the check will pass erroneously.
  if ! git describe "${full_branch}" >/dev/null; then
    # abort!
    exit 1
  fi
  echo "${full_branch}"
}
# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# An optional $3 pattern excludes matching paths (defaults to a pattern that
# matches nothing).
# Returns 1 (false) if there are no changes
#         0 (true) if there are changes detected.
kube::util::has_changes() {
  local -r git_branch=$1
  local -r pattern=$2
  local -r not_pattern=${3:-totallyimpossiblepattern}
  local base_ref
  base_ref=$(kube::util::base_ref "${git_branch}")
  echo "Checking for '${pattern}' changes against '${base_ref}'"
  # notice this uses ... to find the first shared ancestor
  if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    return 0
  fi
  # also check for pending changes
  if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    echo "Detected '${pattern}' uncommitted changes."
    return 0
  fi
  echo "No '${pattern}' changes detected."
  return 1
}
# Download $1 to the path $2, retrying up to 5 times.
# Returns 0 on success, 1 after the final attempt fails.
kube::util::download_file() {
  local -r url=$1
  local -r destination_file=$2
  # Remove any stale copy first. The previous "rm file 2&> /dev/null" was
  # parsed by bash as an extra literal argument "2" plus an &> redirect,
  # so it also tried to delete a file literally named "2". Use -f with a
  # proper redirect instead.
  rm -f "${destination_file}" 2>/dev/null || true
  local i
  for i in $(seq 5)
  do
    if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
      echo "Downloading ${url} failed. $((5-i)) retries left."
      sleep 1
    else
      echo "Downloading ${url} succeeded"
      return 0
    fi
  done
  return 1
}
# Test whether openssl is installed.
# Sets:
#  OPENSSL_BIN: The path to the openssl binary to use
# Exits 1 with a hint when openssl cannot be run.
function kube::util::test_openssl_installed {
  if openssl version >& /dev/null; then
    OPENSSL_BIN=$(command -v openssl)
    return
  fi
  echo "Failed to run openssl. Please ensure openssl is installed"
  exit 1
}
# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
#   '"client auth"'
#   '"server auth"'
#   '"client auth","server auth"'
# Produces ${id}-ca.crt, ${id}-ca.key and a cfssl signing config
# ${id}-ca-config.json in dest-dir.
function kube::util::create_signing_certkey {
  local sudo=$1
  local dest_dir=$2
  local id=$3
  local purpose=$4
  # Create client ca
  # Runs under ${sudo} so the key files can be written to privileged dirs.
  ${sudo} /usr/bin/env bash -e <<EOF
rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
# Any args after the 5th become O= entries (groups) in the cert subject.
# Produces client-${id}.crt / client-${id}.key signed by the given CA.
function kube::util::create_client_certkey {
  local sudo=$1
  local dest_dir=$2
  local ca=$3
  local id=$4
  local cn=${5:-$4}
  local groups=""
  local SEP=""
  shift 5
  # Build a JSON fragment like {"O":"g1"},{"O":"g2"} from remaining args.
  while [ -n "${1:-}" ]; do
    groups+="${SEP}{\"O\":\"$1\"}"
    SEP=","
    shift 1
  done
  ${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
mv "client-${id}-key.pem" "client-${id}.key"
mv "client-${id}.pem" "client-${id}.crt"
rm -f "client-${id}.csr"
EOF
}
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
# Any args after the 5th become SAN host entries.
# Produces serving-${id}.crt / serving-${id}.key signed by the given CA.
function kube::util::create_serving_certkey {
  local sudo=$1
  local dest_dir=$2
  local ca=$3
  local id=$4
  local cn=${5:-$4}
  local hosts=""
  local SEP=""
  shift 5
  # Build a JSON hosts list like "h1","h2" from remaining args.
  while [ -n "${1:-}" ]; do
    hosts+="${SEP}\"$1\""
    SEP=","
    shift 1
  done
  ${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
mv "serving-${id}-key.pem" "serving-${id}.key"
mv "serving-${id}.pem" "serving-${id}.crt"
rm -f "serving-${id}.csr"
EOF
}
# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
# Writes ${dest_dir}/${client_id}.kubeconfig, then flattens it so the
# certificate data is embedded rather than referenced by path.
function kube::util::write_client_kubeconfig {
  local sudo=$1
  local dest_dir=$2
  local ca_file=$3
  local api_host=$4
  local api_port=$5
  local client_id=$6
  local token=${7:-}
  cat <<EOF | ${sudo} tee "${dest_dir}"/"${client_id}".kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority: ${ca_file}
      server: https://${api_host}:${api_port}/
    name: local-up-cluster
users:
  - user:
      token: ${token}
      client-certificate: ${dest_dir}/client-${client_id}.crt
      client-key: ${dest_dir}/client-${client_id}.key
    name: local-up-cluster
contexts:
  - context:
      cluster: local-up-cluster
      user: local-up-cluster
    name: local-up-cluster
current-context: local-up-cluster
EOF
  # flatten the kubeconfig files to make them self contained
  username=$(whoami)
  ${sudo} /usr/bin/env bash -e <<EOF
    $(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
    mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
    chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}
# list_staging_repos outputs a sorted list of repos in staging/src/kubesphere.io
# (note: this fork scans kubesphere.io, not k8s.io as the upstream helper does)
# each entry will just be the $repo portion of staging/src/kubesphere.io/$repo/...
# $KUBE_ROOT must be set.
function kube::util::list_staging_repos() {
  # Subshell so the cd does not leak into the caller.
  (
    cd "${KUBE_ROOT}/staging/src/kubesphere.io" && \
    find . -mindepth 1 -maxdepth 1 -type d | cut -c 3- | sort
  )
}
# Determines if docker can be run, failures may simply require that the user be added to the docker group.
# Honors extra client flags from $DOCKER_OPTS. Returns 1 (with a diagnostic
# to stderr) when `docker info` fails.
function kube::util::ensure_docker_daemon_connectivity {
  IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}"
  # Expand ${DOCKER[@]} only if it's not unset. This is to work around
  # Bash 3 issue with unbound variable.
  DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"})
  if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
    cat <<'EOF' >&2
Can't connect to 'docker' daemon. please fix and retry.
Possible causes:
  - Docker Daemon not started
    - Linux: confirm via your init system
    - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
    - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
  - DOCKER_HOST hasn't been set or is set incorrectly
    - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
    - macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
    - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
  - Other things to check:
    - Linux: User isn't in 'docker' group. Add and relogin.
      - Something like 'sudo usermod -a -G docker ${USER}'
      - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
    return 1
  fi
}
# Wait for all background jobs of this shell to finish. Returns the number
# of jobs that exited non-zero (0 means every job succeeded).
kube::util::wait-for-jobs() {
  local failures=0
  local pid
  for pid in $(jobs -p); do
    wait "${pid}" || failures=$((failures + 1))
  done
  return ${failures}
}
# kube::util::join <delim> <list...>
# Concatenates the list elements with the (single-character) delimiter
# passed as the first parameter.
#
# Ex: kube::util::join , a b c
#  -> a,b,c
function kube::util::join {
  local IFS
  IFS="$1"
  shift
  echo "$*"
}
# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
#   $1 (cfssl directory) (optional)
#
# Sets:
#  CFSSL_BIN: The path of the installed cfssl binary
#  CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kube::util::ensure-cfssl {
  # Fast path: both tools already on PATH.
  if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
    CFSSL_BIN=$(command -v cfssl)
    CFSSLJSON_BIN=$(command -v cfssljson)
    return 0
  fi
  # Prebuilt downloads below only exist for amd64.
  host_arch=$(kube::util::host_arch)
  if [[ "${host_arch}" != "amd64" ]]; then
    echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed."
    echo "Please install cfssl and cfssljson and verify they are in \$PATH."
    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
    exit 1
  fi
  # Create a temp dir for cfssl if no directory was given
  local cfssldir=${1:-}
  if [[ -z "${cfssldir}" ]]; then
    kube::util::ensure-temp-dir
    cfssldir="${KUBE_TEMP}/cfssl"
  fi
  mkdir -p "${cfssldir}"
  pushd "${cfssldir}" > /dev/null || return 1
  echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..."
  kernel=$(uname -s)
  case "${kernel}" in
    Linux)
      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
      ;;
    Darwin)
      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
      ;;
    *)
      echo "Unknown, unsupported platform: ${kernel}." >&2
      echo "Supported platforms: Linux, Darwin." >&2
      exit 2
  esac
  chmod +x cfssl || true
  chmod +x cfssljson || true
  CFSSL_BIN="${cfssldir}/cfssl"
  CFSSLJSON_BIN="${cfssldir}/cfssljson"
  # Verify the downloads are actually executable before declaring success.
  if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
    echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
    exit 1
  fi
  popd > /dev/null || return 1
}
# kube::util::ensure_dockerized
# Confirms that the script is being run inside a kube-build image, which is
# detected via the /kube-build-image marker file; exits 1 otherwise.
#
function kube::util::ensure_dockerized {
  if [[ ! -f /kube-build-image ]]; then
    echo "ERROR: This script is designed to be run inside a kube-build container"
    exit 1
  fi
  return 0
}
# kube::util::ensure-gnu-sed
# Determines which sed binary is gnu-sed on linux/darwin
#
# Sets:
#  SED: The name of the gnu-sed binary
#
function kube::util::ensure-gnu-sed {
  # GNU sed identifies itself in --help output; macOS's BSD sed does not.
  if LANG=C sed --help 2>&1 | grep -q GNU; then
    SED="sed"
  elif command -v gsed &>/dev/null; then
    SED="gsed"
  else
    kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
    return 1
  fi
  kube::util::sourced_variable "${SED}"
}
# kube::util::check-file-in-alphabetical-order <file>
# Succeed when <file> is already sorted in C-locale order; otherwise print
# the diff, a remediation hint to stderr, and return 1.
function kube::util::check-file-in-alphabetical-order {
  local failure_file="$1"
  if diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
    return 0
  fi
  {
    echo
    echo "${failure_file} is not in alphabetical order. Please sort it:"
    echo
    echo "  LC_ALL=C sort -o ${failure_file} ${failure_file}"
    echo
  } >&2
  false
}
# kube::util::require-jq
# Succeed when jq is on PATH; otherwise complain on stderr and return 1.
function kube::util::require-jq {
  command -v jq &>/dev/null && return 0
  echo "jq not found. Please install." 1>&2
  return 1
}
# outputs md5 hash of $1, works on macOS (`md5 -q`) and Linux (`md5sum`)
function kube::util::md5() {
  if which md5 >/dev/null 2>&1; then
    md5 -q "$1"
    return
  fi
  md5sum "$1" | awk '{ print $1 }'
}
# kube::util::read-array
# Reads in stdin and adds it line by line to the array provided. This can be
# used instead of "mapfile -t", and is bash 3 compatible.
#
# Assumed vars:
#   $1 (name of array to create/modify)
#
# Example usage:
# kube::util::read-array files < <(ls -1)
#
function kube::util::read-array {
  local i=0
  # Clear any prior contents of the named array.
  unset -v "$1"
  # read into "$1[i++]" assigns each line directly into the caller's array.
  while IFS= read -r "$1[i++]"; do :; done
  # ensures last element isn't empty (read returns non-zero at EOF after a
  # partial/empty final read, which still incremented i).
  eval "[[ \${$1[--i]} ]]" || unset "$1[i]"
}
# Some useful colors.
# Declared once per shell; the guard keeps re-sourcing this file from
# tripping over the readonly declarations.
if [[ -z "${color_start-}" ]]; then
  declare -r color_start="\033["
  declare -r color_red="${color_start}0;31m"
  declare -r color_yellow="${color_start}0;33m"
  declare -r color_green="${color_start}0;32m"
  declare -r color_blue="${color_start}1;34m"
  declare -r color_cyan="${color_start}1;36m"
  declare -r color_norm="${color_start}0m"
  # Consumers live in other files; mark the variables used for shellcheck.
  kube::util::sourced_variable "${color_start}"
  kube::util::sourced_variable "${color_red}"
  kube::util::sourced_variable "${color_yellow}"
  kube::util::sourced_variable "${color_green}"
  kube::util::sourced_variable "${color_blue}"
  kube::util::sourced_variable "${color_cyan}"
  kube::util::sourced_variable "${color_norm}"
fi
# ex: ts=2 sw=2 et filetype=sh

340
hack/sync-components.sh Executable file
View File

@ -0,0 +1,340 @@
#!/usr/bin/env bash
# Copyright 2022 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
#
# Usage:
# Specify the component version through environment variables.
#
# For example:
#
# KUBERNETES_VERSION=v1.25.3 bash hack/sync-components.sh
#
####################################################################
# Abort on the first failing command.
set -e
# Component versions to mirror; each sync section below is skipped when its
# version variable is empty/unset.
KUBERNETES_VERSION=${KUBERNETES_VERSION}
NODE_LOCAL_DNS_VERSION=${NODE_LOCAL_DNS_VERSION}
COREDNS_VERSION=${COREDNS_VERSION}
CALICO_VERSION=${CALICO_VERSION}
KUBE_OVN_VERSION=${KUBE_OVN_VERSION}
CILIUM_VERSION=${CILIUM_VERSION}
OPENEBS_VERSION=${OPENEBS_VERSION}
KUBEVIP_VERSION=${KUBEVIP_VERSION}
HAPROXY_VERSION=${HAPROXY_VERSION}
HELM_VERSION=${HELM_VERSION}
CNI_VERSION=${CNI_VERSION}
ETCD_VERSION=${ETCD_VERSION}
CRICTL_VERSION=${CRICTL_VERSION}
K3S_VERSION=${K3S_VERSION}
CONTAINERD_VERSION=${CONTAINERD_VERSION}
RUNC_VERSION=${RUNC_VERSION}
COMPOSE_VERSION=${COMPOSE_VERSION}
# NOTE(review): CALICO_VERSION is assigned a second time here (already set
# above); harmless but one of the two lines is redundant.
CALICO_VERSION=${CALICO_VERSION}
# qsctl (QingStor object-storage CLI) credentials
QSCTL_ACCESS_KEY_ID=${QSCTL_ACCESS_KEY_ID}
QSCTL_SECRET_ACCESS_KEY=${QSCTL_SECRET_ACCESS_KEY}
# docker.io credentials
DOCKERHUB_USERNAME=${DOCKERHUB_USERNAME}
DOCKERHUB_PASSWORD=${DOCKERHUB_PASSWORD}
# registry.cn-beijing.aliyuncs.com credentials
ALIYUNCS_USERNAME=${ALIYUNCS_USERNAME}
ALIYUNCS_PASSWORD=${ALIYUNCS_PASSWORD}
# Destination namespaces on each registry.
DOCKERHUB_NAMESPACE="kubesphere"
ALIYUNCS_NAMESPACE="kubesphereio"
# Kubernetes binaries and CPU architectures to mirror.
BINARIES=("kubeadm" "kubelet" "kubectl")
ARCHS=("amd64" "arm64")
# Generate qsctl config
if [ $QSCTL_ACCESS_KEY_ID ] && [ $QSCTL_SECRET_ACCESS_KEY ];then
  echo "access_key_id: $QSCTL_ACCESS_KEY_ID" > qsctl-config.yaml
  echo "secret_access_key: $QSCTL_SECRET_ACCESS_KEY" >> qsctl-config.yaml
fi
# Login docker.io
if [ $DOCKERHUB_USERNAME ] && [ $DOCKERHUB_PASSWORD ];then
  skopeo login docker.io -u $DOCKERHUB_USERNAME -p $DOCKERHUB_PASSWORD
fi
# Login registry.cn-beijing.aliyuncs.com
if [ $ALIYUNCS_USERNAME ] && [ $ALIYUNCS_PASSWORD ];then
  skopeo login registry.cn-beijing.aliyuncs.com -u $ALIYUNCS_USERNAME -p $ALIYUNCS_PASSWORD
fi
# Sync Kubernetes Binaries and Images
if [ $KUBERNETES_VERSION ]; then
  # Mirror kubeadm/kubelet/kubectl for every arch into QingStor storage.
  for arch in ${ARCHS[@]}
  do
    mkdir -p binaries/kube/$KUBERNETES_VERSION/$arch
    for binary in ${BINARIES[@]}
    do
      echo "Synchronizing $binary-$arch"
      curl -L -o binaries/kube/$KUBERNETES_VERSION/$arch/$binary \
           https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/$arch/$binary
      qsctl cp binaries/kube/$KUBERNETES_VERSION/$arch/$binary \
           qs://kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/$arch/$binary \
           -c qsctl-config.yaml
    done
  done
  # Use the amd64 kubeadm to enumerate this release's control-plane images
  # and mirror each one. skopeo sync appends the source repository name to
  # the destination, so only the target namespace is given. (The previous
  # "$NAMESPACE/${image##}" referenced an undefined variable `image`, which
  # expanded empty and left a dangling slash in the destination.)
  chmod +x binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm
  binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm config images list | xargs -I {} skopeo sync --src docker --dest docker {} docker.io/$DOCKERHUB_NAMESPACE --all
  binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm config images list | xargs -I {} skopeo sync --src docker --dest docker {} registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE --all
  rm -rf binaries
fi
# Sync Helm Binary
if [ $HELM_VERSION ]; then
  for arch in ${ARCHS[@]}
  do
    mkdir -p binaries/helm/$HELM_VERSION/$arch
    echo "Synchronizing helm-$arch"
    curl -L -o binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz \
         https://get.helm.sh/helm-$HELM_VERSION-linux-$arch.tar.gz
    # The tarball unpacks to linux-$arch/helm inside the download directory.
    tar -zxf binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz -C binaries/helm/$HELM_VERSION/$arch
    # Upload the extracted binary. (The previous source path began with
    # "$KUBERNETES_VERSION/...", which is unrelated to where the tarball
    # was extracted, so the bare-binary upload could never find its file.)
    qsctl cp binaries/helm/$HELM_VERSION/$arch/linux-$arch/helm \
         qs://kubernetes-helm/linux-$arch/$HELM_VERSION/helm \
         -c qsctl-config.yaml
    qsctl cp binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz \
         qs://kubernetes-helm/linux-$arch/$HELM_VERSION/helm-$HELM_VERSION-linux-$arch.tar.gz \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync ETCD Binary: mirror the upstream release tarball for each arch into
# QingStor object storage.
if [ $ETCD_VERSION ]; then
  for arch in "${ARCHS[@]}"; do
    echo "Synchronizing etcd-$arch"
    mkdir -p binaries/etcd/$ETCD_VERSION/$arch
    curl -L -o binaries/etcd/$ETCD_VERSION/$arch/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
         https://github.com/coreos/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$arch.tar.gz
    qsctl cp binaries/etcd/$ETCD_VERSION/$arch/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
         qs://kubernetes-release/etcd/release/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync CNI Binary: mirror the CNI plugins tarball for each arch.
if [ $CNI_VERSION ]; then
  for arch in "${ARCHS[@]}"; do
    echo "Synchronizing cni-$arch"
    mkdir -p binaries/cni/$CNI_VERSION/$arch
    curl -L -o binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
         https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz
    qsctl cp binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
         qs://containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync CALICOCTL Binary: mirror the calicoctl release binary for each arch.
if [ $CALICO_VERSION ]; then
  for arch in "${ARCHS[@]}"; do
    echo "Synchronizing calicoctl-$arch"
    mkdir -p binaries/calicoctl/$CALICO_VERSION/$arch
    curl -L -o binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
         https://github.com/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch
    qsctl cp binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
         qs://kubernetes-release/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync crictl Binary
if [ $CRICTL_VERSION ]; then
  # Regenerate the qsctl config only when credentials are provided, using the
  # same QSCTL_* variables as the rest of this script. (Previously this read
  # $ACCESS_KEY_ID/$SECRET_ACCESS_KEY — which are never defined — and
  # unconditionally clobbered the config written at the top of the script
  # with empty values.)
  if [ $QSCTL_ACCESS_KEY_ID ] && [ $QSCTL_SECRET_ACCESS_KEY ];then
    echo "access_key_id: $QSCTL_ACCESS_KEY_ID" > qsctl-config.yaml
    echo "secret_access_key: $QSCTL_SECRET_ACCESS_KEY" >> qsctl-config.yaml
  fi
  for arch in ${ARCHS[@]}
  do
    mkdir -p binaries/crictl/$CRICTL_VERSION/$arch
    echo "Synchronizing crictl-$arch"
    curl -L -o binaries/crictl/$CRICTL_VERSION/$arch/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
         https://github.com/kubernetes-sigs/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-$arch.tar.gz
    qsctl cp binaries/crictl/$CRICTL_VERSION/$arch/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
         qs://kubernetes-release/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync k3s Binary: mirror the k3s release binary for each arch.
if [ $K3S_VERSION ]; then
  for arch in "${ARCHS[@]}"; do
    mkdir -p binaries/k3s/$K3S_VERSION/$arch
    echo "Synchronizing k3s-$arch"
    # Upstream publishes the amd64 build without an arch suffix.
    if [ $arch == "amd64" ]; then
      curl -L -o binaries/k3s/$K3S_VERSION/$arch/k3s \
           https://github.com/rancher/k3s/releases/download/$K3S_VERSION+k3s1/k3s
    else
      curl -L -o binaries/k3s/$K3S_VERSION/$arch/k3s \
           https://github.com/rancher/k3s/releases/download/$K3S_VERSION+k3s1/k3s-$arch
    fi
    qsctl cp binaries/k3s/$K3S_VERSION/$arch/k3s \
         qs://kubernetes-release/k3s/releases/download/$K3S_VERSION+k3s1/linux/$arch/k3s \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync containerd Binary
if [ $CONTAINERD_VERSION ]; then
  for arch in ${ARCHS[@]}
  do
    mkdir -p binaries/containerd/$CONTAINERD_VERSION/$arch
    echo "Synchronizing containerd-$arch"
    # Note: containerd tags are prefixed with "v" upstream but the version
    # variable here is expected without it.
    curl -L -o binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
         https://github.com/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz
    # NOTE(review): the bucket below is "kubernetes-releas" (missing "e"),
    # unlike every other section's "kubernetes-release" — confirm whether
    # this is a live bucket name or a typo before changing it.
    qsctl cp binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
         qs://kubernetes-releas/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync runc Binary: mirror the per-arch runc release binary.
if [ $RUNC_VERSION ]; then
  for arch in "${ARCHS[@]}"; do
    echo "Synchronizing runc-$arch"
    mkdir -p binaries/runc/$RUNC_VERSION/$arch
    curl -L -o binaries/runc/$RUNC_VERSION/$arch/runc.$arch \
         https://github.com/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch
    qsctl cp binaries/runc/$RUNC_VERSION/$arch/runc.$arch \
         qs://kubernetes-release/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch \
         -c qsctl-config.yaml
  done
  rm -rf binaries
fi
# Sync docker-compose Binary
# (Previously this section tested $RUNC_VERSION and logged "runc", so
# docker-compose was only mirrored when the runc version happened to be set.)
if [ $COMPOSE_VERSION ]; then
  for arch in ${ARCHS[@]}
  do
    mkdir -p binaries/compose/$COMPOSE_VERSION/$arch
    echo "Synchronizing docker-compose-$arch"
    # compose release assets use uname-style arch names (x86_64 / aarch64).
    if [ $arch == "amd64" ]; then
      curl -L -o binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-x86_64 \
           https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-x86_64
      qsctl cp binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-x86_64 \
           qs://kubernetes-release/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-x86_64 \
           -c qsctl-config.yaml
    elif [ $arch == "arm64" ]; then
      curl -L -o binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-aarch64 \
           https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-aarch64
      qsctl cp binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-aarch64 \
           qs://kubernetes-release/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-aarch64 \
           -c qsctl-config.yaml
    fi
  done
  rm -rf binaries
fi
# All binary uploads are done; remove the qsctl credentials file.
rm -rf qsctl-config.yaml
# Sync NodeLocalDns Images
# NOTE(review): node-local-dns is mirrored to both Docker Hub and Aliyun,
# while every section below mirrors to Aliyun only — confirm whether that
# asymmetry is intentional.
if [ $NODE_LOCAL_DNS_VERSION ]; then
  skopeo sync --src docker --dest docker registry.k8s.io/dns/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION docker.io/$DOCKERHUB_NAMESPACE/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION --all
  skopeo sync --src docker --dest docker registry.k8s.io/dns/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION --all
fi
# Sync Coredns Images
if [ $COREDNS_VERSION ]; then
  skopeo sync --src docker --dest docker docker.io/coredns/coredns:$COREDNS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/coredns:$COREDNS_VERSION --all
fi
# Sync Calico Images (controller, CNI, node agent, flexvol driver, typha)
if [ $CALICO_VERSION ]; then
  skopeo sync --src docker --dest docker docker.io/calico/kube-controllers:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kube-controllers:$CALICO_VERSION --all
  skopeo sync --src docker --dest docker docker.io/calico/cni:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cni:$CALICO_VERSION --all
  skopeo sync --src docker --dest docker docker.io/calico/node:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/node:$CALICO_VERSION --all
  skopeo sync --src docker --dest docker docker.io/calico/pod2daemon-flexvol:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/pod2daemon-flexvol:$CALICO_VERSION --all
  skopeo sync --src docker --dest docker docker.io/calico/typha:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/typha:$CALICO_VERSION --all
fi
# Sync Kube-OVN Images
if [ $KUBE_OVN_VERSION ]; then
  skopeo sync --src docker --dest docker docker.io/kubeovn/kube-ovn:$KUBE_OVN_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kube-ovn:$KUBE_OVN_VERSION --all
  skopeo sync --src docker --dest docker docker.io/kubeovn/vpc-nat-gateway:$KUBE_OVN_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/vpc-nat-gateway:$KUBE_OVN_VERSION --all
fi
# Sync Cilium Images
if [ $CILIUM_VERSION ]; then
  skopeo sync --src docker --dest docker docker.io/cilium/cilium:$CILIUM_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cilium:$CILIUM_VERSION --all
  skopeo sync --src docker --dest docker docker.io/cilium/cilium-operator-generic:$CILIUM_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cilium-operator-generic:$CILIUM_VERSION --all
fi
# Sync OpenEBS Images
if [ $OPENEBS_VERSION ]; then
  skopeo sync --src docker --dest docker docker.io/openebs/provisioner-localpv:$OPENEBS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/provisioner-localpv:$OPENEBS_VERSION --all
  skopeo sync --src docker --dest docker docker.io/openebs/linux-utils:$OPENEBS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/linux-utils:$OPENEBS_VERSION --all
fi
# Sync Haproxy Images
if [ $HAPROXY_VERSION ]; then
  skopeo sync --src docker --dest docker docker.io/library/haproxy:$HAPROXY_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/haproxy:$HAPROXY_VERSION --all
fi
# Sync Kube-vip Images
if [ $KUBEVIP_VERSION ]; then
  skopeo sync --src docker --dest docker docker.io/plndr/kubevip:$KUBEVIP_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kubevip:$KUBEVIP_VERSION --all
fi

46
hack/update-goimports.sh Executable file
View File

@ -0,0 +1,46 @@
#!/usr/bin/env bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

kube::golang::verify_go_version

# Ensure that we find the binaries we build before anything else.
export GOBIN="${KUBE_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"

# Explicitly opt into go modules, even though we're inside a GOPATH directory
export GO111MODULE=on

# Install goimports if it is not already on PATH. Redirect the probe's output:
# `command -v` prints the resolved path to stdout when the tool exists, which
# would otherwise leak into the script output.
if ! command -v goimports >/dev/null 2>&1; then
  echo 'installing goimports'
  pushd "${KUBE_ROOT}/hack/tools" >/dev/null
    GO111MODULE=auto go install -mod=mod golang.org/x/tools/cmd/goimports@v0.7.0
  popd >/dev/null
fi

cd "${KUBE_ROOT}" || exit 1

# Collect every Go file except vendored and generated code; NUL terminator
# lets read -d '' consume the whole find output in one call.
IFS=$'\n' read -r -d '' -a files < <( find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/client/*" -not -name "zz_generated.deepcopy.go" && printf '\0' )
goimports -w -local kubesphere.io/kubesphere "${files[@]}"

54
hack/verify-goimports.sh Executable file
View File

@ -0,0 +1,54 @@
#!/usr/bin/env bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

kube::golang::verify_go_version

# Ensure that we find the binaries we build before anything else.
export GOBIN="${KUBE_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"

# Explicitly opt into go modules, even though we're inside a GOPATH directory
export GO111MODULE=on

# Install goimports if it is not already on PATH. Redirect the probe's output:
# `command -v` prints the resolved path to stdout when the tool exists.
if ! command -v goimports >/dev/null 2>&1; then
  echo 'installing goimports'
  pushd "${KUBE_ROOT}/hack/tools" >/dev/null
    GO111MODULE=auto go install -mod=mod golang.org/x/tools/cmd/goimports@v0.7.0
  popd >/dev/null
fi

cd "${KUBE_ROOT}" || exit 1

# Collect every Go file except vendored, generated and client/apis code.
IFS=$'\n' read -r -d '' -a files < <( find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/apis/*" -not -path "./pkg/client/*" -not -name "zz_generated.deepcopy.go" && printf '\0' )

# goimports -l prints the names of files whose imports are not formatted.
output=$(goimports -local kubesphere.io/kubesphere -l "${files[@]}")
if [ "${output}" != "" ]; then
  echo "The following files are not import formatted"
  # output is a scalar (one newline-separated string), not an array; the
  # previous "${output[@]}" expansion only worked by accident.
  printf '%s\n' "${output}"
  echo "Please run the following command:"
  echo "make goimports"
  exit 1
fi

108
hack/version.sh Executable file
View File

@ -0,0 +1,108 @@
#!/usr/bin/env bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
if [[ "${TRACE-0}" == "1" ]]; then
set -o xtrace
fi
# version::get_version_vars populates GIT_COMMIT, GIT_TREE_STATE, GIT_VERSION,
# GIT_MAJOR/GIT_MINOR and GIT_RELEASE_TAG/GIT_RELEASE_COMMIT from the current
# git worktree. Exits non-zero when the derived version is not valid semver.
version::get_version_vars() {
  # shellcheck disable=SC1083
  GIT_COMMIT="$(git rev-parse HEAD^{commit})"

  # Any output from `git status --porcelain` means uncommitted changes exist.
  if git_status=$(git status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
    GIT_TREE_STATE="clean"
  else
    GIT_TREE_STATE="dirty"
  fi

  # stolen from k8s.io/hack/lib/version.sh
  # Use git describe to find the version based on annotated tags.
  # A pre-set GIT_VERSION (e.g. from CI) short-circuits the describe call.
  if [[ -n ${GIT_VERSION-} ]] || GIT_VERSION=$(git describe --tags --abbrev=14 --match "v[0-9]*" "${GIT_COMMIT}" 2>/dev/null); then
    # This translates the "git describe" to an actual semver.org
    # compatible semantic version that looks something like this:
    #   v1.1.0-alpha.0.6+84c76d1142ea4d
    #
    # TODO: We continue calling this "git version" because so many
    # downstream consumers are expecting it there.
    # shellcheck disable=SC2001
    DASHES_IN_VERSION=$(echo "${GIT_VERSION}" | sed "s/[^-]//g")
    if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
      # We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
      # shellcheck disable=SC2001
      GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\-\2/")
    elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
      # We have distance to base tag (v1.1.0-1-gCommitHash)
      # shellcheck disable=SC2001
      GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/-\1/")
    fi
    if [[ "${GIT_TREE_STATE}" == "dirty" ]]; then
      # git describe --dirty only considers changes to existing files, but
      # that is problematic since new untracked .go files affect the build,
      # so use our idea of "dirty" from git status instead.
      GIT_VERSION+="-dirty"
    fi

    # Try to match the "git describe" output to a regex to try to extract
    # the "major" and "minor" versions and whether this is the exact tagged
    # version or whether the tree is between two tagged versions.
    if [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
      GIT_MAJOR=${BASH_REMATCH[1]}
      GIT_MINOR=${BASH_REMATCH[2]}
    fi

    # If GIT_VERSION is not a valid Semantic Version, then refuse to build.
    if ! [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
      echo "GIT_VERSION should be a valid Semantic Version. Current value: ${GIT_VERSION}"
      echo "Please see more details here: https://semver.org"
      exit 1
    fi
  fi

  # Latest reachable annotated tag and the commit it points at.
  GIT_RELEASE_TAG=$(git describe --abbrev=0 --tags)
  GIT_RELEASE_COMMIT=$(git rev-list -n 1 "${GIT_RELEASE_TAG}")
}
# stolen from k8s.io/hack/lib/version.sh and modified
# Prints the value that needs to be passed to the -ldflags parameter of go build
# so the version variables in github.com/kubesphere/kubekey/v4/version are
# stamped into the binary.
version::ldflags() {
  version::get_version_vars

  local -a ldflags
  # add_ldflag appends one "-X key=val" pair to the ldflags array.
  function add_ldflag() {
    local key=${1}
    local val=${2}
    ldflags+=(
      "-X 'github.com/kubesphere/kubekey/v4/version.${key}=${val}'"
    )
  }

  # SOURCE_DATE_EPOCH, when set, pins the build date for reproducible builds.
  add_ldflag "buildDate" "$(date ${SOURCE_DATE_EPOCH:+"--date=@${SOURCE_DATE_EPOCH}"} -u +'%Y-%m-%dT%H:%M:%SZ')"
  add_ldflag "gitCommit" "${GIT_COMMIT}"
  add_ldflag "gitTreeState" "${GIT_TREE_STATE}"
  add_ldflag "gitMajor" "${GIT_MAJOR}"
  add_ldflag "gitMinor" "${GIT_MINOR}"
  add_ldflag "gitVersion" "${GIT_VERSION}"
  add_ldflag "gitReleaseCommit" "${GIT_RELEASE_COMMIT}"

  # The -ldflags parameter takes a single string, so join the output.
  echo "${ldflags[*]-}"
}

version::ldflags

24
pipeline/fs.go Normal file
View File

@ -0,0 +1,24 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipeline

import (
	"embed"
)

// InternalPipeline embeds the built-in playbooks and roles directories so the
// bundled pipelines can be executed without any files present on disk.
//
//go:embed playbooks roles
var InternalPipeline embed.FS

View File

@ -0,0 +1,7 @@
---
# Run the precheck role against every cluster node and every etcd node.
# Facts are gathered first so the role's assertions can inspect the hosts.
- hosts:
    - k8s_cluster
    - etcd
  gather_facts: true
  roles:
    - {role: precheck}

View File

@ -0,0 +1,114 @@
---
- name: Stop if either kube_control_plane or kube_node group is empty
assert:
that: "'{{ item }}' in groups"
loop:
- kube_control_plane
- kube_node
run_once: true
- name: Stop if etcd group is empty in external etcd mode
assert:
that: "'etcd' in groups"
fail_msg: "Group 'etcd' cannot be empty in external etcd mode"
run_once: true
when:
- etcd_deployment_type != "kubeadm"
- name: Stop if the os does not support
assert:
that: (allow_unsupported_distribution_setup | default:false) or os.release.ID in supported_os_distributions
fail_msg: "{{ os.release.ID }} is not a known OS"
- name: Stop if unknown network plugin
vars:
require_network_plugin: ['calico', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'kube-ovn', 'kube-router', 'macvlan', 'custom_cni']
assert:
that: kube_network_plugin in require_network_plugin
fail_msg: "{{ kube_network_plugin }} is not supported"
when:
- kube_network_plugin | defined
- name: Stop if unsupported version of Kubernetes
assert:
that: kube_version | version:'>=,{{kube_version_min_required}}'
fail_msg: "The current release of Kubespray only support newer version of Kubernetes than {{ kube_version_min_required }} - You are trying to apply {{ kube_version }}"
- name: Stop if even number of etcd hosts
assert:
that: not groups.etcd | length | divisibleby:2
when:
- inventory_hostname in groups['etcd']
- name: Stop if memory is too small for masters
assert:
that: process.memInfo.MemTotal | cut:' kB' >= minimal_master_memory_mb
when:
- inventory_hostname in groups['kube_control_plane']
- name: Stop if memory is too small for nodes
assert:
that: process.memInfo.MemTotal | cut:' kB' >= minimal_node_memory_mb
when:
- inventory_hostname in groups['kube_node']
# This assertion will fail on the safe side: One can indeed schedule more pods
# on a node than the CIDR-range has space for when additional pods use the host
# network namespace. It is impossible to ascertain the number of such pods at
# provisioning time, so to establish a guarantee, we factor these out.
# NOTICE: the check blatantly ignores the inet6-case
- name: Guarantee that enough network address space is available for all pods
assert:
that: "(kubelet_max_pods | default_if_none:110 | integer) <= (2 | pow: {{ 32 - kube_network_node_prefix | integer }} - 2)"
fail_msg: "Do not schedule more pods on a node than inet addresses are available."
when:
- inventory_hostname in groups['k8s_cluster']
- kube_network_node_prefix | defined
- kube_network_plugin != 'calico'
- name: Stop if access_ip is not pingable
command: ping -c1 {{ access_ip }}
when:
- access_ip | defined
- ping_access_ip
changed_when: false
- name: Stop if kernel version is too low
assert:
that: os.kernelVersion | split:'-' | first | version:'>=,4.9.17'
when:
- kube_network_plugin == 'cilium' or (cilium_deploy_additionally | default:false)
- name: Stop if bad hostname
vars:
regex: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'
assert:
that: inventory_hostname | match:regex
fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
# The task name and fail_msg previously said "host, docker or kubeadm", which
# contradicted the actual allowed values below; both now match the list.
- name: Stop if etcd deployment type is not kubekey, external or kubeadm
  vars:
    require_etcd_deployment_type: ['kubekey', 'external', 'kubeadm']
  assert:
    that: etcd_deployment_type in require_etcd_deployment_type
    fail_msg: "The etcd deployment type, 'etcd_deployment_type', must be kubekey, external or kubeadm"
  when:
    - inventory_hostname in groups['etcd']
- name: Stop if container manager is not docker, crio or containerd
vars:
require_container_manager: ['docker', 'crio', 'containerd']
assert:
that: container_manager in require_container_manager
fail_msg: "The container manager, 'container_manager', must be docker, crio or containerd"
run_once: true
- name: Ensure minimum containerd version
  vars:
    # Must live under `vars:`; previously this key sat directly on the task,
    # where it is not a valid task keyword and the `when` reference to it
    # could never resolve.
    require_containerd_version: ['latest', 'edge', 'stable']
  assert:
    that: containerd_version | version:'>=,{{containerd_min_version_required}}'
    fail_msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}"
  run_once: yes
  when:
    # Channel names (latest/edge/stable) cannot be version-compared.
    - not containerd_version in require_containerd_version
    - container_manager == 'containerd'

53
pkg/apis/core/v1/base.go Normal file
View File

@ -0,0 +1,53 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// Base holds the keywords shared by every playbook entity (play, role, block
// and task): connection settings, variables, execution flags and privilege
// escalation options. Field names mirror the ansible playbook keywords.
type Base struct {
	// Name is the human-readable identifier printed when the entity runs.
	Name string `yaml:"name,omitempty"`

	// connection/transport
	Connection string `yaml:"connection,omitempty"`
	Port       int    `yaml:"port,omitempty"`
	RemoteUser string `yaml:"remote_user,omitempty"`

	// variables
	Vars map[string]any `yaml:"vars,omitempty"`

	// module default params
	ModuleDefaults []map[string]map[string]any `yaml:"module_defaults,omitempty"`

	// flags and misc. settings
	Environment    []map[string]string `yaml:"environment,omitempty"`
	NoLog          bool                `yaml:"no_log,omitempty"`
	RunOnce        bool                `yaml:"run_once,omitempty"`
	IgnoreErrors   bool                `yaml:"ignore_errors,omitempty"`
	CheckMode      bool                `yaml:"check_mode,omitempty"`
	Diff           bool                `yaml:"diff,omitempty"`
	AnyErrorsFatal bool                `yaml:"any_errors_fatal,omitempty"`
	Throttle       int                 `yaml:"throttle,omitempty"`
	Timeout        int                 `yaml:"timeout,omitempty"`

	// Debugger invoke a debugger on tasks
	Debugger string `yaml:"debugger,omitempty"`

	// privilege escalation
	Become       bool   `yaml:"become,omitempty"`
	BecomeMethod string `yaml:"become_method,omitempty"`
	BecomeUser   string `yaml:"become_user,omitempty"`
	BecomeFlags  string `yaml:"become_flags,omitempty"`
	BecomeExe    string `yaml:"become_exe,omitempty"`
}

133
pkg/apis/core/v1/block.go Normal file
View File

@ -0,0 +1,133 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
	"fmt"
	"reflect"
	"strings"

	"k8s.io/klog/v2"
)
// Block is one entry of a task list. After UnmarshalYAML, exactly one of its
// three shapes is populated: an include_tasks reference, a nested block
// (BlockInfo), or a single task (Task).
type Block struct {
	BlockBase
	// If has Block, Task should be empty
	Task
	IncludeTasks string `yaml:"include_tasks,omitempty"`
	BlockInfo
}

// BlockBase groups the keyword sets shared by blocks and tasks; all embedded
// structs are inlined into the same yaml mapping.
type BlockBase struct {
	Base             `yaml:",inline"`
	Conditional      `yaml:",inline"`
	CollectionSearch `yaml:",inline"`
	Taggable         `yaml:",inline"`
	Notifiable       `yaml:",inline"`
	Delegatable      `yaml:",inline"`
}

// BlockInfo holds the nested task lists of a "block" entry: the main body,
// the error path (rescue) and the always-run epilogue.
type BlockInfo struct {
	Block  []Block `yaml:"block,omitempty"`
	Rescue []Block `yaml:"rescue,omitempty"`
	Always []Block `yaml:"always,omitempty"`
}

// Task carries the task-only keywords (async/retry/loop/register handling).
type Task struct {
	AsyncVal    int         `yaml:"async,omitempty"`
	ChangedWhen When        `yaml:"changed_when,omitempty"`
	Delay       int         `yaml:"delay,omitempty"`
	FailedWhen  When        `yaml:"failed_when,omitempty"`
	Loop        []any       `yaml:"loop,omitempty"`
	LoopControl LoopControl `yaml:"loop_control,omitempty"`
	Poll        int         `yaml:"poll,omitempty"`
	Register    string      `yaml:"register,omitempty"`
	Retries     int         `yaml:"retries,omitempty"`
	Until       When        `yaml:"until,omitempty"`
	// deprecated, used to be loop and loop_args but loop has been repurposed
	//LoopWith string `yaml:"loop_with"`

	// UnknownFiled collects the yaml keys that match no declared field —
	// in practice the module name and its arguments.
	// NOTE(review): the name is a typo of "UnknownField" but is exported
	// API; renaming would break callers.
	UnknownFiled map[string]any `yaml:"-"`
}
// UnmarshalYAML decodes a task-list entry into one of the three Block shapes:
// an include_tasks reference, a nested block, or a single task. The raw
// mapping is also decoded so that keys no declared field consumes can be
// stored in Task.UnknownFiled (the module name and its arguments).
func (b *Block) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Fill the shared base fields first; a decode failure here is tolerated
	// because the map-based dispatch below performs its own validation.
	var bb BlockBase
	if err := unmarshal(&bb); err == nil {
		b.BlockBase = bb
	}

	var m map[string]any
	if err := unmarshal(&m); err != nil {
		klog.Errorf("unmarshal data to map error: %v", err)
		return err
	}

	if v, ok := m["include_tasks"]; ok {
		// include_tasks must be a scalar string. The previous unchecked
		// v.(string) assertion panicked on malformed input (e.g. a list);
		// return a decode error instead.
		s, ok := v.(string)
		if !ok {
			return fmt.Errorf("include_tasks should be a string, got %T", v)
		}
		b.IncludeTasks = s
	} else if _, ok := m["block"]; ok {
		// render block
		var bi BlockInfo
		if err := unmarshal(&bi); err != nil {
			klog.Errorf("unmarshal data to block error: %v", err)
			return err
		}
		b.BlockInfo = bi
	} else {
		// render task
		var t Task
		if err := unmarshal(&t); err != nil {
			klog.Errorf("unmarshal data to task error: %v", err)
			return err
		}
		b.Task = t
		// Remove every key a declared field consumed; what remains is the
		// module invocation, stored as unknown fields.
		deleteExistField(reflect.TypeOf(Block{}), m)
		b.UnknownFiled = m
	}
	return nil
}
func deleteExistField(rt reflect.Type, m map[string]any) {
for i := 0; i < rt.NumField(); i++ {
field := rt.Field(i)
if field.Anonymous {
deleteExistField(field.Type, m)
} else {
yamlTag := rt.Field(i).Tag.Get("yaml")
if yamlTag != "" {
for _, t := range strings.Split(yamlTag, ",") {
if _, ok := m[t]; ok {
delete(m, t)
break
}
}
} else {
t := strings.ToUpper(rt.Field(i).Name[:1]) + rt.Field(i).Name[1:]
if _, ok := m[t]; ok {
delete(m, t)
break
}
}
}
}
}

View File

@ -0,0 +1,21 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// CollectionSearch mirrors the ansible "collections" keyword: the list of
// collection names searched when resolving modules and roles.
type CollectionSearch struct {
	Collections []string `yaml:"collections,omitempty"`
}

View File

@ -0,0 +1,43 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
)
type Conditional struct {
When When `yaml:"when,omitempty"`
}
type When struct {
Data []string
}
func (w *When) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err == nil {
w.Data = []string{s}
return nil
}
var a []string
if err := unmarshal(&a); err == nil {
w.Data = a
return nil
}
return fmt.Errorf("unsupported type, excepted string or array of strings")
}

View File

@ -0,0 +1,22 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// Delegatable mirrors the ansible delegation keywords: run the entity on a
// different host (delegate_to) and optionally attribute the gathered facts to
// that host (delegate_facts).
type Delegatable struct {
	DelegateTo    string `yaml:"delegate_to,omitempty"`
	DelegateFacts bool   `yaml:"delegate_facts,omitempty"`
}

188
pkg/apis/core/v1/docs.go Normal file
View File

@ -0,0 +1,188 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// Playbook keyword in ansible: https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#playbook-keywords
// support list (base on ansible 2.15.5)
/**
Play
+------+------------------------+------------+
| Row | Keyword | Support |
+------+------------------------+------------+
| 1 | any_errors_fatal | |
| 2 | become | |
| 3 | become_exe | |
| 4 | become_flags | |
| 5 | become_method | |
| 6 | become_user | |
| 7 | check_mode | |
| 8 | collections | |
| 9 | connection | |
| 10 | debugger | |
| 11 | diff | |
| 12 | environment | |
| 13 | fact_path | |
| 14 | force_handlers | |
| 15 | gather_facts | |
| 16 | gather_subset | |
| 17 | gather_timeout | |
| 18 | handlers | |
| 19 | hosts | |
| 20 | ignore_errors | |
| 21 | ignore_unreachable | |
| 22 | max_fail_percentage | |
| 23 | module_defaults | |
| 24 | name | |
| 25 | no_log | |
| 26 | order | |
| 27 | port | |
| 28 | post_task | |
| 29 | pre_tasks | |
| 30 | remote_user | |
| 31 | roles | |
| 32 | run_once | |
| 33 | serial | |
| 34 | strategy | |
| 35 | tags | |
| 36 | tasks | |
| 37 | throttle | |
| 38 | timeout | |
| 39 | vars | |
| 40 | vars_files | |
| 41 | vars_prompt | |
+------+------------------------+------------+
Role
+------+------------------------+------------+
| Row | Keyword | Support |
+------+------------------------+------------+
| 1 | any_errors_fatal | |
| 2 | become | |
| 3 | become_exe | |
| 4 | become_flags | |
| 5 | become_method | |
| 6 | become_user | |
| 7 | check_mode | |
| 8 | collections | |
| 9 | connection | |
| 10 | debugger | |
| 11 | delegate_facts | |
| 12 | delegate_to | |
| 13 | diff | |
| 14 | environment | |
| 15 | ignore_errors | |
| 16 | ignore_unreachable | |
| 17 | max_fail_percentage | |
| 18 | module_defaults | |
| 19 | name | |
| 20 | no_log | |
| 21 | port | |
| 22 | remote_user | |
| 23 | run_once | |
| 24 | tags | |
| 25 | throttle | |
| 26 | timeout | |
| 27 | vars | |
| 28 | when | |
+------+------------------------+------------+
Block
+------+------------------------+------------+
| Row | Keyword | Support |
+------+------------------------+------------+
| 1 | always | |
| 2 | any_errors_fatal | |
| 3 | become | |
| 4 | become_exe | |
| 5 | become_flags | |
| 6 | become_method | |
| 7 | become_user | |
| 8 | block | |
| 9 | check_mode | |
| 10 | collections | |
| 11 | debugger | |
| 12 | delegate_facts | |
| 13 | delegate_to | |
| 14 | diff | |
| 15 | environment | |
| 16 | ignore_errors | |
| 17 | ignore_unreachable | |
| 18 | max_fail_percentage | |
| 19 | module_defaults | |
| 20 | name | |
| 21 | no_log | |
| 22 | notify | |
| 23 | port | |
| 24 | remote_user | |
| 25 | rescue | |
| 26 | run_once | |
| 27 | tags | |
| 28 | throttle | |
| 29 | timeout | |
| 30 | vars | |
| 31 | when | |
+------+------------------------+------------+
Task
+------+------------------------+------------+
| Row | Keyword | Support |
+------+------------------------+------------+
| 1 | action | |
| 2 | any_errors_fatal | |
| 3 | args | |
| 4 | async | |
| 5 | become | |
| 6 | become_exe | |
| 7 | become_flags | |
| 8 | become_method | |
| 9 | become_user | |
| 10 | changed_when | |
| 11 | check_mode | |
| 12 | collections | |
| 13 | debugger | |
| 14 | delay | |
| 15 | delegate_facts | |
| 16 | delegate_to | |
| 17 | diff | |
| 18 | environment | |
| 19 | failed_when | |
| 20 | ignore_errors | |
| 21 | ignore_unreachable | |
| 22 | local_action | |
| 23 | loop | |
| 24 | loop_control | |
| 25 | module_defaults | |
| 26 | name | |
| 27 | no_log | |
| 28 | notify | |
| 29 | poll | |
| 30 | port | |
| 31 | register | |
| 32 | remote_user | |
| 33 | retries | |
| 34 | run_once | |
| 35 | tags | |
| 36 | throttle | |
| 37 | timeout | |
| 38 | until | |
| 39 | vars | |
| 40 | when | |
| 41 | with_<lookup_plugin> | |
+------+------------------------+------------+
*/

View File

@ -0,0 +1,23 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// Handler is a task that runs only when notified. Listen holds the topic
// names this handler subscribes to, mirroring the ansible "listen" keyword.
type Handler struct {
	//Task
	Listen []string `yaml:"listen,omitempty"`
}

26
pkg/apis/core/v1/loop.go Normal file
View File

@ -0,0 +1,26 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// LoopControl mirrors the ansible "loop_control" keyword: it tunes how a
// task's loop exposes its iteration state.
type LoopControl struct {
	// LoopVar renames the per-iteration variable (default "item").
	LoopVar string `yaml:"loop_var,omitempty"`
	// IndexVar exposes the zero-based iteration index under this name.
	IndexVar string `yaml:"index_var,omitempty"`
	// Label replaces the (possibly verbose) item in log output.
	Label string `yaml:"label,omitempty"`
	// Pause is the delay in seconds between iterations.
	Pause float32 `yaml:"pause,omitempty"`
	// Extended enables the extended loop information variables.
	Extended bool `yaml:"extended,omitempty"`
	// ExtendedAllitems controls whether extended info includes all items.
	ExtendedAllitems bool `yaml:"extended_allitems,omitempty"`
}

View File

@ -0,0 +1,21 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// Notifiable mirrors the ansible "notify" keyword: the handler to trigger
// when the entity reports a change.
// NOTE(review): ansible also accepts a list of handler names here; this field
// only models the single-string form — confirm that is intentional.
type Notifiable struct {
	Notify string `yaml:"notify,omitempty"`
}

95
pkg/apis/core/v1/play.go Normal file
View File

@ -0,0 +1,95 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import "fmt"
// Play is a single play of a playbook: the hosts it targets plus the roles
// and task lists executed on them. Field names mirror the ansible play
// keywords (see docs.go for the support matrix).
type Play struct {
	// ImportPlaybook, when set, replaces this play with another playbook file.
	ImportPlaybook string `yaml:"import_playbook,omitempty"`

	Base             `yaml:",inline"`
	Taggable         `yaml:",inline"`
	CollectionSearch `yaml:",inline"`

	PlayHost PlayHost `yaml:"hosts,omitempty"`

	// Facts
	GatherFacts bool `yaml:"gather_facts,omitempty"`
	// defaults to be deprecated, should be 'None' in future
	//GatherSubset []GatherSubset
	//GatherTimeout int
	//FactPath string

	// Variable Attribute
	VarsFiles  []string `yaml:"vars_files,omitempty"`
	VarsPrompt []string `yaml:"vars_prompt,omitempty"`

	// Role Attributes
	Roles []Role `yaml:"roles,omitempty"`

	// Block (Task) Lists Attributes
	Handlers  []Block `yaml:"handlers,omitempty"`
	PreTasks  []Block `yaml:"pre_tasks,omitempty"`
	PostTasks []Block `yaml:"post_tasks,omitempty"`
	Tasks     []Block `yaml:"tasks,omitempty"`

	// Flag/Setting Attributes
	ForceHandlers bool `yaml:"force_handlers,omitempty"`
	// NOTE(review): the yaml tag "percent" diverges from the ansible keyword
	// "max_fail_percentage" — confirm the tag is intentional.
	MaxFailPercentage float32    `yaml:"percent,omitempty"`
	Serial            PlaySerial `yaml:"serial,omitempty"`
	Strategy          string     `yaml:"strategy,omitempty"`
	Order             string     `yaml:"order,omitempty"`
}
// PlaySerial models the "serial" play keyword, which controls rolling-update
// batch sizes. In YAML it may be a single scalar or a sequence; both forms
// are normalized into Data.
type PlaySerial struct {
	Data []any
}

// UnmarshalYAML accepts either a sequence or a single scalar and stores the
// batch size value(s) in Data.
func (s *PlaySerial) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var many []any
	if err := unmarshal(&many); err == nil {
		s.Data = many
		return nil
	}

	var one any
	if err := unmarshal(&one); err == nil {
		s.Data = []any{one}
		return nil
	}

	// Fixed typo: "excepted" -> "expected".
	return fmt.Errorf("unsupported type, expected any or array")
}
// PlayHost models the "hosts" play keyword. In YAML it may be a single host
// pattern or a list of patterns; both forms are normalized into Hosts.
type PlayHost struct {
	Hosts []string
}

// UnmarshalYAML accepts a string sequence or a single string and stores the
// host pattern(s) in Hosts.
func (p *PlayHost) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var many []string
	if err := unmarshal(&many); err == nil {
		p.Hosts = many
		return nil
	}

	var one string
	if err := unmarshal(&one); err == nil {
		p.Hosts = []string{one}
		return nil
	}

	// Fixed typo: "excepted" -> "expected".
	return fmt.Errorf("unsupported type, expected string or string array")
}

View File

@ -0,0 +1,221 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
func TestUnmarshalYaml(t *testing.T) {
testcases := []struct {
name string
data []byte
excepted []Play
}{
{
name: "Unmarshal hosts with single value",
data: []byte(`---
- name: test play
hosts: localhost
`),
excepted: []Play{
{
Base: Base{Name: "test play"},
PlayHost: PlayHost{[]string{"localhost"}},
},
},
},
{
name: "Unmarshal hosts with multiple value",
data: []byte(`---
- name: test play
hosts: ["control-plane", "worker"]
`),
excepted: []Play{
{
Base: Base{
Name: "test play",
},
PlayHost: PlayHost{[]string{"control-plane", "worker"}},
},
},
},
{
name: "Unmarshal role with single value",
data: []byte(`---
- name: test play
hosts: localhost
roles:
- test
`),
excepted: []Play{
{
Base: Base{Name: "test play"},
PlayHost: PlayHost{
[]string{"localhost"},
},
Roles: []Role{
{
RoleInfo{
Role: "test",
},
},
},
},
},
},
{
name: "Unmarshal role with map value",
data: []byte(`---
- name: test play
hosts: localhost
roles:
- role: test
`),
excepted: []Play{
{
Base: Base{Name: "test play"},
PlayHost: PlayHost{
[]string{"localhost"},
},
Roles: []Role{
{
RoleInfo{
Role: "test",
},
},
},
},
},
},
{
name: "Unmarshal when with single value",
data: []byte(`---
- name: test play
hosts: localhost
roles:
- role: test
when: "true"
`),
excepted: []Play{
{
Base: Base{Name: "test play"},
PlayHost: PlayHost{
[]string{"localhost"},
},
Roles: []Role{
{
RoleInfo{
Conditional: Conditional{When: When{Data: []string{"true"}}},
Role: "test",
},
},
},
},
},
},
{
name: "Unmarshal when with multiple value",
data: []byte(`---
- name: test play
hosts: localhost
roles:
- role: test
when: ["true","false"]
`),
excepted: []Play{
{
Base: Base{Name: "test play"},
PlayHost: PlayHost{
[]string{"localhost"},
},
Roles: []Role{
{
RoleInfo{
Conditional: Conditional{When: When{Data: []string{"true", "false"}}},
Role: "test",
},
},
},
},
},
},
{
name: "Unmarshal single level block",
data: []byte(`---
- name: test play
hosts: localhost
tasks:
- name: test
custom-module: abc
`),
excepted: []Play{
{
Base: Base{Name: "test play"},
PlayHost: PlayHost{Hosts: []string{"localhost"}},
Tasks: []Block{
{
BlockBase: BlockBase{Base: Base{Name: "test"}},
Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}},
},
},
},
},
},
{
name: "Unmarshal multi level block",
data: []byte(`---
- name: test play
hosts: localhost
tasks:
- name: test
block:
- name: test | test
custom-module: abc
`),
excepted: []Play{
{
Base: Base{Name: "test play"},
PlayHost: PlayHost{Hosts: []string{"localhost"}},
Tasks: []Block{
{
BlockBase: BlockBase{Base: Base{Name: "test"}},
BlockInfo: BlockInfo{
Block: []Block{{
BlockBase: BlockBase{Base: Base{Name: "test | test"}},
Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}},
}},
},
},
},
},
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
var pb []Play
err := yaml.Unmarshal(tc.data, &pb)
assert.NoError(t, err)
assert.Equal(t, tc.excepted, pb)
})
}
}

View File

@ -0,0 +1,33 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import "fmt"
// Playbook is the parsed representation of an Ansible-style playbook file:
// an ordered list of plays.
type Playbook struct {
	Play []Play
}

// Validate checks that every play in the playbook is well-formed.
// Currently the only rule is that each play must target at least one host
// pattern. The first offending play is reported by name so the user can
// locate the bad entry in the file.
func (p *Playbook) Validate() error {
	for _, play := range p.Play {
		if len(play.PlayHost.Hosts) == 0 {
			return fmt.Errorf("playbook's hosts must not be empty in play %q", play.Name)
		}
	}
	return nil
}

View File

@ -0,0 +1,48 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestValidate verifies that a play without any hosts is rejected.
func TestValidate(t *testing.T) {
	tests := []struct {
		name     string
		playbook Playbook
	}{
		{
			name: "host is empty",
			playbook: Playbook{Play: []Play{{
				Base: Base{Name: "test"},
			}}},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Error(t, tt.playbook.Validate())
		})
	}
}

47
pkg/apis/core/v1/role.go Normal file
View File

@ -0,0 +1,47 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// Role references a role executed by a play. In YAML it may be written
// either as a bare string (just the role name) or as a mapping carrying
// the RoleInfo fields; see Role.UnmarshalYAML.
type Role struct {
	RoleInfo
}

// RoleInfo is the full form of a role reference inside a play.
type RoleInfo struct {
	Base             `yaml:",inline"`
	Conditional      `yaml:",inline"`
	Taggable         `yaml:",inline"`
	CollectionSearch `yaml:",inline"`

	// Role is the name of the role to load and execute.
	Role string `yaml:"role,omitempty"`

	// Block is presumably populated later with the role's resolved task
	// list (it has no yaml tag and is not set during unmarshal here) —
	// TODO confirm against the project loader.
	Block []Block
}
// UnmarshalYAML implements yaml.Unmarshaler. A role entry may be written
// either as a bare string:
//
//	roles:
//	  - test
//
// or as a mapping with additional fields:
//
//	roles:
//	  - role: test
//	    when: "true"
//
// Both forms are tried in order. If neither decodes, the parse error is
// returned instead of being silently discarded (previously a malformed
// entry produced a zero-value Role with no error).
func (r *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Shorthand form: the whole entry is just the role name.
	var s string
	if err := unmarshal(&s); err == nil {
		r.Role = s
		return nil
	}

	// Long form: a mapping carrying the RoleInfo fields.
	var info RoleInfo
	if err := unmarshal(&info); err != nil {
		return err
	}
	r.RoleInfo = info
	return nil
}

View File

@ -0,0 +1,66 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import "k8s.io/utils/strings/slices"
// Taggable is embedded by objects that carry tags and can be selected or
// skipped via --tags / --skip-tags style filters.
type Taggable struct {
	Tags []string `yaml:"tags,omitempty"`
}

// IsEnabled reports whether the block should be executed, given the
// selected tags (onlyTags) and the skipped tags (skipTags). Semantics
// follow Ansible's tag evaluation:
//   - a block tagged "always" runs unless "always" itself is skipped;
//   - the special selector "all" matches every block not tagged "never";
//   - the special selector "tagged" matches blocks that carry at least one tag;
//   - otherwise a block matches when it shares a tag with the selector list.
func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool {
	shouldRun := true

	if len(onlyTags) > 0 {
		switch {
		case slices.Contains(t.Tags, "always"):
			shouldRun = true
		case slices.Contains(onlyTags, "all") && !slices.Contains(t.Tags, "never"):
			shouldRun = true
		case slices.Contains(onlyTags, "tagged") && len(t.Tags) > 0 && !slices.Contains(t.Tags, "never"):
			// "tagged" selects blocks that have tags of their own. The
			// previous check (len(onlyTags) > 0) was always true here and
			// made "tagged" match untagged blocks too.
			shouldRun = true
		case !isdisjoint(onlyTags, t.Tags):
			shouldRun = true
		default:
			shouldRun = false
		}
	}

	if shouldRun && len(skipTags) > 0 {
		if slices.Contains(skipTags, "all") {
			// Skipping "all" still runs "always"-tagged blocks, unless
			// "always" is explicitly skipped as well. The previous
			// condition negated the second operand and skipped "always"
			// blocks whenever "always" was NOT in skipTags.
			if !slices.Contains(t.Tags, "always") || slices.Contains(skipTags, "always") {
				shouldRun = false
			}
		} else if !isdisjoint(skipTags, t.Tags) {
			shouldRun = false
		} else if slices.Contains(skipTags, "tagged") && len(t.Tags) > 0 {
			// Mirror of the selector case: skip blocks that carry tags.
			shouldRun = false
		}
	}

	return shouldRun
}

// isdisjoint returns true if a and b have no elements in common.
func isdisjoint(a, b []string) bool {
	for _, s := range a {
		if slices.Contains(b, s) {
			return false
		}
	}
	return true
}

View File

@ -0,0 +1,45 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// Config carries the global variable configuration consumed by a Pipeline
// (referenced via PipelineSpec.ConfigRef). Spec is a free-form
// RawExtension because its schema is user-defined.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
type Config struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec runtime.RawExtension `json:"spec,omitempty"`
}

// ConfigList contains a list of Config.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Config `json:"items"`
}

// init registers the Config types with the group's scheme builder.
func init() {
	SchemeBuilder.Register(&Config{}, &ConfigList{})
}

View File

@ -0,0 +1,67 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// InventoryHost is the set of hosts in an inventory, keyed by host name.
// The value holds the host-scoped variables as free-form data.
type InventoryHost map[string]runtime.RawExtension

// InventoryGroup groups hosts so playbooks can target them collectively.
type InventoryGroup struct {
	// Groups are names of other groups related to this one.
	// NOTE(review): nesting direction (child vs parent groups) is not
	// visible here — confirm against the inventory resolver.
	Groups []string `json:"groups,omitempty"`
	// Hosts are host names belonging to this group; they should reference
	// keys of InventorySpec.Hosts.
	Hosts []string `json:"hosts,omitempty"`
	// Vars are group-scoped variables (see the precedence note on
	// InventorySpec.Vars).
	Vars runtime.RawExtension `json:"vars,omitempty"`
}

// InventorySpec defines the hosts, groups and variables of an Inventory.
type InventorySpec struct {
	// Hosts is all nodes
	Hosts InventoryHost `json:"hosts,omitempty"`
	// Vars for all host. the priority for vars is: host vars > group vars > inventory vars
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	Vars runtime.RawExtension `json:"vars,omitempty"`
	// Groups nodes. a group contains repeated nodes
	// +optional
	Groups map[string]InventoryGroup `json:"groups,omitempty"`
}

// Inventory is the Schema describing the set of nodes a pipeline runs
// against (referenced via PipelineSpec.InventoryRef).
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
type Inventory struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec InventorySpec `json:"spec,omitempty"`
	//Status InventoryStatus `json:"status,omitempty"`
}

// InventoryList contains a list of Inventory.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type InventoryList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Inventory `json:"items"`
}

// init registers the Inventory types with the group's scheme builder.
func init() {
	SchemeBuilder.Register(&Inventory{}, &InventoryList{})
}

View File

@ -0,0 +1,154 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PipelinePhase describes the lifecycle stage of a Pipeline.
type PipelinePhase string

const (
	// PipelinePhasePending: the pipeline has been accepted but not started.
	PipelinePhasePending PipelinePhase = "Pending"
	// PipelinePhaseRunning: the pipeline's tasks are executing.
	PipelinePhaseRunning PipelinePhase = "Running"
	// PipelinePhaseFailed: the pipeline finished unsuccessfully.
	PipelinePhaseFailed PipelinePhase = "Failed"
	// PipelinePhaseSucceed: the pipeline finished successfully.
	PipelinePhaseSucceed PipelinePhase = "Succeed"
)

// Well-known annotations recognized on a Pipeline object.
const (
	// BuiltinsProjectAnnotation use builtins project of KubeKey
	BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project"
	// PauseAnnotation pause the pipeline
	PauseAnnotation = "kubekey.kubesphere.io/pause"
)
// PipelineSpec defines how a playbook is executed: the project that
// contains it, the inventory and config it runs with, and tag filters.
type PipelineSpec struct {
	// Project is storage for executable packages
	// +optional
	Project PipelineProject `json:"project,omitempty"`
	// Playbook which to execute.
	Playbook string `json:"playbook"`
	// InventoryRef is the node configuration for playbook
	// +optional
	InventoryRef *corev1.ObjectReference `json:"inventoryRef,omitempty"`
	// ConfigRef is the global variable configuration for playbook
	// +optional
	ConfigRef *corev1.ObjectReference `json:"configRef,omitempty"`
	// Tags is the tags of playbook which to execute
	// +optional
	Tags []string `json:"tags,omitempty"`
	// SkipTags is the tags of playbook which skip execute
	// +optional
	SkipTags []string `json:"skipTags,omitempty"`
	// Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
	// +optional
	Debug bool `json:"debug,omitempty"`
}

// PipelineProject locates the executable package (in Ansible file format)
// that the pipeline runs, either from a Git repository or a local path.
type PipelineProject struct {
	// Addr is the storage for executable packages (in Ansible file format).
	// When starting with http or https, it will be obtained from a Git repository.
	// When starting with file path, it will be obtained from the local path.
	// +optional
	Addr string `json:"addr,omitempty"`
	// Name is the project name base project
	// +optional
	Name string `json:"name,omitempty"`
	// Branch is the git branch of the git Addr.
	// +optional
	Branch string `json:"branch,omitempty"`
	// Tag is the git tag of the git Addr.
	// +optional
	Tag string `json:"tag,omitempty"`
	// InsecureSkipTLS skip tls or not when git addr is https.
	// +optional
	InsecureSkipTLS bool `json:"insecureSkipTLS,omitempty"`
	// Token of Authorization for http request
	// +optional
	Token string `json:"token,omitempty"`
}
// PipelineStatus records the observed result of a pipeline run.
type PipelineStatus struct {
	// TaskResult total related tasks execute result.
	TaskResult PipelineTaskResult `json:"taskResult,omitempty"`
	// Phase of pipeline.
	Phase PipelinePhase `json:"phase,omitempty"`
	// failed Reason of pipeline.
	Reason string `json:"reason,omitempty"`
	// FailedDetail will record the failed tasks.
	FailedDetail []PipelineFailedDetail `json:"failedDetail,omitempty"`
}

// PipelineTaskResult aggregates task counts by outcome.
type PipelineTaskResult struct {
	// Total number of tasks.
	Total int `json:"total,omitempty"`
	// Success number of tasks.
	Success int `json:"success,omitempty"`
	// Failed number of tasks.
	Failed int `json:"failed,omitempty"`
	// Skipped number of tasks.
	Skipped int `json:"skipped,omitempty"`
	// Ignored number of tasks.
	Ignored int `json:"ignored,omitempty"`
}

// PipelineFailedDetail describes one failed task and its per-host output.
type PipelineFailedDetail struct {
	// Task name of failed task.
	Task string `json:"task,omitempty"`
	// failed Hosts Result of failed task.
	Hosts []PipelineFailedDetailHost `json:"hosts,omitempty"`
}

// PipelineFailedDetailHost captures the output of a failed task on a
// single host.
type PipelineFailedDetailHost struct {
	// Host name of failed task.
	Host string `json:"host,omitempty"`
	// Stdout of failed task.
	Stdout string `json:"stdout,omitempty"`
	// StdErr of failed task.
	StdErr string `json:"stdErr,omitempty"`
}
// Pipeline is the Schema for executing one playbook to completion against
// an inventory, with optional global config and tag filters.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Playbook",type="string",JSONPath=".spec.playbook"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
type Pipeline struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   PipelineSpec   `json:"spec,omitempty"`
	Status PipelineStatus `json:"status,omitempty"`
}

// PipelineList contains a list of Pipeline.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PipelineList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Pipeline `json:"items"`
}

// init registers the Pipeline types with the group's scheme builder.
func init() {
	SchemeBuilder.Register(&Pipeline{}, &PipelineList{})
}

View File

@ -0,0 +1,36 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1 contains API Schema definitions for the kubekey v1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=kubekey.kubesphere.io
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
// Scheme registration plumbing for the kubekey.kubesphere.io/v1 API group.
var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -0,0 +1,402 @@
//go:build !ignore_autogenerated
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): everything below is generated by controller-gen ("DO NOT
// EDIT" header above); manual changes will be lost — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (in *Config) DeepCopy() *Config {
	if in == nil {
		return nil
	}
	out := new(Config)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Config) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigList) DeepCopyInto(out *ConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Config, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
func (in *ConfigList) DeepCopy() *ConfigList {
	if in == nil {
		return nil
	}
	out := new(ConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Inventory) DeepCopyInto(out *Inventory) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inventory.
func (in *Inventory) DeepCopy() *Inventory {
	if in == nil {
		return nil
	}
	out := new(Inventory)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Inventory) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventoryGroup) DeepCopyInto(out *InventoryGroup) {
	*out = *in
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.Vars.DeepCopyInto(&out.Vars)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryGroup.
func (in *InventoryGroup) DeepCopy() *InventoryGroup {
	if in == nil {
		return nil
	}
	out := new(InventoryGroup)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in InventoryHost) DeepCopyInto(out *InventoryHost) {
	{
		in := &in
		*out = make(InventoryHost, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryHost.
func (in InventoryHost) DeepCopy() InventoryHost {
	if in == nil {
		return nil
	}
	out := new(InventoryHost)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventoryList) DeepCopyInto(out *InventoryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Inventory, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryList.
func (in *InventoryList) DeepCopy() *InventoryList {
	if in == nil {
		return nil
	}
	out := new(InventoryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *InventoryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventorySpec) DeepCopyInto(out *InventorySpec) {
	*out = *in
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make(InventoryHost, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	in.Vars.DeepCopyInto(&out.Vars)
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make(map[string]InventoryGroup, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventorySpec.
func (in *InventorySpec) DeepCopy() *InventorySpec {
	if in == nil {
		return nil
	}
	out := new(InventorySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pipeline) DeepCopyInto(out *Pipeline) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline.
func (in *Pipeline) DeepCopy() *Pipeline {
	if in == nil {
		return nil
	}
	out := new(Pipeline)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Pipeline) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineFailedDetail) DeepCopyInto(out *PipelineFailedDetail) {
	*out = *in
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make([]PipelineFailedDetailHost, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetail.
func (in *PipelineFailedDetail) DeepCopy() *PipelineFailedDetail {
	if in == nil {
		return nil
	}
	out := new(PipelineFailedDetail)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineFailedDetailHost) DeepCopyInto(out *PipelineFailedDetailHost) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetailHost.
func (in *PipelineFailedDetailHost) DeepCopy() *PipelineFailedDetailHost {
	if in == nil {
		return nil
	}
	out := new(PipelineFailedDetailHost)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineList) DeepCopyInto(out *PipelineList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Pipeline, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList.
func (in *PipelineList) DeepCopy() *PipelineList {
	if in == nil {
		return nil
	}
	out := new(PipelineList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineProject) DeepCopyInto(out *PipelineProject) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineProject.
func (in *PipelineProject) DeepCopy() *PipelineProject {
	if in == nil {
		return nil
	}
	out := new(PipelineProject)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
	*out = *in
	out.Project = in.Project
	if in.InventoryRef != nil {
		in, out := &in.InventoryRef, &out.InventoryRef
		*out = new(corev1.ObjectReference)
		**out = **in
	}
	if in.ConfigRef != nil {
		in, out := &in.ConfigRef, &out.ConfigRef
		*out = new(corev1.ObjectReference)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SkipTags != nil {
		in, out := &in.SkipTags, &out.SkipTags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
func (in *PipelineSpec) DeepCopy() *PipelineSpec {
	if in == nil {
		return nil
	}
	out := new(PipelineSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) {
	*out = *in
	out.TaskResult = in.TaskResult
	if in.FailedDetail != nil {
		in, out := &in.FailedDetail, &out.FailedDetail
		*out = make([]PipelineFailedDetail, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus.
func (in *PipelineStatus) DeepCopy() *PipelineStatus {
	if in == nil {
		return nil
	}
	out := new(PipelineStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskResult) DeepCopyInto(out *PipelineTaskResult) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskResult.
func (in *PipelineTaskResult) DeepCopy() *PipelineTaskResult {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskResult)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,37 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the internal version. should not register in kubernetes
// +k8s:deepcopy-gen=package,register
// +groupName=kubekey.kubesphere.io
// +kubebuilder:skip
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
// Scheme registration plumbing for the internal
// kubekey.kubesphere.io/v1alpha1 API group (see package doc: this version
// should not be registered as a served Kubernetes API).
var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -0,0 +1,118 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// TaskPhase is the lifecycle phase of a Task.
type TaskPhase string

const (
	// TaskPhasePending: the task has not started executing yet.
	TaskPhasePending TaskPhase = "Pending"
	// TaskPhaseRunning: the task is executing.
	TaskPhaseRunning TaskPhase = "Running"
	// TaskPhaseSuccess: the task finished successfully.
	TaskPhaseSuccess TaskPhase = "Success"
	// TaskPhaseFailed: the task failed (may still be retried; see
	// Task.IsFailed).
	TaskPhaseFailed TaskPhase = "Failed"
	// TaskPhaseSkipped: the task was not executed.
	TaskPhaseSkipped TaskPhase = "Skipped"
	// TaskPhaseIgnored: the task failed but the failure was ignored
	// (treated as success by Task.IsSucceed).
	TaskPhaseIgnored TaskPhase = "Ignored"
)

const (
	// TaskAnnotationRole is the absolute dir of task in project.
	TaskAnnotationRole = "kubesphere.io/role"
)
// KubeKeyTaskSpec defines a single executable step generated from a
// playbook.
type KubeKeyTaskSpec struct {
	// Name is the human-readable task name.
	Name string `json:"name,omitempty"`
	// Hosts are the inventory hosts this task runs on.
	Hosts []string `json:"hosts,omitempty"`
	// IgnoreError presumably marks a failure of this task as non-fatal
	// (cf. TaskPhaseIgnored) — confirm with the executor.
	IgnoreError bool `json:"ignoreError,omitempty"`
	// Retries is the restart budget: the task is permanently failed once
	// Status.RestartCount reaches this value (see Task.IsFailed).
	Retries int `json:"retries,omitempty"`
	// When are conditional expressions gating execution.
	When []string `json:"when,omitempty"`
	// FailedWhen are expressions that mark the task as failed.
	FailedWhen []string `json:"failedWhen,omitempty"`
	// Loop is the raw loop definition. NOTE(review): the accepted shapes
	// (list vs. template string) are not visible here — confirm with the
	// executor.
	Loop runtime.RawExtension `json:"loop,omitempty"`
	// Module is the module to invoke, with its raw arguments.
	Module Module `json:"module,omitempty"`
	// Register presumably names the variable under which the module result
	// is stored — confirm with the executor.
	Register string `json:"register,omitempty"`
}

// Module identifies the module a task executes and its arguments.
type Module struct {
	// Name of the module.
	Name string `json:"name,omitempty"`
	// Args are the module-specific raw arguments.
	Args runtime.RawExtension `json:"args,omitempty"`
}
// TaskStatus records the observed execution state of a Task.
type TaskStatus struct {
	// RestartCount is how many times the task has been restarted; it is
	// compared against Spec.Retries to decide permanent failure.
	RestartCount int `json:"restartCount,omitempty"`
	// Phase is the current lifecycle phase of the task.
	Phase TaskPhase `json:"phase,omitempty"`
	// Conditions record execution attempts with timing and per-host
	// results (presumably one entry per attempt — confirm with the
	// controller).
	Conditions []TaskCondition `json:"conditions,omitempty"`
	// FailedDetail records the output of hosts on which the task failed.
	FailedDetail []TaskFailedDetail `json:"failedDetail,omitempty"`
}

// TaskCondition captures the timing and per-host results of one task
// execution.
type TaskCondition struct {
	StartTimestamp metav1.Time `json:"startTimestamp,omitempty"`
	EndTimestamp   metav1.Time `json:"endTimestamp,omitempty"`
	// HostResults holds the result of this execution for each host
	// (TaskHostResult.Host carries the host name).
	HostResults []TaskHostResult `json:"hostResults,omitempty"`
}

// TaskFailedDetail captures the failed output of one host.
type TaskFailedDetail struct {
	Host   string `json:"host,omitempty"`
	Stdout string `json:"stdout,omitempty"`
	StdErr string `json:"stdErr,omitempty"`
}

// TaskHostResult captures the stdout/stderr produced on one host.
type TaskHostResult struct {
	Host   string `json:"host,omitempty"`
	Stdout string `json:"stdout,omitempty"`
	StdErr string `json:"stdErr,omitempty"`
}
// Task is the internal Schema for one executable step of a pipeline. It
// lives in the internal v1alpha1 group, which per the package doc should
// not be registered as a served Kubernetes API.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:scope=Namespaced
type Task struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   KubeKeyTaskSpec `json:"spec,omitempty"`
	Status TaskStatus      `json:"status,omitempty"`
}

// TaskList contains a list of Task.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TaskList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Task `json:"items"`
}
// IsComplete reports whether the task has reached a terminal state:
// succeeded (including ignored failures), permanently failed, or skipped.
// A failed task that still has retries left is NOT complete.
func (t Task) IsComplete() bool {
	return t.IsSucceed() || t.IsFailed() || t.IsSkipped()
}

// IsSkipped reports whether the task was skipped.
func (t Task) IsSkipped() bool {
	return t.Status.Phase == TaskPhaseSkipped
}

// IsSucceed reports whether the task ended successfully; a task whose
// failure was ignored (TaskPhaseIgnored) counts as succeeded.
func (t Task) IsSucceed() bool {
	return t.Status.Phase == TaskPhaseSuccess || t.Status.Phase == TaskPhaseIgnored
}

// IsFailed reports whether the task failed permanently, i.e. it is in the
// Failed phase and its restart count has exhausted Spec.Retries.
func (t Task) IsFailed() bool {
	return t.Status.Phase == TaskPhaseFailed && t.Spec.Retries <= t.Status.RestartCount
}

View File

@ -0,0 +1,211 @@
//go:build !ignore_autogenerated
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): everything below is generated by controller-gen ("DO NOT
// EDIT" header above); manual changes will be lost — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeKeyTaskSpec) DeepCopyInto(out *KubeKeyTaskSpec) {
	*out = *in
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.When != nil {
		in, out := &in.When, &out.When
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.FailedWhen != nil {
		in, out := &in.FailedWhen, &out.FailedWhen
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.Loop.DeepCopyInto(&out.Loop)
	in.Module.DeepCopyInto(&out.Module)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeKeyTaskSpec.
func (in *KubeKeyTaskSpec) DeepCopy() *KubeKeyTaskSpec {
	if in == nil {
		return nil
	}
	out := new(KubeKeyTaskSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Module) DeepCopyInto(out *Module) {
	*out = *in
	in.Args.DeepCopyInto(&out.Args)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Module.
func (in *Module) DeepCopy() *Module {
	if in == nil {
		return nil
	}
	out := new(Module)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Task) DeepCopyInto(out *Task) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task.
func (in *Task) DeepCopy() *Task {
	if in == nil {
		return nil
	}
	out := new(Task)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Task) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskCondition) DeepCopyInto(out *TaskCondition) {
	*out = *in
	in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
	in.EndTimestamp.DeepCopyInto(&out.EndTimestamp)
	if in.HostResults != nil {
		in, out := &in.HostResults, &out.HostResults
		*out = make([]TaskHostResult, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskCondition.
func (in *TaskCondition) DeepCopy() *TaskCondition {
	if in == nil {
		return nil
	}
	out := new(TaskCondition)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskFailedDetail) DeepCopyInto(out *TaskFailedDetail) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskFailedDetail.
func (in *TaskFailedDetail) DeepCopy() *TaskFailedDetail {
	if in == nil {
		return nil
	}
	out := new(TaskFailedDetail)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskHostResult) DeepCopyInto(out *TaskHostResult) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskHostResult.
func (in *TaskHostResult) DeepCopy() *TaskHostResult {
if in == nil {
return nil
}
out := new(TaskHostResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskList) DeepCopyInto(out *TaskList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Task, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList.
func (in *TaskList) DeepCopy() *TaskList {
if in == nil {
return nil
}
out := new(TaskList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TaskList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskStatus) DeepCopyInto(out *TaskStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]TaskCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FailedDetail != nil {
in, out := &in.FailedDetail, &out.FailedDetail
*out = make([]TaskFailedDetail, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskStatus.
func (in *TaskStatus) DeepCopy() *TaskStatus {
if in == nil {
return nil
}
out := new(TaskStatus)
in.DeepCopyInto(out)
return out
}

89
pkg/cache/cache.go vendored Normal file
View File

@ -0,0 +1,89 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
)
// Cache is the interface for cache.
type Cache interface {
	// Name of pool
	Name() string
	// Put the cached value for the given key.
	Put(key string, value any)
	// Get the cached value for the given key.
	Get(key string) (any, bool)
	// Release the cached value for the given id.
	Release(id string)
	// Clean all cached value
	Clean()
}

// local is an in-memory Cache implementation guarded by an embedded mutex.
type local struct {
	name  string
	cache map[string]any
	sync.Mutex
}

// Name returns the pool name of the cache.
func (p *local) Name() string {
	return p.name
}

// Put stores value under key, overwriting any previous entry.
func (p *local) Put(key string, value any) {
	p.Lock()
	defer p.Unlock()
	p.cache[key] = value
}

// Get returns the value stored under key and whether it exists.
func (p *local) Get(key string) (any, bool) {
	// Lock here as well: reading a Go map concurrently with the writes in
	// Put/Release/Clean is a data race.
	p.Lock()
	defer p.Unlock()
	v, ok := p.cache[key]
	return v, ok
}

// Release removes the entry stored under id, if any.
func (p *local) Release(id string) {
	p.Lock()
	defer p.Unlock()
	delete(p.cache, id)
}

// Clean removes every entry from the cache.
func (p *local) Clean() {
	p.Lock()
	defer p.Unlock()
	for id := range p.cache {
		delete(p.cache, id)
	}
}

// NewLocalCache return a local cache
func NewLocalCache(name string) Cache {
	return &local{
		name:  name,
		cache: make(map[string]any),
	}
}

var (
	// LocalVariable is a local cache for variable.Variable
	LocalVariable = NewLocalCache("variable")
)

31
pkg/cache/cache_test.go vendored Normal file
View File

@ -0,0 +1,31 @@
package cache
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCache verifies the Put/Get/Release lifecycle of a local cache.
func TestCache(t *testing.T) {
	c := NewLocalCache("test")
	assert.Equal(t, "test", c.Name())

	// a missing key reports not-found
	_, found := c.Get("foo")
	assert.False(t, found)

	// a stored key round-trips
	c.Put("foo", "bar")
	got, found := c.Get("foo")
	assert.True(t, found)
	assert.Equal(t, "bar", got)

	// releasing the key makes it a miss again
	c.Release("foo")
	_, found = c.Get("foo")
	assert.False(t, found)
}

386
pkg/cache/runtime_client.go vendored Normal file
View File

@ -0,0 +1,386 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
jsonpatch "github.com/evanphx/json-patch"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/yaml"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
// delegatingClient implements ctrlclient.Client. When a real client is set,
// every resource except Task is forwarded to it; Task objects (and all
// resources when no client is set) are persisted as yaml files under
// <workdir>/<runtime>/<namespace>/<resource>/<name>/<name>.yaml.
type delegatingClient struct {
	// client is the optional real cluster client; nil means file-only mode.
	client ctrlclient.Client
	// scheme is used for GVK lookups when no real client is available.
	scheme *runtime.Scheme
}

// NewDelegatingClient wraps client (which may be nil) in a delegatingClient.
func NewDelegatingClient(client ctrlclient.Client) ctrlclient.Client {
	scheme := runtime.NewScheme()
	if err := kubekeyv1.AddToScheme(scheme); err != nil {
		klog.Errorf("failed to add scheme: %v", err)
	}
	// NOTE(review): Task/TaskList are registered on the v1 SchemeBuilder
	// *after* AddToScheme has already run above, so they are not added to
	// the scheme built here — confirm whether this registration is meant
	// to take effect for this scheme.
	kubekeyv1.SchemeBuilder.Register(&kubekeyv1alpha1.Task{}, &kubekeyv1alpha1.TaskList{})
	return &delegatingClient{
		client: client,
		scheme: scheme,
	}
}

// Get reads the object from the delegate client, or from its yaml file on
// disk for Task objects / file-only mode.
func (d delegatingClient) Get(ctx context.Context, key ctrlclient.ObjectKey, obj ctrlclient.Object, opts ...ctrlclient.GetOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Get(ctx, key, obj, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	// file layout: <workdir>/<runtime>/<namespace>/<resource>/<name>/<name>.yaml
	path := filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, key.Namespace, resource, key.Name, key.Name+".yaml")
	data, err := os.ReadFile(path)
	if err != nil {
		klog.Errorf("failed to read yaml file: %v", err)
		return err
	}
	if err := yaml.Unmarshal(data, obj); err != nil {
		klog.Errorf("unmarshal file %s error %v", path, err)
		return err
	}
	return nil
}

// List reads objects from the delegate client, or walks every namespace
// directory under the workdir and unmarshals each <name>.yaml of the list's
// resource type. Only the Namespace list option is honored as a filter.
func (d delegatingClient) List(ctx context.Context, list ctrlclient.ObjectList, opts ...ctrlclient.ListOption) error {
	resource := _const.ResourceFromObject(list)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.List(ctx, list, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", list.GetObjectKind().GroupVersionKind().String())
	}
	// read all runtime.Object
	var objects []runtime.Object
	runtimeDirEntries, err := os.ReadDir(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir))
	if err != nil && !os.IsNotExist(err) {
		klog.Errorf("readDir %s error %v", filepath.Join(_const.GetWorkDir(), _const.RuntimeDir), err)
		return err
	}
	// each top-level entry is a namespace directory
	for _, re := range runtimeDirEntries {
		if re.IsDir() {
			resourceDir := filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, re.Name(), resource)
			entries, err := os.ReadDir(resourceDir)
			if err != nil {
				// a namespace may simply not contain this resource
				if os.IsNotExist(err) {
					continue
				}
				klog.Errorf("readDir %s error %v", resourceDir, err)
				return err
			}
			for _, e := range entries {
				if !e.IsDir() {
					continue
				}
				resourceFile := filepath.Join(resourceDir, e.Name(), e.Name()+".yaml")
				data, err := os.ReadFile(resourceFile)
				if err != nil {
					if os.IsNotExist(err) {
						continue
					}
					klog.Errorf("read file %s error: %v", resourceFile, err)
					return err
				}
				// pick the concrete type matching the resource directory
				var obj runtime.Object
				switch resource {
				case _const.RuntimePipelineDir:
					obj = &kubekeyv1.Pipeline{}
				case _const.RuntimeInventoryDir:
					obj = &kubekeyv1.Inventory{}
				case _const.RuntimeConfigDir:
					obj = &kubekeyv1.Config{}
				case _const.RuntimePipelineTaskDir:
					obj = &kubekeyv1alpha1.Task{}
				}
				if err := yaml.Unmarshal(data, &obj); err != nil {
					klog.Errorf("unmarshal file %s error: %v", resourceFile, err)
					return err
				}
				objects = append(objects, obj)
			}
		}
	}
	o := ctrlclient.ListOptions{}
	o.ApplyOptions(opts)
	switch {
	case o.Namespace != "":
		// iterate backwards so removal by re-slicing stays index-safe
		for i := len(objects) - 1; i >= 0; i-- {
			if objects[i].(metav1.Object).GetNamespace() != o.Namespace {
				objects = append(objects[:i], objects[i+1:]...)
			}
		}
	}
	if err := apimeta.SetList(list, objects); err != nil {
		return err
	}
	return nil
}

// Create writes the object through the delegate client, or marshals it to its
// yaml file on disk, creating the parent directory first.
func (d delegatingClient) Create(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.CreateOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Create(ctx, obj, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	data, err := yaml.Marshal(obj)
	if err != nil {
		klog.Errorf("failed to marshal object: %v", err)
		return err
	}
	// NOTE(review): fs.ModePerm is 0777 for both the directory and the
	// file; consider tighter permissions (0755/0644) for on-disk state.
	if err := os.MkdirAll(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName()), fs.ModePerm); err != nil {
		klog.Errorf("create dir %s error: %v", filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName()), err)
		return err
	}
	return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
}

// Delete removes the object through the delegate client, or deletes its whole
// per-object directory from disk.
func (d delegatingClient) Delete(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.DeleteOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Delete(ctx, obj, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	return os.RemoveAll(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName()))
}

// Update writes the object through the delegate client, or overwrites its
// yaml file on disk. Unlike Create it assumes the directory already exists.
func (d delegatingClient) Update(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.UpdateOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Update(ctx, obj, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	data, err := yaml.Marshal(obj)
	if err != nil {
		klog.Errorf("failed to marshal object: %v", err)
		return err
	}
	return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
}

// Patch forwards to the delegate client when possible. In file mode the patch
// data is computed only to detect an empty patch; the file is then overwritten
// with the fully marshaled object rather than by applying the patch to the
// stored copy.
func (d delegatingClient) Patch(ctx context.Context, obj ctrlclient.Object, patch ctrlclient.Patch, opts ...ctrlclient.PatchOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Patch(ctx, obj, patch, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	patchData, err := patch.Data(obj)
	if err != nil {
		klog.Errorf("failed to get patch data: %v", err)
		return err
	}
	if len(patchData) == 0 {
		klog.V(4).Infof("nothing to patch, skip")
		return nil
	}
	data, err := yaml.Marshal(obj)
	if err != nil {
		klog.Errorf("failed to marshal object: %v", err)
		return err
	}
	return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
}

// DeleteAllOf forwards to the delegate client when possible; in file mode it
// deletes only the single object's directory (via Delete), ignoring the
// delete-all-of options.
func (d delegatingClient) DeleteAllOf(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.DeleteAllOfOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.DeleteAllOf(ctx, obj, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	return d.Delete(ctx, obj)
}

// Status returns the delegate's status writer, or a file-backed subresource
// writer in file-only mode.
func (d delegatingClient) Status() ctrlclient.SubResourceWriter {
	if d.client != nil {
		return d.client.Status()
	}
	// d.client is nil here, so the writer always takes its file path.
	return &delegatingSubResourceWriter{client: d.client}
}

// SubResource returns the delegate's subresource client, or nil in file mode.
func (d delegatingClient) SubResource(subResource string) ctrlclient.SubResourceClient {
	if d.client != nil {
		return d.client.SubResource(subResource)
	}
	return nil
}

// Scheme returns the delegate's scheme, or the locally built one.
func (d delegatingClient) Scheme() *runtime.Scheme {
	if d.client != nil {
		return d.client.Scheme()
	}
	return d.scheme
}

// RESTMapper returns the delegate's mapper, or nil in file mode.
func (d delegatingClient) RESTMapper() apimeta.RESTMapper {
	if d.client != nil {
		return d.client.RESTMapper()
	}
	return nil
}

// GroupVersionKindFor resolves the GVK via the delegate or the local scheme.
func (d delegatingClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) {
	if d.client != nil {
		return d.client.GroupVersionKindFor(obj)
	}
	return apiutil.GVKForObject(obj, d.scheme)
}

// IsObjectNamespaced asks the delegate, or reports true unconditionally in
// file mode (all file-backed resources are stored per-namespace).
func (d delegatingClient) IsObjectNamespaced(obj runtime.Object) (bool, error) {
	if d.client != nil {
		return d.client.IsObjectNamespaced(obj)
	}
	return true, nil
}
// delegatingSubResourceWriter mirrors delegatingClient for status writes:
// forward to the real client when set (and the object is not a Task),
// otherwise persist to the object's yaml file. Note that in file mode the
// *entire* object is marshaled and written, not only the status subresource.
type delegatingSubResourceWriter struct {
	client ctrlclient.Client
}

// Create writes the subresource through the delegate, or marshals obj to its
// yaml file on disk.
func (d delegatingSubResourceWriter) Create(ctx context.Context, obj ctrlclient.Object, subResource ctrlclient.Object, opts ...ctrlclient.SubResourceCreateOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Status().Create(ctx, obj, subResource, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	data, err := yaml.Marshal(obj)
	if err != nil {
		klog.Errorf("failed to marshal object: %v", err)
		return err
	}
	return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
}

// Update writes the status through the delegate, or overwrites the object's
// yaml file on disk.
func (d delegatingSubResourceWriter) Update(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.SubResourceUpdateOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Status().Update(ctx, obj, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	data, err := yaml.Marshal(obj)
	if err != nil {
		klog.Errorf("failed to marshal object: %v", err)
		return err
	}
	return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
}

// Patch forwards to the delegate when possible. In file mode the patch data is
// computed only to detect an empty patch; the file is then overwritten with
// the fully marshaled object (same behavior as delegatingClient.Patch).
func (d delegatingSubResourceWriter) Patch(ctx context.Context, obj ctrlclient.Object, patch ctrlclient.Patch, opts ...ctrlclient.SubResourcePatchOption) error {
	resource := _const.ResourceFromObject(obj)
	if d.client != nil && resource != _const.RuntimePipelineTaskDir {
		return d.client.Status().Patch(ctx, obj, patch, opts...)
	}
	if resource == "" {
		return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
	patchData, err := patch.Data(obj)
	if err != nil {
		klog.Errorf("failed to get patch data: %v", err)
		return err
	}
	if len(patchData) == 0 {
		klog.V(4).Infof("nothing to patch, skip")
		return nil
	}
	data, err := yaml.Marshal(obj)
	if err != nil {
		klog.Errorf("failed to marshal object: %v", err)
		return err
	}
	return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
}
// getPatchedJSON applies patchJS of the given patchType to originalJS and
// returns the patched document. For strategic merge patches a typed object is
// created via creater for the given gvk; JSON and merge patches go through
// the json-patch library directly.
func getPatchedJSON(patchType types.PatchType, originalJS, patchJS []byte, gvk schema.GroupVersionKind, creater runtime.ObjectCreater) ([]byte, error) {
	switch patchType {
	case types.JSONPatchType:
		patchObj, err := jsonpatch.DecodePatch(patchJS)
		if err != nil {
			return nil, err
		}
		bytes, err := patchObj.Apply(originalJS)
		// TODO: This is pretty hacky, we need a better structured error from the json-patch
		if err != nil && strings.Contains(err.Error(), "doc is missing key") {
			msg := err.Error()
			// Only rewrite the error when the "key:" marker is actually
			// present: with ix == -1 the original msg[ix+5:] slice would
			// silently report a garbage key name.
			if ix := strings.Index(msg, "key:"); ix >= 0 {
				// +5 skips "key: " (marker plus the following space)
				return bytes, fmt.Errorf("Object to be patched is missing field (%s)", msg[ix+5:])
			}
			return bytes, err
		}
		return bytes, err
	case types.MergePatchType:
		return jsonpatch.MergePatch(originalJS, patchJS)
	case types.StrategicMergePatchType:
		// get a typed object for this GVK if we need to apply a strategic merge patch
		obj, err := creater.New(gvk)
		if err != nil {
			return nil, fmt.Errorf("cannot apply strategic merge patch for %s locally, try --type merge", gvk.String())
		}
		return strategicpatch.StrategicMergePatch(originalJS, patchJS, obj)
	default:
		// only here as a safety net - go-restful filters content-type
		return nil, fmt.Errorf("unknown Content-Type header for patch: %v", patchType)
	}
}

View File

@ -0,0 +1,75 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connector
import (
"context"
"io"
"io/fs"
"os"
"k8s.io/utils/exec"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
// Connector is the interface for connecting to a remote host. Implementations
// must be Init'ed before use and Close'd when done.
type Connector interface {
	// Init initializes the connection
	Init(ctx context.Context) error
	// Close closes the connection
	Close(ctx context.Context) error
	// CopyFile copies a file from local to remote; local holds the full
	// file content and mode is applied to the created remote file.
	CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error
	// FetchFile copies a file from remote to local
	FetchFile(ctx context.Context, remoteFile string, local io.Writer) error
	// ExecuteCommand executes a command on the remote host
	ExecuteCommand(ctx context.Context, cmd string) ([]byte, error)
}
// NewConnector creates a new connector.
//
// When vars["connector"] is "local" a localConnector is returned; when it is
// "ssh" an sshConnector is returned. For any other value the host is compared
// with the local hostname: a match yields a localConnector, anything else
// falls back to ssh.
func NewConnector(host string, vars variable.VariableData) Connector {
	// newSSH builds an sshConnector from the host vars, preferring an
	// explicit "ssh_host" override over the inventory host name. Shared by
	// the "ssh" case and the default fallback (previously duplicated).
	newSSH := func() Connector {
		if h := variable.StringVar(vars, "ssh_host"); h != nil {
			host = *h
		}
		return &sshConnector{
			Host:     host,
			Port:     variable.IntVar(vars, "ssh_port"),
			User:     variable.StringVar(vars, "ssh_user"),
			Password: variable.StringVar(vars, "ssh_password"),
		}
	}
	switch vars["connector"] {
	case "local":
		return &localConnector{Cmd: exec.New()}
	case "ssh":
		return newSSH()
	default:
		// no explicit connector: run locally when the target is this
		// machine, otherwise fall back to ssh
		localHost, _ := os.Hostname()
		if localHost == host {
			return &localConnector{Cmd: exec.New()}
		}
		return newSSH()
	}
}

View File

@ -0,0 +1,100 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connector
import (
"context"
"io"
"io/fs"
"os"
"path/filepath"
"k8s.io/klog/v2"
"k8s.io/utils/exec"
)
// localConnector implements Connector for the machine kubekey itself runs on:
// "remote" paths are plain local filesystem paths and commands run through
// the injected exec implementation (which tests replace with a fake).
type localConnector struct {
	Cmd exec.Interface
}

// Init is a no-op: there is no connection to establish locally.
func (c *localConnector) Init(ctx context.Context) error {
	return nil
}

// Close is a no-op: there is no connection to tear down locally.
func (c *localConnector) Close(ctx context.Context) error {
	return nil
}

// CopyFile writes the given content to remoteFile on the local filesystem,
// creating the parent directory when necessary, and applies mode to the file.
func (c *localConnector) CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error {
	// create remote file
	if _, err := os.Stat(filepath.Dir(remoteFile)); err != nil {
		klog.Warningf("Failed to stat dir %s: %v create it", filepath.Dir(remoteFile), err)
		if err := os.MkdirAll(filepath.Dir(remoteFile), mode); err != nil {
			klog.Errorf("Failed to create dir %s: %v", filepath.Dir(remoteFile), err)
			return err
		}
	}
	rf, err := os.Create(remoteFile)
	if err != nil {
		klog.Errorf("Failed to create file %s: %v", remoteFile, err)
		return err
	}
	// close the created file to avoid leaking the descriptor (the ssh
	// connector's CopyFile already does this)
	defer rf.Close()
	if _, err := rf.Write(local); err != nil {
		klog.Errorf("Failed to write file %s: %v", remoteFile, err)
		return err
	}
	return rf.Chmod(mode)
}

// FetchFile streams remoteFile from the local filesystem into local.
func (c *localConnector) FetchFile(ctx context.Context, remoteFile string, local io.Writer) error {
	file, err := os.Open(remoteFile)
	if err != nil {
		klog.Errorf("Failed to read file %s: %v", remoteFile, err)
		return err
	}
	// close the opened file to avoid leaking the descriptor (the ssh
	// connector's FetchFile already does this)
	defer file.Close()
	if _, err := io.Copy(local, file); err != nil {
		klog.Errorf("Failed to copy file %s: %v", remoteFile, err)
		return err
	}
	return nil
}

// ExecuteCommand runs cmd locally and returns its combined stdout/stderr.
func (c *localConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
	return c.Cmd.CommandContext(ctx, cmd).CombinedOutput()
}

// copyFile copies sourcePath to destinationPath on the local filesystem.
func (c *localConnector) copyFile(sourcePath, destinationPath string) error {
	sourceFile, err := os.Open(sourcePath)
	if err != nil {
		return err
	}
	defer sourceFile.Close()
	destinationFile, err := os.Create(destinationPath)
	if err != nil {
		return err
	}
	defer destinationFile.Close()
	_, err = io.Copy(destinationFile, sourceFile)
	return err
}

View File

@ -0,0 +1,79 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connector
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/utils/exec"
testingexec "k8s.io/utils/exec/testing"
)
// newFakeLocalConnector builds a localConnector backed by a scripted fake
// exec: running exactly runCmd (command plus joined args) yields output on
// combined stdout, and any other command yields an "error command" error.
func newFakeLocalConnector(runCmd string, output string) *localConnector {
	return &localConnector{
		Cmd: &testingexec.FakeExec{CommandScript: []testingexec.FakeCommandAction{
			func(cmd string, args ...string) exec.Cmd {
				// reconstruct the full command line and compare with the
				// scripted command
				if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) == runCmd {
					return &testingexec.FakeCmd{
						CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) {
							return []byte(output), nil, nil
						}},
					}
				}
				// any unscripted command fails
				return &testingexec.FakeCmd{
					CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) {
						return nil, nil, fmt.Errorf("error command")
					}},
				}
			},
		}},
	}
}
// TestLocalConnector_ExecuteCommand verifies that localConnector.ExecuteCommand
// returns the scripted output for the expected command and an error for any
// other command. (Renamed from TestSshConnector_ExecuteCommand: the connector
// under test is the local one, not ssh.)
func TestLocalConnector_ExecuteCommand(t *testing.T) {
	testcases := []struct {
		name        string
		cmd         string
		expectedErr error
	}{
		{
			name:        "execute command succeed",
			cmd:         "echo 'hello'",
			expectedErr: nil,
		},
		{
			name:        "execute command failed",
			cmd:         "echo 'hello1'",
			expectedErr: fmt.Errorf("error command"),
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// bound each case so a misbehaving fake cannot hang the suite
			ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
			defer cancel()
			lc := newFakeLocalConnector("echo 'hello'", "hello")
			_, err := lc.ExecuteCommand(ctx, tc.cmd)
			assert.Equal(t, tc.expectedErr, err)
		})
	}
}

View File

@ -0,0 +1,131 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connector
import (
	"context"
	"fmt"
	"io"
	"io/fs"
	"net"
	"path/filepath"
	"strconv"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
	"k8s.io/klog/v2"
	"k8s.io/utils/pointer"
)
// sshConnector implements Connector over an ssh connection, using sftp for
// file transfer. Port, User and Password are optional; Port defaults to 22.
type sshConnector struct {
	Host     string
	Port     *int
	User     *string
	Password *string
	// client is the live ssh connection established by Init.
	client *ssh.Client
}

// Init dials the remote host. Password authentication is used when a password
// is set; the user defaults to the empty string.
func (c *sshConnector) Init(ctx context.Context) error {
	if c.Host == "" {
		return fmt.Errorf("host is not set")
	}
	if c.Port == nil {
		c.Port = pointer.Int(22)
	}
	var auth []ssh.AuthMethod
	if c.Password != nil {
		auth = []ssh.AuthMethod{
			ssh.Password(*c.Password),
		}
	}
	// net.JoinHostPort renders IPv6 literals correctly ("[::1]:22"),
	// which plain "%s:%s" concatenation would not.
	// SECURITY NOTE: host keys are not verified (InsecureIgnoreHostKey),
	// leaving connections open to man-in-the-middle attacks.
	sshClient, err := ssh.Dial("tcp", net.JoinHostPort(c.Host, strconv.Itoa(*c.Port)), &ssh.ClientConfig{
		User:            pointer.StringDeref(c.User, ""),
		Auth:            auth,
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	})
	if err != nil {
		return err
	}
	c.client = sshClient
	return nil
}

// Close shuts down the ssh connection if one was established.
func (c *sshConnector) Close(ctx context.Context) error {
	// Init may have failed or never run; avoid a nil-pointer dereference.
	if c.client == nil {
		return nil
	}
	return c.client.Close()
}

// CopyFile writes src to remoteFile on the remote host via sftp, creating the
// parent directory when necessary, and applies mode to the created file.
func (c *sshConnector) CopyFile(ctx context.Context, src []byte, remoteFile string, mode fs.FileMode) error {
	// create sftp client
	sftpClient, err := sftp.NewClient(c.client)
	if err != nil {
		klog.Errorf("Failed to create sftp client: %v", err)
		return err
	}
	defer sftpClient.Close()
	// create remote file
	if _, err := sftpClient.Stat(filepath.Dir(remoteFile)); err != nil {
		klog.Warningf("Failed to stat dir %s: %v create it", filepath.Dir(remoteFile), err)
		if err := sftpClient.MkdirAll(filepath.Dir(remoteFile)); err != nil {
			klog.Errorf("Failed to create dir %s: %v", filepath.Dir(remoteFile), err)
			return err
		}
	}
	rf, err := sftpClient.Create(remoteFile)
	if err != nil {
		klog.Errorf("Failed to create file %s: %v", remoteFile, err)
		return err
	}
	defer rf.Close()
	if _, err = rf.Write(src); err != nil {
		klog.Errorf("Failed to write file %s: %v", remoteFile, err)
		return err
	}
	return rf.Chmod(mode)
}

// FetchFile streams remoteFile from the remote host into local via sftp.
func (c *sshConnector) FetchFile(ctx context.Context, remoteFile string, local io.Writer) error {
	// create sftp client
	sftpClient, err := sftp.NewClient(c.client)
	if err != nil {
		klog.Errorf("Failed to create sftp client: %v", err)
		return err
	}
	defer sftpClient.Close()
	rf, err := sftpClient.Open(remoteFile)
	if err != nil {
		klog.Errorf("Failed to open file %s: %v", remoteFile, err)
		return err
	}
	defer rf.Close()
	if _, err := io.Copy(local, rf); err != nil {
		klog.Errorf("Failed to copy file %s: %v", remoteFile, err)
		return err
	}
	return nil
}

// ExecuteCommand runs cmd in a fresh ssh session and returns its combined
// stdout/stderr.
func (c *sshConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
	// create ssh session
	session, err := c.client.NewSession()
	if err != nil {
		return nil, err
	}
	defer session.Close()
	return session.CombinedOutput(cmd)
}

27
pkg/const/context.go Normal file
View File

@ -0,0 +1,27 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package _const
// key in context
// use in marshal playbook.Block
const (
	// CtxBlockHosts is the context key carrying a block's hosts.
	CtxBlockHosts = "block-hosts"
	// CtxBlockRole is the context key carrying a block's role.
	CtxBlockRole = "block-role"
	// CtxBlockWhen is the context key carrying a block's when conditions.
	CtxBlockWhen = "block-when"
	// CtxBlockTaskUID is the context key carrying the uid of the task
	// associated with a block.
	CtxBlockTaskUID = "block-task-uid"
)

73
pkg/const/helper.go Normal file
View File

@ -0,0 +1,73 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package _const
import (
"path/filepath"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
)
// workDirOnce guards workDir so it is assigned at most once per process.
var workDirOnce = &sync.Once{}

// SetWorkDir sets the workdir once. Calls after the first are silently
// ignored, so the workdir is immutable for the rest of the process lifetime.
func SetWorkDir(wd string) {
	workDirOnce.Do(func() {
		workDir = wd
	})
}

// GetWorkDir returns the workdir. It is the empty string until SetWorkDir has
// been called.
func GetWorkDir() string {
	return workDir
}
// ResourceFromObject maps a known kubekey API object (or its list type) to
// the name of the runtime sub-directory its yaml files are stored under.
// Unsupported types yield "".
func ResourceFromObject(obj runtime.Object) string {
	switch obj.(type) {
	case *kubekeyv1.Pipeline, *kubekeyv1.PipelineList:
		return RuntimePipelineDir
	case *kubekeyv1.Config, *kubekeyv1.ConfigList:
		return RuntimeConfigDir
	case *kubekeyv1.Inventory, *kubekeyv1.InventoryList:
		return RuntimeInventoryDir
	case *kubekeyv1alpha1.Task, *kubekeyv1alpha1.TaskList:
		return RuntimePipelineTaskDir
	default:
		return ""
	}
}
// RuntimeDirFromObject returns the on-disk runtime directory for obj
// (<workdir>/<runtime>/<namespace>/<resource>/<name>). It returns "" and logs
// an error when the object's type is unsupported or is not a metav1.Object.
func RuntimeDirFromObject(obj runtime.Object) string {
	resource := ResourceFromObject(obj)
	if resource == "" {
		klog.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
		return ""
	}
	mo, ok := obj.(metav1.Object)
	if !ok {
		klog.Errorf("failed convert to metav1.Object: %s", obj.GetObjectKind().GroupVersionKind().String())
		return ""
	}
	return filepath.Join(workDir, RuntimeDir, mo.GetNamespace(), resource, mo.GetName())
}

50
pkg/const/helper_test.go Normal file
View File

@ -0,0 +1,50 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package _const
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
)
// TestWorkDir verifies that SetWorkDir is write-once: the first value wins
// and subsequent calls are silently ignored.
func TestWorkDir(t *testing.T) {
	// before any SetWorkDir call the workdir is unset
	assert.Equal(t, "", GetWorkDir())

	// the first SetWorkDir call records the value
	SetWorkDir("/tmp")
	assert.Equal(t, "/tmp", GetWorkDir())

	// a second call must not overwrite the recorded value
	SetWorkDir("/tmp2")
	assert.Equal(t, "/tmp", GetWorkDir())
}
// TestResourceFromObject covers every supported object kind (both the single
// object and its List counterpart map to the same directory) plus an
// unsupported kind, which must map to the empty string.
func TestResourceFromObject(t *testing.T) {
	assert.Equal(t, RuntimePipelineDir, ResourceFromObject(&kubekeyv1.Pipeline{}))
	assert.Equal(t, RuntimePipelineDir, ResourceFromObject(&kubekeyv1.PipelineList{}))
	assert.Equal(t, RuntimeConfigDir, ResourceFromObject(&kubekeyv1.Config{}))
	assert.Equal(t, RuntimeConfigDir, ResourceFromObject(&kubekeyv1.ConfigList{}))
	assert.Equal(t, RuntimeInventoryDir, ResourceFromObject(&kubekeyv1.Inventory{}))
	assert.Equal(t, RuntimeInventoryDir, ResourceFromObject(&kubekeyv1.InventoryList{}))
	assert.Equal(t, RuntimePipelineTaskDir, ResourceFromObject(&kubekeyv1alpha1.Task{}))
	assert.Equal(t, RuntimePipelineTaskDir, ResourceFromObject(&kubekeyv1alpha1.TaskList{}))
	// unknown kinds fall through to ""
	assert.Equal(t, "", ResourceFromObject(&unstructured.Unstructured{}))
}

128
pkg/const/workdir.go Normal file
View File

@ -0,0 +1,128 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package _const
/** a kubekey workdir like that:
workdir/
|-- projects/
| |-- ansible-project1/
| | |-- playbooks/
| | |-- roles/
| | | |-- roleName/
| | | | |-- tasks/
| | | | | |-- main.yml
| | | | |-- defaults/
| | | | | |-- main.yml
| | | | |-- templates/
| | | | |-- files/
| |
| |-- ansible-project2/
| |
|
|-- runtime/
| |-- namespace/
| | |-- pipelines/
| | | |-- pipelineName/
| | | | |-- pipeline.yaml
| | | | |-- variable/
| | | | | |-- location.json
| | | | | |-- hostname.json
| | |-- tasks/
| | | |-- taskName/
| | | | |-- task.yaml
| | |-- configs/
| | | |-- configName/
| | | | |-- config.yaml
| | |-- inventories/
| | | |-- inventoryName/
| | | | |-- inventory.yaml
*/
// workDir is the user-specified working directory. By default, it is the same
// as the directory where the kubekey command is executed.
var workDir string

// Layout under <workDir>/projects (one sub-directory per Ansible project).
const (
	// ProjectDir is a fixed directory name under workdir, used to store the Ansible projects.
	ProjectDir = "projects"
	// ProjectPlaybooksDir is a fixed directory name under an ansible-project, used to store executable playbook files.
	ProjectPlaybooksDir = "playbooks"
	// ProjectRolesDir is a fixed directory name under an ansible-project, used to store the roles a playbook needs.
	ProjectRolesDir = "roles"
	// ProjectRolesTasksDir is a fixed directory name under a role, used to store the tasks the role runs.
	ProjectRolesTasksDir = "tasks"
	// ProjectRolesTasksMainFile is the base name of the entry task file under tasks;
	// it always runs when the role runs. Supports *.yaml or *.yml extensions.
	ProjectRolesTasksMainFile = "main"
	// ProjectRolesDefaultsDir is a fixed directory name under a role, holding the role's default variables.
	ProjectRolesDefaultsDir = "defaults"
	// ProjectRolesDefaultsMainFile is the base name of the defaults file under defaults. Supports *.yaml or *.yml extensions.
	ProjectRolesDefaultsMainFile = "main"
	// ProjectRolesTemplateDir is a fixed directory name under a role, used to store templates tasks need.
	ProjectRolesTemplateDir = "templates"
	// ProjectRolesFilesDir is a fixed directory name under a role, used to store files tasks need.
	ProjectRolesFilesDir = "files"
)

// Layout under <workDir>/runtime/<namespace> (per-resource runtime data for
// Pipeline, Task, Config and Inventory).
const (
	// RuntimeDir is a fixed directory name under workdir, used to store the runtime data of the current execution.
	RuntimeDir = "runtime"
	// RuntimePipelineDir stores Pipeline resources (<pipelineName>/pipeline.yaml).
	RuntimePipelineDir = "pipelines"
	// RuntimePipelineVariableDir is a fixed directory name under a pipeline directory, used to store task
	// execution parameters: location.json plus one <hostname>.json file per host.
	RuntimePipelineVariableDir = "variable"
	// RuntimePipelineVariableLocationFile is the location variable file under RuntimePipelineVariableDir.
	RuntimePipelineVariableLocationFile = "location.json"
	// RuntimePipelineTaskDir stores Task resources (<taskName>/task.yaml) together with their execution status.
	RuntimePipelineTaskDir = "tasks"
	// RuntimeConfigDir stores Config resources (<configName>/config.yaml).
	RuntimeConfigDir = "configs"
	// RuntimeInventoryDir stores Inventory resources (<inventoryName>/inventory.yaml).
	RuntimeInventoryDir = "inventories"
)

View File

@ -0,0 +1,43 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
// Options bundles the settings shared by the controllers in this package.
type Options struct {
	// ControllerGates selects which controllers run. Each entry is a
	// controller name to enable, "-"+name to disable, or "*" to enable all
	// controllers not explicitly listed (see IsControllerEnabled).
	ControllerGates []string
	// Options carries the underlying controller-runtime controller settings.
	ctrlcontroller.Options
}
// IsControllerEnabled check if a specified controller enabled or not.
// The first gate equal to name enables it and "-"+name disables it; when
// neither appears, a "*" gate anywhere in the list enables the controller.
func (o Options) IsControllerEnabled(name string) bool {
	star := false
	for _, gate := range o.ControllerGates {
		switch gate {
		case name:
			return true
		case "-" + name:
			return false
		case "*":
			star = true
		}
	}
	return star
}

View File

@ -0,0 +1,138 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"path/filepath"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/task"
)
// PipelineReconciler reconciles Pipeline resources: it drives a Pipeline
// through its phases and hands the pipeline's tasks to the task controller.
type PipelineReconciler struct {
	// Client reads and writes Pipeline resources.
	ctrlclient.Client
	// EventRecorder can emit Kubernetes events (not used in the code shown here).
	record.EventRecorder
	// TaskController receives the pipeline's tasks for execution.
	TaskController task.Controller
}
// Reconcile drives a Pipeline through its phase transitions:
// "" -> Pending -> Running, then cleans up runtime data on Failed/Succeed.
func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	klog.Infof("[Pipeline %s] begin reconcile", req.NamespacedName.String())
	defer func() {
		klog.Infof("[Pipeline %s] end reconcile", req.NamespacedName.String())
	}()

	pipeline := &kubekeyv1.Pipeline{}
	if err := r.Client.Get(ctx, req.NamespacedName, pipeline); err != nil {
		if errors.IsNotFound(err) {
			klog.V(5).Infof("[Pipeline %s] pipeline not found", req.NamespacedName.String())
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// a pipeline being deleted needs no further reconciliation
	if pipeline.DeletionTimestamp != nil {
		klog.V(5).Infof("[Pipeline %s] pipeline is deleting", req.NamespacedName.String())
		return ctrl.Result{}, nil
	}

	switch pipeline.Status.Phase {
	case "":
		// freshly created pipeline: move it to Pending
		expected := pipeline.DeepCopy()
		pipeline.Status.Phase = kubekeyv1.PipelinePhasePending
		if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(expected)); err != nil {
			klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
			return ctrl.Result{}, err
		}
	case kubekeyv1.PipelinePhasePending:
		// promote Pending to Running
		expected := pipeline.DeepCopy()
		pipeline.Status.Phase = kubekeyv1.PipelinePhaseRunning
		if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(expected)); err != nil {
			klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
			return ctrl.Result{}, err
		}
	case kubekeyv1.PipelinePhaseRunning:
		return r.dealRunningPipeline(ctx, pipeline)
	case kubekeyv1.PipelinePhaseFailed, kubekeyv1.PipelinePhaseSucceed:
		// terminal phases: attempt runtime-data cleanup
		r.clean(ctx, pipeline)
	}
	return ctrl.Result{}, nil
}
// dealRunningPipeline hands the pipeline's tasks to the task controller.
// A pipeline carrying the pause annotation is left untouched. Whatever
// status changes happen below are patched back on return.
func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *kubekeyv1.Pipeline) (ctrl.Result, error) {
	if _, paused := pipeline.Annotations[kubekeyv1.PauseAnnotation]; paused {
		// if pipeline is paused, do nothing
		klog.V(5).Infof("[Pipeline %s] pipeline is paused", ctrlclient.ObjectKeyFromObject(pipeline))
		return ctrl.Result{}, nil
	}

	base := pipeline.DeepCopy()
	defer func() {
		// persist any status change made below
		if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(base)); err != nil {
			klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
		}
	}()

	if err := r.TaskController.AddTasks(ctx, task.AddTaskOptions{Pipeline: pipeline}); err != nil {
		klog.Errorf("[Pipeline %s] add task error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
		pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed
		pipeline.Status.Reason = fmt.Sprintf("add task to controller failed: %v", err)
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// clean runtime directory
// clean removes on-disk runtime data for a pipeline that reached a terminal
// phase. It only acts when the pipeline Succeeded AND debug mode is off, so
// failed or debugged runs keep their runtime data for inspection.
// NOTE(review): this deletes <workDir>/runtime as a whole, not just this
// pipeline's sub-directory — confirm that concurrent pipelines are not
// expected to share that directory.
func (r *PipelineReconciler) clean(ctx context.Context, pipeline *kubekeyv1.Pipeline) {
	if !pipeline.Spec.Debug && pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed {
		klog.Infof("[Pipeline %s] clean runtimeDir", ctrlclient.ObjectKeyFromObject(pipeline))
		// clean runtime directory
		if err := os.RemoveAll(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir)); err != nil {
			klog.Errorf("clean runtime directory %s error: %v", filepath.Join(_const.GetWorkDir(), _const.RuntimeDir), err)
		}
	}
}
// SetupWithManager sets up the controller with the Manager.
// It is a no-op when the "pipeline" controller gate is disabled.
func (r *PipelineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options Options) error {
	if !options.IsControllerEnabled("pipeline") {
		klog.V(5).Infof("pipeline controller is disabled")
		return nil
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&kubekeyv1.Pipeline{}).
		WithOptions(options.Options).
		Complete(r)
}

View File

@ -0,0 +1,412 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/utils/strings/slices"
ctrl "sigs.k8s.io/controller-runtime"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/v4/pkg/cache"
"github.com/kubesphere/kubekey/v4/pkg/converter"
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
"github.com/kubesphere/kubekey/v4/pkg/modules"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
// TaskReconciler reconciles Task resources: it executes pending/running
// tasks and rolls their results up into the owning Pipeline's status.
type TaskReconciler struct {
	// Client to resources
	ctrlclient.Client
	// VariableCache to store variable
	// (keyed by the owning pipeline's UID — see Reconcile)
	VariableCache cache.Cache
}

// taskReconcileOptions bundles everything one task reconciliation needs:
// the owning pipeline, the task itself, and the pipeline's variable store.
type taskReconcileOptions struct {
	*kubekeyv1.Pipeline
	*kubekeyv1alpha1.Task
	variable.Variable
}
// Reconcile handles a single Task: it resolves the owning Pipeline and the
// pipeline-scoped variable store, dispatches on the task's phase (retry
// Failed tasks with remaining retries, start Pending tasks, execute Running
// tasks), and — via the deferred closure — recomputes and patches the owning
// pipeline's status from all of its tasks before returning.
func (r *TaskReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
	klog.V(5).Infof("[Task %s] start reconcile", request.String())
	defer klog.V(5).Infof("[Task %s] finish reconcile", request.String())
	// get task
	var task = &kubekeyv1alpha1.Task{}
	if err := r.Client.Get(ctx, request.NamespacedName, task); err != nil {
		// note: the error is swallowed (nil returned), so a missing task is
		// not requeued
		klog.Errorf("get task %s error %v", request, err)
		return ctrl.Result{}, nil
	}
	// if task is deleted, skip
	if task.DeletionTimestamp != nil {
		klog.V(5).Infof("[Task %s] task is deleted, skip", request.String())
		return ctrl.Result{}, nil
	}
	// get pipeline: the first ownerReference of kind Pipeline wins
	var pipeline = &kubekeyv1.Pipeline{}
	for _, ref := range task.OwnerReferences {
		if ref.Kind == "Pipeline" {
			if err := r.Client.Get(ctx, types.NamespacedName{Namespace: task.Namespace, Name: ref.Name}, pipeline); err != nil {
				klog.Errorf("[Task %s] get pipeline %s error %v", request.String(), types.NamespacedName{Namespace: task.Namespace, Name: ref.Name}.String(), err)
				if errors.IsNotFound(err) {
					klog.V(4).Infof("[Task %s] pipeline is deleted, skip", request.String())
					return ctrl.Result{}, nil
				}
				return ctrl.Result{}, err
			}
			break
		}
	}
	// a paused pipeline suspends all of its tasks
	if _, ok := pipeline.Annotations[kubekeyv1.PauseAnnotation]; ok {
		klog.V(5).Infof("[Task %s] pipeline is paused, skip", request.String())
		return ctrl.Result{}, nil
	}
	// get variable: one store per pipeline, cached by pipeline UID
	var v variable.Variable
	if vc, ok := r.VariableCache.Get(string(pipeline.UID)); !ok {
		// create new variable
		nv, err := variable.New(variable.Options{
			Ctx:      ctx,
			Client:   r.Client,
			Pipeline: *pipeline,
		})
		if err != nil {
			return ctrl.Result{}, err
		}
		r.VariableCache.Put(string(pipeline.UID), nv)
		v = nv
	} else {
		v = vc.(variable.Variable)
	}
	// before returning, recompute the pipeline status from all tasks owned
	// by this pipeline and patch it back
	defer func() {
		var nsTasks = &kubekeyv1alpha1.TaskList{}
		klog.V(5).Infof("[Task %s] update pipeline %s status", ctrlclient.ObjectKeyFromObject(task).String(), ctrlclient.ObjectKeyFromObject(pipeline).String())
		if err := r.Client.List(ctx, nsTasks, ctrlclient.InNamespace(task.Namespace)); err != nil {
			klog.Errorf("[Task %s] list task error %v", ctrlclient.ObjectKeyFromObject(task).String(), err)
			return
		}
		// filter by ownerReference
		// (iterate backwards so re-slicing removal does not skip elements)
		for i := len(nsTasks.Items) - 1; i >= 0; i-- {
			var hasOwner bool
			for _, ref := range nsTasks.Items[i].OwnerReferences {
				if ref.UID == pipeline.UID && ref.Kind == "Pipeline" {
					hasOwner = true
				}
			}
			if !hasOwner {
				nsTasks.Items = append(nsTasks.Items[:i], nsTasks.Items[i+1:]...)
			}
		}
		cp := pipeline.DeepCopy()
		converter.CalculatePipelineStatus(nsTasks, pipeline)
		if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(cp)); err != nil {
			klog.Errorf("[Task %s] update pipeline %s status error %v", ctrlclient.ObjectKeyFromObject(task).String(), pipeline.Name, err)
		}
	}()
	switch task.Status.Phase {
	case kubekeyv1alpha1.TaskPhaseFailed:
		// retry a failed task while retries remain
		if task.Spec.Retries > task.Status.RestartCount {
			task.Status.Phase = kubekeyv1alpha1.TaskPhasePending
			task.Status.RestartCount++
			if err := r.Client.Update(ctx, task); err != nil {
				klog.Errorf("update task %s error %v", task.Name, err)
				return ctrl.Result{}, err
			}
		}
		return ctrl.Result{}, nil
	case kubekeyv1alpha1.TaskPhasePending:
		// deal pending task
		return r.dealPendingTask(ctx, taskReconcileOptions{
			Pipeline: pipeline,
			Task:     task,
			Variable: v,
		})
	case kubekeyv1alpha1.TaskPhaseRunning:
		// deal running task
		return r.dealRunningTask(ctx, taskReconcileOptions{
			Pipeline: pipeline,
			Task:     task,
			Variable: v,
		})
	default:
		return ctrl.Result{}, nil
	}
}
// dealPendingTask decides whether a Pending task may start. It fetches the
// task's dependency descriptor from the variable store, collects the sibling
// tasks owned by the same pipeline, and asks the dependency strategy for the
// next phase: Pending requeues, Running starts the task, Skipped marks it
// skipped; any other result is an error.
func (r *TaskReconciler) dealPendingTask(ctx context.Context, options taskReconcileOptions) (ctrl.Result, error) {
	// find dependency tasks
	dl, err := options.Variable.Get(variable.DependencyTasks{
		LocationUID: string(options.Task.UID),
	})
	if err != nil {
		klog.Errorf("[Task %s] find dependency error %v", ctrlclient.ObjectKeyFromObject(options.Task).String(), err)
		return ctrl.Result{}, err
	}
	dt, ok := dl.(variable.DependencyTask)
	if !ok {
		klog.Errorf("[Task %s] failed to convert dependency", ctrlclient.ObjectKeyFromObject(options.Task).String())
		return ctrl.Result{}, fmt.Errorf("[Task %s] failed to convert dependency", ctrlclient.ObjectKeyFromObject(options.Task).String())
	}
	var nsTasks = &kubekeyv1alpha1.TaskList{}
	if err := r.Client.List(ctx, nsTasks, ctrlclient.InNamespace(options.Task.Namespace)); err != nil {
		klog.Errorf("[Task %s] list task error %v", ctrlclient.ObjectKeyFromObject(options.Task).String(), err)
		return ctrl.Result{}, err
	}
	// filter by ownerReference: keep only tasks owned by the same pipeline
	// (iterate backwards so re-slicing removal does not skip elements)
	for i := len(nsTasks.Items) - 1; i >= 0; i-- {
		var hasOwner bool
		for _, ref := range nsTasks.Items[i].OwnerReferences {
			if ref.UID == options.Pipeline.UID && ref.Kind == "Pipeline" {
				hasOwner = true
			}
		}
		if !hasOwner {
			nsTasks.Items = append(nsTasks.Items[:i], nsTasks.Items[i+1:]...)
		}
	}
	// resolve the dependency UIDs to the concrete sibling tasks
	var dts []kubekeyv1alpha1.Task
	for _, t := range nsTasks.Items {
		if slices.Contains(dt.Tasks, string(t.UID)) {
			dts = append(dts, t)
		}
	}
	// Based on the results of the executed tasks dependent on, infer the next phase of the current task.
	switch dt.Strategy(dts) {
	case kubekeyv1alpha1.TaskPhasePending:
		// dependencies not finished yet: try again later
		return ctrl.Result{Requeue: true}, nil
	case kubekeyv1alpha1.TaskPhaseRunning:
		// update task phase to running
		options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseRunning
		if err := r.Client.Update(ctx, options.Task); err != nil {
			klog.Errorf("[Task %s] update task to Running error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
		}
		return ctrl.Result{Requeue: true}, nil
	case kubekeyv1alpha1.TaskPhaseSkipped:
		options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseSkipped
		if err := r.Client.Update(ctx, options.Task); err != nil {
			klog.Errorf("[Task %s] update task to Skipped error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
		}
		return ctrl.Result{}, nil
	default:
		// fixed typo in the error message: "TependencyTask" -> "DependencyTask"
		return ctrl.Result{}, fmt.Errorf("unknown DependencyTask.Strategy result. only support: Pending, Running, Skipped")
	}
}
// dealRunningTask executes a task that is in the Running phase. Execution
// errors are logged only: the task's own status records the outcome, so no
// error is propagated to the workqueue.
func (r *TaskReconciler) dealRunningTask(ctx context.Context, options taskReconcileOptions) (ctrl.Result, error) {
	klog.Infof("[Task %s] dealRunningTask begin", ctrlclient.ObjectKeyFromObject(options.Task))
	defer func() {
		klog.Infof("[Task %s] dealRunningTask end, task phase: %s", ctrlclient.ObjectKeyFromObject(options.Task), options.Task.Status.Phase)
	}()

	if err := r.executeTask(ctx, options); err != nil {
		klog.Errorf("[Task %s] execute task error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
	}
	return ctrl.Result{}, nil
}
// executeTask runs the task's module on every target host concurrently and
// aggregates the per-host results into the task status. For each host it
// resolves the location variables, evaluates the "when" condition, renders
// loop items (if any) and dispatches to the module; a non-empty stderr from
// any host marks the task Failed (or Ignored when ignore_errors is set).
// A TaskCondition recording the time window and all host results is always
// appended and the task persisted before returning.
//
// Bug fix: the goroutine closures previously referenced the shared loop
// variable h when building HostMerge.HostNames (pre-Go-1.22 loop-variable
// capture), which could register results under the wrong host. They now use
// the per-iteration copy host throughout.
func (r *TaskReconciler) executeTask(ctx context.Context, options taskReconcileOptions) error {
	cd := kubekeyv1alpha1.TaskCondition{
		StartTimestamp: metav1.Now(),
	}
	defer func() {
		cd.EndTimestamp = metav1.Now()
		options.Task.Status.Conditions = append(options.Task.Status.Conditions, cd)
		if err := r.Client.Update(ctx, options.Task); err != nil {
			klog.Errorf("[Task %s] update task status error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
		}
	}()
	// check task host results
	wg := &wait.Group{}
	dataChan := make(chan kubekeyv1alpha1.TaskHostResult, len(options.Task.Spec.Hosts))
	for _, h := range options.Task.Spec.Hosts {
		host := h // per-iteration copy captured by the goroutine below
		wg.StartWithContext(ctx, func(ctx context.Context) {
			var stdout, stderr string
			defer func() {
				if stderr != "" {
					klog.Errorf("[Task %s] run failed: %s", ctrlclient.ObjectKeyFromObject(options.Task), stderr)
				}
				dataChan <- kubekeyv1alpha1.TaskHostResult{
					Host:   host,
					Stdout: stdout,
					StdErr: stderr,
				}
				if options.Task.Spec.Register != "" {
					puid, err := options.Variable.Get(variable.ParentLocation{LocationUID: string(options.Task.UID)})
					if err != nil {
						klog.Errorf("[Task %s] get location error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
						return
					}
					// set variable to parent location
					// (host, not the shared loop variable, so each goroutine
					// registers its own result)
					if err := options.Variable.Merge(variable.HostMerge{
						HostNames:   []string{host},
						LocationUID: puid.(string),
						Data: variable.VariableData{
							options.Task.Spec.Register: map[string]string{
								"stdout": stdout,
								"stderr": stderr,
							},
						},
					}); err != nil {
						klog.Errorf("[Task %s] register error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
						return
					}
				}
			}()
			lg, err := options.Variable.Get(variable.LocationVars{
				HostName:    host,
				LocationUID: string(options.Task.UID),
			})
			if err != nil {
				stderr = err.Error()
				return
			}
			// check when condition
			if len(options.Task.Spec.When) > 0 {
				ok, err := tmpl.ParseBool(lg.(variable.VariableData), options.Task.Spec.When)
				if err != nil {
					stderr = err.Error()
					return
				}
				if !ok {
					stdout = "skip by when"
					return
				}
			}
			data := variable.Extension2Slice(options.Task.Spec.Loop)
			if len(data) == 0 {
				// no loop: execute the module once
				stdout, stderr = r.executeModule(ctx, options.Task, modules.ExecOptions{
					Args:     options.Task.Spec.Module.Args,
					Host:     host,
					Variable: options.Variable,
					Task:     *options.Task,
					Pipeline: *options.Pipeline,
				})
			} else {
				// loop: render each item, publish it as the "item" variable,
				// then execute the module per item
				for _, item := range data {
					switch item.(type) {
					case string:
						item, err = tmpl.ParseString(lg.(variable.VariableData), item.(string))
						if err != nil {
							stderr = err.Error()
							return
						}
					case variable.VariableData:
						for k, v := range item.(variable.VariableData) {
							sv, err := tmpl.ParseString(lg.(variable.VariableData), v.(string))
							if err != nil {
								stderr = err.Error()
								return
							}
							item.(map[string]any)[k] = sv
						}
					default:
						stderr = "unknown loop vars, only support string or map[string]string"
						return
					}
					// set item to runtime variable
					// (error was previously dropped; now handled like the
					// other Merge call above)
					if err := options.Variable.Merge(variable.HostMerge{
						HostNames:   []string{host},
						LocationUID: string(options.Task.UID),
						Data: variable.VariableData{
							"item": item,
						},
					}); err != nil {
						stderr = err.Error()
						return
					}
					stdout, stderr = r.executeModule(ctx, options.Task, modules.ExecOptions{
						Args:     options.Task.Spec.Module.Args,
						Host:     host,
						Variable: options.Variable,
						Task:     *options.Task,
						Pipeline: *options.Pipeline,
					})
				}
			}
		})
	}
	// close the channel once all hosts finished, so the collection loop ends
	go func() {
		wg.Wait()
		close(dataChan)
	}()
	options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseSuccess
	for data := range dataChan {
		if data.StdErr != "" {
			if options.Task.Spec.IgnoreError {
				options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseIgnored
			} else {
				options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseFailed
				options.Task.Status.FailedDetail = append(options.Task.Status.FailedDetail, kubekeyv1alpha1.TaskFailedDetail{
					Host:   data.Host,
					Stdout: data.Stdout,
					StdErr: data.StdErr,
				})
			}
		}
		cd.HostResults = append(cd.HostResults, data)
	}
	return nil
}
// executeModule renders the task's failed_when condition and, unless it
// fires, dispatches to the module implementation registered under the task's
// module name. It returns the module's (stdout, stderr) pair; condition
// lookup/parse problems are reported through stderr.
func (r *TaskReconciler) executeModule(ctx context.Context, task *kubekeyv1alpha1.Task, opts modules.ExecOptions) (string, string) {
	vars, err := opts.Variable.Get(variable.LocationVars{
		HostName:    opts.Host,
		LocationUID: string(task.UID),
	})
	if err != nil {
		klog.Errorf("[Task %s] get location variable error %v", ctrlclient.ObjectKeyFromObject(task), err)
		return "", err.Error()
	}

	// check failed when condition
	if len(task.Spec.FailedWhen) > 0 {
		failed, err := tmpl.ParseBool(vars.(variable.VariableData), task.Spec.FailedWhen)
		if err != nil {
			klog.Errorf("[Task %s] validate FailedWhen condition error %v", ctrlclient.ObjectKeyFromObject(task), err)
			return "", err.Error()
		}
		if failed {
			return "", "failed by failedWhen"
		}
	}

	return modules.FindModule(task.Spec.Module.Name)(ctx, opts)
}

432
pkg/converter/converter.go Normal file
View File

@ -0,0 +1,432 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package converter
import (
"context"
"fmt"
"io/fs"
"math"
"path/filepath"
"strconv"
"strings"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/project"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
// MarshalPlaybook kkcorev1.Playbook from a playbook file
// It loads the playbook at pbPath from baseFS and expands it into a
// validated kkcorev1.Playbook: imported playbooks are inlined, role
// references are resolved to blocks, and include_tasks files are expanded.
func MarshalPlaybook(baseFS fs.FS, pbPath string) (*kkcorev1.Playbook, error) {
	// convert playbook to kkcorev1.Playbook
	pb := &kkcorev1.Playbook{}
	// inline the playbook (and any import_playbook references) into pb
	if err := loadPlaybook(baseFS, pbPath, pb); err != nil {
		klog.Errorf(" load playbook with include %s failed: %v", pbPath, err)
		return nil, err
	}
	// convertRoles: resolve role references into blocks and default variables
	if err := convertRoles(baseFS, pbPath, pb); err != nil {
		klog.Errorf("convertRoles error %v", err)
		return nil, err
	}
	// expand include_tasks files into nested blocks
	if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil {
		klog.Errorf("convertIncludeTasks error %v", err)
		return nil, err
	}
	if err := pb.Validate(); err != nil {
		klog.Errorf("validate playbook %s failed: %v", pbPath, err)
		return nil, err
	}
	return pb, nil
}
// loadPlaybook with include_playbook. Join all playbooks into one playbook
// It reads pbPath from baseFS, recursively inlines any import_playbook
// references (depth-first, so imported plays come first), resolves each
// play's role task blocks, and appends the plays to pb.
func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
	// baseDir is the local ansible project dir which playbook belong to
	pbData, err := fs.ReadFile(baseFS, pbPath)
	if err != nil {
		klog.Errorf("read playbook %s failed: %v", pbPath, err)
		return err
	}
	var plays []kkcorev1.Play
	if err := yaml.Unmarshal(pbData, &plays); err != nil {
		klog.Errorf("unmarshal playbook %s failed: %v", pbPath, err)
		return err
	}
	for _, p := range plays {
		if p.ImportPlaybook != "" {
			importPlaybook := project.GetPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook)
			if importPlaybook == "" {
				// bug fix: report the unresolved reference (p.ImportPlaybook);
				// importPlaybook is provably "" on this path, so the old
				// message always printed an empty name.
				return fmt.Errorf("cannot found import playbook %s", p.ImportPlaybook)
			}
			if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
				return err
			}
		}
		// fill block in roles
		// NOTE(review): convertRoles performs this same resolution again after
		// loading — presumably harmless duplication; confirm before removing.
		for i, r := range p.Roles {
			roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
			if roleBase == "" {
				return fmt.Errorf("cannot found role %s", r.Role)
			}
			mainTask := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
			if mainTask == "" {
				return fmt.Errorf("cannot found main task for role %s", r.Role)
			}
			rdata, err := fs.ReadFile(baseFS, mainTask)
			if err != nil {
				klog.Errorf("read role %s failed: %v", mainTask, err)
				return err
			}
			var blocks []kkcorev1.Block
			if err := yaml.Unmarshal(rdata, &blocks); err != nil {
				klog.Errorf("unmarshal role %s failed: %v", r.Role, err)
				return err
			}
			p.Roles[i].Block = blocks
		}
		pb.Play = append(pb.Play, p)
	}
	return nil
}
// convertRoles convert roleName to block
// For every role of every play: the role's main task file becomes the role's
// block list, and its defaults file (when present) becomes the role's
// default variables.
func convertRoles(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
	for pi, p := range pb.Play {
		// use distinct indices so the play index is not shadowed by the role index
		for ri, r := range p.Roles {
			roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
			if roleBase == "" {
				return fmt.Errorf("cannot found role %s", r.Role)
			}
			// load block
			mainTask := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
			if mainTask == "" {
				return fmt.Errorf("cannot found main task for role %s", r.Role)
			}
			rdata, err := fs.ReadFile(baseFS, mainTask)
			if err != nil {
				klog.Errorf("read role %s failed: %v", mainTask, err)
				return err
			}
			var blocks []kkcorev1.Block
			if err := yaml.Unmarshal(rdata, &blocks); err != nil {
				klog.Errorf("unmarshal role %s failed: %v", r.Role, err)
				return err
			}
			p.Roles[ri].Block = blocks

			// load defaults (optional)
			mainDefault := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesDefaultsDir, _const.ProjectRolesDefaultsMainFile))
			if mainDefault != "" {
				mainData, err := fs.ReadFile(baseFS, mainDefault)
				if err != nil {
					klog.Errorf("read defaults variable for role %s error: %v", r.Role, err)
					return err
				}
				var vars variable.VariableData
				if err := yaml.Unmarshal(mainData, &vars); err != nil {
					klog.Errorf("unmarshal defaults variable for role %s error: %v", r.Role, err)
					return err
				}
				p.Roles[ri].Vars = vars
			}
		}
		pb.Play[pi] = p
	}
	return nil
}
// convertIncludeTasks from file to blocks
// It expands include_tasks references everywhere in the playbook: in each
// play's pre_tasks, tasks and post_tasks (resolved relative to the project
// base, two directories above the playbook file), and in each role's blocks
// (resolved relative to that role's tasks directory).
func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
	// assumes the <project>/playbooks/<file> layout: the project base is two
	// levels above the playbook file
	var pbBase = filepath.Dir(filepath.Dir(pbPath))
	for _, play := range pb.Play {
		if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil {
			return err
		}
		if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil {
			return err
		}
		if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil {
			return err
		}
		for _, r := range play.Roles {
			roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
			if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil {
				return err
			}
		}
	}
	return nil
}
// fileToBlock expands include_tasks references in place: every block naming
// an include file has that file read from baseFS (relative to baseDir),
// unmarshalled into child blocks, and attached as its Block list. Recursion
// then expands includes inside Block, Rescue and Always as well.
func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkcorev1.Block) error {
	for i := range blocks {
		b := blocks[i]
		if b.IncludeTasks != "" {
			include := filepath.Join(baseDir, b.IncludeTasks)
			data, err := fs.ReadFile(baseFS, include)
			if err != nil {
				klog.Errorf("readFile %s error %v", include, err)
				return err
			}
			var children []kkcorev1.Block
			if err := yaml.Unmarshal(data, &children); err != nil {
				klog.Errorf("unmarshal data %s to []Block error %v", include, err)
				return err
			}
			b.Block = children
			blocks[i] = b
		}
		if err := fileToBlock(baseFS, baseDir, b.Block); err != nil {
			return err
		}
		if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil {
			return err
		}
		if err := fileToBlock(baseFS, baseDir, b.Always); err != nil {
			return err
		}
	}
	return nil
}
// MarshalBlock marshal block to task
// It converts a playbook block into a Task resource owned by owner. The
// execution context (hosts, role, task UID, accumulated when-conditions) is
// carried in ctx under the _const.CtxBlock* keys.
func MarshalBlock(ctx context.Context, block kkcorev1.Block, owner ctrlclient.Object) *kubekeyv1alpha1.Task {
	var role string
	if v := ctx.Value(_const.CtxBlockRole); v != nil {
		role = v.(string)
	}
	// read the target hosts defensively: a missing context value yields an
	// empty host list instead of a panic (consistent with the other keys)
	var hosts []string
	if v := ctx.Value(_const.CtxBlockHosts); v != nil {
		hosts = v.([]string)
	}
	if block.RunOnce && len(hosts) > 0 { // if run_once. execute on the first host only
		// guard len(hosts) > 0: slicing an empty list would panic
		hosts = hosts[:1]
	}
	var uid string
	if v := ctx.Value(_const.CtxBlockTaskUID); v != nil {
		uid = v.(string)
	}
	var when []string
	if v := ctx.Value(_const.CtxBlockWhen); v != nil {
		when = v.([]string)
	}
	task := &kubekeyv1alpha1.Task{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Task",
			APIVersion: "kubekey.kubesphere.io/v1alpha1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:              fmt.Sprintf("%s-%s", owner.GetName(), rand.String(12)),
			Namespace:         owner.GetNamespace(),
			UID:               types.UID(uid),
			CreationTimestamp: metav1.Now(),
			Annotations: map[string]string{
				kubekeyv1alpha1.TaskAnnotationRole: role,
			},
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion:         owner.GetObjectKind().GroupVersionKind().GroupVersion().String(),
					Kind:               owner.GetObjectKind().GroupVersionKind().Kind,
					Name:               owner.GetName(),
					UID:                owner.GetUID(),
					Controller:         pointer.Bool(true),
					BlockOwnerDeletion: pointer.Bool(true),
				},
			},
		},
		Spec: kubekeyv1alpha1.KubeKeyTaskSpec{
			Name:        block.Name,
			Hosts:       hosts,
			IgnoreError: block.IgnoreErrors,
			Retries:     block.Retries,
			//Loop: block.Loop,
			When:       when,
			FailedWhen: block.FailedWhen.Data,
			Register:   block.Register,
		},
		Status: kubekeyv1alpha1.TaskStatus{
			Phase: kubekeyv1alpha1.TaskPhasePending,
		},
	}
	if len(block.Loop) != 0 {
		data, err := json.Marshal(block.Loop)
		if err != nil {
			klog.Errorf("marshal loop %v error: %v", block.Loop, err)
		} else {
			// only attach the loop payload when it marshalled successfully
			task.Spec.Loop = runtime.RawExtension{Raw: data}
		}
	}
	return task
}
// GroupHostBySerial splits hosts into consecutive batches according to serial.
// Each serial entry may be an int, a numeric string (e.g. "2"), or a
// percentage string (e.g. "30%", rounded up against the total host count).
// When the serial entries are exhausted before every host is grouped, the
// last entry is reused until all hosts are assigned. An empty serial yields
// a single batch containing all hosts. A non-positive batch size or an
// unparsable entry is an error.
func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
	if len(serial) == 0 {
		return [][]string{hosts}, nil
	}
	result := make([][]string, 0, len(serial))
	sp := 0
	// appendBatch appends the next batch sized by s and reports whether all
	// remaining hosts have been consumed.
	appendBatch := func(s any) (bool, error) {
		n, err := serialBatchSize(s, len(hosts))
		if err != nil {
			return false, err
		}
		if sp+n >= len(hosts) {
			// Fewer hosts left than the batch size: take the remainder.
			result = append(result, hosts[sp:])
			sp = len(hosts)
			return true, nil
		}
		result = append(result, hosts[sp:sp+n])
		sp += n
		return false, nil
	}
	for _, s := range serial {
		done, err := appendBatch(s)
		if err != nil {
			return nil, err
		}
		if done {
			return result, nil
		}
	}
	// serial did not cover all hosts: keep reusing the last entry.
	// serialBatchSize guarantees a positive batch size, so this terminates.
	last := serial[len(serial)-1]
	for sp < len(hosts) {
		if _, err := appendBatch(last); err != nil {
			return nil, err
		}
	}
	return result, nil
}

// serialBatchSize resolves one serial entry into a positive batch size.
// total is the total number of hosts, used to resolve percentage entries.
func serialBatchSize(s any, total int) (int, error) {
	var n int
	switch v := s.(type) {
	case int:
		n = v
	case string:
		if strings.HasSuffix(v, "%") {
			p, err := strconv.Atoi(strings.TrimSuffix(v, "%"))
			if err != nil {
				return 0, fmt.Errorf("parsing serial percent %q: %w", v, err)
			}
			// Round up so a small percentage of a small host set is never 0.
			n = int(math.Ceil(float64(total*p) / 100.0))
		} else {
			b, err := strconv.Atoi(v)
			if err != nil {
				return 0, fmt.Errorf("parsing serial %q: %w", v, err)
			}
			n = b
		}
	default:
		return 0, fmt.Errorf("unknown serial type %T: only support int or percent", s)
	}
	if n <= 0 {
		// A zero/negative batch would previously loop forever when the last
		// serial entry was reused; reject it explicitly.
		return 0, fmt.Errorf("invalid serial value %v: batch size must be positive", s)
	}
	return n, nil
}
// CalculatePipelineStatus aggregates the phases of the pipeline's tasks into
// pipeline.Status: it fills TaskResult counters, collects per-host failure
// details for exhausted tasks, and flips the pipeline phase to Failed or
// Succeed accordingly. Pipelines that are not Running are left untouched.
func CalculatePipelineStatus(nsTasks *kubekeyv1alpha1.TaskList, pipeline *kubekeyv1.Pipeline) {
	// Only a running pipeline can transition here.
	if pipeline.Status.Phase != kubekeyv1.PipelinePhaseRunning {
		return
	}
	pipeline.Status.TaskResult = kubekeyv1.PipelineTaskResult{Total: len(nsTasks.Items)}
	var failedDetail []kubekeyv1.PipelineFailedDetail
	for _, task := range nsTasks.Items {
		switch task.Status.Phase {
		case kubekeyv1alpha1.TaskPhaseSuccess:
			pipeline.Status.TaskResult.Success++
		case kubekeyv1alpha1.TaskPhaseIgnored:
			pipeline.Status.TaskResult.Ignored++
		case kubekeyv1alpha1.TaskPhaseSkipped:
			pipeline.Status.TaskResult.Skipped++
		case kubekeyv1alpha1.TaskPhaseFailed:
			// A failed task only counts once its retries are exhausted;
			// otherwise it is still eligible to be re-run.
			if task.Spec.Retries > task.Status.RestartCount {
				continue
			}
			var hostReason []kubekeyv1.PipelineFailedDetailHost
			for _, fd := range task.Status.FailedDetail {
				hostReason = append(hostReason, kubekeyv1.PipelineFailedDetailHost{
					Host:   fd.Host,
					Stdout: fd.Stdout,
					StdErr: fd.StdErr,
				})
			}
			failedDetail = append(failedDetail, kubekeyv1.PipelineFailedDetail{
				Task:  task.Name,
				Hosts: hostReason,
			})
			pipeline.Status.TaskResult.Failed++
		}
	}
	switch {
	case pipeline.Status.TaskResult.Failed != 0:
		pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed
		pipeline.Status.Reason = "task failed"
		pipeline.Status.FailedDetail = failedDetail
	case pipeline.Status.TaskResult.Total == pipeline.Status.TaskResult.Success+pipeline.Status.TaskResult.Ignored+pipeline.Status.TaskResult.Skipped:
		pipeline.Status.Phase = kubekeyv1.PipelinePhaseSucceed
	}
}

View File

@ -0,0 +1,214 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package converter
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
)
// TestMarshalPlaybook checks that a playbook YAML file under testdata (plus
// the role it references) unmarshals into the expected kkcorev1.Playbook:
// play ordering, pre_tasks/tasks/post_tasks placement, nested blocks, and
// blocks contributed by roles.
func TestMarshalPlaybook(t *testing.T) {
	testcases := []struct {
		name string
		file string
		// NOTE(review): "except" looks like a typo for "expect"/"want" — confirm
		// and rename together with TestGroupHostBySerial's fields.
		except *kkcorev1.Playbook
	}{
		{
			name: "marshal playbook",
			file: "playbooks/playbook1.yaml",
			// Unkeyed Playbook literal: the single field is the play list.
			except: &kkcorev1.Playbook{[]kkcorev1.Play{
				{
					Base:     kkcorev1.Base{Name: "play1"},
					PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}},
					// role1's block is loaded from the role directory, not the
					// playbook file itself.
					Roles: []kkcorev1.Role{
						{kkcorev1.RoleInfo{
							Role: "role1",
							Block: []kkcorev1.Block{
								{
									BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "role1 | block1"}},
									// UnknownFiled (sic, project-side spelling) carries the
									// module payload that has no dedicated struct field.
									Task: kkcorev1.Task{UnknownFiled: map[string]any{
										"debug": map[string]any{
											"msg": "echo \"hello world\"",
										},
									}},
								},
							},
						}},
					},
					Handlers: nil,
					PreTasks: []kkcorev1.Block{
						{
							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | pre_block1"}},
							Task: kkcorev1.Task{UnknownFiled: map[string]any{
								"debug": map[string]any{
									"msg": "echo \"hello world\"",
								},
							}},
						},
					},
					PostTasks: []kkcorev1.Block{
						{
							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | post_block1"}},
							Task: kkcorev1.Task{UnknownFiled: map[string]any{
								"debug": map[string]any{
									"msg": "echo \"hello world\"",
								},
							}},
						},
					},
					Tasks: []kkcorev1.Block{
						{
							// A "block:" entry nests child blocks instead of a task payload.
							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1"}},
							BlockInfo: kkcorev1.BlockInfo{Block: []kkcorev1.Block{
								{
									BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block1"}},
									Task: kkcorev1.Task{UnknownFiled: map[string]any{
										"debug": map[string]any{
											"msg": "echo \"hello world\"",
										},
									}},
								},
								{
									BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block2"}},
									Task: kkcorev1.Task{UnknownFiled: map[string]any{
										"debug": map[string]any{
											"msg": "echo \"hello world\"",
										},
									}},
								},
							}},
						},
						{
							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block2"}},
							Task: kkcorev1.Task{UnknownFiled: map[string]any{
								"debug": map[string]any{
									"msg": "echo \"hello world\"",
								},
							}},
						},
					},
				},
				{
					Base:     kkcorev1.Base{Name: "play2"},
					PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}},
					Tasks: []kkcorev1.Block{
						{
							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play2 | block1"}},
							Task: kkcorev1.Task{UnknownFiled: map[string]any{
								"debug": map[string]any{
									"msg": "echo \"hello world\"",
								},
							}},
						},
					},
				},
			}},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// Playbook paths are resolved relative to the testdata directory.
			pb, err := MarshalPlaybook(os.DirFS("testdata"), tc.file)
			assert.NoError(t, err)
			assert.Equal(t, tc.except, pb)
		})
	}
}
// TestGroupHostBySerial verifies that hosts are batched correctly for int,
// numeric-string and percentage serial entries, and that unsupported serial
// values produce an error.
func TestGroupHostBySerial(t *testing.T) {
	hosts := []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7"}
	testcases := []struct {
		name       string
		serial     []any
		wantResult [][]string
		wantErr    bool
	}{
		{
			name:   "group host by 1",
			serial: []any{1},
			wantResult: [][]string{
				{"h1"},
				{"h2"},
				{"h3"},
				{"h4"},
				{"h5"},
				{"h6"},
				{"h7"},
			},
		},
		{
			name:   "group host by serial 2",
			serial: []any{2},
			wantResult: [][]string{
				{"h1", "h2"},
				{"h3", "h4"},
				{"h5", "h6"},
				{"h7"},
			},
		},
		{
			// The last serial entry (2) is reused for the remaining hosts.
			name:   "group host by serial 1 and 2",
			serial: []any{1, 2},
			wantResult: [][]string{
				{"h1"},
				{"h2", "h3"},
				{"h4", "h5"},
				{"h6", "h7"},
			},
		},
		{
			// 40% of 7 hosts rounds up to batches of 3.
			name:   "group host by serial 1 and 40%",
			serial: []any{"1", "40%"},
			wantResult: [][]string{
				{"h1"},
				{"h2", "h3", "h4"},
				{"h5", "h6", "h7"},
			},
		},
		{
			name:    "group host by unSupport serial type",
			serial:  []any{1.1},
			wantErr: true,
		},
		{
			name:    "group host by unSupport serial value",
			serial:  []any{"%1.1%"},
			wantErr: true,
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := GroupHostBySerial(hosts, tc.serial)
			if tc.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.wantResult, result)
		})
	}
}

View File

@ -0,0 +1,30 @@
- name: play1
hosts: localhost
pre_tasks:
- name: play1 | pre_block1
debug:
msg: echo "hello world"
tasks:
- name: play1 | block1
block:
- name: play1 | block1 | block1
debug:
msg: echo "hello world"
- name: play1 | block1 | block2
debug:
msg: echo "hello world"
- name: play1 | block2
debug:
msg: echo "hello world"
post_tasks:
- name: play1 | post_block1
debug:
msg: echo "hello world"
roles:
- role1
- name: play2
hosts: localhost
tasks:
- name: play2 | block1
debug:
msg: echo "hello world"

Some files were not shown because too many files have changed in this diff Show More