This commit is contained in:
zhaojing1987 2023-10-13 11:46:15 +08:00
parent be00226fcb
commit c0adf76eff
115 changed files with 7325 additions and 83 deletions

View File

@ -1,17 +1,18 @@
from fastapi import APIRouter, Query,Path from fastapi import APIRouter, Query,Path,Response
from src.schemas.appAvailable import AppAvailableResponse from src.schemas.appAvailable import AppAvailableResponse
from src.schemas.appCatalog import AppCatalogResponse from src.schemas.appCatalog import AppCatalogResponse
from src.schemas.appInstall import appInstall from src.schemas.appInstall import appInstall
from src.schemas.appResponse import AppResponse from src.schemas.appResponse import AppResponse
from src.schemas.errorResponse import ErrorResponse from src.schemas.errorResponse import ErrorResponse
from src.services.app_manager import AppManger from src.services.app_manager import AppManger
from src.core.logger import logger
router = APIRouter() router = APIRouter()
@router.get( @router.get(
"/apps/catalog/{locale}", "/apps/catalog/{locale}",
summary="List Catalogs", summary="List Catalogsaaaaaaaaaaaaa",
description="List all app's catalogs", description="List all app's catalogs aaaa",
responses={ responses={
200: {"model": list[AppCatalogResponse]}, 200: {"model": list[AppCatalogResponse]},
400: {"model": ErrorResponse}, 400: {"model": ErrorResponse},

View File

@ -2,20 +2,20 @@
[nginx_proxy_manager] [nginx_proxy_manager]
base_url = http://websoft9-proxy:81/api base_url = http://websoft9-proxy:81/api
user_name = help@websoft9.com user_name = help@websoft9.com
user_pwd = L2l6nuFu2jL0Rm8L user_pwd = E3cMekM72yPp1pUX
nike_name = admin nike_name = admin
#The config for gitea #The config for gitea
[gitea] [gitea]
base_url = http://websoft9-git:3000/api/v1 base_url = http://websoft9-git:3000/api/v1
user_name = websoft9 user_name = websoft9
user_pwd = XATY3rEgQ1bA user_pwd = RilXTqVRvEvU
#The config for portainer #The config for portainer
[portainer] [portainer]
base_url = http://websoft9-deployment:9000/api base_url = http://websoft9-deployment:9000/api
user_name = admin user_name = admin
user_pwd = fn9BrJhd)An8T8Ez user_pwd = k0(t9g:Vnbs)<!ux
#The path of docker library #The path of docker library
[docker_library] [docker_library]
@ -27,7 +27,7 @@ path = /websoft9/media/json/
#The value of api_key #The value of api_key
[api_key] [api_key]
key = key = ee30700022f9d1b99e1112a28e6b008be2c2817ec505306120d8a7ea45fb6d5f
#The config for cockpit #The config for cockpit
[cockpit] [cockpit]

View File

@ -38,6 +38,7 @@ async def verify_key(request: Request, api_key_header: str = Security(api_key_he
) )
if api_key_header != API_KEY: if api_key_header != API_KEY:
logger.error(f"Invalid API Key: {api_key_header}")
raise CustomException( raise CustomException(
status_code=400, status_code=400,
message="Invalid Request", message="Invalid Request",

View File

@ -5,7 +5,17 @@ from src.core.exception import CustomException
from src.core.logger import logger from src.core.logger import logger
class APIKeyManager: class APIKeyManager:
"""
A class for managing API keys.
Methods:
generate_key: Generate a new API key.
delete_key: Delete the API key.
"""
def generate_key(self): def generate_key(self):
"""
Generate a new API key.
"""
try: try:
base = secrets.token_urlsafe(32) base = secrets.token_urlsafe(32)
key = hashlib.sha256(base.encode()).hexdigest() key = hashlib.sha256(base.encode()).hexdigest()
@ -16,8 +26,11 @@ class APIKeyManager:
raise CustomException() raise CustomException()
def delete_key(self): def delete_key(self):
"""
Delete the API key.
"""
try: try:
ConfigManager().remove_value("api_key", "key") ConfigManager().remove_value("api_key", "key")
except Exception as e: except Exception as e:
logger.error("Error deleting API key"+e) logger.error("Error deleting API key"+str(e))
raise CustomException() raise CustomException()

View File

@ -21,6 +21,7 @@ from src.utils.password_generator import PasswordGenerator
class AppManger: class AppManger:
def get_catalog_apps(self,locale:str): def get_catalog_apps(self,locale:str):
logger.access(f"Get catalog apps: {locale}")
try: try:
# Get the app media path # Get the app media path
base_path = ConfigManager().get_value("app_media", "path") base_path = ConfigManager().get_value("app_media", "path")

View File

@ -64,15 +64,19 @@ class GitManager:
logger.error(f"Invalid repo path: {self.local_path}") logger.error(f"Invalid repo path: {self.local_path}")
raise CustomException() raise CustomException()
# Parse the remote URL. try:
parsed = urlparse(remote_url) # Parse the remote URL.
parsed = urlparse(remote_url)
# Get the network location. # Get the network location.
auth_netloc = f"{user_name}:{user_pwd}@{parsed.netloc}" auth_netloc = f"{user_name}:{user_pwd}@{parsed.netloc}"
# Create a new ParseResult with the updated network location # Create a new ParseResult with the updated network location
auth_parsed = parsed._replace(netloc=auth_netloc) auth_parsed = parsed._replace(netloc=auth_netloc)
auth_repo_url = urlunparse(auth_parsed) auth_repo_url = urlunparse(auth_parsed)
except Exception as e:
logger.error(f"Failed to parse remote URL {remote_url}: {str(e)}")
raise CustomException()
# Set remote origin URL. # Set remote origin URL.
try: try:

View File

@ -76,6 +76,15 @@ class ProxyManager:
raise CustomException() raise CustomException()
def check_proxy_host_exists(self,domain_names: list[str]): def check_proxy_host_exists(self,domain_names: list[str]):
"""
Check proxy host is exist
Args:
domain_names (list[str]): Domain names
Returns:
bool: True if proxy host is exist, False if proxy host is not exist, raise exception if error
"""
response = self.nginx.get_proxy_hosts() response = self.nginx.get_proxy_hosts()
if response.status_code == 200: if response.status_code == 200:
proxy_hosts = response.json() proxy_hosts = response.json()
@ -101,6 +110,18 @@ class ProxyManager:
forward_port=forward_port, forward_port=forward_port,
advanced_config=advanced_config, advanced_config=advanced_config,
) )
# if response.status_code == 201:
# return response.json()
# elif response.status_code == 500:
# logger.error(f"Create proxy for app:{forward_host} error:{response.status_code}:{response.text}")
# raise CustomException()
# else:
# logger.error(f"Create proxy for app:{forward_host} error:{response.status_code}:{response.text}")
# raise CustomException(
# status_code=400,
# message=f"Invalid Request",
# details=f"{json.loads(response.text).get('error',{}).get('message','Unknown Error')}"
# )
if response.status_code != 201: if response.status_code != 201:
logger.error(f"Create proxy for app:{forward_host} error:{response.status_code}:{response.text}") logger.error(f"Create proxy for app:{forward_host} error:{response.status_code}:{response.text}")
raise CustomException() raise CustomException()

View File

@ -1,68 +0,0 @@
import requests
from src.core.config import ConfigManager
class PublicIPGetter:
"""
A utility class to retrieve a valid IPv4 address.
Attributes:
url_list (list[str]): A list of URLs to retrieve the response from.
Methods:
get_ip_address(): Retrieves and returns a valid IPv4 address.
"""
def __init__(self):
"""
Initializes the PublicIPGetter class.
"""
self.url_list = [url.strip() for url in ConfigManager().get_value("public_ip_url_list", "url_list").split(',')]
def get_ip_address(self):
"""
Retrieves and returns a valid IPv4 address from the list of URLs.
Returns:
str: The valid IPv4 address if found, otherwise None.
"""
for url in self.url_list:
ip = self._get_ip(url)
if ip and self._is_valid_ipv4(ip):
return ip
return None
def _get_ip(self, url):
"""
Retrieves and returns the response from the given URL.
Args:
url (str): The URL to retrieve the response from.
Returns:
str: The response from the given URL if found, otherwise None.
"""
try:
response = requests.get(url, timeout=2)
if response.status_code == 200:
return response.text.strip()
except requests.RequestException:
pass
return None
def _is_valid_ipv4(self, ip) -> bool:
"""
Checks if the given string is a valid IPv4 address.
Args:
ip (str): The string to check.
Returns:
bool: True if the string is a valid IPv4 address, otherwise False.
"""
parts = ip.split('.')
return len(parts) == 4 and all(0 <= int(part) < 256 for part in parts)
if __name__ == "__main__":
ip = PublicIPGetter().get_ip_address()
print(ip)

Binary file not shown.

After

Width:  |  Height:  |  Size: 665 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 628 B

View File

@ -0,0 +1,16 @@
html {
box-sizing: border-box;
overflow: -moz-scrollbars-vertical;
overflow-y: scroll;
}
*,
*:before,
*:after {
box-sizing: inherit;
}
body {
margin: 0;
background: #fafafa;
}

View File

@ -0,0 +1,19 @@
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Swagger UI</title>
<link rel="stylesheet" type="text/css" href="./swagger-ui.css" />
<link rel="stylesheet" type="text/css" href="index.css" />
<link rel="icon" type="image/png" href="./favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="./favicon-16x16.png" sizes="16x16" />
</head>
<body>
<div id="swagger-ui"></div>
<script src="./swagger-ui-bundle.js" charset="UTF-8"> </script>
<script src="./swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
<script src="./swagger-initializer.js" charset="UTF-8"> </script>
</body>
</html>

View File

@ -0,0 +1,79 @@
<!doctype html>
<html lang="en-US">
<head>
<title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
'use strict';
function run () {
var oauth2 = window.opener.swaggerUIRedirectOauth2;
var sentState = oauth2.state;
var redirectUrl = oauth2.redirectUrl;
var isValid, qp, arr;
if (/code|token|error/.test(window.location.hash)) {
qp = window.location.hash.substring(1).replace('?', '&');
} else {
qp = location.search.substring(1);
}
arr = qp.split("&");
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
qp = qp ? JSON.parse('{' + arr.join() + '}',
function (key, value) {
return key === "" ? value : decodeURIComponent(value);
}
) : {};
isValid = qp.state === sentState;
if ((
oauth2.auth.schema.get("flow") === "accessCode" ||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
oauth2.auth.schema.get("flow") === "authorization_code"
) && !oauth2.auth.code) {
if (!isValid) {
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "warning",
message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
});
}
if (qp.code) {
delete oauth2.state;
oauth2.auth.code = qp.code;
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
} else {
let oauthErrorMsg;
if (qp.error) {
oauthErrorMsg = "["+qp.error+"]: " +
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
(qp.error_uri ? "More info: "+qp.error_uri : "");
}
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "error",
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
});
}
} else {
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
}
window.close();
}
if (document.readyState !== 'loading') {
run();
} else {
document.addEventListener('DOMContentLoaded', function () {
run();
});
}
</script>
</body>
</html>

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,20 @@
window.onload = function() {
//<editor-fold desc="Changeable Configuration Block">
// the following lines will be replaced by docker/configurator, when it runs in a docker-container
window.ui = SwaggerUIBundle({
url: "https://petstore.swagger.io/v2/swagger.json",
dom_id: '#swagger-ui',
deepLinking: true,
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIStandalonePreset
],
plugins: [
SwaggerUIBundle.plugins.DownloadUrl
],
layout: "StandaloneLayout"
});
//</editor-fold>
};

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

107
source/CHANGELOG.md Normal file
View File

@ -0,0 +1,107 @@
## 0.8.22 release on 2023-09-27
1. add systemd to websoft9 artifacts
## 0.8.20 release on 2023-08-23
1. appmanage config files error:bug fix
## 0.8.19 release on 2023-08-23
1. New App Store preview push function added
2. Fix some known bugs
## 0.8.18 release on 2023-08-17
1. appmanage volumes bug edit
## 0.8.15 release on 2023-08-17
1. service menu bug
## 0.8.14 release on 2023-08-16
1. myapps plugins refresh bug
## 0.8.13 release on 2023-08-15
1. update plugins
2. fix bug data save in session
## 0.8.12 release on 2023-08-12
1. navigator plugin install way change
2. update plugin
## 0.8.11 release on 2023-08-03
1. Optimize interface calls
2. library artifacts directory: websoft9/plugin/library
3. add init apps: nocobase, affine
## 0.8.10 release on 2023-08-01
1. improve update.sh
2. add docs to artifacts
3. improve server's hostname
## 0.8.8 release on 2023-07-27
fixed update search api bug
## 0.8.5 release on 2023-07-26
add docs
## 0.8.4 release on 2023-07-26
add appstore search api
## 0.8.2 release on 2023-07-24
1. install from artifacts
2. add extre version.json into artifacts
## 0.7.2 release on 2023-06-25
1. appmanage 自动更新接口升级
## 0.7.1 release on 2023-06-21
1. appmanage version 文件意外删除时 bug 修改
2. 自动更新的时间频率调整为一天
3. 更新脚本 version 文件不存在的 bug 修改
## 0.7.0 release on 2023-06-20
1. appstore 增加 更新功能
2. myapps 功能优化
3. 新增 settings(设置) 功能
## 0.6.0 release on 2023-06-17
1. 上架 wordpress
2. 下架 moodle
3. 修改 redmine
4. 升级 discuzqzabbix
5. 新增自动更新软件商店功能
## 0.4.0 release on 2023-06-15
1. owncloud 下线测试
## 0.3.0 release on 2023-06-06
1. appmanage docker 镜像更新到 0.3.0
2. 修复 prestashop 无法访问的 bug
3. 修复 odoo 无法安装的 bug
## 0.2.0 release on 2023-06-03
1. appmanage docker 镜像更新到 0.2.0
2. Portainer 插件修复自动登录 bug
3. My Apps 插件修复首次使用获取容器 bug
## 0.1.0 release on 2023-05-26
1. stackhub 预发布,基本功能实现

68
source/CONTRIBUTING.md Normal file
View File

@ -0,0 +1,68 @@
# Contributing to Websoft9
From opening a bug report to creating a pull request: every contribution is appreciated and welcome.
If you're planning to implement a new feature or change the api please [create an issue](https://github.com/websoft9/websoft9/issues/new/choose) first. This way we can ensure that your precious work is not in vain.
## Not Sure Architecture?
It's important to figure out the design [architecture of Websoft9](docs/architecture.md)
## Fork
Contributors are only allowed to fork the [main branch](https://github.com/Websoft9/websoft9/tree/main) and open pull requests against it. Maintainers don't accept any PR to the **production branch**
## Branch
This repository has these branches:
* **Contributor's branch**: Developers can fork the main branch as their development branch anytime
* **main branch**: The only branch that accepts PR from Contributors's branch
* **production branch**: For version release and don't permit modify directly, only merge PR from **main branch**
Flow: Contributor's branch → main branch → production branch
## Pull request
[Pull request](https://docs.github.com/pull-requests) let you tell others about changes you've pushed to a branch in a repository on GitHub.
#### When is PR produced?
* Contributor commit to main branch
* main branch commit to production branch
#### How to deal with PR?
1. [pull request reviews](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/about-pull-request-reviews)
2. Merge the PR and run CI/CD for it
## DevOps principle
DevOps thinks the same way **[5m1e](https://www.dgmfmoldclamps.com/what-is-5m1e-in-injection-molding-industry/)** for manufacturing companies
We follow the development principle of minimization, rapid release
### Version
Use *[[major].[minor].[patch]](https://semver.org/lang/zh-CN/)* for version serial number and [version.json](../version.json) for version dependencies
### Artifact
Websoft9 use below [Artifact](https://jfrog.com/devops-tools/article/what-is-a-software-artifact/) for different usage:
* **Dockerhub for image**: Access [Websoft9 docker images](https://hub.docker.com/u/websoft9dev) on Dockerhub
* **Azure Storage for files**: Access [packages list](https://artifact.azureedge.net/release?restype=container&comp=list) at [Azure Storage](https://learn.microsoft.com/en-us/azure/storage/storage-dotnet-how-to-use-blobs#list-the-blobs-in-a-container)
### Tags
- Type tags: Bug, enhancement, Documentation
- Stages Tags: PRD, Dev, QA(include deployment), Documentation
### WorkFlow
Websoft9 use the [Production branch with GitLab flow](https://cm-gitlab.stanford.edu/help/workflow/gitlab_flow.md#production-branch-with-gitlab-flow) for development collaboration
> [gitlab workflow](https://docs.gitlab.com/ee/topics/gitlab_flow.html) is improvement model for git

167
source/LICENSE.md Normal file
View File

@ -0,0 +1,167 @@
This program is released under LGPL-3.0 and with the additional Terms:
Without authorization, it is not allowed to publish free or paid image based on this program in any Cloud platform's Marketplace.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

72
source/README.md Normal file
View File

@ -0,0 +1,72 @@
[![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](http://www.gnu.org/licenses/gpl-3.0)
[![GitHub last commit](https://img.shields.io/github/last-commit/websoft9/websoft9)](https://github.com/websoft9/websoft9)
[![GitHub Release Date](https://img.shields.io/github/release-date/websoft9/websoft9)](https://github.com/websoft9/websoft9)
[![GitHub Repo stars](https://img.shields.io/github/stars/websoft9/websoft9?style=social)](https://github.com/websoft9/websoft9)
# What is Websoft9?
Websoft9 is a web-based PaaS platform for running 200+ popular [open source applications](https://github.com/Websoft9/docker-library/tree/main/apps) on your own server.
Websoft9 helps you run multiple applications on a single server; that means we believe microservices on a single machine are reasonable. On the contrary, this becomes more and more valuable as computing power increases
Although the Cloud Native emphasizes high availability and clustering, but most of the time, applications do not need to implement complex clusters or K8S.
Websoft9's [architecture](https://github.com/Websoft9/websoft9/blob/main/docs/architecture.md) is simple, it did not create any new technology stack, and we fully utilize popular technology components to achieve our product goals, allowing users and developers to participate in our projects without the need to learn new technologies.
## Demos
You can see the screenshots below:
| ![image](https://github.com/Websoft9/websoft9/assets/16741975/8321780c-4824-4e40-997d-676a31534063) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/e842575b-60bc-4b0d-a57b-28c26b16196a) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/c598412a-9529-4286-ba03-6234d6da99b9) |
| --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
| ![image](https://github.com/Websoft9/websoft9/assets/16741975/7bed3744-1e9f-429e-8678-3714c8c262e2) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/a0923c69-2792-4cde-bfaf-bc018b61aee9) | ![image](https://github.com/Websoft9/websoft9/assets/16741975/901efd1c-31a0-4b31-b79c-fc2d441bb679) |
## Features
- Applications listing
- Install 200+ template applications without any configuration
- Web-based file browser to manage files and folder
- Manage user accounts
- Use a terminal on a remote server in your local web browser
- Nginx gui for proxy and free SSL with Let's Encrypt
- Deploy, configure, troubleshoot and secure containers in minutes on Kubernetes, Docker, and Swarm in any data center, cloud, network edge or IIOT device.
- Manage your Linux server via GUI: Inspect and change network settings, Configure a firewall, Manage storage, Browse and search system logs, Inspect a system's hardware, Inspect and interact with systemd-based services
- Supported languages: English, Chinese中文
# Installation
You should have root privileges user to install or upgrade or uninstall Websoft9, if you use no-root user you can `sudo su` for it
## Install & Upgrade
```
# Install by default
curl https://websoft9.github.io/websoft9/install/install.sh | bash
# Install Websoft9 special version by development artifact and install path at /data/websoft9/source ...
wget -O - https://websoft9.github.io/websoft9/install/install.sh | bash /dev/stdin --port 9000 --channel dev --path "/data/websoft9/source" --version "0.8.25"
```
After installation, access it by: **http://Internet IP:9000** and using **Linux user** for login
## Uninstall
```
# Uninstall by default
curl https://websoft9.github.io/websoft9/install/uninstall.sh | bash
# Uninstall all
wget -O - https://websoft9.github.io/websoft9/install/uninstall.sh | bash /dev/stdin --cockpit --files
```
# Contributing
Follow the [contributing guidelines](CONTRIBUTING.md) if you want to propose a change in the Websoft9 core. For more information about participating in the community and contributing to the Websoft9 project, see [this page](https://support.websoft9.com/docs/community/contributing).
- Documentation for [Websoft9 core maintainers](docs/MAINTAINERS.md)
- Documentation for application templates based on Docker maintainers is in the [docker-library](https://github.com/Websoft9/docker-library).
- [Articles promoting Websoft9](https://github.com/Websoft9/websoft9/issues/327)
# License
Websoft9 is licensed under the [LGPL-3.0](/License.md), and additional Terms: It is not allowed to publish free or paid image based on this repository in any Cloud platform's Marketplace without authorization

15
source/SECURITY.md Normal file
View File

@ -0,0 +1,15 @@
# Security Policy
## Versions
As an open source product, we will only patch the latest major version for security vulnerabilities. Previous versions of Websoft9 will not be retroactively patched.
## Disclosing
You can get in touch with us regarding a vulnerability via [issue](https://github.com/Websoft9/websoft9/issues) or email at help@websoft9.com.
You can also disclose via huntr.dev. If you believe you have found a vulnerability, please disclose it on huntr and let us know.
https://huntr.dev/bounties/disclose
This will enable us to review the vulnerability and potentially reward you for your work.

View File

@ -0,0 +1 @@
1. Add install: developer mode at install.sh

24
source/cockpit/README.md Normal file
View File

@ -0,0 +1,24 @@
# Cockpit
Cockpit is used as the backend service gateway. We have not modified the Cockpit core; we just improve the installation and modify the config for Websoft9
## Install
```
# default install
wget https://websoft9.github.io/websoft9/install/install_cockpit.sh && bash install_cockpit.sh
# define Cockpit port and install
wget https://websoft9.github.io/websoft9/install/install_cockpit.sh && bash install_cockpit.sh --port 9099
```
## Development
Developer should improve these codes:
- Install and Upgrade Cockpit: */install/install_cockpit.sh*
- Override the default menus: */cockpit/menu_override*
> shell.override.json is used for the top menu of Cockpit. The override function is available since Cockpit 297
- Cockpit configuration file: */cockpit/cockpit.conf*

View File

@ -0,0 +1,5 @@
# docs: https://cockpit-project.org/guide/latest/cockpit.conf.5.html
[WebService]
AllowUnencrypted = true
LoginTitle= Websoft9 - Linux AppStore

View File

@ -0,0 +1,3 @@
{
"tools": null
}

View File

@ -0,0 +1,3 @@
{
"tools": null
}

View File

@ -0,0 +1,3 @@
{
"menu": null
}

View File

@ -0,0 +1,57 @@
{
"menu": null,
"tools": {
"index": {
"label": "Networking",
"order": 40,
"docs": [
{
"label": "Managing networking bonds",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-network-bonds-using-the-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing networking teams",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-network-teams-using-the-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing networking bridges",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-network-bridges-in-the-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing VLANs",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-vlans-in-the-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing firewall",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing_firewall_using_the_web_console"
}
],
"keywords": [
{
"matches": [
"network",
"interface",
"bridge",
"vlan",
"bond",
"team",
"port",
"mac",
"ipv4",
"ipv6"
]
},
{
"matches": [
"firewall",
"firewalld",
"zone",
"tcp",
"udp"
],
"goto": "/network/firewall"
}
]
}
}
}

View File

@ -0,0 +1,3 @@
{
"tools": null
}

View File

@ -0,0 +1,3 @@
{
"tools": null
}

View File

@ -0,0 +1,3 @@
{
"menu": null
}

View File

@ -0,0 +1,29 @@
{
"locales": {
"cs-cz": null,
"de-de": null,
"es-es": null,
"fi-fi": null,
"fr-fr": null,
"he-il": null,
"it-it": null,
"ja-jp": null,
"ka-ge": null,
"ko-kr": null,
"nb-no": null,
"nl-nl": null,
"pl-pl": null,
"pt-br": null,
"ru-ru": null,
"sk-sk": null,
"sv-se": null,
"tr-tr": null,
"uk-ua": null
},
"docs": [
{
"label": "Documentation",
"url": "https://support.websoft9.com/en/docs/"
}
]
}

View File

@ -0,0 +1,3 @@
{
"tools": null
}

View File

@ -0,0 +1,69 @@
{
"menu": null,
"tools": {
"index": {
"label": "Storage",
"order": 30,
"docs": [
{
"label": "Managing partitions",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-partitions-using-the-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing NFS mounts",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-nfs-mounts-in-the-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing RAIDs",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-redundant-arrays-of-independent-disks-in-the-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing LVMs",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/using-the-web-console-for-configuring-lvm-logical-volumes_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing physical drives",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/using-the-web-console-for-changing-physical-drives-in-volume-groups_system-management-using-the-rhel-8-web-console"
},
{
"label": "Managing VDOs",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/using-the-web-console-for-managing-virtual-data-optimizer-volumes_system-management-using-the-rhel-8-web-console"
},
{
"label": "Using LUKS encryption",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/locking-data-with-luks-password-in-the-rhel-web-console_system-management-using-the-rhel-8-web-console"
},
{
"label": "Using Tang server",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-automated-unlocking-using-a-tang-key-in-the-web-console_system-management-using-the-rhel-8-web-console"
}
],
"keywords": [
{
"matches": [
"filesystem",
"partition",
"nfs",
"raid",
"volume",
"disk",
"vdo",
"iscsi",
"drive",
"mount",
"unmount",
"udisks",
"mkfs",
"format",
"fstab",
"lvm2",
"luks",
"encryption",
"nbde",
"tang"
]
}
]
}
}
}

View File

@ -0,0 +1,3 @@
{
"tools": null
}

View File

@ -0,0 +1,147 @@
{
"tools": {
"terminal": null,
"services": {
"label": "Services",
"order": 10,
"docs": [
{
"label": "Managing services",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-services-in-the-web-console_system-management-using-the-rhel-8-web-console"
}
],
"keywords": [
{
"matches": [
"service",
"systemd",
"target",
"socket",
"timer",
"path",
"unit",
"systemctl"
]
},
{
"matches": [
"boot",
"mask",
"unmask",
"restart",
"enable",
"disable"
],
"weight": 1
}
]
},
"logs": {
"label": "Logs",
"order": 20,
"docs": [
{
"label": "Reviewing logs",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/reviewing-logs_system-management-using-the-rhel-8-web-console"
}
],
"keywords": [
{
"matches": [
"journal",
"warning",
"error",
"debug"
]
},
{
"matches": [
"abrt",
"crash",
"coredump"
],
"goto": "?tag=abrt-notification"
}
]
}
},
"menu": {
"logs": null,
"services": null,
"index": {
"label": "Overview",
"order": -2,
"docs": [
{
"label": "Configuring system settings",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/getting-started-with-the-rhel-8-web-console_system-management-using-the-rhel-8-web-console"
}
],
"keywords": [
{
"matches": [
"time",
"date",
"restart",
"shut",
"domain",
"machine",
"operating system",
"os",
"asset tag",
"ssh",
"power",
"version",
"host"
]
},
{
"matches": [
"hardware",
"mitigation",
"pci",
"memory",
"cpu",
"bios",
"ram",
"dimm",
"serial"
],
"goto": "/system/hwinfo"
},
{
"matches": [
"graphs",
"metrics",
"history",
"pcp",
"cpu",
"memory",
"disks",
"network",
"cgroups",
"performance"
],
"goto": "/metrics"
}
]
},
"terminal": {
"label": "Terminal",
"keywords": [
{
"matches": [
"console",
"command",
"bash",
"shell"
]
}
]
}
},
"preload": [
"index"
],
"content-security-policy": "img-src 'self' data:"
}

View File

@ -0,0 +1,3 @@
{
"tools": null
}

View File

@ -0,0 +1,31 @@
{
"menu": null,
"tools": {
"index": {
"label": "Accounts",
"order": 70,
"docs": [
{
"label": "Managing user accounts",
"url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/managing-user-accounts-in-the-web-console_system-management-using-the-rhel-8-web-console"
}
],
"keywords": [
{
"matches": [
"user",
"password",
"useradd",
"passwd",
"username",
"login",
"access",
"roles",
"ssh",
"keys"
]
}
]
}
}
}

View File

@ -0,0 +1,4 @@
# Install the Websoft9 Cockpit menu overrides:
# shallow-clone the repo, wipe any previously installed *.override.json files,
# copy the fresh overrides into /etc/cockpit, then remove the clone.
git clone --depth=1 https://github.com/Websoft9/websoft9.git
rm -rf /etc/cockpit/*.override.json
cp -r websoft9/cockpit/menu_override/* /etc/cockpit
rm -rf websoft9

4
source/docker/.env Normal file
View File

@ -0,0 +1,4 @@
APPHUB_VERSION=0.0.5
DEPLOYMENT_VERSION=2.19.0
GIT_VERSION=1.20.4
PROXY_VERSION=2.10.4

28
source/docker/README.md Normal file
View File

@ -0,0 +1,28 @@
# Docker
## Test it
All backend services of Websoft9 are packaged into Docker images; with just these steps you can run them:
```
curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh && sudo systemctl enable docker && sudo systemctl start docker
sudo docker network create websoft9
wget https://websoft9.github.io/websoft9/docker/.env
wget https://websoft9.github.io/websoft9/docker/docker-compose.yml
sudo docker compose -p websoft9 up -d
```
> If you want to switch to development mode, you should execute the following commands:
```
sudo docker compose -p websoft9 down -v
wget https://websoft9.github.io/websoft9/docker/docker-compose-dev.yml
# /data/source is development sources path in host
docker compose -f docker-compose-dev.yml -p websoft9 up -d --build
```
## Develop it
The folders **apphub, deployment, git, proxy** store development files, used for:
- Optimize dockerfile
- Release version
- Build docker images by GitHub Actions

View File

@ -0,0 +1,52 @@
FROM python:3.10-bullseye AS buildstage
LABEL maintainer="Websoft9<help@websoft9.com>"
LABEL version="0.0.5"
ENV LIBRARY_VERSION=v0.5.8
# Prepare library
RUN wget https://github.com/Websoft9/docker-library/archive/refs/tags/$LIBRARY_VERSION.zip -O ./library.zip && \
unzip library.zip && \
mv docker-library-* library && \
mkdir credentials && \
echo "This folder stored the credentials of other services that apphub will connect" > credentials/readme && \
# Prepare Media and master data from Contentful
git clone --depth=1 https://github.com/swagger-api/swagger-ui.git && \
wget https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js && \
cp redoc.standalone.js swagger-ui/dist && \
git clone --depth=1 https://github.com/Websoft9/plugin-appstore && \
mv -f plugin-appstore/data ./media && \
git clone --depth=1 https://github.com/Websoft9/websoft9
FROM python:3.10-slim-bullseye
WORKDIR /websoft9
COPY --from=buildstage /media/data ./media
COPY --from=buildstage /library ./library
COPY --from=buildstage /websoft9/apphub ./apphub
COPY --from=buildstage /swagger-ui/dist ./apphub/swagger-ui
RUN apt update && apt install git jq iproute2 supervisor -y && \
mkdir credentials && \
echo "This folder stored the credentials of other services that integrated with apphub" > credentials/readme
RUN pip install --upgrade pip && pip install -r apphub/requirements.txt
RUN pip install -e ./apphub
# supervisor
COPY config/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
RUN chmod +r /etc/supervisor/conf.d/supervisord.conf
COPY config/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
VOLUME /websoft9/apphub/logs
VOLUME /websoft9/apphub/src/config
VOLUME /websoft9/media
# Clean cache and install files
RUN rm -rf apphub/docs apphub/tests library.zip plugin-appstore && \
apt clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc /usr/share/doc-base
EXPOSE 8080
ENTRYPOINT ["/entrypoint.sh"]

View File

@ -0,0 +1,4 @@
ARG APPHUB_VERSION
FROM websoft9dev/apphub:${APPHUB_VERSION} as buildstage
RUN mkdir -p /websoft9/apphub-dev
RUN sed -i 's/supervisorctl start apphub/supervisorctl start apphubdev/g' /entrypoint.sh

View File

@ -0,0 +1,10 @@
# README
- Download docker-library release to image
- install git
- entrypoint: config git credential for remote gitea
- health.sh: gitea/portainer/nginx credentials; if there is an exception, output it to the logs
- use virtualenv for pip install requirements.txt
- create volumes at dockerfile
- EXPOSE port
- process logs should output to docker logs by supervisord

View File

@ -0,0 +1 @@
{"username":"appuser","password":"apppassword"}

View File

@ -0,0 +1,33 @@
#!/bin/bash
# check credentials exists
# check_file_exists FILE MAX_ATTEMPTS
# Poll once per second for FILE, up to MAX_ATTEMPTS times.
# Returns 0 as soon as the file exists, 1 once all attempts are exhausted.
check_file_exists() {
    # Bug fix: use locals so the helper no longer leaks file_path,
    # max_attempts and i into the caller's global scope.
    local file_path=$1
    local max_attempts=$2
    local i
    for ((i=1; i<=max_attempts; i++))
    do
        if [ -f "$file_path" ]; then
            echo "$file_path exists"
            return 0
        else
            echo "$file_path is not exists, wait a moment.."
        fi
        sleep 1
        if ((i==max_attempts)); then
            echo "$file_path is not exists, app may be work normally."
            return 1
        fi
    done
}
# Credential checks are best-effort: a missing file is reported but must not
# abort container start-up, hence set +e around the probes.
set +e
check_file_exists "/websoft9/credentials/credential_proxy" 1
check_file_exists "/websoft9/credentials/credential_deployment" 1
check_file_exists "/websoft9/credentials/credential_git" 1
set -e
# start by supervisord
/usr/bin/supervisord
supervisorctl start apphub
# Keep PID 1 alive so the container does not exit after supervisord forks.
tail -f /dev/null

View File

@ -0,0 +1,12 @@
[supervisord]
nodaemon=false
[program:apphub]
command=uvicorn src.main:app --host 0.0.0.0 --port 8080 --log-level error
autostart=true
directory=/websoft9/apphub
[program:apphubdev]
command=/bin/bash -c '[ -z "$(ls -A /websoft9/apphub-dev)" ] && cp -r /websoft9/apphub/* /websoft9/apphub-dev && uvicorn src.main:app --reload --host 0.0.0.0 --port 8080 --log-level error'
autostart=true
directory=/websoft9/apphub-dev

View File

@ -0,0 +1,7 @@
{
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "5"
}
}

View File

@ -0,0 +1,14 @@
# step1: build entrypoint execute program init_portainer by golang
FROM golang:latest AS builder
WORKDIR /
COPY init_portainer.go /
RUN go build -o init_portainer /init_portainer.go
RUN chmod +x /init_portainer
# step2: copy build go program to portainer
FROM portainer/portainer-ce:2.19.0
LABEL maintainer="websoft9<help@websoft9.com>"
LABEL version="2.19.0"
COPY --from=builder /init_portainer /
ENTRYPOINT ["/init_portainer"]

View File

@ -0,0 +1,3 @@
# Readme
- create local endpoint and lock

View File

@ -0,0 +1,85 @@
package main
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"time"
)
// main provisions the Portainer admin credential on first start and then
// launches Portainer.
//
// Behaviour:
//   - first run: generate a random 16-char password, store it in
//     /var/websoft9/credential and hand it to portainer via
//     --admin-password-file;
//   - later runs: the credential file already exists, so start portainer
//     without re-seeding the admin password.
func main() {
	dirPath := "/var/websoft9"

	// Ensure /var and /var/websoft9 exist before touching the file.
	if _, err := os.Stat("/var"); os.IsNotExist(err) {
		if err = os.Mkdir("/var", 0755); err != nil {
			fmt.Println(err)
			return
		}
	}
	if _, err := os.Stat(dirPath); os.IsNotExist(err) {
		if err = os.Mkdir(dirPath, 0755); err != nil {
			fmt.Println(err)
			return
		}
	}

	filePath := "/var/websoft9/credential"
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		fmt.Println("credential is not exist, create it.")
		password := generatePassword(16)
		if err := writeToFile(filePath, password); err != nil {
			fmt.Println("write file error:", err)
			return
		}
	} else {
		// Already initialised: run portainer plainly and stop here.
		// Bug fix: the original fell through after this blocking Run() and
		// launched portainer a SECOND time with --admin-password-file once
		// the first instance exited; it also dropped portainer's output and
		// ignored the Run() error.
		fmt.Println("credential is exist, skip it.")
		cmd := exec.Command("./portainer")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			fmt.Println("error running compiled_program:", err)
		}
		return
	}

	content, err := ioutil.ReadFile(filePath)
	if err != nil {
		fmt.Println("read file error:", err)
		return
	}
	// Surface the generated credential in the container logs for the admin.
	fmt.Println("-----portainer_admin_user: admin, portainer_admin_password: " + string(content) + " ------")

	// First boot: start portainer with the generated admin password file.
	cmd := exec.Command("./portainer", "--admin-password-file", filePath)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err = cmd.Run()
	if err != nil {
		fmt.Println("error running compiled_program:", err)
		return
	}
}
// generatePassword returns a random password of the given length, drawn from
// a mix of upper/lower-case letters, digits and punctuation.
//
// NOTE(review): math/rand is not a cryptographically secure source; for an
// admin credential crypto/rand would be preferable — confirm before relying
// on this for anything security-critical.
func generatePassword(length int) string {
	const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()_+{}[]:;?.,<>"
	rand.Seed(time.Now().UnixNano())
	buf := make([]byte, length)
	for i := 0; i < length; i++ {
		buf[i] = charset[rand.Intn(len(charset))]
	}
	return string(buf)
}
func writeToFile(filePath , content string) error {
return ioutil.WriteFile(filePath , []byte(content), 0755)
}

View File

@ -0,0 +1,87 @@
# this file for developer
# install --devto "/data/mysource"
version: "3.8"
services:
apphub:
image: websoft9dev/apphub:${APPHUB_VERSION}
container_name: websoft9-apphub
build:
context: .
dockerfile: ./apphub/Dockerfiledev
args:
- APPHUB_VERSION=${APPHUB_VERSION}
ports:
- 9001-9999:8080
restart: always
volumes:
- /data/mysource:/websoft9/apphub-dev
- apphub_media:/websoft9/media
depends_on:
- deployment
- git
- proxy
deployment:
image: websoft9dev/deployment:$DEPLOYMENT_VERSION
container_name: websoft9-deployment
restart: always
ports:
- 9001-9999:9000
volumes:
- portainer:/data
- /data/compose:/data/compose
- /var/run/docker.sock:/var/run/docker.sock
#- /run/podman/podman.sock:/var/run/docker.sock
labels:
com.docker.compose.w9_http.port: 9000
git:
image: websoft9dev/git:$GIT_VERSION
container_name: websoft9-git
restart: always
volumes:
- gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- 9001-9999:3000
environment:
- INSTALL_LOCK=true
- DISABLE_SSH=true
- RUN_MODE=prod
- HTTP_PORT=3000
- DISABLE_REGISTRATION=false
- REQUIRE_SIGNIN_VIEW=false
- ROOT_URL=http://localhost/w9git/
labels:
com.docker.compose.w9_http.port: 3000
proxy:
image: websoft9dev/proxy:$PROXY_VERSION
container_name: websoft9-proxy
restart: always
ports:
- "80:80"
- "443:443"
- "9001-9999:81"
volumes:
- nginx_data:/data
- nginx_letsencrypt:/etc/letsencrypt
labels:
com.docker.compose.w9_http.port: 80
com.docker.compose.w9_https.port: 443
com.docker.compose.w9_console.port: 81
networks:
default:
name: websoft9
external: true
volumes:
apphub_media:
portainer:
gitea:
nginx_data:
nginx_letsencrypt:

View File

@ -0,0 +1,77 @@
version: "3.8"
services:
apphub:
image: websoft9dev/apphub:$APPHUB_VERSION
container_name: websoft9-apphub
restart: always
volumes:
- apphub_logs:/websoft9/apphub/logs
- apphub_media:/websoft9/media
- apphub_config:/websoft9/apphub/src/config
depends_on:
- deployment
- git
- proxy
labels:
com.docker.compose.w9_http.port: 8080
deployment:
image: websoft9dev/deployment:$DEPLOYMENT_VERSION
container_name: websoft9-deployment
restart: always
volumes:
- portainer:/data
- /data/compose:/data/compose
- /var/run/docker.sock:/var/run/docker.sock
#- /run/podman/podman.sock:/var/run/docker.sock
labels:
com.docker.compose.w9_http.port: 9000
git:
image: websoft9dev/git:$GIT_VERSION
container_name: websoft9-git
restart: always
volumes:
- gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
environment:
- INSTALL_LOCK=true
- DISABLE_SSH=true
- RUN_MODE=prod
- HTTP_PORT=3000
- DISABLE_REGISTRATION=false
- REQUIRE_SIGNIN_VIEW=false
- ROOT_URL=http://localhost/w9git/
labels:
com.docker.compose.w9_http.port: 3000
proxy:
image: websoft9dev/proxy:$PROXY_VERSION
container_name: websoft9-proxy
restart: always
ports:
- "80:80"
- "443:443"
volumes:
- nginx_data:/data
- nginx_letsencrypt:/etc/letsencrypt
labels:
com.docker.compose.w9_http.port: 80
com.docker.compose.w9_https.port: 443
com.docker.compose.w9_console.port: 81
networks:
default:
name: websoft9
external: true
volumes:
apphub_logs:
apphub_media:
apphub_config:
portainer:
gitea:
nginx_data:
nginx_letsencrypt:

View File

@ -0,0 +1,6 @@
FROM gitea/gitea:1.20.4
LABEL maintainer="Websoft9<help@websoft9.com>"
LABEL version="1.20.4"
COPY ./src/s6/user /etc/s6/user
RUN chmod -R 755 /etc/s6/user

View File

@ -0,0 +1,19 @@
# Readme
## Development
From official Gitea image, and:
- Complete the install wizard automatically via the INSTALL_LOCK environment variable
- Use default URL localhost for Host/Root_URL settings
## User
### Repository
How to clone a Repository ?
- Git clone a repository by external network address(gitea repository page's HTTP URL)
- Git clone a repository by internal network address (e.g. http://websoft9-git:3000/organization/repository.git)

View File

@ -0,0 +1,98 @@
APP_NAME = gitea
RUN_MODE = prod
RUN_USER = git
WORK_PATH = /data/gitea
[repository]
ROOT = /data/git/repositories
[repository.local]
LOCAL_COPY_PATH = /data/gitea/tmp/local-repo
[repository.upload]
TEMP_PATH = /data/gitea/uploads
[server]
APP_DATA_PATH = /data/gitea
DOMAIN = 119.8.96.66
SSH_DOMAIN = 119.8.96.66
HTTP_PORT = 3000
ROOT_URL = http://119.8.96.66:3000/
DISABLE_SSH = true
SSH_PORT = 22
SSH_LISTEN_PORT = 22
LFS_START_SERVER = true
LFS_JWT_SECRET = prcv5KuvKilAB_369Vr4saJf4QBdlMwD-vOXD2l7IHo
OFFLINE_MODE = false
[database]
PATH = /data/gitea/gitea.db
DB_TYPE = sqlite3
HOST = localhost:3306
NAME = gitea
USER = root
PASSWD =
LOG_SQL = false
SCHEMA =
SSL_MODE = disable
[indexer]
ISSUE_INDEXER_PATH = /data/gitea/indexers/issues.bleve
[session]
PROVIDER_CONFIG = /data/gitea/sessions
PROVIDER = file
[picture]
AVATAR_UPLOAD_PATH = /data/gitea/avatars
REPOSITORY_AVATAR_UPLOAD_PATH = /data/gitea/repo-avatars
[attachment]
PATH = /data/gitea/attachments
[log]
MODE = console
LEVEL = info
ROOT_PATH = /data/gitea/log
[security]
INSTALL_LOCK = true
SECRET_KEY =
REVERSE_PROXY_LIMIT = 1
REVERSE_PROXY_TRUSTED_PROXIES = *
INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE2OTQ1NzI0NDN9.kzFr-t0io9M_XQWojbvG20KPiXIZPS2GMadRBgR9xMM
PASSWORD_HASH_ALGO = pbkdf2
[service]
DISABLE_REGISTRATION = false
REQUIRE_SIGNIN_VIEW = false
REGISTER_EMAIL_CONFIRM = false
ENABLE_NOTIFY_MAIL = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
DEFAULT_KEEP_EMAIL_PRIVATE = false
DEFAULT_ALLOW_CREATE_ORGANIZATION = true
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.119.8.96.66
[lfs]
PATH = /data/git/lfs
[mailer]
ENABLED = false
[openid]
ENABLE_OPENID_SIGNIN = true
ENABLE_OPENID_SIGNUP = true
[cron.update_checker]
ENABLED = false
[repository.pull-request]
DEFAULT_MERGE_STYLE = merge
[repository.signing]
DEFAULT_TRUST_MODEL = committer
[oauth2]
JWT_SECRET = vIGxNkS5o0NYgDZwxMIcS_zXbbN3GBLCTO5MseHgO8Q

View File

@ -0,0 +1,5 @@
## Readme
- Create admin credential by admin cli
- to do: Disable user register
- to do: Disable Gravatar

View File

@ -0,0 +1,6 @@
#!/bin/bash
# s6 "user" service launcher for the Gitea container.
# If a ./setup script exists in the current directory, source it first so it
# can prepare environment/state for the service.
[[ -f ./setup ]] && source ./setup
pushd /root >/dev/null
# exec replaces this shell with s6-svc, so the popd below is never reached.
# NOTE(review): popd is dead code; kept byte-identical here.
exec s6-svc -D /etc/s6/user
popd
View File

@ -0,0 +1,48 @@
#!/bin/bash
# Bootstrap a Gitea admin account on first container start and persist the
# generated credential to /var/websoft9/credential as a JSON document.
echo "start create user shell" >> /tmp/user

set -e

# Confirm gitea is running: probe localhost:3000 up to 10 times.
count=1
response=""

while [ "$response" != "200" ]; do
    response=$(curl -s -o /dev/null -w "%{http_code}" localhost:3000)

    if [ "$response" = "200" ]; then
        echo "gitea is runing"
        break
    fi

    count=$((count+1))
    if [ $count -gt 10 ]; then
        echo "gitea is not runing"
        break
    fi
    # Bug fix: the original loop had no pause, so it busy-spun and burned
    # through all 10 attempts almost instantly before Gitea could come up.
    sleep 3
done

cred_path="/var/websoft9/credential"
admin_username="websoft9"
admin_email="help@websoft9.com"

# Idempotency: if the credential file already exists this container has been
# initialised before, so do nothing.
if [ -e "$cred_path" ]; then
    echo "File $cred_path exists. Exiting script."
    exit 0
fi

echo "create diretory"
mkdir -p "$(dirname "$cred_path")"

# Create the admin user as the "git" user; the CLI prints the generated
# random password to /tmp/credential for parsing below.
echo "Create admin credential by admin cli"
su -c "
    gitea admin user create --admin --username '$admin_username' --random-password --email '$admin_email' > /tmp/credential
" git

echo "Read credential from tmp"
# NOTE(review): $username is parsed but the JSON below uses $admin_username;
# confirm whether the parsed value should be saved instead.
username=$(grep -o "New user '[^']*" /tmp/credential | sed "s/New user '//")
if [ -z "$username" ]; then
    username="websoft9"
fi

password=$(grep -o "generated random password is '[^']*" /tmp/credential | sed "s/generated random password is '//")

echo "Save to credential"
json="{\"username\":\"$admin_username\",\"password\":\"$password\",\"email\":\"$admin_email\"}"
echo "$json" > "$cred_path"

View File

@ -0,0 +1,11 @@
FROM jc21/nginx-proxy-manager:2.10.4
LABEL maintainer="Websoft9<help@websoft9.com>"
LABEL version="2.10.4"
RUN apt-get update && apt-get install -y curl jq
COPY ./config/initproxy.conf /data/nginx/proxy_host/
COPY ./s6/init_user/init_user.sh /app/init_user.sh
RUN chmod +x /app/init_user.sh
CMD ["/bin/sh", "-c", "/app/init_user.sh && tail -f /dev/null"]

View File

@ -0,0 +1,7 @@
# Readme
From official Nginx Proxy Manager image, and:
- add init_proxy.conf to image
- init install wizard and modify user and password
- lock the line of BoltDB at Portainer where environment=1

View File

@ -0,0 +1,121 @@
# ------------------------------------------------------------
# domain.com
# ------------------------------------------------------------
server {
listen 80;
listen [::]:80;
server_name ~\.?[0-9a-zA-Z]$;
access_log /data/logs/proxy-host-1_access.log proxy;
error_log /data/logs/proxy-host-1_error.log warn;
if ($http_referer ~* /w9deployment/) {
rewrite ^/locales/(.*) /w9deployment/locales/$1 break;
}
location / {
# Proxy!
include conf.d/include/proxy.conf;
}
# proxy for portainer
location /w9deployment/ {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Accept-Encoding \"\";
proxy_pass http://websoft9-deployment:9000/;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
# proxy_http_version 1.1;
add_header 'Access-Control-Allow-Origin' $http_origin;
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
if ($request_method = OPTIONS) {
return 204;
}
set $quot_tmp "\"";
set $portainer_jwt "${quot_tmp}${cookie_portainerJWT}${quot_tmp}";
sub_filter '</head>' "<script>($portainer_jwt)?window.localStorage.setItem('portainer.JWT', '$portainer_jwt'):null;</script></head>";
sub_filter_once on;
sub_filter_types *;
}
# proxy for Nginx proxy Manager
location /w9proxy/ {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass http://websoft9-proxy:81;
rewrite ^/w9proxy/?(.*)$ /$1 break;
proxy_http_version 1.1;
proxy_redirect off;
proxy_set_header Upgrade $http_upgrade;
proxy_cache_bypass $http_upgrade;
proxy_set_header Connection $http_connection;
proxy_cache_bypass $http_secret_header;
proxy_set_header Accept-Encoding \"\";
add_header Pragma "no-cache";
add_header Cache-Control "no-cache";
if ($request_method = OPTIONS) {
return 204;
}
sub_filter '</head>' "<script>var tokens='$cookie_nginx_tokens';(tokens)?window.localStorage.setItem('nginx-proxy-manager-tokens', '[{\"t\":\"$cookie_nginx_tokens\",\"n\":\"$cookie_nginx_nikeName\"}]'):null;</script></head>";
# source changes
sub_filter 'href="/' 'href="/w9proxy/';
sub_filter 'src="/' 'src="/w9proxy/';
sub_filter '/api' '/w9proxy/api';
sub_filter '/assets' '/w9proxy/assets';
sub_filter '/js/' '/w9proxy/js/';
# script changes
sub_filter 'r.p="/' 'r.p="/w9proxy/';
sub_filter '"/login' '"/w9proxy/login';
sub_filter 'case"/logout"' 'case"/w9proxy/logout"';
sub_filter 'window.location="/"' 'window.location="/w9proxy/"';
sub_filter 'history.start({pushState:!0})' 'history.start({pushState:!0,root: "/w9proxy/"})';
sub_filter 'i.history.navigate(e.' 'i.history.navigate(e.replace("/w9proxy","").';
sub_filter_types *;
sub_filter_once off;
}
# proxy for Gitea
location /w9git/ {
proxy_pass http://websoft9-git:3000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_hide_header X-Frame-Options;
add_header 'Access-Control-Allow-Origin' $http_origin;
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
add_header 'Access-Control-Allow-Credentials' 'true';
if ($request_method = OPTIONS) {
return 204;
}
}
location /apidocs/static {
proxy_pass http://websoft9-apphub:8080/static;
}
location /apidocs/openapi.json {
proxy_pass http://websoft9-apphub:8080/;
}
location /apidocs/ {
proxy_pass http://websoft9-apphub:8080/redoc;
sub_filter 'src="/static' 'src="/apidocs/static';
sub_filter 'spec-url="/' 'spec-url="/apidocs/openapi.json';
}
# Custom
include /data/nginx/custom/server_proxy[.]conf;
}

View File

@ -0,0 +1,53 @@
#!/bin/bash
# One-shot initialisation for Nginx Proxy Manager: replace the factory
# admin@example.com / changeme account with the Websoft9 admin user and a
# random password, then persist the credential as JSON.
echo "Start to change nginxproxymanage users" >> /tmp/userlog
set +e

username="help@websoft9.com"
# 16 characters of base64 with '/' and '+' stripped.
password=$(openssl rand -base64 16 | tr -d '/+' | cut -c1-16)
token=""
cred_path="/var/websoft9/credential"

# Idempotency: a credential file means this instance was already initialised.
if [ -e "$cred_path" ]; then
    echo "File $cred_path exists. Exiting script."
    exit 0
fi

echo "create diretory"
mkdir -p "$(dirname "$cred_path")"

# Log in with the factory-default account until the API hands out a token.
# Bug fix: jq -r prints the literal string "null" when .token is absent
# (e.g. the API is up but rejects the login), which is non-empty and ended
# the original loop with a bogus token; treat "null" the same as empty.
while [ -z "$token" ] || [ "$token" = "null" ]; do
    sleep 5
    login_data=$(curl -X POST -H "Content-Type: application/json" -d '{"identity":"admin@example.com","scope":"user", "secret":"changeme"}' http://localhost:81/api/tokens)
    token=$(echo $login_data | jq -r '.token')
done

echo "Change username(email)" >> /tmp/userlog
# NOTE(review): $? reflects only curl's exit status, not the HTTP status
# code, so an API-level 4xx/5xx is still treated as success — confirm.
while true; do
    response=$(curl -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer $token" -d '{"email": "'$username'", "nickname": "admin", "is_disabled": false, "roles": ["admin"]}' http://localhost:81/api/users/1)
    if [ $? -eq 0 ]; then
        echo "HTTP call successful"
        break
    else
        echo "HTTP call Change username failed, retrying..." >> /tmp/userlog
        sleep 5
    fi
done

echo "Update password" >> /tmp/userlog
while true; do
    response=$(curl -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer $token" -d '{"type":"password","current":"changeme","secret":"'$password'"}' http://localhost:81/api/users/1/auth)
    if [ $? -eq 0 ]; then
        echo "HTTP call successful"
        break
    else
        echo "HTTP call Update password failed, retrying..." >> /tmp/userlog
        sleep 5
    fi
done

echo "Save to credential"
json="{\"username\":\"$username\",\"password\":\"$password\"}"
echo "$json" > "$cred_path"
set -e

View File

@ -0,0 +1,6 @@
#!/bin/bash
# s6 launcher for the proxy's one-shot user-initialisation service.
# Source init_user.sh from the current directory first (it rotates the
# default Nginx Proxy Manager admin credential) if it exists.
[[ -f ./init_user.sh ]] && source ./init_user.sh
pushd /root >/dev/null
# exec replaces this shell with s6-svc, so the popd below is never reached.
# NOTE(review): popd is dead code; kept byte-identical here.
exec s6-svc -D /etc/s6-overlay/s6-rc.d/init_user
popd

View File

@ -0,0 +1 @@
oneshot

View File

View File

@ -0,0 +1,42 @@
# Documentation for core maintainers
This documentation is from [jenkins MAINTAINERS](https://github.com/jenkinsci/jenkins/blob/master/docs/MAINTAINERS.adoc), which has a paradigm of rigorous open source project maintenance
## Scope
This document applies to the following components:
- Websoft9 core
- Websoft9 core plugins
- docker-library
## Roles
| Role/job | submit pr | review pr | assign pr | merge pr | close pr | create issue | manage issue | release |
| ------------ | --------- | --------- | --------- | -------- | -------- | ------------ | ------------ | ------- |
| Contributor | √ | | | | | √ | | |
| Issue Team | √ | | | | | √ | √ | |
| PR Reviewer | √ | √ | | | | √ | | |
| Release Team | √ | | | | | √ | | √ |
| Maintainer | √ | √ | √ | √ | √ | √ | | |
| PR Assignee | | | | √ | | √ | | |
* **Contributor**: submit pull requests to the Websoft9 core and review changes submitted by others. There are no special preconditions to do so. Anyone is welcome to contribute.
* **Issue Triage Team Member**: review the incoming issues: bug reports, requests for enhancement, etc. Special permissions are not required to take this role or to contribute.
* **Core Pull Request Reviewer**: A team for contributors who are willing to regularly review pull requests and eventually become core maintainers.
* **Core Maintainer**: Get permissions in the repository, and hence they are able to merge pull requests.Their responsibility is to perform pull request reviews on a regular basis and to bring pull requests to closure, either by merging ready pull requests towards weekly releases ( branch) or by closing pull requests that are not ready for merge because of submitter inaction after an extended period of time.
* **Pull Request Assignee**: Core maintainers make a commitment to bringing a pull request to closure by becoming an Assignee. They are also responsible to monitor the weekly release status and to perform triage of critical issues.
* **Release Team Member**: Responsible for Websoft9 weekly and LTS releases
## Pull request review process
## Pull request Merge process
## Issue triage
## Release process
## Tools
## Communication

View File

@ -0,0 +1,24 @@
## Architecture
Websoft9 is very simple [architecture](https://www.canva.cn/design/DAFpI9loqzQ/hI_2vrtfoK7zJwauhJzipQ/view?utm_content=DAFpI9loqzQ&utm_campaign=designshare&utm_medium=link&utm_source=publishsharelink) which used [Redhat Cockpit ](https://cockpit-project.org/) for web framework and [Docker](https://www.docker.com/) for running [application](https://github.com/Websoft9/docker-library).
The benefit of this architecture is that you don't have to learn new technology stacks or worry about this project becoming unmaintained.
![Alt text](image/archi.png)
What we do is integrating below stacks's API or interfaces to Cockpit console by [Cockpit packages (Also known as plugin)](https://cockpit-project.org/guide/latest/packages.html) :
- [Nginx Proxy Manager](https://nginxproxymanager.com/): A web-based Nginx management
- [Portainer](https://www.portainer.io/): Powerful container management for DevSecOps
- [Duplicati](https://www.duplicati.com/): Backup software to store encrypted backups online
- [Redis](https://redis.io/): The open source, in-memory data store
- [Appmanage](https://github.com/Websoft9/websoft9/tree/main/appmanage): API for create and manage docker compose based application powered by Websoft9
- [websoft9-plugins](https://github.com/websoft9?q=plugin&type=all&language=&sort=): Cockpit packages powered by Websoft9
As Websoft9 is a complete product, we also offer:
* API
* CLI
And Websoft9 is more attractive to users is [200+ application templates](https://github.com/Websoft9/docker-library).

24
source/docs/developer.md Normal file
View File

@ -0,0 +1,24 @@
# Developer Guide
## Release
#### 制品库自动化
- 插件制品管理:开发人员开发测试完成后,修改插件版本,触发 Action 构建 Github packages 制品
- docker-libaray 库制品管理:开发人员测试完成后,修改 library 版本,触发 Action 构建 Github packages 制品
- websoft9 制品管理:开发人员修改 appmanage 源码或微服务 docker-compose 测试完成后,修改 微服务 版本,触发 Action 构建 Dockerhub 镜像制品以及后台微服务 Github packages 制品
> Portainer,redis,nginxproxymanager 使用外部 dockerhub 镜像
### 自动化测试
当各个制品更新后,项目管理者修改 version_test.json 对应的组件的版本,构建 Action 触发自动化系统测试。
自动化测试失败,通知各开发人员,删除制品,修改后重新生成制品。
自动化测试成功,同步 version_test.json 到 version.json 新制品正式发布。

BIN
source/docs/image/archi.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 73 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.1 KiB

151
source/docs/notes/PRD.md Normal file
View File

@ -0,0 +1,151 @@
# 需求
从两个主线理解 stackhub 的需求:
- 应用生命周期管理:寻找、安装、发布、停止、卸载、升级等软件全生命周期。
- 基础设施运维管理:安全、存储、文件、容器、监控等系统管理
## 应用生命周期
### 业务需求
#### 寻找
用户可以通过两个入口寻找应用:
- 应用商店:采用一级分类的方式展现应用,并支持**筛选+搜索**的方式以便于用户检索
- Docker 镜像仓库:检索 Docker 镜像仓库,找到对应的应用
#### 安装
- 用户自主安装应用,后端按顺序依次启动目标应用
- 启动应用之前先进行资源约束判断,不符合条件的目标应用不予安装
- 与安装有关的状态:安装中、运行中、安装失败、反复重启、已停止
#### 发布
- 以域名或端口的方式,将运行中的应用发布出去,供外部用户访问。
- 自助设置 HTTPS上传或更新证书
#### 停止
将应用服务停止
#### 卸载
卸载应用并删除数据
#### 升级
升级应用,如果升级失败会自动回滚到升级之前的状态
#### 恢复
在已有的完整备份的基础,恢复应用。
可能存在两种情况:
- 覆盖现有应用
- 恢复成一个新的应用
#### 克隆
克隆一个已经存在的应用,命名为新应用
### 技术需求
#### 模板编排
应用的底层编排 100% 以 Docker Compose 语法作为编排语言
#### 多语言
- 前端支持 i18n
- 后端接口支持英文
#### 用户管理
- 支持多个用户,用户角色分为普通用户和管理员用户
- 普通用户可以创建和管理自己的应用,不可以删除他人的应用
#### UI 自适应
UI 自适应各种屏幕尺寸
#### 2FA
引入一种双重登录策略
#### 商店基础设置
- 商店 Logo 可自定义
- 语言、时区可选
- 绑定域名
- SMTP 信息填写
#### 通知
- SMTP 邮件通知
#### 商店更新
商店支持在线更新提示和在线更新
#### API
支持生成 API Tokens
#### CLI
基于 API 的 CLI
#### 仓库管理
默认以 DockerHub 作为镜像仓库,支持自建仓库并同步 DockerHub 镜像
#### 安装程序
一键自动化安装程序,类似:
```
curl https://websoft9.github.io/stackhub/install/install.sh | bash
```
主要步骤包括:
1. Check 硬件、操作系统、cpu 架构
2. 安装依赖包
3. 安装 docker
4. 下载各源码包
5. 启动个源码对应服务
## 基础设施运维
### SSH 终端
Web-Based SSH 终端
### 文件管理器
Web-Based 文件管理器
### 存储管理
- 支持接入第三方对象存储
### 备份
备份完整的应用数据:
- 自定义备份时间区间
- 自动备份可取消
- 备份可以管理:删除、下载等
### 容器管理
可视化的容器管理,包括:拉镜像、创建/删除/停止容器、SSH 进入容器、向容器上传文件等
### 系统监控
- 监控容器的 CPU内存和存储消耗情况
- 监控系统的 CPU内存和存储消耗情况

View File

@ -0,0 +1,339 @@
# Process
要理解整个架构设计,先打开[组件图](https://www.canva.cn/design/DAFt2DhfqYM/3uwKe09X5xaD4QPc47rNMQ/view?utm_content=DAFt2DhfqYM&utm_campaign=designshare&utm_medium=link&utm_source=publishsharelink),然后结合一下内容进行阅读:
所有的微操作一定归属于如下三个类别:
- CI持续集成即源码准确性
- CD持续部署即让软件 running目前采用拉式方式与CI协作
- CP持续发布即通过域名让用户可以访问
另外还有与系统维护相关的:
- Settings
- CLI
## API
API 接口功能设计:
### app/install
功能:安装应用并自动绑定域名
输入参数:
```
body:
{
- app_name # 产品名
- app_id # 自定义应用名称
- domains[] #域名-可选
- default_domain #默认域名-可选:设置.env中APP_URL
- edition{dist:community, version:5.0} #应用版本来自variable.json但目前variable.json中只有 version 的数据
- endpointId: 安装目的地portainer中有定义默认为 local
}
```
过程:
1. 参数验证:
app_id 验证:
业务要求gitea 中是否存在同名的 repositoryPortainer中是否存在同名stack
技术要求-【非空容器要求2位-20位字母数字以及-组成 giteatodo portainertodo】
app_name 验证: 在gitea容器的library目录下验证
domains[]验证是否绑定过数量不能超过2泛域名+其他域名
default_domain验证来自domains[]中,自定义域名优先
edition: community这个不做验证保留扩展只做version处理
endpointId通过Portainer容器取名称【local】的endpointId不存在报错
2. CIGitea 创建 repository通过Gitea创建仓库并修改.env文件
3. CD: Portainer
创建websoft9网络判断 websoft9 network (先判断是否存在)
Portainer 基于 Gitea Repository 在对应的 endpointId 中创建项目staus: [active,inactive]
4. CPNginx 为应用创建 Proxy 访问如果Proxy创建失败应用安装成功但提示Proxy创建失败不做应用安装回滚
2-3 步骤是有状态操作(产生对后续操作有影响的记录),故需考虑事务完整性。
### apps
查询所有apps的信息返回完整数据。等同于 CD: deploy/apps
另外app 的状态以及各个状态对应的操作:
- 状态:
- Active等同于 Portainer Active。此状态下显示容器的状态 running(1),stopped(2)
- Unactive,等同于 Portainer Unactive
- 操作:
- for running: stop | start | restart | redeploy | delete && delete(down -v)
- for Unactive: redeploy | delete(down -v)
### apps/{id}/*
对单个 apps 的增删改查:
- 查询deploy/apps/{id} with **get** method
- 启动deploy/apps/{id}/start
- 停止deploy/apps/{id}/stop
- 重启deploy/apps/{id}/restart
- 迁移deploy/apps/{id}/migrate
- 重建deploy/apps/{id}/git/redeploy + deploy/apps/{id}/restart
- 卸载:
- APP_Manage 调用 integration/,删除 Repository
- APP_Manage 调用 publish/nginx/proxy删除 Proxy
- APP_Manage 调用 deploy/apps/{id} with **delete** method删除应用
> 卸载必须是一个事务操作,确保完成所有步骤,以上先后顺序非常关键
### app/domains
App 域名的:增、删、改、查。
输入参数:
pulisherId: 默认为本地 nginx将来可扩展支持云平台的应用访问网关。
```
body:
{
- app_id
- domains[] 可选
- default_domain 可选
}
```
流程:
- CP: publish/nginx/proxy
- CI: Gitea 修改 repository 的 .env 文件中的 APP_URL 为默认域名
- CDdeploy/apps/{id}/git/redeploy + deploy/apps/{id}/restart
## Settings
配置文件可以通过接口和CLI进行更改
### 系统配置
系统配置,需重启服务后生效。
system.ini
### 应用配置
app.ini
应用配置一般会提供API供前端调用。应用配置更改后不需要重启。
功能:
- settings 增删改查
```
[system]
# websoft9 install path, it can not modify now
install_path=/data/websoft9
# apps install path, it can not modify now
apps_path=/data/compose
# enable appstore preview, it use for CLI upgrade COMMAND
appstore_preview=false
[address]
# Wildcard Domain Name for application
wildcard_domain=test.websoft9.com
[smtp]
smtp_port=743
smtp_server=smtp.websoft9.com
smtp_tls/ssl=true
smtp_user=admin
smtp_password=password
[receive]
# receive the notify of system
email=help@websoft9.com
wechat=
```
## CLI
CLI 是安装到服务器的服务端命令行工具。它的功能有几种来源:
1. 继承:由 API 直接转换
2. 相关:多个 API 以及 组合
3. 无关:与 API 无关,
具体指令以及参数设计如下:
```
Usage: w9 [OPTIONS] COMMAND sub-COMMAND
Common Commands
version 查询 websoft9 版本
repair 修复 websoft9
clean 清空 websoft9 使用过程中不需要的残留资源
upgrade 检查并更新 [core|plugin], --check 只检查
uninstall 删除 Websoft9 所有服务以及组件,除了 Docker 以及 Docker 应用之外
environments list all Environments
apikey 生产以及管理 AppManage keys
ip --replace newIP直接更改 gitea 和 Nginx IP相关的配置
App Commands:
install 安装应用
ls List 应用列表 [app_id, app_name, status, time, endpointId]
inspect 显示 APP 详细信息
start 启动一个停止的应用
stop 停止一个运行中的应用
restart 重启应用
redeploy 重建应用(包含更新镜像后重建)
delete 删除应用
Global Options:
-c, --context string
-D, --debug Enable debug mode
-e, --environment which environment you used
Run 'w9 COMMAND --help' for more information on a command.
```
## Core
### CI
CI 遵循几个法则:
* 为 CD 准备一个完全可用的编排物料
* Git 驱动,保证编排物料与应用运行环境分离,编排物料可修改可复原
* 编排物料中的资源(包/镜像)具备良好的网络可达性
CI 过程中除了直接使用 [Gitea API](https://docs.gitea.cn/api/1.19/) 之外,还需增加如下业务:
#### integation/repository/create
功能:
基于本地目录 library/apps/app_name创建一个符合 Websoft9 规范格式的 repository名称为app_id
> app_name 是软件名称例如wordpress。app_id 是用户安装的应用名称例如mywordpress
步骤:
1. 在 Gitea 中创建一个名称为 app_id 的 repository
2. 修改 Gitea repository 仓库的设置属性,只保留【代码】栏
#### integation/repository/modify
更改临时目录 .env 文件中的重要参数:
- APP_URL 用域名/公网IP替换
- POWER_PASSWORD 使用 16位 【大小写数字特殊字符】 替代
- APP_VERSION 根据安装输入参数替换
- APP_NAME 更换为 app_id
然后 git push
#### integation/repository/delete
### CD
CD 遵循几个法则:
* 可以管理应用的完全生命周期
* 应用可以指定部署到local之外的服务器或集群环境portainer 中对应的术语为 endpoint 或 environment
* 部署编排物料(CI 的产出物)可以是 docker-compose也可以是 helm
* 也可以支持源码编译成镜像后自动部署参考waypoint
#### deploy/apps/create/standalone/repository
基于 repository 创建应用100% 使用 Portainer API /stacks/create/standalone/repository
#### deploy/apps/{id}/git
设置 portainer 与 repository 之间的连接关系100% 使用 Portainer API /stacks/{id}/git
#### deploy/apps
List all apps继承 Portainer API /stacks
额外需要增加如下几类数据:
1. 将 app 主容器的 "Env" 合并到 Portainer API 返回的 env[] 中。
> portaier 中的 repository 安装方式中,.env 并不会被 portainer 保存到接口中
2. portainer 中的应用目录的 variables.json 或 repository variables.json
3. Gitea API 列出当前 APP 的 repository 之 URL提供访问链接?
4. 所用应用的数据目录:/var/lib/docker/volumes/...
5. Portainer 通过主容器的 Label 标签和 Ports获取 app_*_port等
#### deploy/apps/{id}
与 Portainer API /stacks{id} 雷同,支持 get(查询), delete删除
#### deploy/apps/{id}/git/redeploy
100% 使用 Portainer API /stacks/{id}/git/redeploy
#### deploy/apps/{id}/start
100% 使用 Portainer API /stacks/{id}/start
#### deploy/apps/{id}/stop
100% 使用 Portainer API /stacks/{id}/stop
#### deploy/apps/{id}/restart
Portainer 未提供对应的 API可以创建此操作也可以直接在前端通过 stop & start 组合实现。
#### deploy/apps/{id}/migrate
将 Docker 应用迁移到另外一台服务器上。此需求暂不实现
100% 使用 Portainer API /stacks/{id}/migrate
### CP
#### publish/nginx/proxy
function proxy(hostdomains[], Optional:port, Optional:exra_proxy.conf)
**init()**
也可以使用 getPort(), getExra_proxy()
1. 获取 Port: 从 portainer.containers 接口中 Label 属性集中获取 http 或 https
> com.docker.compose.http.port": "9001" | com.docker.compose.https.port": "9002"
2. 获取 exra_proxy.conf: 从 Gitea 接口中获取 repository 的 src/nginx_proxy.conf
**update()**
修改 proxy 中的 domain
**add()**
查询 Nginx 中是否有此应用的 Proxy
- Y将新的域名插入到 Proxy 中(忽略 nginx_proxy.conf
- N新增 Proxy
**delete()**
删除所有相关的 proxys
**list()**
查询所有相关的 proxys
**enable()**
enable所有相关的 proxys
**disable()**
disable 所有相关的 proxys

View File

@ -0,0 +1,57 @@
# 概述
## 需求草稿
| | Cloudron | [casaos](https://www.casaos.io/) | umbrel | runtipi |
| -------------- | -------- | -------------------------------------------------------- | ------------ | ------- |
| 应用编排 | | 单一镜像 | | 多镜像compose 编排 |
| 市场应用来源 | | 官方+社区 | 官方+社区 | |
| 一键安装程度 | | 不需任何配置 | 不需任何配置 | |
| 应用访问方式 | | 端口 | 端口 | |
| 自定义安装应用 | | Y | N | N |
| Web 管理容器 | | Y | N | |
| 默认镜像仓库 | | DockerHub | | |
| 自适应 | | Y | Y | |
| 多语言 | | Y | N | |
| 用户管理 | | 单一用户 | 单一用户 | |
| 自带应用 | | 文件,服务器终端,容器终端,监控,日志 | 监控,日志 | |
| 应用管理 | | 完整容器参数设置,克隆,绑定域名?备份?证书? | 无 | |
| 应用更新 | | N | | |
| 后端语言 | | Go | | |
| API | | HTTP API | | |
| 前端 | | vue.js | | |
| CLI | | Y | | |
| HTTP 服务器 | | 无,端口访问应用 | | traefik |
| 公共数据库 | | 无 | | |
| 开发文档 | | [wiki](https://wiki.casaos.io/en/contribute/development) | | |
| 2FA | | N | Y | |
| 安装方式 | | 服务器安装 | 容器安装 | |
| 商店更新 | | N | Y | Y |
| 商店绑定域名 | Y | N | N | |
| DNS服务 | Y | N | | |
* 应用自动分配4级域名后如何再 CNAME 二级域名?
### casaos 架构分析
#### 安装脚本
1. Check硬件、操作系统、cpu架构
2. 安装依赖包
3. 安装docker
4. 下载各源码包
5. 启动个源码对应服务
#### 源码解析
| 运行时项目 | 对应项目源码 | 说明 |
| -------------- | -------- | -------------------------------------------------------- |
| casaos | CasaOS | 每隔5秒通过websocekt推送内存/CPU/网络等系统信息;提供ssh登录操作的http接口;提供"sys", "port", "file", "folder", "batch", "image", "samba", "notify"这些http接口的访问|
| casaos-message-bus | CasaOS-MessageBus | 类似一个MQ提供消息的发布/订阅 |
| casaos-local-storage | CasaOS-LocalStorage | 每隔5S统计磁盘/USB信息,提供监控信息;提供http接口访问disk/usb/storage信息 |
| casaos-user-service | CasaOS-UserService | 通过http server提供用户管理的接口 |
| casaos-app-management | CasaOS-AppManagement | 使用CasaOS-AppStore中App的元数据;提供所有appList的分类/列表/详细信息;通过docker来管理app,提供安装/启动/关闭/重启/日志查看等相关接口;docker-compose管理V2;|
| casaos-gateway | CasaOS-Gateway | 提供Gateway自身管理接口,比如切换Gateway的port的接口,查看所有路由的接口;提供CasaOS-UI的静态资源访问服务;根据请求的PATH将请求代理转发至其它模块 |
| casaos-cli | CasaOS-CLI | 通过命令行的方式来调用CasaOS-Gateway的接口,该模块未完全实现,实现了部分命令 |
| linux-all-casaos | CasaOS-UI | VUE2,CasaOS的Web源码,编译后的html/js/image/css等由CasaOS-Gateway提供访问入口,所有API接口指向CasaOS-Gateway |
| - | CasaOS-Common | Common structs and functions for CasaOS |

View File

@ -0,0 +1,37 @@
# 软件工厂
由 Websoft9 自主研发的面向高校的【软件工厂】解决方案,学生和老师可以自由使用镜像库用于教学。
## 商业需求
高校老师和学生在教学中需要使用大量的开源软件作为教学的载体,以及通过使用开源软件学习实战的经验,打开商业化软件领域的大门。
目前,老师和学生受制于眼界以及技术原因,无法很方便的搭建和使用各种开源软件,大大的制约了教学的发展。
我们目前的方案只需要加以【盒子化】即可满足用户的需要。
## 业务模式
对我们既有的方案进行盒子化之后,通过如下方式盈利:
- 售卖软件解决方案以及技术支持
- 云资源分成
- 镜像按小时付费
- 知识库付费
- 课程合作付费
## 功能需求
盒子化的解决方案包括:
### 业务功能
- 可以一键使用的软件库(提供 300+场景方案)
- 可以在线使用的工具库(基于 Web 的工具库,学生在上课中无需安装大量的客户端工具即可完成任务)
- 可以管理教学过程的慕课系统
### 系统功能
- 账号管理
- 日志管理
- 安全管理
- 资源消耗管理

View File

@ -0,0 +1,8 @@
# Developer Guide
## Multiple languages
Below points you should know if you want to translate:
- Every plugin's po.zh_CN.js can be used for other Cockpit plugin
- po.zh_CN.js.gz at base1 is the system language file

29
source/docs/recruit.md Normal file
View File

@ -0,0 +1,29 @@
# recruit
In order to optimize the app management architecture and code specifications, and perform daily maintenance on new features and bugs, Websoft9 recruits a senior Python development expert.
## Requirements
1. Proficient in Python and have architectural experience in Python web projects
2. Have experience in developing distributed (caching, message middleware)
3. Familiar with Docker and other container technologies
4. Love coding and willing to continuously optimize code at work
5. Strong document reading and understanding skills as well as document writing experience
## Job Description
1. Complete additional features and modify bugs for existing projects
2. Provide reasons and solutions for optimizing the project architecture and API methods
## Work form
Remote, must complete 40 hours of work per month
## Remuneration and payment
Pay 4000 yuan before the 10th of each month

33
source/docs/team.md Normal file
View File

@ -0,0 +1,33 @@
# Team and Division
## Active Team{#active-team}
The Websoft9 active team works on the core functionality, as well as the documentation website.
* Xu Wei: One of the founders of Websoft9, responsible for code review
* Darren Chen: One of the founders of Websoft9, responsible for user experience design and architecture
* Morning Tan: Testing
* Lao Zhou: Document writing and issue collection feedback
## Architect Consultant
Create more, better and more realistic software solutions for the world
* Liu Guanghui: Engaged in enterprise architecture for 20 years, experienced complete typical enterprise applications such as ERP, MES, WMS, e-commerce, OA, etc.
## Candidate
- [mayo7e](https://github.com/mayo7e): [#296](https://github.com/Websoft9/websoft9/issues/296) mayowa.wh@gmail.com
## Honorary Alumni
Websoft9 would never be what it is today without the huge contributions from these folks who have moved on to bigger and greater things.
* Zengxc: RHCE
* [Brendan](https://github.com/dudeisbrendan03): Infra Engineer in Greater Manchester. Studying MSc @ Lancaster
* [Biao Yang](https://github.com/hotHeart48156): Hot heart and smart developer
* [Junhao](https://github.com/hnczhjh): RedHat Engineer CA, Studding at ChangSha colleague
* [QiuJiaHon](https://github.com/orgs/Websoft9/people/QiuJiaHon): Studing at Hunan Normal University
* [Ryan Gates](https://github.com/gatesry)
* [Kai Jiao](https://github.com/jiaosir-cn)
* [Geraintl yu](https://github.com/geraintlyu)

28
source/docs/user.md Normal file
View File

@ -0,0 +1,28 @@
# User Guide
## FAQ
#### user can not sudo?
```
# add user to sudo/admin group (select one command)
usermod -aG wheel username
usermod -aG sudo username
# sudo not need to input password
```
#### Can not login with correct credential?
Many reason may make you login failed with the correct credential:
- Cookie at you browser if IP change, need to clear cookie
- *.override.json is not correct
- TLS certificate
- User not allowed login, need to modify ssh_config file
More details, you can get it from `sudo grep cockpit /var/log/messages`
#### How to modify Websoft9 port?
Access web console > settings or use cli to modify port

58
source/install/README.md Normal file
View File

@ -0,0 +1,58 @@
# Install
- The [install.sh](./install.sh) is the entry file for install or upgrade
- You can separate running the [install_cockpit.sh](./install_cockpit.sh), [install_docker.sh](./install_docker.sh), [install_plugins.sh](./install_plugins.sh) also
- The [uninstall.sh](./uninstall.sh) is the entry file for uninstall
## Use it
```
# install or upgrade Websoft9
wget -O - https://websoft9.github.io/websoft9/install/install.sh | bash
# install or upgrade Websoft9 with parameters
wget -O - https://websoft9.github.io/websoft9/install/install.sh | bash /dev/stdin --port 9000 --channel release --path "/data/websoft9/source" --version "latest"
# install or upgrade Cockpit with parameters
wget -O - https://websoft9.github.io/websoft9/install/install_cockpit.sh | bash --port 9000
# install or upgrade Docker
wget -O - https://websoft9.github.io/websoft9/install/install_docker.sh | bash
# uninstall by default
curl https://websoft9.github.io/websoft9/install/uninstall.sh | bash
# uninstall all
wget -O - https://websoft9.github.io/websoft9/install/uninstall.sh | bash /dev/stdin --cockpit --files
```
## Develop it
This install script have below related resources:
- Tools: Install or upgrade some useful software packages at Linux
- Source Code: Download source code from artifactory
- Docker: Install and upgrade Docker, compose up **backend service** with docker-compose.yml
- Cockpit: Install and upgrade Cockpit and its Packages, manage it port, fix it menu
- Plugins: Install and upgrade Websoft9 plugins which is the **frontend**
- Systemd: Install and upgrade websoft9.service
- Set Firewalld: let 80,443 and Cockpit port allowed, Cockpit and Docker service with firewalld
The install script should adhere to the following principles:
1. Not allowed to modify the source code of the application.
2. Every task must have an exception exit mechanism.
3. Both installation and updates should be considered simultaneously.
4. Upgrade script should not overwrite existing configurations.
5. Duplication of code in any form is not allowed; shared logic must be extracted into functions.
6. Paths, ports, etc. must be defined using variables.
Some default parameters you should know:
- Websoft9 root path: */data/websoft9/source*
- Websoft9 Systemd script path: */opt/websoft9/systemd*
- Plugins path: */usr/share/cockpit*
- Cockpit config path: */etc/cockpit*
- Cockpit default port: 9000

398
source/install/install.sh Normal file
View File

@ -0,0 +1,398 @@
#!/bin/bash
# Define PATH
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
# Export PATH
export PATH

# Command-line options
# ==============================================================================
#
# --version
#   Install a specific version. Default is latest, for example:
#   $ sudo bash install.sh --version "0.8.25"
#
# --port <9000>
#   Set the Websoft9 console port. Default is 9000, for example:
#   $ sudo bash install.sh --port 9001
#
# --channel <release|dev>
#   Install a release (production) or dev distribution. Default is release:
#   $ sudo bash install.sh --channel release
#
# --path
#   Installation path, for example:
#   $ sudo bash install.sh --path "/data/websoft9/source"
#
# --devto
#   Developer mode; devto is the developer code path, for example:
#   $ sudo bash install.sh --devto "/data/dev/mycode"
#
# ==============================================================================

# Default parameter values
version="latest"
channel="release"
path="/data/websoft9/source"

# Parse command-line arguments; unknown options are silently skipped
while [[ $# -gt 0 ]]; do
    case $1 in
        --version)
            version="$2"
            shift 2
            ;;
        --port)
            port="$2"
            shift 2
            ;;
        --channel)
            channel="$2"
            shift 2
            ;;
        --path)
            path="$2"
            shift 2
            ;;
        --devto)
            devto="$2"
            shift 2
            ;;
        *)
            shift
            ;;
    esac
done

# Export --port for child scripts when given, otherwise default to 9000
# (note: the default is NOT exported here; install_cockpit.sh re-defaults)
if [ -n "$port" ]; then
    export port
else
    port=9000
fi

# Print the effective installation parameters
echo -e "\n------ Welcome to install Websoft9, it will take 3-5 minutes ------"
echo -e "\nYour installation parameters are as follows: "
echo "--version: $version"
echo "--port: $port"
echo "--channel: $channel"
echo "--path: $path"
echo "--devto: $devto"

echo -e "\nYour OS: "
cat /etc/os-release | head -n 3 2>/dev/null

# Define global vars
# export var can send it to subprocess
export http_port=80
export https_port=443
export install_path=$path
export channel
export version
export systemd_path="/opt/websoft9/systemd"
export source_zip="websoft9-$version.zip"
export source_unzip="websoft9"
export source_github_pages="https://websoft9.github.io/websoft9"
export tools_yum="git curl wget yum-utils jq bc unzip inotify-tools"
export tools_apt="git curl wget jq bc unzip inotify-tools"
export docker_network="websoft9"
export artifact_url="https://w9artifact.blob.core.windows.net/$channel/websoft9"

echo Install from url: $artifact_url
# Define common functions
# Block until no process holds the dpkg/apt lock files, polling every 5s.
Wait_apt() {
    local lock_file
    for lock_file in "/var/lib/dpkg/lock" "/var/lib/apt/lists/lock"; do
        while fuser "${lock_file}" >/dev/null 2>&1 ; do
            echo "${lock_file} is locked by another process. Waiting..."
            sleep 5
        done
    done
    echo "APT locks are not held by any processes. You can proceed."
}
# Exported so the child install scripts run via bash can reuse it
export -f Wait_apt
# Install the common tool packages Websoft9 depends on, using whichever
# package manager exists on this system (preference: dnf > yum > apt).
install_tools(){
    echo_prefix_tools=$'\n[Tools] - '
    echo "$echo_prefix_tools Starting install necessary tool..."

    # Probe each package manager; exit status 0 means it is available
    dnf --version >/dev/null 2>&1
    dnf_status=$?
    yum --version >/dev/null 2>&1
    yum_status=$?
    apt --version >/dev/null 2>&1
    apt_status=$?

    if [ $dnf_status -eq 0 ]; then
        dnf install $tools_yum -y
    elif [ $yum_status -eq 0 ]; then
        yum install $tools_yum -y
    elif [ $apt_status -eq 0 ]; then
        # Reuse the shared Wait_apt helper instead of duplicating the
        # dpkg lock-polling loop inline (it also waits on the apt lists lock)
        Wait_apt
        sudo apt update -y 1>/dev/null 2>&1
        apt install $tools_apt -y --assume-yes
    else
        echo "None of the required package managers are installed."
    fi
}
# Download the Websoft9 source bundle from the artifact server and unpack
# it into $install_path (existing content is overwritten).
download_source() {
    echo_prefix_source=$'\n[Download Source] - '
    echo "$echo_prefix_source Download Websoft9 source code from $artifact_url/$source_zip"

    # Drop any zip left over from a previous run
    find . -type f -name "websoft9*.zip*" -exec rm -f {} \;

    if [ -d "$install_path" ]; then
        echo "Directory $install_path already exists and installation will cover it."
    else
        mkdir -p "$install_path"
    fi

    if ! wget "$artifact_url/$source_zip"; then
        echo "Failed to download source package."
        exit 1
    fi

    if ! sudo unzip -o "$source_zip" -d "$install_path" > /dev/null; then
        echo "Failed to unzip source package."
        exit 1
    fi

    # Flatten the "websoft9" top-level folder of the archive into install_path
    if ! cp -r $install_path/$source_unzip/* "$install_path"; then
        echo "Move directory failed"
        exit 1
    fi

    rm -rf "$source_zip" "$install_path/$source_unzip"
}
# Verify that every given TCP port is free, after stopping the Websoft9
# services that normally occupy them. Exits non-zero when a port is taken.
check_ports() {
    local ports=("$@")

    echo "Stop Websoft9 Proxy and Cockpit service for reserve ports..."
    sudo docker stop websoft9-proxy 2>/dev/null || echo "docker stop websoft9-proxy not need "
    sudo systemctl stop cockpit 2>/dev/null || echo "systemctl stop cockpit not need"
    sudo systemctl stop cockpit.socket 2>/dev/null || echo "systemctl stop cockpit.socket not need"

    for port in "${ports[@]}"; do
        if netstat -tuln | grep ":$port " >/dev/null; then
            echo "Port $port is in use, install failed"
            # A bare "exit" here would return the status of the matching
            # grep (0), making the port conflict look like success to the
            # caller; exit 1 signals the failure explicitly.
            exit 1
        fi
    done

    echo "All ports are available"
}
# NOTE(review): these assignments overwrite the exported values set above,
# including any --path override of install_path -- confirm this is intended.
source_github_pages="https://websoft9.github.io/websoft9"
install_path="/data/websoft9/source"

# Merge the packaged docker/daemon.json (or, as a fallback, the copy on
# GitHub Pages) into /etc/docker/daemon.json, creating the target file if
# missing. Failure is tolerated: installation continues with the existing
# Docker configuration.
merge_json_files() {
    local target_path="/etc/docker/daemon.json"

    # Shell variables are interpolated into the heredoc before python runs
    python3 - <<EOF 2>/dev/null
import json
import urllib.request
import os

def merge_json_files(file1, file2):
    # Shallow-merge file2 over file1 and write the result back to file1
    print("Merge from local file... ")
    with open(file1, 'r') as f1, open(file2, 'r') as f2:
        data1 = json.load(f1)
        data2 = json.load(f2)
        merged_data = {**data1, **data2}
    with open(file1, 'w') as f:
        json.dump(merged_data, f, indent=4)

def download_and_merge(url, file_path):
    # Fetch daemon.json from url and shallow-merge it over the local file
    print("Download daemon.json from url and merge... ")
    with urllib.request.urlopen(url) as response:
        data = json.loads(response.read().decode())
    with open(file_path, 'r') as f:
        local_data = json.load(f)
    merged_data = {**local_data, **data}
    with open(file_path, 'w') as f:
        json.dump(merged_data, f, indent=4)

# Create target file if it does not exist
if not os.path.exists("${target_path}"):
    os.makedirs(os.path.dirname("${target_path}"), exist_ok=True)
    with open("${target_path}", 'w') as f:
        json.dump({}, f)

if os.path.exists("${install_path}/docker/daemon.json"):
    merge_json_files("${target_path}", "${install_path}/docker/daemon.json")
elif urllib.request.urlopen("${source_github_pages}/docker/daemon.json").getcode() == 200:
    download_and_merge("${source_github_pages}/docker/daemon.json", "${target_path}")
else:
    print("No target daemon.json file need to merged")
EOF
    # stderr is discarded above, so only report the aggregate outcome
    if [ $? -ne 0 ]; then
        echo "merge daemon.json failed, but install continue running"
    fi
}
# Prepare the Docker daemon for Websoft9: merge daemon.json settings and,
# when firewalld is active, create a "docker" zone that accepts traffic on
# the docker0 bridge. Restarts Docker so the config takes effect.
set_docker(){
    echo "Set Docker for Websoft9 backend service..."
    merge_json_files
    if ! systemctl is-active --quiet firewalld; then
        echo "firewalld is not running"
    else
        echo "Set firewall for Docker..."
        # zone may already exist on re-install, hence the silenced errors
        # (fixed: the original had a duplicated "sudo sudo" on this line)
        sudo firewall-cmd --permanent --new-zone=docker 2> /dev/null
        sudo firewall-cmd --permanent --zone=docker --add-interface=docker0 2> /dev/null
        sudo firewall-cmd --permanent --zone=docker --set-target=ACCEPT
        sudo firewall-cmd --reload
    fi
    # restart so merged daemon.json changes take effect
    sudo systemctl restart docker
}
# Build and start the Websoft9 backend containers with docker compose,
# tearing down any previous "websoft9" compose project first.
install_backends() {
    echo_prefix_backends=$'\n[Backend] - '
    echo "$echo_prefix_backends Install backend docker services"

    set_docker

    cd "$install_path/docker"
    if [ $? -ne 0 ]; then
        echo "Failed to change directory."
        exit 1
    fi

    # Create the shared docker network once; reuse it on re-install
    sudo docker network inspect $docker_network >/dev/null 2>&1
    if [ $? -eq 0 ]; then
        echo "Docker network '$docker_network' already exists."
    else
        sudo docker network create $docker_network
        if [ $? -ne 0 ]; then
            echo "Failed to create docker network."
            exit 1
        fi
    fi

    # set to developer mode: mount the developer code path given by --devto
    # into the apphub container via docker-compose-dev.yml
    if [ -n "$devto" ]; then
        sed -i "s|\(- \).*:/websoft9/apphub-dev|\1$devto:/websoft9/apphub-dev|g" docker-compose-dev.yml
        composefile=docker-compose-dev.yml
    else
        composefile=docker-compose.yml
    fi

    # Snapshot existing websoft9* containers BEFORE compose down so leftovers
    # that compose cannot remove are force-deleted below
    container_names=$(docker ps -a --format "{{.Names}}" --filter "name=websoft9")
    sudo docker compose -p websoft9 -f $composefile down

    # delete some dead containers that docker compose cannot deleted
    if [ ! -z "$container_names" ]; then
        echo "Deleting containers:"
        echo $container_names
        docker rm -f $container_names 2>/dev/null
    else
        echo "No containers to delete."
    fi

    sudo docker compose -f $composefile pull
    sudo docker compose -p websoft9 -f $composefile up -d --build
    if [ $? -ne 0 ]; then
        echo "Failed to start docker services."
        exit 1
    fi
}
# Install, enable and start the websoft9 systemd unit plus its helper
# scripts. Exits non-zero on any failed step.
install_systemd() {
    echo_prefix_systemd=$'\n[Systemd] - '
    # Fixed: the original interpolated "$echo_prefix_systemdInstall", which
    # bash reads as one (unset) variable name, so the prefix was lost;
    # braces delimit the variable correctly.
    echo "${echo_prefix_systemd}Install Systemd service"

    if [ ! -d "$systemd_path" ]; then
        sudo mkdir -p "$systemd_path"
    fi

    sudo cp -r $install_path/systemd/script/* "$systemd_path"
    sudo cp -f "$install_path/systemd/websoft9.service" /lib/systemd/system/
    if [ $? -ne 0 ]; then
        echo "Failed to copy Systemd service file."
        exit 1
    fi

    sudo systemctl daemon-reload
    if [ $? -ne 0 ]; then
        echo "Failed to reload Systemd daemon."
        exit 1
    fi

    sudo systemctl enable websoft9.service
    if [ $? -ne 0 ]; then
        echo "Failed to enable Systemd service."
        exit 1
    fi

    sudo systemctl start websoft9
    if [ $? -ne 0 ]; then
        echo "Failed to start Systemd service."
        exit 1
    fi
}
#--------------- main-----------------------------------------
# Run pre-checks, fetch sources, then the three sub-installers, then start
# the backend containers and the systemd supervisor.

check_ports $http_port $https_port $port
install_tools
download_source

# Capture each sub-installer's status immediately: inside the `if` body,
# $? refers to the [ ] test itself (always 0), so the original messages
# always reported "failed with error 0".
bash $install_path/install/install_docker.sh
status=$?
if [ $status -ne 0 ]; then
    echo "install_docker failed with error $status. Exiting."
    exit 1
fi

bash $install_path/install/install_cockpit.sh
status=$?
if [ $status -ne 0 ]; then
    echo "install_cockpit failed with error $status. Exiting."
    exit 1
fi

bash $install_path/install/install_plugins.sh
status=$?
if [ $status -ne 0 ]; then
    echo "install_plugins failed with error $status. Exiting."
    exit 1
fi

install_backends
install_systemd

echo -e "\n-- Install success! ------"
# Read the final console port back from cockpit.socket (the authoritative source)
echo "Access Websoft9 console by: http://Internet IP:$(grep ListenStream /lib/systemd/system/cockpit.socket | cut -d= -f2) and using Linux user for login"

View File

@ -0,0 +1,375 @@
#!/bin/bash
# Define PATH
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
# Export PATH
export PATH

## This script is used for install or upgrade Cockpit on Linux
## Cockpit build at redhat family: https://copr.fedorainfracloud.org/coprs/g/cockpit/cockpit-preview/monitor/
## Cockpit repository list: https://pkgs.org/download/cockpit
## PackageKit: https://www.freedesktop.org/software/PackageKit/
## Not use pkcon install/update cockpit, the reason is: https://cockpit-project.org/faq.html#error-message-about-being-offline
## pkcon reads the repositories of your system directly; it doesn't provide an extra repository
## [apt show cockpit] or [apt install cockpit] show all additional packages
## Ubuntu has backports in /etc/apt/sources.list by default
## Cockpit applications: https://cockpit-project.org/applications

# Command-line options
# ==========================================================
#
# --port <9000>
#   Set the Websoft9 console port. Default is 9000, for example:
#   $ sudo sh install_cockpit.sh --port 9001

############################################################
# Below vars export from install.sh
# $port
# $install_path
############################################################

echo -e "\n\n-------- Cockpit --------"

# Parse command-line arguments (only --port is recognized)
while [[ $# -gt 0 ]]; do
    case $1 in
        --port)
            port="$2"
            shift 2
            ;;
        *)
            shift
            ;;
    esac
done

# Port priority: --port > ListenStream= > 9000
# Return 0 when a cockpit.service unit is registered with systemd.
# (The function's status is simply the status of the final grep.)
cockpit_exist() {
    systemctl list-unit-files | grep -q "cockpit.service"
}
# Resolve the currently configured Cockpit port from cockpit.socket when
# Cockpit is already installed; otherwise fall back to 9000.
if cockpit_exist; then
    cockpit_now_port=$(grep -oP "(?<=^ListenStream=).*" "/lib/systemd/system/cockpit.socket")
    if [ -z "${cockpit_now_port// }" ]; then
        echo "cockpit port is null,set it to 9000"
        cockpit_now_port=9000
    else
        echo "$cockpit_now_port at cockpit.socket"
    fi
else
    cockpit_now_port=9000
fi

# The --port argument (or the value exported by install.sh) wins over the
# detected port
if [ -n "$port" ]; then
    cockpit_port=$port
else
    cockpit_port=$cockpit_now_port
fi

# install_path is exported by install.sh; default it for standalone runs
if [ -n "$install_path" ]; then
    echo "Have found install files"
else
    install_path="/data/websoft9/source"
fi

echo -e "\nYour installation parameters are as follows: "
echo "cockpit_port:$cockpit_port"
echo "install_path:$install_path"

echo_prefix_cockpit=$'\n[Cockpit] - '

# package cockpit depends_on [cockpit-bridge,cockpit-ws,cockpit-system], but update cockpit the depends don't update
cockpit_packages="cockpit cockpit-ws cockpit-bridge cockpit-system cockpit-pcp cockpit-storaged cockpit-networkmanager cockpit-session-recording cockpit-doc cockpit-packagekit cockpit-sosreport"
menu_overrides_github_page_url="https://websoft9.github.io/websoft9/cockpit/menu_override"
cockpit_config_github_page_url="https://websoft9.github.io/websoft9/cockpit/cockpit.conf"
cockpit_menu_overrides=()

# export OS release environments (provides NAME, VERSION_CODENAME, ...)
if [ -f /etc/os-release ]; then
    . /etc/os-release
else
    echo "Can't judge your Linux distribution"
    exit 1
fi

# This solution from: https://help.ubuntu.com/community/PinningHowto
# Pin cockpit* packages to the backports pocket so they win over the
# distribution's default (older) packages.
pin_config="
Package: cockpit*
Pin: release a=$VERSION_CODENAME-backports
Pin-Priority: 1000
"
# Fail the installation when any of the given TCP ports is already bound.
check_ports() {
    local ports=("$@")

    for port in "${ports[@]}"; do
        if netstat -tuln | grep ":$port " >/dev/null; then
            echo "Port $port is in use, install failed"
            # A bare "exit" would propagate the matching grep's status (0),
            # so the caller would see success despite the conflict.
            exit 1
        fi
    done

    echo "All ports are available"
}
# Print the installed cockpit-ws version, probing each known install
# location in turn (paths differ across distributions).
Print_Version(){
    sudo /usr/libexec/cockpit-ws --version 2>/dev/null \
        || sudo /usr/lib/cockpit-ws --version 2>/dev/null \
        || /usr/lib/cockpit/cockpit-ws --version 2>/dev/null
}
# Ensure PackageKit (pkcon) is present; it is required later to install
# Cockpit packages on non-apt systems. Exits when no package manager works.
Install_PackageKit(){
    echo "$echo_prefix_cockpit Install PackageKit(pkcon) and Cockpit repository"
    if command -v pkcon &> /dev/null; then
        echo "pkcon is at your system ..."
    elif command -v yum &> /dev/null; then
        # RHEL 7 ships PackageKit in the extras repository.
        # NOTE(review): comparing the full /etc/redhat-release string to
        # "Redhat7" looks fragile (real files read e.g. "Red Hat Enterprise
        # Linux Server release 7.9") -- confirm the intended match.
        if [ "$(cat /etc/redhat-release)" = "Redhat7" ]; then
            sudo subscription-manager repos --enable rhel-7-server-extras-rpms
        fi
        sudo yum install PackageKit -y
    elif command -v dnf &> /dev/null; then
        sudo dnf install PackageKit -y
    elif command -v apt &> /dev/null; then
        sudo apt update
        sudo apt install packagekit -y
    else
        echo "PackageKit not found, Cockpit cannot be installed"
        exit 1
    fi
}
# Configure the apt backports source and pinning so Cockpit packages are
# taken from backports on Debian/Ubuntu. No-op on non-apt systems.
Set_Repository() {
    echo "$echo_prefix_cockpit Set Cockpit deb repository"
    if command -v apt &> /dev/null; then
        # Debian needs the backports list added explicitly; Ubuntu already has it
        [ "$NAME" = "Debian" ] && echo "deb http://deb.debian.org/debian $VERSION_CODENAME-backports main" > /etc/apt/sources.list.d/backports.list
        echo "Set the cockpit repository priority on Ubuntu/Debian..."
        sudo bash -c "echo '$pin_config' > /etc/apt/preferences.d/cockpit_backports"
    fi
    echo "Complete set Cockpit repository"
}
# Reload systemd units, then restart the Cockpit socket (errors silenced:
# the socket may not exist yet on first install) and the service itself.
Restart_Cockpit(){
    echo "$echo_prefix_cockpit Restart Cockpit"
    sudo systemctl daemon-reload
    sudo systemctl restart cockpit.socket 2> /dev/null
    sudo systemctl restart cockpit
}
# Register the cockpit firewalld service on $cockpit_port and open 80/443.
# Callers are expected to have firewalld running (see Set_Firewalld).
Add_Firewalld(){
    echo "Add cockpit service to Firewalld..."
    # cockpit.xml is not always the same path at Linux distributions, so
    # patch the port in both candidate locations
    sudo sed -i "s/port=\"[0-9]*\"/port=\"$cockpit_port\"/g" /etc/firewalld/services/cockpit.xml
    sudo sed -i "s/port=\"[0-9]*\"/port=\"$cockpit_port\"/g" /usr/lib/firewalld/services/cockpit.xml
    sudo firewall-cmd --zone=public --add-service=cockpit --permanent
    sudo firewall-cmd --zone=public --add-port=443/tcp --permanent
    sudo firewall-cmd --zone=public --add-port=80/tcp --permanent
    sudo firewall-cmd --reload
}
# Apply the Cockpit firewalld rules when firewall-cmd exists. If firewalld
# is not running, start it just long enough to write the permanent rules.
Set_Firewalld(){
    echo "$echo_prefix_cockpit Set firewalld for cockpit access"
    if command -v firewall-cmd &> /dev/null; then
        echo "Set firewall for Cockpit..."
        if systemctl is-active --quiet firewalld; then
            Add_Firewalld
        else
            sudo systemctl start firewalld
            Add_Firewalld
            sudo systemctl stop firewalld
        fi
    fi
}
# Disable SELinux both immediately (setenforce) and persistently (config
# file) so Cockpit is reachable; no-op on systems without SELinux.
Set_Selinux(){
    echo "$echo_prefix_cockpit Set Selinux for cockpit access"
    # Nothing to do when SELinux is not installed
    [ -f /etc/selinux/config ] || return 0
    echo "Set Selinux for Cockpit..."
    sudo setenforce 0 1>/dev/null 2>&1
    sudo sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config 1>/dev/null 2>&1
}
# Apply Websoft9-specific Cockpit configuration: allow root login, patch a
# login.js selector bug, install cockpit.conf, set the listen port, and
# disable the fwupd-refresh service.
Set_Cockpit(){
    echo "$echo_prefix_cockpit Set Cockpit for Websoft9"

    echo "Cockpit allowed root user ..."
    # Emptying disallowed-users removes the default root-login ban
    echo "" > /etc/cockpit/disallowed-users

    # fix bug: https://github.com/Websoft9/websoft9/issues/332
    sed 's/selector(:is():where())/selector(:is(*):where(*))/' -i /usr/share/cockpit/static/login.js

    echo "Set Cockpit config file..."
    # Prefer the packaged config; fall back to the copy on GitHub Pages
    if [ -f "$install_path/cockpit/cockpit.conf" ]; then
        cp -f "$install_path/cockpit/cockpit.conf" /etc/cockpit/cockpit.conf
    else
        echo "Download config from URL $cockpit_config_github_page_url"
        curl -sSL $cockpit_config_github_page_url | sudo tee /etc/cockpit/cockpit.conf > /dev/null
    fi

    echo "Change cockpit default port to $cockpit_port ..."
    sudo sed -i "s/ListenStream=[0-9]*/ListenStream=${cockpit_port}/" /lib/systemd/system/cockpit.socket

    # fwupd-refresh.service may push error for Cockpit menu, so disable it
    if sudo systemctl is-active --quiet fwupd-refresh.service; then
        echo "fwupd-refresh.service is already running. Stopping and disabling it..."
        sudo systemctl stop fwupd-refresh.service
        sudo systemctl disable fwupd-refresh.service
        echo "fwupd-refresh.service stopped and disabled."
    else
        echo "fwupd-refresh.service is not running."
    fi
}
# List the file names under cockpit/menu_override in the Websoft9 GitHub
# repository (main branch) via the GitHub contents API, one name per line.
# Prints "Error: <code>" on an HTTP error instead of names.
get_github_files() {
    # Uses stdlib urllib instead of the third-party `requests` module: the
    # target host is not guaranteed to have requests installed, and the
    # sibling merge_json_files helper in install.sh already uses urllib.
    python3 - <<EOF
import json
import urllib.request

url = "https://api.github.com/repos/Websoft9/websoft9/contents/cockpit/menu_override?ref=main"
request = urllib.request.Request(url, headers={"Accept": "application/vnd.github.v3+json"})
try:
    with urllib.request.urlopen(request) as response:
        files = json.loads(response.read().decode())
        for file in files:
            print(file['name'])
except urllib.error.HTTPError as e:
    # Same output shape as the previous non-200 branch
    print(f"Error: {e.code}")
EOF
}
# Download every Cockpit menu-override file discovered by get_github_files
# from $menu_overrides_github_page_url into /etc/cockpit.
# Exits the whole script when any single download fails.
Download_Menu_Override(){
    cockpit_menu_overrides=($(get_github_files))
    for file in "${cockpit_menu_overrides[@]}"
    do
        echo "$menu_overrides_github_page_url/$file"
        curl -sSL "$menu_overrides_github_page_url/$file" | sudo tee /etc/cockpit/"$file" > /dev/null
        # NOTE(review): $? reflects tee (last command of the pipe), not curl,
        # so a failed download with a working tee is not detected here.
        if [ $? -ne 0 ]; then
            echo "Failed to download files"
            exit 1
        fi
    done
}
# Install the Websoft9 Cockpit configuration and menu-override files,
# preferring copies from the local install tree ($install_path) and
# falling back to downloading them.
Edit_Menu(){
    echo "$echo_prefix_cockpit Start to edit Cockpit origin Menu"
    if [ -f "$install_path/cockpit/cockpit.conf" ]; then
        cp -f "$install_path/cockpit/cockpit.conf" /etc/cockpit/cockpit.conf
    else
        echo "Download config file from URL..."
        curl -sSL $cockpit_config_github_page_url | sudo tee /etc/cockpit/cockpit.conf > /dev/null
        # NOTE(review): $? is tee's status; curl failures in the pipe are not caught.
        if [ $? -ne 0 ]; then
            echo "Failed to download cockpit.conf"
            exit 1
        fi
    fi
    # Menu-override files customize which Cockpit menu entries are shown.
    if test -d "$install_path/cockpit/menu_override"; then
        cp -r $install_path/cockpit/menu_override/* /etc/cockpit
    else
        echo "Download override files from URL..."
        Download_Menu_Override
    fi
}
# Upgrade an existing Cockpit installation, package by package on Debian
# family (apt), or via PackageKit (pkcon) elsewhere.
# Fixes: `apt update` and `apt --fix-broken install` now run under sudo
# like every other privileged command in this script, and --fix-broken
# install gets -y so it cannot stall on an interactive prompt.
Upgrade_Cockpit(){
    echo "$echo_prefix_cockpit Prepare to upgrade Cockpit"
    echo "You installed version: "
    Print_Version
    if command -v apt >/dev/null; then
        # Keep every apt/dpkg invocation non-interactive.
        export DEBIAN_FRONTEND=noninteractive
        sudo dpkg --configure -a
        sudo apt update -y
        sudo apt --fix-broken install -y
        # Install packages one at a time so a single failure does not
        # abort the rest of the upgrade.
        for pkg in $cockpit_packages
        do
            echo "Installing $pkg"
            sudo apt install -u -y "$pkg" || echo "$pkg failed to install"
        done
    else
        sudo pkcon refresh > /dev/null
        sudo pkcon get-updates > /dev/null
        sudo pkcon update $cockpit_packages -y
        sudo pkcon install $cockpit_packages -y --allow-untrusted --allow-reinstall
    fi
}
# Install Cockpit if absent, otherwise upgrade it; then apply the Websoft9
# firewall/SELinux/Cockpit configuration and restart the service.
# Helpers (cockpit_exist, check_ports, Restart_Cockpit, Print_Version)
# are defined earlier in this script.
Install_Cockpit(){
    if cockpit_exist; then
        Upgrade_Cockpit
        Restart_Cockpit
    else
        echo "$echo_prefix_cockpit Prepare to install Cockpit"
        # NOTE(review): this checks $port while the rest of the script uses
        # $cockpit_port — confirm the intended variable.
        check_ports $port
        export DEBIAN_FRONTEND=noninteractive
        sudo pkcon refresh > /dev/null
        sudo pkcon get-updates > /dev/null
        output=$(sudo pkcon install $cockpit_packages -y --allow-untrusted --allow-reinstall 2>&1)
        # Some systems queue the install as an "offline update"; fall back
        # to the package-manager-based upgrade path in that case.
        if echo "$output" | grep -q "offline"; then
            Upgrade_Cockpit
        fi
        Restart_Cockpit
    fi
    Set_Firewalld
    Set_Selinux
    Set_Cockpit
    Edit_Menu
    Restart_Cockpit
}
# Poll the local Cockpit endpoint until it responds or a 30-second
# timeout expires; exits non-zero on timeout.
Test_Cockpit(){
    echo "$echo_prefix_cockpit Test Cockpit console accessibility"
    test_cmd="curl localhost:$cockpit_port"
    # fix: print the command being run — previously `echo test_cmd`
    # printed the literal word "test_cmd" instead of the command.
    echo "$test_cmd"
    start_time=$(date +%s)
    timeout=30
    while true; do
        if $test_cmd >/dev/null 2>&1; then
            echo "Cockpit running OK..."
            break
        else
            current_time=$(date +%s)
            elapsed_time=$(($current_time - $start_time))
            if [ $elapsed_time -ge $timeout ]; then
                echo "Cockpit is not running... Timeout after waiting $timeout seconds."
                exit 1
            fi
            sleep 1
        fi
    done
    Print_Version
}
#### -------------- main() start here ------------------- ####
# Prepare PackageKit and repositories, then install/upgrade Cockpit and
# verify the console answers on its configured port.
Install_PackageKit
Set_Repository
Install_Cockpit
Test_Cockpit
# release package memory
sudo systemctl restart packagekit.service

View File

@ -0,0 +1,119 @@
#!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# Install and Upgade Docker for mosts of Linux
# This script is intended from https://get.docker.com and add below:
#
# - install or update Docker
# - support Redhat, CentOS-Stream, OracleLinux, AmazonLinux
#
# 1. download the script
#
# $ curl -fsSL https://websoft9.github.io/websoft9/install/install_docker.sh -o install_docker.sh
#
# 2. verify the script's content
#
# $ cat install_docker.sh
#
# 3. run the script with --dry-run to verify the steps it executes
#
# $ sh install_docker.sh --dry-run
#
# 4. run the script either as root, or using sudo to perform the installation.
#
# $ sudo sh install_docker.sh
# Packages that make up a complete Docker CE installation.
docker_packages="docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin"
# Prefix for every log line emitted by this script.
echo_prefix_docker=$'\n[Docker] - '
# Return 0 when the docker CLI exists and the Docker service is running
# (starting it if necessary); return 1 otherwise.
docker_exist() {
    # Check whether the `docker` command exists
    if ! command -v docker &> /dev/null; then
        echo "docker command not exist"
        return 1
    fi
    # Check whether the Docker service is currently running
    systemctl is-active docker.service &> /dev/null
    if [ $? -ne 0 ]; then
        echo "Docker service is not running, trying to start it..."
        systemctl start docker.service
        if [ $? -ne 0 ]; then
            echo "Failed to start Docker service."
            return 1
        fi
    fi
    return 0
}
# Install Docker with the method that suits the detected distribution:
# the official get.docker.com convenience script where supported, or the
# Docker CE yum repository for the remaining RedHat-family systems.
Install_Docker(){
    echo "$echo_prefix_docker Installing Docker for your system"
    # For redhat family
    if [[ -f /etc/redhat-release ]]; then
        # For CentOS, Fedora, or RHEL(only s390x)
        # (&& binds tighter than ||: matches "RHEL on s390x", CentOS, or Fedora)
        if [[ $(cat /etc/redhat-release) =~ "RHEL" ]] && [[ $(uname -m) == "s390x" ]] || [[ $(cat /etc/redhat-release) =~ "CentOS" ]] || [[ $(cat /etc/redhat-release) =~ "Fedora" ]]; then
            curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh
        else
            # For other distributions
            sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
            sudo yum install $docker_packages -y
        fi
    fi
    # For Ubuntu, Debian, or Raspbian
    if type apt >/dev/null 2>&1; then
        apt update
        # Wait for apt to be unlocked
        curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh
    fi
}
# Upgrade the Docker packages with whichever package manager is available
# (dnf, then yum, then apt). When Docker is missing entirely, fall back
# to a fresh install bounded by a 5-minute timeout.
Upgrade_Docker(){
    if docker_exist; then
        echo "$echo_prefix_docker Upgrading Docker for your system..."
        # Probe each package manager once and keep only its exit status.
        dnf --version >/dev/null 2>&1
        dnf_status=$?
        yum --version >/dev/null 2>&1
        yum_status=$?
        apt --version >/dev/null 2>&1
        apt_status=$?
        if [ $dnf_status -eq 0 ]; then
            sudo dnf update -y $docker_packages
        elif [ $yum_status -eq 0 ]; then
            sudo yum update -y $docker_packages
        elif [ $apt_status -eq 0 ]; then
            sudo apt -y install --only-upgrade $docker_packages
        else
            echo "Docker installed, but cannot upgrade"
        fi
    else
        # Export the function so the bash spawned by `timeout` can call it.
        export -f Install_Docker
        timeout 300 bash -c Install_Docker
        # timeout(1) exits with 124 when the command overran its limit.
        if [ $? -eq 124 ]; then
            echo "Install Docker timed out, Docker packages maybe can't download"
            exit 1
        fi
    fi
}
# Enable Docker at boot and (re)start it now. Requires both the daemon
# and the CLI to be present, as verified by docker_exist; aborts the
# script otherwise.
Start_Docker(){
    # should have Docker server and Docker cli
    if ! docker_exist; then
        echo "Docker not installed or start failed, exit..."
        exit 1
    fi
    echo "$echo_prefix_docker Starting Docker"
    sudo systemctl enable docker
    sudo systemctl restart docker
}
echo -e "\n\n-------- Docker --------"
# Install or upgrade Docker, then make sure the daemon is enabled and running.
Upgrade_Docker
Start_Docker

View File

@ -0,0 +1,109 @@
#!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# Command-line options
# ==========================================================
#
# --channel <release|dev>
# Use the --channel option to install a release(production) or dev distribution. default is release, for example:
#
# $ sudo sh install.sh --channel release
#
# ==============================================================================
# get input and define vars
# Parse command-line options; only --channel is recognized, everything
# else is silently skipped.
while [[ $# -gt 0 ]]; do
    case $1 in
        --channel)
            channel="$2"
            shift 2
            ;;
        *)
            shift
            ;;
    esac
done
# channel,source_github_pages,install_path from install.sh priority
if [ -z "$channel" ]; then
    channel="release"
fi
if [ -z "$source_github_pages" ]; then
    source_github_pages="https://websoft9.github.io/websoft9"
fi
if [ -z "$install_path" ]; then
    install_path="/data/websoft9/source"
fi
echo -e "\n\n-------- Plugins --------"
echo "Your installation parameters are as follows: "
echo "--channel: $channel"
echo "--install_path: $install_path"
# Remote locations and local paths consumed by the Python downloader below.
artifact_url="https://w9artifact.blob.core.windows.net/$channel/websoft9/plugin"
echo_prefix_plugins=$'\n[Plugins] - '
versions_local_file="$install_path/version.json"
versions_url="$source_github_pages/version.json"
file_suffix=".zip"
plugin_path="/usr/share/cockpit"
echo "$echo_prefix_plugins Starting download plugin and update it"
# The heredoc below is unquoted, so bash expands the shell placeholders
# before the Python interpreter ever sees the code.
python3 - << END
import requests
import json
import queue
import os
import sys
import zipfile
import io
def get_plugin_versions(versions_local_file, versions_url, artifact_url, file_suffix, plugin_path):
    # Prefer the locally installed version manifest; otherwise fetch it.
    if os.path.exists(versions_local_file):
        print("Get version file on your local install"+versions_local_file)
        with open(versions_local_file) as f:
            data = json.load(f)
    else:
        try:
            print("Get version file from URL " + versions_url)
            response = requests.get(versions_url, timeout=5) # Set timeout to 5 seconds
            data = json.loads(response.text)
        except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
            print("Error occurred while getting version file from URL: ", e)
            sys.exit(1) # Exit the program if an error occurred
    plugins = data.get('plugins', {})
    q = queue.Queue()
    for plugin, version in plugins.items():
        q.put(f'{artifact_url}/{plugin}/{plugin}-{version}{file_suffix}')
    return q
# Build the queue of plugin archive URLs from the version manifest
q = get_plugin_versions("${versions_local_file}", "${versions_url}", "${artifact_url}", "${file_suffix}", "${plugin_path}")
# Download and extract each plugin archive
while not q.empty():
    try:
        file_url = q.get()
        print(f"Downloading {file_url}...")
        response = requests.get(file_url, stream=True, timeout=120) # Set timeout to 120 seconds
        # Make sure the download was successful
        response.raise_for_status()
        with zipfile.ZipFile(io.BytesIO(response.content)) as z:
            z.extractall("${plugin_path}")
        print(f"Successfully extracted {file_url} to ${plugin_path}")
    except Exception as e:
        print(f"Error occurred while downloading or extracting file: {e}")
        sys.exit(1) # Exit the program if an error occurred
END
echo "Plugins install successfully..."

View File

@ -0,0 +1,27 @@
#!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# This script is on development, can not use now
# to do
# Stop all Podman pods and uninstall Podman via whichever system package
# manager is present; exits non-zero when none is found.
Remove_Podman(){
    echo "$echo_prefix_docker Try to remove Podman"
    podman pod stop --all
    # Remove Podman and its dependencies
    if [ -x "$(command -v dnf)" ]; then
        sudo dnf remove podman -y
    elif [ -x "$(command -v apt)" ]; then
        sudo apt remove podman -y
    elif [ -x "$(command -v zypper)" ]; then
        sudo zypper remove podman -y
    elif [ -x "$(command -v pacman)" ]; then
        sudo pacman -Rs podman --noconfirm
    else
        echo "Unable to find a suitable package manager to remove Podman."
        exit 1
    fi
    echo "Podman has been stopped and removed."
}

View File

@ -0,0 +1,72 @@
#!/bin/bash
# Define PATH
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
# Export PATH
export PATH
# Command-line options
# ==============================================================================
#
# --cockpit
# Use the --cockpit option to remove cockpit:
#
# $ sudo sh install.sh --cockpit
#
# --files
# Use the --files option remove files have installed:
#
# $ sudo sh install.sh --files
#
#
# ==============================================================================
# Paths and the package list shared by the removal helpers below.
install_path="/data/websoft9/source"
systemd_path="/opt/websoft9/systemd"
cockpit_plugin_path="/usr/share/cockpit"
cockpit_packages="cockpit cockpit-ws cockpit-bridge cockpit-system cockpit-pcp cockpit-storaged cockpit-networkmanager cockpit-session-recording cockpit-doc cockpit-packagekit cockpit-sosreport"
echo -e "\n---Remove Websoft9 backend service containers---"
# -v also removes the named volumes of the websoft9 compose project.
sudo docker compose -p websoft9 down -v
echo -e "\n---Remove Websoft9 systemd service---"
sudo systemctl disable websoft9
sudo systemctl stop websoft9
rm -rf /lib/systemd/system/websoft9.service
# Stop Cockpit, uninstall all of its packages and wipe its configuration.
remove_cockpit() {
    echo -e "\n---Remove Cockpit---"
    sudo systemctl stop cockpit.socket cockpit
    # `|| true` keeps the loop going when a package was never installed.
    for package in $cockpit_packages; do
        sudo pkcon remove $package -y || true
    done
    sudo rm -rf /etc/cockpit/*
}
# Delete the installed Websoft9 sources, systemd units and Cockpit plugins.
remove_files() {
    echo -e "\n---Remove files---"
    sudo rm -rf $install_path/* $systemd_path/* $cockpit_plugin_path/*
}
# Dispatch the optional cleanup steps requested on the command line.
for arg in "$@"
do
    case $arg in
        --cockpit)
            remove_cockpit
            # NOTE(review): shift does not affect the pre-expanded for-in
            # list; it is effectively a no-op here. Kept as-is.
            shift
            ;;
        --files)
            remove_files
            shift
            ;;
        *)
            echo "Unknown argument: $arg"
            exit 1
            ;;
    esac
done
echo -e "\nCongratulations, Websoft9 uninstall is complete!"

3
source/scripts/README.md Normal file
View File

@ -0,0 +1,3 @@
# Scripts
Some useful scripts for Websoft9 maintenance

View File

@ -0,0 +1,16 @@
#!/bin/bash
# Allocate a free HTTP port for the app named in $1: read APP_HTTP_PORT
# from the app's .env, bump it past any port already recorded in
# /tmp/port.txt, then register the chosen port and write it back.
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
app_port=$(cat /data/apps/$1/.env |grep APP_HTTP_PORT |cut -d= -f2 |sed -n 1p)
# Make sure the bookkeeping file exists so grep below cannot fail on a
# missing file.
touch /tmp/port.txt
while true
do
    # fix: -w matches the whole port number only — the previous substring
    # grep treated e.g. port 80 as taken whenever 8080 was recorded.
    app_port_lines=$(grep -w -c "$app_port" /tmp/port.txt)
    if [ "$app_port_lines" -gt 0 ];then
        app_port=`expr $app_port + 1`
    else
        echo $app_port >> /tmp/port.txt
        sed -i "s/APP_HTTP_PORT=.*/APP_HTTP_PORT=$app_port/g" /data/apps/$1/.env
        break
    fi
done

View File

@ -0,0 +1,511 @@
#!/bin/bash
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
set -e
# Please modify this version and time after update
# Print this script's version stamp (update manually on every release).
version(){
    # NOTE: "sudo echo" adds nothing here; kept to match the script's
    # existing convention of prefixing every command with sudo.
    sudo echo "version: 1.6"
    sudo echo "updated date: 2021-05-26"
}
# Tool list, apt or yum installation is only supported
tool_list=(
    git
    pwgen
    jq
    wget
    curl
    figlet
    boxes
)
help_str="
Usage:
-h, --help Show this help message
-r, --repo_name The name of the warehouse
-p, --package Making local packs,Only when you need to pack
-v, --version Show version info
example: bash install.sh -r template -p
"
# When true, build an offline installer package instead of installing.
make_package=false
# Get option parameters
getopt_cmd=$(getopt -o r:phv --long repo_name:,package,help,version -n "Parameter error" -- "$@")
eval set -- "$getopt_cmd"
while [ -n "$1" ]
do
    case "$1" in
        -r|--repo_name)
            repo_name=$2
            shift ;;
        -h|--help)
            sudo echo -e "$help_str"
            exit ;;
        -p|--package)
            make_package=true
            shift ;;
        -v|--version)
            version
            shift ;;
        --)
            # End-of-options marker produced by getopt.
            # NOTE(review): the shift after break is unreachable; kept as-is.
            break
            shift ;;
    esac
    shift
done
# A repository name is mandatory; exit quietly when -r was not given.
[ ! -n "$repo_name" ] && exit 1
# Quietly install the helper tools in tool_list with apt or yum.
install_tools(){
    if command -v apt > /dev/null;then
        sudo apt update 1>/dev/null 2>&1
        sudo apt install ${tool_list[*]} -y 1>/dev/null 2>&1
    elif command -v yum > /dev/null;then
        sudo yum clean all 1>/dev/null 2>&1
        sudo yum makecache 1>/dev/null 2>&1
        sudo yum install ${tool_list[*]} -y 1>/dev/null 2>&1
    fi
}
# Download a static Docker binary tarball into /tmp and generate the
# matching docker.service unit file for the offline installer package.
download_docker_source(){
    docker_download_url="https://download.docker.com/linux/static/stable/x86_64/docker-20.10.6.tgz"
    cd /tmp/
    sudo rm -rf docker.tgz
    sudo wget $docker_download_url -O docker.tgz 1>/dev/null 2>&1
    sudo echo -e "docker downloaded successfully"
    # The unit file below is written verbatim to /tmp/docker.service.
    # (The > redirection is performed by the calling shell, not by sudo.)
    sudo cat > /tmp/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
}
# Install Docker via the official get.docker.com script unless it is
# already present, then enable and start the daemon.
install_docker_script(){
    if command -v docker > /dev/null;then
        sudo echo -e `docker -v`
        sudo echo -e "Docker installed successfully"
    else
        sudo curl -fsSL https://get.docker.com -o get-docker.sh &>/dev/null && sh get-docker.sh &>/dev/null
        sudo rm -rf get-docker.sh
        sudo systemctl start docker
        sudo systemctl enable docker &>/dev/null
        sudo echo -e `docker -v`
        sudo echo -e "Docker installed successfully"
    fi
}
# Remove the statically installed Docker binaries and their unit file.
uninstall_docker(){
    sudo rm -f /etc/systemd/system/docker.service
    sudo rm -rf /usr/bin/docker*
    sudo systemctl daemon-reload
    sudo echo -e "Docker uninstalled successfully"
}
# Download the docker-compose v1 binary into /tmp for offline packaging.
download_docker_compose(){
    sudo curl -L "https://github.com/docker/compose/releases/download/1.29.0/docker-compose-$(uname -s)-$(uname -m)" -o /tmp/docker-compose 1>/dev/null 2>&1
    sudo chmod +x /tmp/docker-compose
    sudo echo -e "docker-compose downloaded successfully"
}
# Install the docker-compose v1 binary system-wide and link it onto PATH.
install_docker_compose(){
    # fix: run curl under sudo like the sibling download function —
    # /usr/local/bin is not writable by ordinary users.
    sudo curl -L "https://github.com/docker/compose/releases/download/1.29.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 1>/dev/null 2>&1
    sudo chmod +x /usr/local/bin/docker-compose
    # fix: the redirect target was the typo "/dev/mull", which silently
    # created a stray /dev/mull file instead of discarding output.
    sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose &>/dev/null || true
    sudo echo `docker-compose -v`
    sudo echo -e "docker-compose installed successfully"
}
# Clone docker-$repo_name, pull all of its compose images and save them
# into a single /tmp/$repo_name.tar archive for offline installation.
save_images(){
    sudo rm -rf /tmp/docker-$repo_name
    sudo git clone https://github.com/Websoft9/docker-$repo_name.git /tmp/docker-$repo_name || sudo git clone https://github.com.cnpmjs.org/Websoft9/docker-$repo_name.git /tmp/docker-$repo_name
    # Delete local images first so only this app's images end up in the tar.
    sudo docker rmi `docker images -aq` -f &>/dev/null || true
    cd /tmp/docker-$repo_name
    # Pull images and save images
    sudo systemctl start docker
    docker-compose pull
    sudo echo -e "In image packaging, there is a long wait..."
    sudo docker save $(docker images | grep -v REPOSITORY | awk 'BEGIN{OFS=":";ORS=" "}{print $1,$2}') -o /tmp/$repo_name.tar
    sudo echo -e "The image was successfully saved as a tar package"
}
# Clone docker-$repo_name into $install_dir, resolve database port
# conflicts against the host, generate random credentials (recorded in
# /credentials/password.txt), patch app-specific public-IP settings and
# finally start the stack with docker-compose.
installation(){
    sudo rm -rf $install_dir
    sudo mkdir -p $install_dir /credentials 1>/dev/null 2>&1 && cd $install_dir
    sudo git clone https://github.com/Websoft9/docker-$repo_name.git $install_dir || sudo git clone https://github.com.cnpmjs.org/Websoft9/docker-$repo_name.git $install_dir
    # Rename compose and env file name
    cd $install_dir
    # Stop the container and remove the Volumes for sec_installation
    cd $install_dir
    rm -rf volumes
    sudo docker-compose down -v 1>/dev/null 2>&1
    # Avoiding db port conflicts
    sudo echo -e "The database port is changing"
    db_port_lines=$(cat $install_dir/.env |grep DB_.*PORT |wc -l)
    db_port=$(cat $install_dir/.env |grep DB_.*PORT |cut -d= -f2 |sed -n 1p)
    # Bump the port until it is no longer bound on the host (per ss output).
    while true
    do
        if [ "$db_port_lines" -gt 0 ];then
            os_db_port_lines=$(ss -ntulp |grep -w "$db_port" |wc -l)
            if [ "$os_db_port_lines" -gt 0 ];then
                db_port=`expr $db_port + 1`
                sed -ri "s/(DB.*_PORT=).*/\1$db_port/" $install_dir/.env
            else
                break
            fi
        else
            break
        fi
    done
    # DB Random password
    sudo echo -e "---$repo_name Installation Wizard----" |boxes |tee -a /credentials/password.txt
    new_password=$(pwgen -ncCs 15 1)
    db_password_lines=`cat $install_dir/.env |grep DB.*PASSWORD |wc -l`
    db_user_lines=`cat $install_dir/.env |grep DB_.*_USER |wc -l`
    if [ $db_password_lines -gt 0 ];then
        if [ $db_user_lines -gt 0 ];then
            db_username=$(cat $install_dir/.env |grep DB_.*_USER |cut -d= -f2 |sed -n 1p )
            sudo echo "db username: $db_username" |tee -a /credentials/password.txt
        else
            sudo echo "db username: root" |tee -a /credentials/password.txt
        fi
        sudo sed -ri "s/(DB_.*_PASSWORD=).*/\1$new_password/" $install_dir/.env &>/dev/null || true
        sudo echo "db password: $new_password" |tee -a /credentials/password.txt
    else
        sudo echo "No database password" |tee -a /credentials/password.txt
    fi
    if [ "$db_port_lines" -gt 0 ];then
        sudo echo "db port: $db_port" |tee -a /credentials/password.txt
    fi
    sudo echo -e "************************************\n"|tee -a /credentials/password.txt
    # APP Random password
    app_password_lines=$(cat $install_dir/.env |grep -w "APP_PASSWORD_INIT" |wc -l)
    app_user_lines=$(cat $install_dir/.env |grep -w "APP_USER" |wc -l)
    app_port_lines=$(cat $install_dir/.env |grep -w "APP_PORT" |wc -l)
    if [ "$app_user_lines" -gt 0 ];then
        app_username=$(cat $install_dir/.env |grep -w "APP_USER" |cut -d= -f2 |sed -n 1p)
        sudo echo "$repo_name login username: $app_username" |tee -a /credentials/password.txt
    else
        sudo echo "$repo_name login username: default username, please see the $install_dir/.env" |tee -a /credentials/password.txt
    fi
    if [ "$app_password_lines" -gt 0 ];then
        sudo sed -ri "s/(APP_PASSWORD=).*/\1$new_password/" $install_dir/.env &>/dev/null || true
        sudo echo "$repo_name login password: $new_password" |tee -a /credentials/password.txt
    else
        sudo echo "$repo_name login password: default password, please see the $install_dir/.env" |tee -a /credentials/password.txt
    fi
    if [ "$app_port_lines" -gt 0 ];then
        app_port=$(cat $install_dir/.env |grep -w "APP_PORT" |cut -d= -f2 |sed -n 1p)
        sudo echo "$repo_name login port: $app_port" |tee -a /credentials/password.txt
    fi
    sudo echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" |tee -a /credentials/password.txt
    # Modify public network IP
    public_ip=`wget -O - https://download.websoft9.com/ansible/get_ip.sh 2>/dev/null |bash`
    # A few apps need the host's public IP baked into their .env settings.
    case $repo_name in
    "erpnext")
        sudo sed -i "s/APP_SITE_NAME.*/APP_SITE_NAME=$public_ip/g" $install_dir/.env
        sudo sed -i "s/APP_SITES=.*/APP_SITES=\`$public_ip\`/g" $install_dir/.env
        ;;
    "graylog")
        sudo sed -i "s#APP_HTTP_EXTERNAL_URI=.*#APP_HTTP_EXTERNAL_URI=http://$public_ip:9001/#g" $install_dir/.env
        ;;
    "rocketchat")
        sudo sed -i "s#APP_ROOT_URL=.*#APP_ROOT_URL=http://$public_ip:9001/#g" $install_dir/.env
        ;;
    *)
        ;;
    esac
    # Change compose cli environment
    export DOCKER_CLIENT_TIMEOUT=500
    export COMPOSE_HTTP_TIMEOUT=500
    sudo systemctl start docker
    sudo docker-compose up -d
    sleep 5
    sudo clear
    sudo echo -e "\n $repo_name installation complete\n" |boxes -d whirly
    sudo echo -e "\n Please go to $repo_name to view the README file"
    sudo docker ps -a
}
# Generate the pieces of the self-extracting offline installer in /tmp:
#   install.sh — runs on the target host (installs docker, loads images,
#                randomizes credentials, starts the stack)
#   README     — pointers to docs and the credentials file
#   setup.sh   — self-extraction stub prepended to the tarball
# Fixes vs. the previous revision (all inside the generated install.sh):
#   * two "/dev/mull" redirect typos (created a stray file) -> /dev/null
#   * `expr` was in unescaped backticks, so it ran at *generation* time
#     against the literal string "$db_port" and emitted an empty
#     "db_port=" line -> now escaped so it runs in the generated script
#   * awk '{print $1}' used an unescaped $1 (expanded at generation time
#     to nothing, printing the whole line) -> escaped to \$1
#   * the APP_USER lookup was missing its grep filter and so took the
#     first .env line's value -> aligned with installation()'s pipeline,
#     and the db-port check gained grep -w like installation()'s
add_install_script(){
    sudo rm -rf /tmp/install.sh /tmp/README /tmp/setup.sh
    # Mirror package installation script
    cat > /tmp/install.sh <<-EOF
# Install docker
sudo tar -xf docker.tgz
sudo systemctl stop docker &>/dev/null || true
sudo mv docker.service /etc/systemd/system/docker.service
sudo mv docker/* /usr/bin/ 1>/dev/null 2>&1
sudo systemctl daemon-reload
sudo systemctl start docker
sudo systemctl enable docker &>/dev/null
sudo echo \$(docker -v)
sudo echo -e "Docker was installed successfully"
# Install docker-compose
sudo mv docker-compose /usr/local/bin/docker-compose 1>/dev/null 2>&1
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose &>/dev/null || true
sudo echo \$(docker-compose -v)
sudo echo -e "docker-compose installed successfully"
# Pre_installation
sudo rm -rf $install_dir
sudo mkdir -p $install_dir /credentials 1>/dev/null 2>&1
sudo docker load -i $repo_name.tar
cur_dir=\$(pwd)
upper_dir=\$(dirname $install_dir)
sudo rm -rf \$upper_dir/$repo_name
cp=\$(which cp)
\$cp -rf \$cur_dir/docker-$repo_name \$upper_dir/$repo_name 1>/dev/null 2>&1
sudo mv README \$upper_dir/$repo_name/README 1>/dev/null 2>&1
# Stop the container and remove the Volumes for sec_installation
cd $install_dir
rm -rf volumes
sudo docker-compose down -v 1>/dev/null 2>&1
# Avoiding db port conflicts
db_port_lines=\$(cat $install_dir/.env |grep DB_.*PORT |wc -l)
db_port=\$(cat $install_dir/.env |grep DB_.*PORT |cut -d= -f2 |sed -n 1p)
while true
do
if [ "\$db_port_lines" -gt 0 ];then
os_db_port_lines=\$(ss -ntulp |grep -w "\$db_port" |wc -l)
if [ "\$os_db_port_lines" -gt 0 ];then
db_port=\$(expr \$db_port + 1)
sed -ri "s/(DB.*_PORT=).*/\1\$db_port/" $install_dir/.env
else
break
fi
else
break
fi
done
# DB Random password
sudo echo -e "---$repo_name Installation Wizard---\n" |tee -a /credentials/password.txt
new_password=\$(date | md5sum | awk '{print \$1}' |cut -c 3-18)
db_password_lines=\$(cat $install_dir/.env |grep DB.*PASSWORD |wc -l)
db_user_lines=\$(cat $install_dir/.env |grep DB_.*_USER |wc -l)
if [ \$db_password_lines -gt 0 ];then
if [ \$db_user_lines -gt 0 ];then
db_username=\$(cat $install_dir/.env |grep DB_.*_USER |cut -d= -f2 |sed -n 1p)
sudo echo "db username: \$db_username" |tee -a /credentials/password.txt
else
sudo echo "db username: root" |tee -a /credentials/password.txt
fi
sudo sed -ri "s/(DB_.*_PASSWORD=).*/\1\$new_password/" $install_dir/.env &>/dev/null || true
sudo echo "db password: \$new_password" |tee -a /credentials/password.txt
else
sudo echo "No database password" |tee -a /credentials/password.txt
fi
if [ "\$db_port_lines" -gt 0 ];then
sudo echo "db port: \$db_port" |tee -a /credentials/password.txt
fi
sudo echo -e "************************************\n" |tee -a /credentials/password.txt
# APP Random password
app_user_lines=\$(cat $install_dir/.env |grep -w "APP_USER" |wc -l)
app_password_lines=\$(cat $install_dir/.env |grep -w "APP_PASSWORD_INIT" |wc -l)
app_port_lines=\$(cat $install_dir/.env |grep -w "APP_PORT" |wc -l)
if [ "\$app_user_lines" -gt 0 ];then
app_username=\$(cat $install_dir/.env |grep -w "APP_USER" |cut -d= -f2 |sed -n 1p)
sudo echo "$repo_name login username: \$app_username" |tee -a /credentials/password.txt
else
sudo echo "$repo_name login username: default username, please see the $install_dir/.env" |tee -a /credentials/password.txt
fi
if [ "\$app_password_lines" -gt 0 ];then
sudo sed -ri "s/(APP_PASSWORD=).*/\1\$new_password/" $install_dir/.env &>/dev/null || true
sudo echo "$repo_name login password: \$new_password" |tee -a /credentials/password.txt
else
sudo echo "$repo_name login password: default password, please see the $install_dir/.env" |tee -a /credentials/password.txt
fi
if [ "\$app_port_lines" -gt 0 ];then
app_port=\$(cat $install_dir/.env |grep -w "APP_PORT" |cut -d= -f2 |sed -n 1p)
sudo echo "$repo_name login port: \$app_port" |tee -a /credentials/password.txt
fi
sudo rm -rf \$cur_dir/{$repo_name.tar,get-docker.sh,docker.service,docker-compose,docker.tgz,docker,install.sh,docker-$repo_name}
sudo echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" |tee -a /credentials/password.txt
# Modify public network IP
public_ip=\$(wget -O - https://download.websoft9.com/ansible/get_ip.sh 2>/dev/null | timeout 10 bash)
case $repo_name in
"erpnext")
sudo sed -i "s/APP_SITE_NAME.*/APP_SITE_NAME=\$public_ip/g" $install_dir/.env
sudo sed -i "s/APP_SITES=.*/APP_SITES=\`\$public_ip\`/g" $install_dir/.env
;;
"graylog")
sudo sed -i "s#APP_HTTP_EXTERNAL_URI=.*#APP_HTTP_EXTERNAL_URI=http://\$public_ip:9001/#g" $install_dir/.env
;;
"rocketchat")
sudo sed -i "s#APP_ROOT_URL=.*#APP_ROOT_URL=http://\$public_ip:9001/#g" $install_dir/.env
;;
*)
;;
esac
# Change compose cli environment
export DOCKER_CLIENT_TIMEOUT=500
export COMPOSE_HTTP_TIMEOUT=500
sudo systemctl start docker
sudo docker-compose up -d
sudo clear && sudo docker ps -a
sudo echo -e "\n $repo_name installation complete\n"
sudo echo -e "\n Please go to \$upper_dir/$repo_name to view the README file"
EOF
    # README file
    cat > /tmp/README <<-EOF
Document address:
http://support.websoft9.com/docs/$repo_name/zh/
Project address:
https://github.com/websoft9/docker-$repo_name
Password file location:
/credentials/password.txt
EOF
    # Unpack the pre-installed script (self-extraction stub: strips its
    # own 7-line header, untars the payload into ~ and runs install.sh)
    cat > /tmp/setup.sh <<-EOF
#!/bin/bash
line=\`wc -l \$0|awk '{print \$1}'\`
line=\`expr \$line - 7\`
tail -n \$line \$0 |tar zx -C ~
cd ~
./install.sh
ret=\$?
exit \$ret
EOF
    sudo chmod +x /tmp/install.sh
    sudo chmod +x /tmp/setup.sh
}
# Read installpath and compose_file from the repo's variables.json and
# verify its .env file is reachable; sets the $install_dir and
# $compose_file_name globals used by later steps.
get_install_information(){
    install_dir=`curl -s https://raw.githubusercontent.com/Websoft9/docker-$repo_name/main/variables.json |jq -r .installpath` 1>/dev/null
    compose_file_name=`curl -s https://raw.githubusercontent.com/Websoft9/docker-$repo_name/main/variables.json |jq -r .compose_file` 1>/dev/null
    compose_env_url="https://raw.githubusercontent.com/Websoft9/docker-$repo_name/main/.env"
    # `|| true` keeps set -e from aborting when grep finds no "200".
    url_status=`curl -s -m 5 -IL $compose_env_url |grep 200 || true`
    if [[ $url_status == "" ]];then
        sudo echo "The env file does not exist"
        exit 1
    fi
    # jq -r prints the string "null" when a key is missing.
    if [[ $install_dir == "null" || $compose_file_name = "null" ]];then
        sudo echo "variables.json has an undefined parameter"
        exit 1
    fi
    sudo echo install path $install_dir
    sudo echo compose filename $compose_file_name
}
# Bundle setup.sh plus the tarball into one self-extracting installer
# written to ~/install-$repo_name.
# NOTE(review): shares its name with the $make_package flag variable; bash
# keeps functions and variables in separate namespaces, so this works.
make_package(){
    sudo rm -rf /tmp/$repo_name.tgz install-$repo_name
    cd /tmp && tar -zcf /tmp/$repo_name.tgz ./{install.sh,README,$repo_name.tar,docker-$repo_name,docker.tgz,docker.service,docker-compose}
    sudo cat setup.sh $repo_name.tgz > ~/install-$repo_name
    sudo chmod +x ~/install-$repo_name
    cd ~ && sudo echo -e "Image packaging successfully" |boxes -d whirly
}
# Show the banner, verify that docker-$repo_name exists on GitHub and
# announce whether this run installs the app or builds a package.
print_information(){
    sudo figlet websoft9
    # Check if the repo exists
    repo_name_exists=$(curl -s --head https://github.com/Websoft9/docker-$repo_name | head -n 1 |grep -c '200')
    [ "$repo_name_exists" -ne 1 ] && sudo echo -e "The repo does not exist !" && exit 1
    # Print installation information
    if [ -n "$repo_name" ] && [ "$make_package" == false ];then
        sudo echo "docker-$repo_name to be installed..."
    fi
    if [ -n "$repo_name" ] && [ "$make_package" == true ];then
        sudo echo "$repo_name will be packaged as an image..."
    fi
}
# Entry point: either install the app directly, or build an offline
# installer package, depending on the -p/--package flag.
install_tools
print_information
get_install_information
if [ $make_package = false ]; then
    install_docker_script
    install_docker_compose
    installation
fi
if [ $make_package = true ]; then
    install_docker_script
    install_docker_compose
    download_docker_source
    download_docker_compose
    save_images
    add_install_script
    make_package
fi

Some files were not shown because too many files have changed in this diff Show More