mirror of
https://github.com/mofeng-git/One-KVM.git
synced 2026-03-15 15:36:44 +08:00
Compare commits
237 Commits
v241004
...
onecloud-2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6002dfd9c7 | ||
|
|
cf6addeb0f | ||
|
|
8560a46f17 | ||
|
|
d4b4cdc492 | ||
|
|
687cea3658 | ||
|
|
12c7566581 | ||
|
|
10fb78abe6 | ||
|
|
1642ce73a0 | ||
|
|
64c83be0a4 | ||
|
|
76ca81bbfd | ||
|
|
ed7b2e5b33 | ||
|
|
a92a6f2811 | ||
|
|
3d58f6dd21 | ||
|
|
50022e7353 | ||
|
|
ff4f04d936 | ||
|
|
413fce72ec | ||
|
|
842238009e | ||
|
|
7116da2511 | ||
|
|
a59fe84e26 | ||
|
|
48fe790897 | ||
|
|
e375e41fb6 | ||
|
|
8c8bf35d0b | ||
|
|
f032b8c798 | ||
|
|
c711683c63 | ||
|
|
06a64725be | ||
|
|
94897ab8c9 | ||
|
|
c6a5ffa0cf | ||
|
|
9da06d3f58 | ||
|
|
7b3335ea94 | ||
|
|
15dbe6265f | ||
|
|
b2c8ed6818 | ||
|
|
2acd613a38 | ||
|
|
0202a3c2d1 | ||
|
|
be3e97178d | ||
|
|
dafc8e3941 | ||
|
|
6dcc41601e | ||
|
|
b9af5f8825 | ||
|
|
00ed5197b0 | ||
|
|
b2c5305564 | ||
|
|
e9443119ec | ||
|
|
ab5608e3e0 | ||
|
|
78557b0c47 | ||
|
|
f042ed38e0 | ||
|
|
e1e3605630 | ||
|
|
3f3a834c0c | ||
|
|
8631ee8555 | ||
|
|
da4da975ef | ||
|
|
b6c73aceb7 | ||
|
|
d3549ab52b | ||
|
|
965e649f8c | ||
|
|
b49107ff6c | ||
|
|
e9cbf04ba5 | ||
|
|
3cf543a13e | ||
|
|
4d89d6b222 | ||
|
|
e7c06643b4 | ||
|
|
72c9ae3aa0 | ||
|
|
05bced1461 | ||
|
|
464672d1a0 | ||
|
|
1061a6ba01 | ||
|
|
be6843a486 | ||
|
|
f5de6a0f2e | ||
|
|
21b7429ffe | ||
|
|
9ef1a3665a | ||
|
|
10a7ca978b | ||
|
|
4488365dfb | ||
|
|
5a61ddecd3 | ||
|
|
a12163a797 | ||
|
|
43e6cd3e26 | ||
|
|
57518468ad | ||
|
|
5973b9e773 | ||
|
|
e120b50f50 | ||
|
|
f1256ee74a | ||
|
|
9aef70c43f | ||
|
|
f9584929e3 | ||
|
|
7aa963330c | ||
|
|
5d8633556e | ||
|
|
ebda7ea03d | ||
|
|
fed3bf1efd | ||
|
|
d52bb34bb9 | ||
|
|
6c5f0bf09f | ||
|
|
aae529f40b | ||
|
|
253231adac | ||
|
|
e491057891 | ||
|
|
3b5d62dd98 | ||
|
|
38346bece1 | ||
|
|
647d3f3961 | ||
|
|
287244d376 | ||
|
|
56438a372e | ||
|
|
ab08d823c4 | ||
|
|
5db37797ea | ||
|
|
eda7ab3a49 | ||
|
|
af2ee26a2f | ||
|
|
596334735e | ||
|
|
c8385213cc | ||
|
|
c009985247 | ||
|
|
7caa695d79 | ||
|
|
630610bc53 | ||
|
|
e0bbf6968e | ||
|
|
ada1c39eef | ||
|
|
e014cbcedf | ||
|
|
adbd4f242b | ||
|
|
2649a2fa01 | ||
|
|
8cca5a8cc7 | ||
|
|
70452f048b | ||
|
|
be21a420a0 | ||
|
|
e337e8d45c | ||
|
|
8a09505baf | ||
|
|
870af902a1 | ||
|
|
0dd117711d | ||
|
|
ed68449274 | ||
|
|
85a2f2367d | ||
|
|
7fd4dae3c6 | ||
|
|
a32dcd2e00 | ||
|
|
0cf5f8de9e | ||
|
|
7394588279 | ||
|
|
666f0b694a | ||
|
|
b8ddf7c2da | ||
|
|
1b9b27660a | ||
|
|
7c453b8b49 | ||
|
|
5450d7297c | ||
|
|
506d8a4a64 | ||
|
|
8929d0f311 | ||
|
|
d25e43c934 | ||
|
|
3cbeabe2e8 | ||
|
|
eec64ef57c | ||
|
|
baa0f7e226 | ||
|
|
3ec872878e | ||
|
|
6928fab16c | ||
|
|
8fdb7d7cd6 | ||
|
|
433232c845 | ||
|
|
b419641251 | ||
|
|
50819d0a35 | ||
|
|
a37b818039 | ||
|
|
8a81996e52 | ||
|
|
69cb9ac950 | ||
|
|
30378211b5 | ||
|
|
e9e7f9bd05 | ||
|
|
72dce4de89 | ||
|
|
f1503d69e0 | ||
|
|
de5cb73b93 | ||
|
|
0751b519c2 | ||
|
|
0010dd1d11 | ||
|
|
7ef2e16b51 | ||
|
|
1a13760df0 | ||
|
|
d93639ba8d | ||
|
|
1e277c0f06 | ||
|
|
95597b15e4 | ||
|
|
6fbfc2b343 | ||
|
|
b893f27285 | ||
|
|
28167c4b45 | ||
|
|
5aef0a2193 | ||
|
|
0fd1174bc5 | ||
|
|
d4fb640418 | ||
|
|
d6b61cb407 | ||
|
|
8192b1fa95 | ||
|
|
deba110cdf | ||
|
|
936cc21c40 | ||
|
|
47778bc48c | ||
|
|
c02bc53bc4 | ||
|
|
546ac24b93 | ||
|
|
2195acf2ff | ||
|
|
60f413c1f4 | ||
|
|
a84242c9bc | ||
|
|
efa865ec9c | ||
|
|
399712c684 | ||
|
|
1ebc08eae8 | ||
|
|
684b9f629e | ||
|
|
76d70d0838 | ||
|
|
a26aee3543 | ||
|
|
0e4a70e7b9 | ||
|
|
cda32a083f | ||
|
|
11d8f26874 | ||
|
|
2929a925a2 | ||
|
|
b67a232584 | ||
|
|
90d8e745e3 | ||
|
|
3852d0a456 | ||
|
|
f5bebbc43f | ||
|
|
6707cb9932 | ||
|
|
87c887a62b | ||
|
|
40505e7e00 | ||
|
|
c1f408ea1a | ||
|
|
5b0ca351d7 | ||
|
|
b6869cfbec | ||
|
|
1e11678260 | ||
|
|
8c0953aafc | ||
|
|
073f67ca1b | ||
|
|
cb5c1e9e6d | ||
|
|
8ce27dca3f | ||
|
|
f4ba4210e1 | ||
|
|
4e1d9815cd | ||
|
|
8209ee2eb0 | ||
|
|
5ed368769c | ||
|
|
1217144ecd | ||
|
|
842ddc91a1 | ||
|
|
7a53f14456 | ||
|
|
45270a09d7 | ||
|
|
f03ac695bd | ||
|
|
b3e836e553 | ||
|
|
c57334f214 | ||
|
|
b779c18530 | ||
|
|
6ccd91a8d1 | ||
|
|
bd127c3fd3 | ||
|
|
4bc2ca3c90 | ||
|
|
445e2e04e2 | ||
|
|
489601bb96 | ||
|
|
56da910ebe | ||
|
|
40393acf67 | ||
|
|
2123799e51 | ||
|
|
0bb35806ff | ||
|
|
bbbc908af1 | ||
|
|
8113c5748b | ||
|
|
aa1ca3b329 | ||
|
|
508d5fe606 | ||
|
|
bc22a28022 | ||
|
|
80aa9de4cc | ||
|
|
572a75d27b | ||
|
|
864a2af45e | ||
|
|
5f26fa4072 | ||
|
|
af9023e8aa | ||
|
|
5c3ac4c9c1 | ||
|
|
fb9d860cf2 | ||
|
|
5045d8b3d7 | ||
|
|
cc66fbf1df | ||
|
|
9dc2af0356 | ||
|
|
99fcbdda05 | ||
|
|
308911191a | ||
|
|
0c213add4a | ||
|
|
3837e1a1c8 | ||
|
|
8569ed406a | ||
|
|
4772c2b6c3 | ||
|
|
e6b775089f | ||
|
|
721a80ef03 | ||
|
|
a55948bf8e | ||
|
|
39422f37ac | ||
|
|
06b69d3dde | ||
|
|
c9405efa05 | ||
|
|
abedace4b3 |
@@ -1,7 +1,7 @@
|
||||
[bumpversion]
|
||||
commit = True
|
||||
tag = True
|
||||
current_version = 4.3
|
||||
current_version = 4.49
|
||||
parse = (?P<major>\d+)\.(?P<minor>\d+)(\.(?P<patch>\d+)(\-(?P<release>[a-z]+))?)?
|
||||
serialize =
|
||||
{major}.{minor}
|
||||
|
||||
36
.github/ISSUE_TEMPLATE/bug-反馈.md
vendored
Normal file
36
.github/ISSUE_TEMPLATE/bug-反馈.md
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
name: BUG 反馈
|
||||
about: 反馈你所遇到的软件 BUG 或其他错误
|
||||
title: "[BUG]"
|
||||
labels: BUG
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### **Bug 反馈**
|
||||
|
||||
**问题描述**
|
||||
请清晰描述您遇到的问题。例如:软件无法启动、特定功能报错或表现异常等。
|
||||
|
||||
**复现步骤**
|
||||
请提供可复现此问题的详细步骤:
|
||||
1. 前往 '...'
|
||||
2. 点击 '....'
|
||||
3. 滚动到 '....'
|
||||
4. 发现错误
|
||||
|
||||
**日志信息**
|
||||
如果程序崩溃或报错,请在此处粘贴相关的日志。
|
||||
- **整合包镜像**: `systemctl status kvmd` 或 `journalctl -xeu kvmd`
|
||||
- **Docker 镜像**: `docker logs kvmd`
|
||||
|
||||
**系统环境**
|
||||
- **运行方式**: (例如:整合包镜像 / Docker)
|
||||
- **镜像版本**: (Docker 镜像请提供版本号)
|
||||
- **操作系统**: (例如:Debian 12)
|
||||
|
||||
**尝试过的解决方法**
|
||||
请简要描述您为解决此问题已尝试过的方法及其结果。如果未尝试,可留空。
|
||||
|
||||
**补充信息**
|
||||
可以附加截图、录屏或其他有助于理解问题的信息。
|
||||
25
.github/ISSUE_TEMPLATE/功能请求与设备适配.md
vendored
Normal file
25
.github/ISSUE_TEMPLATE/功能请求与设备适配.md
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
---
|
||||
name: 功能请求与设备适配
|
||||
about: 请求新的功能或适配新的平台
|
||||
title: "[功能/适配]"
|
||||
labels: 特性
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**功能描述**
|
||||
请详细描述您期望的新功能应该是什么样子。
|
||||
- **对于新功能**:它应该如何工作?有哪些关键特性?
|
||||
- **对于新平台适配**:请提供该平台的具体信息(如设备型号、系统版本、相关链接等)。
|
||||
|
||||
**期望的效果**
|
||||
当该功能实现或平台适配完成后,您期望达到怎样的理想效果?可以像下面这样列出关键点:
|
||||
- [ ] 用户可以...
|
||||
- [ ] 系统能够...
|
||||
- [ ] 解决了之前的...问题
|
||||
|
||||
**我能提供的帮助**
|
||||
为了让这个想法更快成为现实,您可以提供哪些帮助?没有则填写无。
|
||||
- [ ] 我可以参与后续的功能测试
|
||||
- [ ] 我可以提供(临时的)远程调试环境(如 SSH、远程桌面)
|
||||
- [ ] 其他:...
|
||||
23
.github/workflows/arduino-hid.yml
vendored
23
.github/workflows/arduino-hid.yml
vendored
@@ -1,23 +0,0 @@
|
||||
name: Arduino HID CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
pull_request:
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
container:
|
||||
image: python
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Prepare platformio
|
||||
run: pip install platformio
|
||||
|
||||
- name: Build all
|
||||
run: make -C hid/arduino _build_all
|
||||
74
.github/workflows/build_img.yaml
vendored
Normal file
74
.github/workflows/build_img.yaml
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
name: Build One-KVM Image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
device_target:
|
||||
description: 'Target device name'
|
||||
required: true
|
||||
type: choice
|
||||
options:
|
||||
- onecloud
|
||||
- cumebox2
|
||||
- chainedbox
|
||||
- vm
|
||||
- e900v22c
|
||||
- octopus-flanet
|
||||
- all
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
container:
|
||||
image: node:18
|
||||
options: --user root --privileged
|
||||
env:
|
||||
TZ: Asia/Shanghai
|
||||
volumes:
|
||||
- /dev:/dev
|
||||
- /mnt/nfs/lfs/:/mnt/nfs/lfs/
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt-get update
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get install -y --no-install-recommends \
|
||||
sudo tzdata docker.io qemu-utils qemu-user-static binfmt-support parted e2fsprogs \
|
||||
curl tar python3 python3-pip rsync git android-sdk-libsparse-utils coreutils zerofree
|
||||
apt-get clean
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime
|
||||
echo $TZ > /etc/timezone
|
||||
update-binfmts --enable
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
echo "BUILD_DATE=$(date +%y%m%d)" >> $GITHUB_ENV
|
||||
|
||||
chmod +x build/build_img.sh
|
||||
|
||||
echo "Starting build for target: ${{ github.event.inputs.device_target }}"
|
||||
bash build/build_img.sh ${{ github.event.inputs.device_target }}
|
||||
|
||||
echo "Build script finished."
|
||||
env:
|
||||
CI_PROJECT_DIR: ${{ github.workspace }}
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: onekvm-image-${{ github.event.inputs.device_target }}-${{ env.BUILD_DATE }}
|
||||
path: |
|
||||
${{ github.workspace }}/output/*.img
|
||||
${{ github.workspace }}/output/*.vmdk
|
||||
${{ github.workspace }}/output/*.vdi
|
||||
${{ github.workspace }}/output/*.burn.img
|
||||
if-no-files-found: ignore
|
||||
env:
|
||||
CI_PROJECT_DIR: ${{ github.workspace }}
|
||||
83
.github/workflows/docker-build.yaml
vendored
Normal file
83
.github/workflows/docker-build.yaml
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
name: Build and Push Docker Image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version'
|
||||
required: true
|
||||
type: choice
|
||||
options:
|
||||
- dev
|
||||
- latest
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
container:
|
||||
image: node:18
|
||||
env:
|
||||
TZ: Asia/Shanghai
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt-get update
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get install -y --no-install-recommends \
|
||||
sudo tzdata docker.io qemu-utils qemu-user-static binfmt-support parted e2fsprogs \
|
||||
curl tar python3 python3-pip rsync git android-sdk-libsparse-utils coreutils zerofree
|
||||
apt-get clean
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime
|
||||
echo $TZ > /etc/timezone
|
||||
update-binfmts --enable
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
- name: Install Docker Buildx
|
||||
run: |
|
||||
# 创建插件目录
|
||||
mkdir -p ~/.docker/cli-plugins
|
||||
# 下载 buildx 二进制文件
|
||||
BUILDX_VERSION="v0.11.2"
|
||||
curl -L "https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.linux-amd64" -o ~/.docker/cli-plugins/docker-buildx
|
||||
chmod +x ~/.docker/cli-plugins/docker-buildx
|
||||
# 验证安装
|
||||
docker buildx version
|
||||
|
||||
#- name: Install QEMU
|
||||
# run: |
|
||||
# 安装 QEMU 模拟器
|
||||
#docker run --privileged --rm tonistiigi/binfmt --install all
|
||||
# 验证 QEMU 安装
|
||||
#docker buildx inspect --bootstrap
|
||||
|
||||
- name: Create and use new builder instance
|
||||
run: |
|
||||
# 创建新的 builder 实例
|
||||
docker buildx create --name mybuilder --driver docker-container --bootstrap
|
||||
# 使用新创建的 builder
|
||||
docker buildx use mybuilder
|
||||
# 验证支持的平台
|
||||
docker buildx inspect --bootstrap
|
||||
|
||||
- name: Build multi-arch image
|
||||
run: |
|
||||
# 构建多架构镜像
|
||||
docker buildx build \
|
||||
--platform linux/amd64,linux/arm64,linux/arm/v7 \
|
||||
--file ./build/Dockerfile \
|
||||
--tag silentwind/kvmd:${{ github.event.inputs.version }} \
|
||||
.
|
||||
|
||||
#- name: Login to DockerHub
|
||||
# uses: docker/login-action@v2
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
|
||||
41
.github/workflows/pico-hid-release.yml
vendored
41
.github/workflows/pico-hid-release.yml
vendored
@@ -1,41 +0,0 @@
|
||||
name: Pico HID Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Installing deps ...
|
||||
run: sudo apt-get install cmake gcc-arm-none-eabi build-essential
|
||||
|
||||
- name: Building ...
|
||||
run: make -C hid/pico all
|
||||
|
||||
- name: Releasing ...
|
||||
id: create_release
|
||||
uses: actions/create-release@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
tag_name: ${{ github.ref }}
|
||||
release_name: Release ${{ github.ref }}
|
||||
draft: false
|
||||
prerelease: false
|
||||
|
||||
- name: Uploading firmware ...
|
||||
id: upload-release-asset
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./hid/pico/hid.uf2
|
||||
asset_name: pico-hid.uf2
|
||||
asset_content_type: application/octet-stream
|
||||
20
.github/workflows/pico-hid.yml
vendored
20
.github/workflows/pico-hid.yml
vendored
@@ -1,20 +0,0 @@
|
||||
name: Pico HID CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
pull_request:
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Installing deps ...
|
||||
run: sudo apt-get install cmake gcc-arm-none-eabi build-essential
|
||||
|
||||
- name: Running tests ...
|
||||
run: make -C hid/pico all
|
||||
20
.github/workflows/tox.yml
vendored
20
.github/workflows/tox.yml
vendored
@@ -1,20 +0,0 @@
|
||||
name: TOX CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
pull_request:
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Building testenv ...
|
||||
run: make testenv
|
||||
|
||||
- name: Running tests ...
|
||||
run: make tox CMD="tox -c testenv/tox.ini"
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,5 +1,6 @@
|
||||
/pkg/
|
||||
/src/
|
||||
/src/**/*.img
|
||||
/src/tmp
|
||||
/site/
|
||||
/dist/
|
||||
/kvmd.egg-info/
|
||||
|
||||
16
LICENSE
16
LICENSE
@@ -1,11 +1,7 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
<<<<<<< HEAD
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
=======
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
>>>>>>> origin/dev
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
@@ -649,11 +645,7 @@ the "copyright" line and a pointer to where the full notice is found.
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
<<<<<<< HEAD
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
=======
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
>>>>>>> origin/dev
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
@@ -672,19 +664,11 @@ might be different; for a GUI interface, you would use an "about box".
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<<<<<<< HEAD
|
||||
<https://www.gnu.org/licenses/>.
|
||||
=======
|
||||
<http://www.gnu.org/licenses/>.
|
||||
>>>>>>> origin/dev
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<<<<<<< HEAD
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||
=======
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
||||
>>>>>>> origin/dev
|
||||
|
||||
33
Makefile
33
Makefile
@@ -86,7 +86,9 @@ tox: testenv
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.yaml /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*passwd /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.secret /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/edid/v2.hex /etc/kvmd/switch-edid.hex \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main/$(if $(P),$(P),$(DEFAULT_PLATFORM)).yaml /etc/kvmd/main.yaml \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main.yaml /etc/kvmd/main.yaml \
|
||||
&& mkdir -p /etc/kvmd/override.d \
|
||||
&& cp /src/testenv/$(if $(P),$(P),$(DEFAULT_PLATFORM)).override.yaml /etc/kvmd/override.yaml \
|
||||
&& cd /src \
|
||||
@@ -102,6 +104,7 @@ $(TESTENV_GPIO):
|
||||
|
||||
run: testenv $(TESTENV_GPIO)
|
||||
- $(DOCKER) run --rm --name kvmd \
|
||||
--ipc=shareable \
|
||||
--privileged \
|
||||
--volume `pwd`/testenv/run:/run/kvmd:rw \
|
||||
--volume `pwd`/testenv:/testenv:ro \
|
||||
@@ -128,6 +131,7 @@ run: testenv $(TESTENV_GPIO)
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.yaml /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*passwd /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.secret /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/edid/v2.hex /etc/kvmd/switch-edid.hex \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main/$(if $(P),$(P),$(DEFAULT_PLATFORM)).yaml /etc/kvmd/main.yaml \
|
||||
&& ln -s /testenv/web.css /etc/kvmd/web.css \
|
||||
&& mkdir -p /etc/kvmd/override.d \
|
||||
@@ -155,7 +159,9 @@ run-cfg: testenv
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.yaml /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*passwd /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.secret /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/edid/v2.hex /etc/kvmd/switch-edid.hex \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main/$(if $(P),$(P),$(DEFAULT_PLATFORM)).yaml /etc/kvmd/main.yaml \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main.yaml /etc/kvmd/main.yaml \
|
||||
&& mkdir -p /etc/kvmd/override.d \
|
||||
&& cp /testenv/$(if $(P),$(P),$(DEFAULT_PLATFORM)).override.yaml /etc/kvmd/override.yaml \
|
||||
&& $(if $(CMD),$(CMD),python -m kvmd.apps.kvmd -m) \
|
||||
@@ -178,6 +184,7 @@ run-ipmi: testenv
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.yaml /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*passwd /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.secret /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/edid/v2.hex /etc/kvmd/switch-edid.hex \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main/$(if $(P),$(P),$(DEFAULT_PLATFORM)).yaml /etc/kvmd/main.yaml \
|
||||
&& mkdir -p /etc/kvmd/override.d \
|
||||
&& cp /testenv/$(if $(P),$(P),$(DEFAULT_PLATFORM)).override.yaml /etc/kvmd/override.yaml \
|
||||
@@ -187,6 +194,7 @@ run-ipmi: testenv
|
||||
|
||||
run-vnc: testenv
|
||||
- $(DOCKER) run --rm --name kvmd-vnc \
|
||||
--ipc=container:kvmd \
|
||||
--volume `pwd`/testenv/run:/run/kvmd:rw \
|
||||
--volume `pwd`/testenv:/testenv:ro \
|
||||
--volume `pwd`/kvmd:/kvmd:ro \
|
||||
@@ -201,6 +209,7 @@ run-vnc: testenv
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.yaml /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*passwd /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.secret /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/edid/v2.hex /etc/kvmd/switch-edid.hex \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main/$(if $(P),$(P),$(DEFAULT_PLATFORM)).yaml /etc/kvmd/main.yaml \
|
||||
&& mkdir -p /etc/kvmd/override.d \
|
||||
&& cp /testenv/$(if $(P),$(P),$(DEFAULT_PLATFORM)).override.yaml /etc/kvmd/override.yaml \
|
||||
@@ -271,36 +280,24 @@ clean-all: testenv clean
|
||||
.PHONY: testenv
|
||||
|
||||
run-stage-0:
|
||||
$(DOCKER) buildx build -t registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd-stage-0 \
|
||||
--allow security.insecure --progress plain \
|
||||
--platform linux/amd64,linux/arm64,linux/arm/v7 \
|
||||
-f build/Dockerfile-stage-0 . \
|
||||
--push
|
||||
$(DOCKER) buildx build -t silentwind0/kvmd-stage-0 \
|
||||
$(DOCKER) buildx build -t registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd-stage-0 -t silentwind0/kvmd-stage-0 \
|
||||
--allow security.insecure --progress plain \
|
||||
--platform linux/amd64,linux/arm64,linux/arm/v7 \
|
||||
-f build/Dockerfile-stage-0 . \
|
||||
--push
|
||||
|
||||
run-build-dev:
|
||||
$(DOCKER) buildx build -t registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd:dev \
|
||||
--platform linux/amd64,linux/arm64,linux/arm/v7 \
|
||||
-f build/Dockerfile . \
|
||||
--push
|
||||
$(DOCKER) buildx build -t silentwind0/kvmd:dev \
|
||||
$(DOCKER) buildx build -t registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd:dev -t silentwind0/kvmd:dev \
|
||||
--platform linux/amd64,linux/arm64,linux/arm/v7 \
|
||||
--build-arg CACHEBUST=$(date +%s) \
|
||||
-f build/Dockerfile . \
|
||||
--push
|
||||
|
||||
run-build-release:
|
||||
$(DOCKER) buildx build -t registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd \
|
||||
--progress plain \
|
||||
--platform linux/amd64,linux/arm64,linux/arm/v7 \
|
||||
-f build/Dockerfile . \
|
||||
--push
|
||||
$(DOCKER) buildx build -t silentwind0/kvmd \
|
||||
$(DOCKER) buildx build -t registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd -t silentwind0/kvmd \
|
||||
--progress plain \
|
||||
--platform linux/amd64,linux/arm64,linux/arm/v7 \
|
||||
--build-arg CACHEBUST=$(date +%s) \
|
||||
-f build/Dockerfile . \
|
||||
--push
|
||||
|
||||
@@ -331,7 +328,7 @@ run-nogpio: testenv
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.yaml /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*passwd /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/*.secret /etc/kvmd \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main/$(if $(P),$(P),$(DEFAULT_PLATFORM)).yaml /etc/kvmd/main.yaml \
|
||||
&& cp /usr/share/kvmd/configs.default/kvmd/main.yaml /etc/kvmd/main.yaml \
|
||||
&& ln -s /testenv/web.css /etc/kvmd/web.css \
|
||||
&& mkdir -p /etc/kvmd/override.d \
|
||||
&& cp /testenv/$(if $(P),$(P),$(DEFAULT_PLATFORM)).override.yaml /etc/kvmd/override.yaml \
|
||||
|
||||
24
PKGBUILD
24
PKGBUILD
@@ -39,15 +39,15 @@ for _variant in "${_variants[@]}"; do
|
||||
pkgname+=(kvmd-platform-$_platform-$_board)
|
||||
done
|
||||
pkgbase=kvmd
|
||||
pkgver=4.3
|
||||
pkgver=4.49
|
||||
pkgrel=1
|
||||
pkgdesc="The main PiKVM daemon"
|
||||
url="https://github.com/pikvm/kvmd"
|
||||
license=(GPL)
|
||||
arch=(any)
|
||||
depends=(
|
||||
"python>=3.12"
|
||||
"python<3.13"
|
||||
"python>=3.13"
|
||||
"python<3.14"
|
||||
python-yaml
|
||||
python-aiohttp
|
||||
python-aiofiles
|
||||
@@ -77,6 +77,9 @@ depends=(
|
||||
python-ldap
|
||||
python-zstandard
|
||||
python-mako
|
||||
python-luma-oled
|
||||
python-pyusb
|
||||
python-pyudev
|
||||
"libgpiod>=2.1"
|
||||
freetype2
|
||||
"v4l-utils>=1.22.1-1"
|
||||
@@ -87,11 +90,11 @@ depends=(
|
||||
iproute2
|
||||
dnsmasq
|
||||
ipmitool
|
||||
"janus-gateway-pikvm>=0.14.2-3"
|
||||
"janus-gateway-pikvm>=1.3.0"
|
||||
certbot
|
||||
platform-io-access
|
||||
raspberrypi-utils
|
||||
"ustreamer>=6.11"
|
||||
"ustreamer>=6.26"
|
||||
|
||||
# Systemd UDEV bug
|
||||
"systemd>=248.3-2"
|
||||
@@ -131,6 +134,7 @@ conflicts=(
|
||||
python-aiohttp-pikvm
|
||||
platformio
|
||||
avrdude-pikvm
|
||||
kvmd-oled
|
||||
)
|
||||
makedepends=(
|
||||
python-setuptools
|
||||
@@ -164,7 +168,7 @@ package_kvmd() {
|
||||
install -DTm644 configs/os/tmpfiles.conf "$pkgdir/usr/lib/tmpfiles.d/kvmd.conf"
|
||||
|
||||
mkdir -p "$pkgdir/usr/share/kvmd"
|
||||
cp -r {hid,web,extras,contrib/keymaps} "$pkgdir/usr/share/kvmd"
|
||||
cp -r {switch,hid,web,extras,contrib/keymaps} "$pkgdir/usr/share/kvmd"
|
||||
find "$pkgdir/usr/share/kvmd/web" -name '*.pug' -exec rm -f '{}' \;
|
||||
|
||||
local _cfg_default="$pkgdir/usr/share/kvmd/configs.default"
|
||||
@@ -206,7 +210,7 @@ for _variant in "${_variants[@]}"; do
|
||||
cd \"kvmd-\$pkgver\"
|
||||
|
||||
pkgdesc=\"PiKVM platform configs - $_platform for $_board\"
|
||||
depends=(kvmd=$pkgver-$pkgrel \"linux-rpi-pikvm>=6.6.21-3\")
|
||||
depends=(kvmd=$pkgver-$pkgrel \"linux-rpi-pikvm>=6.6.45-10\" \"raspberrypi-bootloader-pikvm>=20240818-1\")
|
||||
|
||||
backup=(
|
||||
etc/sysctl.d/99-kvmd.conf
|
||||
@@ -250,8 +254,12 @@ for _variant in "${_variants[@]}"; do
|
||||
fi
|
||||
|
||||
if [[ $_platform =~ ^.*-hdmi$ ]]; then
|
||||
backup=(\"\${backup[@]}\" etc/kvmd/tc358743-edid.hex)
|
||||
backup=(\"\${backup[@]}\" etc/kvmd/tc358743-edid.hex etc/kvmd/switch-edid.hex)
|
||||
install -DTm444 configs/kvmd/edid/$_base.hex \"\$pkgdir/etc/kvmd/tc358743-edid.hex\"
|
||||
ln -s tc358743-edid.hex \"\$pkgdir/etc/kvmd/switch-edid.hex\"
|
||||
else
|
||||
backup=(\"\${backup[@]}\" etc/kvmd/switch-edid.hex)
|
||||
install -DTm444 configs/kvmd/edid/_no-1920x1200.hex \"\$pkgdir/etc/kvmd/switch-edid.hex\"
|
||||
fi
|
||||
|
||||
mkdir -p \"\$pkgdir/usr/share/kvmd\"
|
||||
|
||||
251
README.md
251
README.md
@@ -1,24 +1,97 @@
|
||||
<h3 align=center><img src="https://github.com/mofeng-git/Build-Armbian/assets/62919083/add9743a-0987-4e8a-b2cb-62121f236582" alt="logo" width="300"><br></h3>
|
||||
<h3 align=center><a href="https://github.com/mofeng-git/One-KVM/blob/master/README.md">简体中文</a> </h3>
|
||||
<p align=right> </p>
|
||||
<div align="center">
|
||||
<img src="https://github.com/mofeng-git/Build-Armbian/assets/62919083/add9743a-0987-4e8a-b2cb-62121f236582" alt="One-KVM Logo" width="300">
|
||||
<h1>One-KVM</h1>
|
||||
<p><strong>基于 PiKVM 的 DIY IP-KVM 解决方案</strong></p>
|
||||
|
||||
[](https://github.com/mofeng-git/One-KVM/stargazers)
|
||||
[](https://github.com/mofeng-git/One-KVM/network/members)
|
||||
[](https://github.com/mofeng-git/One-KVM/issues)
|
||||
[](https://github.com/mofeng-git/One-KVM/blob/master/LICENSE)
|
||||
|
||||
<p>
|
||||
<a href="https://one-kvm.mofeng.run">📖 详细文档</a> •
|
||||
<a href="https://kvmd-demo.mofeng.run">🚀 在线演示</a> •
|
||||
<a href="#快速开始">⚡ 快速开始</a> •
|
||||
<a href="#功能介绍">📊 功能介绍</a>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
### 介绍
|
||||
---
|
||||
|
||||
One-KVM 是基于廉价计算机硬件和 [PiKVM]((https://github.com/pikvm/pikvm)) 软件二次开发的 BIOS 级远程控制项目。可以实现远程管理服务器或工作站,无需在被控机安装软件调整设置,实现无侵入式控制,适用范围广泛。
|
||||
## 📋 目录
|
||||
|
||||
演示网站:[https://kvmd-demo.mofeng.run](https://kvmd-demo.mofeng.run)
|
||||
- [项目概述](#项目概述)
|
||||
- [功能介绍](#功能介绍)
|
||||
- [快速开始](#快速开始)
|
||||
- [贡献指南](#贡献指南)
|
||||
- [其他](#其他)
|
||||
|
||||

|
||||
## 📖 项目概述
|
||||
|
||||
**One-KVM** 是基于开源 [PiKVM](https://github.com/pikvm/pikvm) 项目进行二次开发的 DIY IP-KVM 解决方案。该方案利用成本较低的硬件设备,实现 BIOS 级别的远程服务器或工作站管理功能。
|
||||
|
||||
### 快速开始
|
||||
### 应用场景
|
||||
|
||||
**方式一:Docker 镜像部署(推荐)**
|
||||
- **家庭实验室主机管理** - 远程管理服务器和开发设备
|
||||
- **服务器远程维护** - 无需物理接触即可进行系统维护
|
||||
- **系统故障处理** - 远程解决系统启动和 BIOS 相关问题
|
||||
|
||||
Docker 版本可以使用 OTG 或 CH9329 作为虚拟 HID ,支持 amd64、arm64、armv7 架构的 Linux 系统安装。
|
||||

|
||||
|
||||
## 📊 功能介绍
|
||||
|
||||
### 核心特性
|
||||
|
||||
| 特性 | 描述 | 优势 |
|
||||
|------|------|------|
|
||||
| **无侵入性** | 无需在目标机器上安装软件或驱动 | 不依赖操作系统,可访问 BIOS/UEFI 设置 |
|
||||
| **成本效益** | 利用常见硬件设备(如电视盒子、开发板等) | 降低 KVM over IP 的实现成本 |
|
||||
| **功能扩展** | 在 PiKVM 基础上增加实用功能 | Docker 部署、视频录制、中文界面 |
|
||||
| **部署方式** | 支持 Docker 部署和硬件整合包 | 为特定硬件平台提供预配置方案 |
|
||||
|
||||
### 项目限制
|
||||
|
||||
本项目为个人维护的开源项目,资源有限,无商业运营计划
|
||||
|
||||
- 不提供内置免费内网穿透服务,相关问题请自行解决
|
||||
- 不提供24×7小时技术支持服务
|
||||
- 不承诺系统稳定性和合规性,使用风险需自行承担
|
||||
- 尽力优化用户体验,但仍需要一定的技术基础
|
||||
|
||||
### 功能对比
|
||||
|
||||
> 💡 **说明:** 以下表格展示了 One-KVM 与其他基于 PiKVM 项目的功能对比,仅供参考。如有遗漏或错误,欢迎联系更正。
|
||||
|
||||
| 功能特性 | One-KVM | PiKVM | ArmKVM | BLIKVM |
|
||||
|:--------:|:-------:|:-----:|:------:|:------:|
|
||||
| 简体中文 WebUI | ✅ | ❌ | ✅ | ✅ |
|
||||
| 远程视频流 | MJPEG/H.264 | MJPEG/H.264 | MJPEG/H.264 | MJPEG/H.264 |
|
||||
| H.264 视频编码 | CPU | GPU | 未知 | GPU |
|
||||
| 远程音频流 | ✅ | ✅ | ✅ | ✅ |
|
||||
| 远程鼠键控制 | OTG/CH9329 | OTG/CH9329/Pico/Bluetooth | OTG | OTG |
|
||||
| VNC 控制 | ✅ | ✅ | ✅ | ✅ |
|
||||
| ATX 电源控制 | GPIO/USB 继电器 | GPIO | GPIO | GPIO |
|
||||
| 虚拟存储驱动器挂载 | ✅ | ✅ | ✅ | ✅ |
|
||||
| 网页终端 | ✅ | ✅ | ✅ | ✅ |
|
||||
| Docker 部署 | ✅ | ❌ | ❌ | ❌ |
|
||||
| 商业化运营 | ❌ | ✅ | ✅ | ✅ |
|
||||
|
||||
## ⚡ 快速开始
|
||||
|
||||
### 方式一:Docker 镜像部署(推荐)
|
||||
|
||||
Docker 版本支持 OTG 或 CH9329 作为虚拟 HID,兼容 amd64、arm64、armv7 架构的 Linux 系统。
|
||||
|
||||
#### 一键脚本部署
|
||||
|
||||
```bash
|
||||
curl -sSL https://one-kvm.mofeng.run/quick_start.sh -o quick_start.sh && bash quick_start.sh
|
||||
```
|
||||
|
||||
#### 手动部署
|
||||
|
||||
**使用 OTG 作为虚拟 HID:**
|
||||
|
||||
如果使用 OTG 作为虚拟 HID,可以使用如下部署命令:
|
||||
```bash
|
||||
sudo docker run --name kvmd -itd --privileged=true \
|
||||
-v /lib/modules:/lib/modules:ro -v /dev:/dev \
|
||||
@@ -27,74 +100,170 @@ sudo docker run --name kvmd -itd --privileged=true \
|
||||
silentwind0/kvmd
|
||||
```
|
||||
|
||||
如果使用 CH9329,可以使用如下部署命令:
|
||||
**使用 CH9329 作为虚拟 HID:**
|
||||
|
||||
```bash
|
||||
sudo docker run --name kvmd -itd \
|
||||
--device /dev/video0:/dev/video0 \
|
||||
--device /dev/ttyUSB0:/dev/ttyUSB0 \
|
||||
--device /dev/snd:/dev/snd \
|
||||
-p 8080:8080 -p 4430:4430 -p 5900:5900 -p 623:623 \
|
||||
silentwind0/kvmd
|
||||
```
|
||||
|
||||
部署完成访问 https://IP:4430 ,点击信任自签证书,即可开始使用,默认账号密码:admin/admin。
|
||||
### 方式二:直刷 One-KVM 整合包
|
||||
|
||||
如无法访问可以使用 `sudo docker logs kvmd` 命令查看日志尝试修复、提交 issue 或在 QQ 群内寻求帮助。
|
||||
针对特定硬件平台,提供了预配置的 One-KVM 打包镜像,简化部署流程,实现开箱即用。
|
||||
|
||||
详细内容可以查阅 [One-KVM文档](https://one-kvm.mofeng.run/)。
|
||||
#### 固件下载
|
||||
|
||||
**方式二:直刷 One-KVM 镜像**
|
||||
**GitHub 下载:**
|
||||
- **GitHub Releases:** [https://github.com/mofeng-git/One-KVM/releases](https://github.com/mofeng-git/One-KVM/releases)
|
||||
|
||||
对于玩客云设备,本项目 Releases 页可以找到适配玩客云的 One-KVM 预编译镜像。镜像名称带 One-KVM 前缀、burn 后缀的为线刷镜像,可使用 USB_Burning_Tool 软件线刷至玩客云。预编译线刷镜像为开箱即用,刷好后启动设备就可以开始使用 One-KVM。
|
||||
**其他下载方式:**
|
||||
- **免登录下载:** [https://pan.huang1111.cn/s/mxkx3T1](https://pan.huang1111.cn/s/mxkx3T1) (由 Huang1111公益计划 提供)
|
||||
- **百度网盘:** [https://pan.baidu.com/s/166-2Y8PBF4SbHXFkGmFJYg?pwd=o9aj](https://pan.baidu.com/s/166-2Y8PBF4SbHXFkGmFJYg?pwd=o9aj) (提取码:o9aj)
|
||||
|
||||
#### 支持的硬件平台
|
||||
|
||||
**赞助**
|
||||
| 固件型号 | 固件代号 | 硬件配置 | 最新版本 | 状态 |
|
||||
|:--------:|:--------:|:--------:|:--------:|:----:|
|
||||
| 玩客云 | Onecloud | USB 采集卡、OTG | 241018 | ✅ |
|
||||
| 私家云二代 | Cumebox2 | USB 采集卡、OTG | 241004 | ✅ |
|
||||
| Vmare | Vmare-uefi | USB 采集卡、CH9329 | 241004 | ✅ |
|
||||
| Virtualbox | Virtualbox-uefi | USB 采集卡、CH9329 | 241004 | ✅ |
|
||||
| s905l3a 通用包 | E900v22c | USB 采集卡、OTG | 241004 | ✅ |
|
||||
| 我家云 | Chainedbox | USB 采集卡、OTG | 241004 | ✅ |
|
||||
| 龙芯久久派 | 2k0300 | USB 采集卡、CH9329 | 241025 | ✅ |
|
||||
|
||||
这个项目基于众多开源项目二次开发,作者为此花费了大量的时间和精力进行测试和维护。若此项目对您有用,您可以考虑通过 [为爱发电](https://afdian.com/a/silentwind) 赞助一笔小钱支持作者。作者将能够购买新的硬件(玩客云和周边设备)来测试和维护 One-KVM 的各种配置,并在项目上投入更多的时间。
|
||||
## 🤝 贡献指南
|
||||
|
||||
**感谢名单**
|
||||
欢迎各种形式的贡献!
|
||||
|
||||
### 如何贡献
|
||||
|
||||
1. **Fork 本仓库**
|
||||
2. **创建功能分支:** `git checkout -b feature/AmazingFeature`
|
||||
3. **提交更改:** `git commit -m 'Add some AmazingFeature'`
|
||||
4. **推送到分支:** `git push origin feature/AmazingFeature`
|
||||
5. **提交 Pull Request**
|
||||
|
||||
### 报告问题
|
||||
|
||||
如果您发现了问题,请:
|
||||
1. 使用 [GitHub Issues](https://github.com/mofeng-git/One-KVM/issues) 报告
|
||||
2. 提供详细的错误信息和复现步骤
|
||||
3. 包含您的硬件配置和系统信息
|
||||
|
||||
### 赞助支持
|
||||
|
||||
本项目基于多个优秀开源项目进行二次开发,作者投入了大量时间进行测试和维护。如果您觉得这个项目有价值,欢迎通过 **[为爱发电](https://afdian.com/a/silentwind)** 支持项目发展。
|
||||
|
||||
#### 感谢名单
|
||||
|
||||
<details>
|
||||
<summary><strong>点击查看感谢名单</strong></summary>
|
||||
|
||||
浩龙的电子嵌入式之路(赞助)
|
||||
- 浩龙的电子嵌入式之路
|
||||
|
||||
Tsuki(赞助)
|
||||
- Tsuki
|
||||
|
||||
H_xiaoming
|
||||
- H_xiaoming
|
||||
|
||||
0蓝蓝0
|
||||
- 0蓝蓝0
|
||||
|
||||
fairybl
|
||||
- fairybl
|
||||
|
||||
Will
|
||||
- Will
|
||||
|
||||
浩龙的电子嵌入式之路
|
||||
- 浩龙的电子嵌入式之路
|
||||
|
||||
自.知
|
||||
- 自.知
|
||||
|
||||
观棋不语٩ ི۶
|
||||
- 观棋不语٩ ི۶
|
||||
|
||||
爱发电用户_a57a4
|
||||
- 爱发电用户_a57a4
|
||||
|
||||
爱发电用户_2c769
|
||||
- 爱发电用户_2c769
|
||||
|
||||
霜序
|
||||
- 霜序
|
||||
|
||||
[远方](https://runyf.cn/)
|
||||
- 远方(闲鱼用户名:小远技术店铺)
|
||||
|
||||
爱发电用户_399fc
|
||||
- 爱发电用户_399fc
|
||||
|
||||
[斐斐の](https://www.mmuaa.com/)
|
||||
- 斐斐の
|
||||
|
||||
- 爱发电用户_09451
|
||||
|
||||
- 超高校级的錆鱼
|
||||
|
||||
- 爱发电用户_08cff
|
||||
|
||||
- guoke
|
||||
|
||||
- mgt
|
||||
|
||||
- 姜沢掵
|
||||
|
||||
- ui_beam
|
||||
|
||||
- 爱发电用户_c0dd7
|
||||
|
||||
- 爱发电用户_dnjK
|
||||
|
||||
- 忍者胖猪
|
||||
|
||||
- 永遠の願い
|
||||
|
||||
- 爱发电用户_GBrF
|
||||
|
||||
- 爱发电用户_fd65c
|
||||
|
||||
- 爱发电用户_vhNa
|
||||
|
||||
- 爱发电用户_Xu6S
|
||||
|
||||
- moss
|
||||
|
||||
- woshididi
|
||||
|
||||
- 爱发电用户_a0fd1
|
||||
|
||||
- 爱发电用户_f6bH
|
||||
|
||||
- 码农
|
||||
|
||||
- 爱发电用户_6639f
|
||||
|
||||
- jeron
|
||||
|
||||
- 爱发电用户_CN7y
|
||||
|
||||
- 爱发电用户_Up6w
|
||||
|
||||
- 爱发电用户_e3202
|
||||
|
||||
- ......
|
||||
|
||||
......
|
||||
</details>
|
||||
|
||||
本项目使用了下列开源项目:
|
||||
1. [pikvm/pikvm: Open and inexpensive DIY IP-KVM based on Raspberry Pi (github.com)](https://github.com/pikvm/pikvm)
|
||||
#### 赞助商
|
||||
|
||||
**状态**
|
||||
本项目得到以下赞助商的支持:
|
||||
|
||||
[](https://star-history.com/#mofeng-git/One-KVM&Date)
|
||||
**CDN 加速及安全防护:**
|
||||
- **[Tencent EdgeOne](https://edgeone.ai/zh?from=github)** - 提供 CDN 加速及安全防护服务
|
||||
|
||||

|
||||

|
||||
|
||||
**文件存储服务:**
|
||||
- **[Huang1111公益计划](https://pan.huang1111.cn/s/mxkx3T1)** - 提供免登录下载服务
|
||||
|
||||
## 📚 其他
|
||||
|
||||
### 使用的开源项目
|
||||
|
||||
本项目基于以下优秀开源项目进行二次开发:
|
||||
|
||||
- [PiKVM](https://github.com/pikvm/pikvm) - 开源的 DIY IP-KVM 解决方案
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM silentwind0/kvmd-stage-0 AS builder
|
||||
FROM registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd-stage-0 AS builder
|
||||
|
||||
FROM python:3.12.0rc2-slim-bookworm
|
||||
FROM python:3.11.11-slim-bookworm
|
||||
|
||||
LABEL maintainer="mofeng654321@hotmail.com"
|
||||
|
||||
@@ -12,30 +12,61 @@ COPY --from=builder /usr/lib/janus/transports/* /usr/lib/janus/transports/
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV TZ=Asia/Shanghai
|
||||
|
||||
RUN cp /tmp/lib/* /lib/*-linux-*/ \
|
||||
&& pip install --no-cache-dir --root-user-action=ignore --disable-pip-version-check /tmp/wheel/*.whl \
|
||||
&& rm -rf /tmp/lib /tmp/wheel
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
TZ=Asia/Shanghai
|
||||
|
||||
RUN sed -i 's/deb.debian.org/mirrors.tuna.tsinghua.edu.cn/' /etc/apt/sources.list.d/debian.sources \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends libxkbcommon-x11-0 nginx tesseract-ocr tesseract-ocr-eng tesseract-ocr-chi-sim iptables sudo curl kmod \
|
||||
libmicrohttpd12 libjansson4 libssl3 libsofia-sip-ua0 libglib2.0-0 libopus0 libogg0 libcurl4 libconfig9 libusrsctp2 libwebsockets17 libnss3 libasound2 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN if [ ${TARGETARCH} = arm ]; then ARCH=armhf; elif [ ${TARGETARCH} = arm64 ]; then ARCH=aarch64; elif [ ${TARGETARCH} = amd64 ]; then ARCH=x86_64; fi \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
libxkbcommon-x11-0 \
|
||||
nginx \
|
||||
tesseract-ocr \
|
||||
tesseract-ocr-eng \
|
||||
tesseract-ocr-chi-sim \
|
||||
iptables \
|
||||
sudo \
|
||||
curl \
|
||||
kmod \
|
||||
libmicrohttpd12 \
|
||||
libjansson4 \
|
||||
libssl3 \
|
||||
libsofia-sip-ua0 \
|
||||
libglib2.0-0 \
|
||||
libopus0 \
|
||||
libogg0 \
|
||||
libcurl4 \
|
||||
libconfig9 \
|
||||
libusrsctp2 \
|
||||
libwebsockets17 \
|
||||
libnss3 \
|
||||
libasound2 \
|
||||
nano \
|
||||
&& cp /tmp/lib/* /lib/*-linux-*/ \
|
||||
&& pip install --no-cache-dir --root-user-action=ignore --disable-pip-version-check /tmp/wheel/*.whl \
|
||||
&& pip install --no-cache-dir --root-user-action=ignore --disable-pip-version-check pyfatfs \
|
||||
&& if [ ${TARGETARCH} = arm ]; then ARCH=armhf; \
|
||||
elif [ ${TARGETARCH} = arm64 ]; then ARCH=aarch64; \
|
||||
elif [ ${TARGETARCH} = amd64 ]; then ARCH=x86_64; \
|
||||
fi \
|
||||
&& curl https://github.com/tsl0922/ttyd/releases/download/1.7.7/ttyd.$ARCH -L -o /usr/local/bin/ttyd \
|
||||
&& chmod +x /usr/local/bin/ttyd \
|
||||
&& adduser kvmd --gecos "" --disabled-password \
|
||||
&& ln -sf /usr/share/tesseract-ocr/*/tessdata /usr/share/tessdata \
|
||||
&& mkdir -p /etc/kvmd_backup/override.d /var/lib/kvmd/msd/images /var/lib/kvmd/msd/meta /var/lib/kvmd/pst/data /opt/vc/bin /run/kvmd /tmp/kvmd-nginx \
|
||||
&& touch /run/kvmd/ustreamer.sock
|
||||
&& mkdir -p /etc/kvmd_backup/override.d \
|
||||
/var/lib/kvmd/msd/images \
|
||||
/var/lib/kvmd/msd/meta \
|
||||
/var/lib/kvmd/pst/data \
|
||||
/var/lib/kvmd/msd/NormalFiles \
|
||||
/opt/vc/bin \
|
||||
/run/kvmd \
|
||||
/tmp/kvmd-nginx \
|
||||
&& touch /run/kvmd/ustreamer.sock \
|
||||
&& apt clean \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& rm -rf /tmp/lib /tmp/wheel
|
||||
|
||||
|
||||
COPY testenv/fakes/vcgencmd /usr/bin/
|
||||
COPY testenv/fakes/vcgencmd scripts/kvmd* /usr/bin/
|
||||
COPY extras/ /usr/share/kvmd/extras/
|
||||
COPY web/ /usr/share/kvmd/web/
|
||||
COPY scripts/kvmd-gencert /usr/share/kvmd/
|
||||
|
||||
@@ -1,70 +1,119 @@
|
||||
# syntax = docker/dockerfile:experimental
|
||||
FROM python:3.12.0rc2-slim-bookworm AS builder
|
||||
FROM debian:bookworm-slim AS builder
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
# 设置环境变量
|
||||
ENV DEBIAN_FRONTEND=noninteractive \
|
||||
PIP_NO_CACHE_DIR=1 \
|
||||
RUSTUP_DIST_SERVER="https://mirrors.tuna.tsinghua.edu.cn/rustup"
|
||||
|
||||
# 更新源并安装依赖
|
||||
RUN sed -i 's/deb.debian.org/mirrors.tuna.tsinghua.edu.cn/' /etc/apt/sources.list.d/debian.sources \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential libssl-dev libffi-dev python3-dev libevent-dev libjpeg-dev \
|
||||
libbsd-dev libudev-dev git pkg-config wget curl libmicrohttpd-dev libjansson-dev libssl-dev libsofia-sip-ua-dev libglib2.0-dev \
|
||||
libopus-dev libogg-dev libcurl4-openssl-dev liblua5.3-dev libconfig-dev libopus-dev libtool automake autoconf meson cmake \
|
||||
libx264-dev libyuv-dev libasound2-dev libspeex-dev libspeexdsp-dev libopus-dev \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
python3-full \
|
||||
python3-pip \
|
||||
python3-dev \
|
||||
build-essential \
|
||||
libssl-dev \
|
||||
libffi-dev \
|
||||
python3-dev \
|
||||
libevent-dev \
|
||||
libjpeg-dev \
|
||||
libbsd-dev \
|
||||
libudev-dev \
|
||||
git \
|
||||
pkg-config \
|
||||
wget \
|
||||
curl \
|
||||
libmicrohttpd-dev \
|
||||
libjansson-dev \
|
||||
libsofia-sip-ua-dev \
|
||||
libglib2.0-dev \
|
||||
libopus-dev \
|
||||
libogg-dev \
|
||||
libcurl4-openssl-dev \
|
||||
liblua5.3-dev \
|
||||
libconfig-dev \
|
||||
libtool \
|
||||
automake \
|
||||
autoconf \
|
||||
meson \
|
||||
cmake \
|
||||
libx264-dev \
|
||||
libyuv-dev \
|
||||
libasound2-dev \
|
||||
libspeex-dev \
|
||||
libspeexdsp-dev \
|
||||
libusb-1.0-0-dev \
|
||||
&& apt clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY build/cargo_config /tmp/config
|
||||
|
||||
# 配置 pip 源并安装 Python 依赖
|
||||
RUN --security=insecure pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple \
|
||||
&& if [ ${TARGETARCH} = arm ]; then \
|
||||
mkdir -p /root/.cargo \
|
||||
&& chmod 777 /root/.cargo && mount -t tmpfs none /root/.cargo \
|
||||
&& export RUSTUP_DIST_SERVER="https://mirrors.tuna.tsinghua.edu.cn/rustup" \
|
||||
#&& export RUSTUP_UPDATE_ROOT="https://mirrors.ustc.edu.cn/rust-static/rustup" \
|
||||
&& wget https://sh.rustup.rs -O /root/rustup-init.sh \
|
||||
&& wget https://sh.rustup.rs -O /root/rustup-init.sh \
|
||||
&& sh /root/rustup-init.sh -y \
|
||||
&& export PATH=$PATH:/root/.cargo/bin \
|
||||
&& cp /tmp/config /root/.cargo/config.toml; \
|
||||
fi \
|
||||
&& pip wheel --wheel-dir=/tmp/wheel/ cryptography
|
||||
|
||||
RUN pip install --no-cache-dir --root-user-action=ignore --disable-pip-version-check build \
|
||||
&& pip wheel --wheel-dir=/tmp/wheel/ aiofiles aiohttp appdirs asn1crypto async_lru async-timeout bottle cffi chardet click colorama \
|
||||
dbus_next gpiod hidapi idna mako marshmallow more-itertools multidict netifaces packaging passlib pillow ply psutil pycparser \
|
||||
pyelftools pyghmi pygments pyparsing pyotp qrcode requests semantic-version setproctitle setuptools six spidev \
|
||||
tabulate urllib3 wrapt xlib yarl pyserial pyyaml zstandard supervisor
|
||||
&& pip install --root-user-action=ignore --disable-pip-version-check --upgrade --break-system-packages build setuptools pip \
|
||||
&& pip wheel --wheel-dir=/tmp/wheel/ cryptography \
|
||||
&& pip wheel --wheel-dir=/tmp/wheel/ \
|
||||
aiofiles aiohttp appdirs asn1crypto async_lru async-timeout bottle cffi \
|
||||
chardet click colorama dbus_next gpiod hidapi idna mako marshmallow \
|
||||
more-itertools multidict netifaces packaging passlib pillow ply psutil \
|
||||
pycparser pyelftools pyghmi pygments pyparsing pyotp qrcode requests \
|
||||
semantic-version setproctitle six spidev tabulate urllib3 wrapt xlib \
|
||||
yarl pyserial pyyaml zstandard supervisor pyfatfs
|
||||
|
||||
# 编译安装 libnice、libsrtp、libwebsockets 和 janus-gateway
|
||||
RUN git clone --depth=1 https://gitlab.freedesktop.org/libnice/libnice /tmp/libnice \
|
||||
&& cd /tmp/libnice \
|
||||
&& meson --prefix=/usr build && ninja -C build && ninja -C build install
|
||||
|
||||
RUN curl https://github.com/cisco/libsrtp/archive/v2.2.0.tar.gz -L -o /tmp/libsrtp-2.2.0.tar.gz \
|
||||
&& meson --prefix=/usr build && ninja -C build && ninja -C build install \
|
||||
&& rm -rf /tmp/libnice \
|
||||
&& curl https://github.com/cisco/libsrtp/archive/v2.2.0.tar.gz -L -o /tmp/libsrtp-2.2.0.tar.gz \
|
||||
&& cd /tmp \
|
||||
&& tar xfv libsrtp-2.2.0.tar.gz \
|
||||
&& tar xf libsrtp-2.2.0.tar.gz \
|
||||
&& cd libsrtp-2.2.0 \
|
||||
&& ./configure --prefix=/usr --enable-openssl \
|
||||
&& make shared_library && make install
|
||||
|
||||
RUN git clone --depth=1 https://libwebsockets.org/repo/libwebsockets /tmp/libwebsockets \
|
||||
&& make shared_library -j && make install \
|
||||
&& cd /tmp \
|
||||
&& rm -rf /tmp/libsrtp* \
|
||||
&& git clone --depth=1 https://libwebsockets.org/repo/libwebsockets /tmp/libwebsockets \
|
||||
&& cd /tmp/libwebsockets \
|
||||
&& mkdir build && cd build \
|
||||
&& cmake -DLWS_MAX_SMP=1 -DLWS_WITHOUT_EXTENSIONS=0 -DCMAKE_INSTALL_PREFIX:PATH=/usr -DCMAKE_C_FLAGS="-fpic" .. \
|
||||
&& make && make install
|
||||
|
||||
RUN git clone --depth=1 https://github.com/meetecho/janus-gateway.git /tmp/janus-gateway \
|
||||
&& make -j && make install \
|
||||
&& cd /tmp \
|
||||
&& rm -rf /tmp/libwebsockets \
|
||||
&& git clone --depth=1 https://github.com/meetecho/janus-gateway.git /tmp/janus-gateway \
|
||||
&& cd /tmp/janus-gateway \
|
||||
&& sh autogen.sh \
|
||||
&& ./configure --enable-static --enable-websockets --enable-plugin-audiobridge \
|
||||
--disable-data-channels --disable-rabbitmq --disable-mqtt --disable-all-plugins --disable-all-loggers \
|
||||
--prefix=/usr \
|
||||
&& make && make install
|
||||
&& ./configure --enable-static --enable-websockets --enable-plugin-audiobridge \
|
||||
--disable-data-channels --disable-rabbitmq --disable-mqtt --disable-all-plugins \
|
||||
--disable-all-loggers --prefix=/usr \
|
||||
&& make -j && make install \
|
||||
&& cd /tmp \
|
||||
&& rm -rf /tmp/janus-gateway
|
||||
|
||||
# 编译 ustreamer
|
||||
RUN sed --in-place --expression 's|^#include "refcount.h"$|#include "../refcount.h"|g' /usr/include/janus/plugins/plugin.h \
|
||||
&& git clone --depth=1 https://github.com/mofeng-git/ustreamer /tmp/ustreamer \
|
||||
&& sed -i '68s/-Wl,-Bstatic//' /tmp/ustreamer/src/Makefile \
|
||||
&& make -j WITH_PYTHON=1 WITH_JANUS=1 WITH_LIBX264=1 -C /tmp/ustreamer \
|
||||
&& /tmp/ustreamer/ustreamer -v
|
||||
&& /tmp/ustreamer/ustreamer -v \
|
||||
&& cp /tmp/ustreamer/python/dist/*.whl /tmp/wheel/
|
||||
|
||||
# 复制必要的库文件
|
||||
RUN mkdir /tmp/lib \
|
||||
&& cd /lib/*-linux-*/ \
|
||||
&& cp libevent_core-*.so.7 libbsd.so.0 libevent_pthreads-*.so.7 libspeexdsp.so.1 libevent-*.so.7 libjpeg.so.62 libx264.so.164 libyuv.so.0 \
|
||||
libnice.so.10 /usr/lib/libsrtp2.so.1 /usr/lib/libwebsockets.so.19 \
|
||||
/tmp/lib/ \
|
||||
&& cp /tmp/ustreamer/python/dist/*.whl /tmp/wheel/
|
||||
&& cp libevent_core-*.so.7 libbsd.so.0 libevent_pthreads-*.so.7 libspeexdsp.so.1 \
|
||||
libevent-*.so.7 libjpeg.so.62 libx264.so.164 libyuv.so.0 libnice.so.10 \
|
||||
/usr/lib/libsrtp2.so.1 /usr/lib/libwebsockets.so.19 \
|
||||
/tmp/lib/
|
||||
|
||||
264
build/build_img.sh
Normal file → Executable file
264
build/build_img.sh
Normal file → Executable file
@@ -1,129 +1,169 @@
|
||||
#!/bin/bash
|
||||
|
||||
#File List
|
||||
#src
|
||||
#└── image
|
||||
# ├── cumebox2
|
||||
# │ └── Armbian_24.8.1_Khadas-vim1_bookworm_current_6.6.47_minimal.img
|
||||
# └── onecloud
|
||||
# ├── AmlImg_v0.3.1_linux_amd64
|
||||
# ├── Armbian_by-SilentWind_24.5.0-trunk_Onecloud_bookworm_legacy_5.9.0-rc7_minimal.burn.img
|
||||
# └── rc.local
|
||||
# --- 配置 ---
|
||||
# 允许通过环境变量覆盖默认路径
|
||||
SRCPATH="${SRCPATH:-/mnt/nfs/lfs/src}"
|
||||
BOOTFS="${BOOTFS:-/tmp/bootfs}"
|
||||
ROOTFS="${ROOTFS:-/tmp/rootfs}"
|
||||
OUTPUTDIR="${OUTPUTDIR:-/mnt/nfs/lfs/src/output}"
|
||||
TMPDIR="${TMPDIR:-$SRCPATH/tmp}"
|
||||
|
||||
#预处理镜像文件
|
||||
SRCPATH=../src
|
||||
ROOTFS=/tmp/rootfs
|
||||
$SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64 unpack $SRCPATH/image/onecloud/Armbian_by-SilentWind_24.5.0-trunk_Onecloud_bookworm_legacy_5.9.0-rc7_minimal.burn.img $SRCPATH/tmp
|
||||
simg2img $SRCPATH/tmp/7.rootfs.PARTITION.sparse $SRCPATH/tmp/rootfs.img
|
||||
dd if=/dev/zero of=/tmp/add.img bs=1M count=800 && cat /tmp/add.img >> $SRCPATH/tmp/rootfs.img && rm /tmp/add.img
|
||||
e2fsck -f $SRCPATH/tmp/rootfs.img && resize2fs $SRCPATH/tmp/rootfs.img
|
||||
export LC_ALL=C
|
||||
|
||||
#挂载镜像文件
|
||||
mkdir $ROOTFS
|
||||
sudo mount $SRCPATH/tmp/rootfs.img $ROOTFS || exit -1
|
||||
sudo mount -t proc proc $ROOTFS/proc || exit -1
|
||||
sudo mount -t sysfs sys $ROOTFS/sys || exit -1
|
||||
sudo mount -o bind /dev $ROOTFS/dev || exit -1
|
||||
# 全局变量
|
||||
LOOPDEV=""
|
||||
ROOTFS_MOUNTED=0
|
||||
BOOTFS_MOUNTED=0
|
||||
PROC_MOUNTED=0
|
||||
SYS_MOUNTED=0
|
||||
DEV_MOUNTED=0
|
||||
DOCKER_CONTAINER_NAME="to_build_rootfs_$$"
|
||||
PREBUILT_DIR="/tmp/prebuilt_binaries"
|
||||
|
||||
#准备文件
|
||||
sudo mkdir -p $ROOTFS/etc/kvmd/override.d $ROOTFS/etc/kvmd/vnc $ROOTFS/var/lib/kvmd/msd $ROOTFS/opt/vc/bin $ROOTFS/usr/share/kvmd \
|
||||
$ROOTFS/usr/share/janus/javascript $ROOTFS/usr/lib/ustreamer/janus $ROOTFS/run/kvmd $ROOTFS/var/lib/kvmd/msd/images $ROOTFS/var/lib/kvmd/msd/meta
|
||||
sudo cp -r ../One-KVM $ROOTFS/
|
||||
sudo cp $SRCPATH/image/onecloud/rc.local $ROOTFS/etc/
|
||||
sudo cp -r $ROOTFS/One-KVM/configs/kvmd/* $ROOTFS/One-KVM/configs/nginx $ROOTFS/One-KVM/configs/janus \
|
||||
$ROOTFS/etc/kvmd
|
||||
sudo cp -r $ROOTFS/One-KVM/web $ROOTFS/One-KVM/extras $ROOTFS/One-KVM/contrib/keymaps $ROOTFS/usr/share/kvmd
|
||||
sudo cp $ROOTFS/One-KVM/build/platform/onecloud $ROOTFS/usr/share/kvmd/platform
|
||||
sudo cp $ROOTFS/One-KVM/testenv/fakes/vcgencmd $ROOTFS/usr/bin/
|
||||
sudo cp -r $ROOTFS/One-KVM/testenv/js/* $ROOTFS/usr/share/janus/javascript/
|
||||
# --- 引入模块化脚本 ---
|
||||
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
|
||||
source "$SCRIPT_DIR/functions/common.sh"
|
||||
source "$SCRIPT_DIR/functions/devices.sh"
|
||||
source "$SCRIPT_DIR/functions/install.sh"
|
||||
source "$SCRIPT_DIR/functions/packaging.sh"
|
||||
|
||||
#安装依赖
|
||||
sudo chroot --userspec "root:root" $ROOTFS bash -c " \
|
||||
apt update \
|
||||
&& apt install -y python3-aiofiles python3-aiohttp python3-appdirs python3-asn1crypto python3-async-timeout \
|
||||
python3-bottle python3-cffi python3-chardet python3-click python3-colorama python3-cryptography python3-dateutil \
|
||||
python3-dbus python3-dev python3-hidapi python3-idna python3-libgpiod python3-mako python3-marshmallow python3-more-itertools \
|
||||
python3-multidict python3-netifaces python3-packaging python3-passlib python3-pillow python3-ply python3-psutil \
|
||||
python3-pycparser python3-pyelftools python3-pyghmi python3-pygments python3-pyparsing python3-requests \
|
||||
python3-semantic-version python3-setproctitle python3-setuptools python3-six python3-spidev python3-systemd \
|
||||
python3-tabulate python3-urllib3 python3-wrapt python3-xlib python3-yaml python3-yarl python3-pyotp python3-qrcode \
|
||||
python3-serial python3-zstandard python3-dbus-next \
|
||||
&& apt install -y nginx python3-pip python3-dev python3-build net-tools tesseract-ocr tesseract-ocr-eng tesseract-ocr-chi-sim \
|
||||
git gpiod libxkbcommon0 build-essential janus-dev libssl-dev libffi-dev libevent-dev libjpeg-dev libbsd-dev libudev-dev \
|
||||
pkg-config libx264-dev libyuv-dev libasound2-dev libsndfile-dev libspeexdsp-dev cpufrequtils iptables\
|
||||
&& apt clean "
|
||||
# 获取日期与Git版本
|
||||
GIT_COMMIT_ID=$(get_git_commit_id)
|
||||
DATE=$(date +%y%m%d)
|
||||
if [ -n "$GIT_COMMIT_ID" ]; then
|
||||
DATE="${DATE}-${GIT_COMMIT_ID}"
|
||||
fi
|
||||
|
||||
sudo chroot --userspec "root:root" $ROOTFS bash -c " \
|
||||
pip3 config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple \
|
||||
&& pip3 install --target=/usr/lib/python3/dist-packages --break-system-packages async-lru gpiod \
|
||||
&& pip3 cache purge "
|
||||
# --- 注册清理函数 ---
|
||||
# 在脚本退出、收到错误信号、中断信号、终止信号时执行 cleanup
|
||||
trap cleanup EXIT ERR INT TERM
|
||||
|
||||
sudo chroot --userspec "root:root" $ROOTFS sed --in-place --expression 's|^#include "refcount.h"$|#include "../refcount.h"|g' /usr/include/janus/plugins/plugin.h
|
||||
# --- 构建流程函数 ---
|
||||
|
||||
sudo chroot --userspec "root:root" $ROOTFS bash -c " \
|
||||
git clone --depth=1 https://github.com/mofeng-git/ustreamer /tmp/ustreamer \
|
||||
&& make -j WITH_PYTHON=1 WITH_JANUS=1 WITH_LIBX264=1 -C /tmp/ustreamer \
|
||||
&& mv /tmp/ustreamer/src/ustreamer.bin /usr/bin/ustreamer \
|
||||
&& mv /tmp/ustreamer/src/ustreamer-dump.bin /usr/bin/ustreamer-dump \
|
||||
&& chmod +x /usr/bin/ustreamer /usr/bin/ustreamer-dump \
|
||||
&& mv /tmp/ustreamer/janus/libjanus_ustreamer.so /usr/lib/ustreamer/janus \
|
||||
&& pip3 install --target=/usr/lib/python3/dist-packages --break-system-packages /tmp/ustreamer/python/dist/*.whl "
|
||||
build_target() {
|
||||
local target="$1"
|
||||
local build_time=$(date "+%Y-%m-%d %H:%M:%S")
|
||||
echo "=================================================="
|
||||
echo "信息:构建目标: $target"
|
||||
echo "信息:构建时间: $build_time"
|
||||
echo "=================================================="
|
||||
|
||||
#安装 kvmd 主程序
|
||||
sudo chroot --userspec "root:root" $ROOTFS bash -c " \
|
||||
cd /One-KVM \
|
||||
&& python3 setup.py install \
|
||||
&& bash scripts/kvmd-gencert --do-the-thing \
|
||||
&& bash scripts/kvmd-gencert --do-the-thing --vnc \
|
||||
&& kvmd-nginx-mkconf /etc/kvmd/nginx/nginx.conf.mako /etc/kvmd/nginx/nginx.conf \
|
||||
&& kvmd -m "
|
||||
# 设置全局变量,供后续函数使用
|
||||
TARGET_DEVICE_NAME="$target"
|
||||
NEED_PREPARE_DNS=false # 默认不需要准备 DNS
|
||||
|
||||
sudo chroot --userspec "root:root" $ROOTFS bash -c " \
|
||||
curl https://github.com/tsl0922/ttyd/releases/download/1.7.7/ttyd.armhf -L -o /usr/bin/ttyd \
|
||||
&& chmod +x /usr/bin/ttyd \
|
||||
&& systemd-sysusers /One-KVM/configs/os/kvmd-webterm.conf \
|
||||
&& mkdir -p /home/kvmd-webterm \
|
||||
&& chown kvmd-webterm /home/kvmd-webterm "
|
||||
case "$target" in
|
||||
onecloud)
|
||||
onecloud_rootfs
|
||||
local arch="armhf"
|
||||
local device_type="gpio"
|
||||
local network_type="systemd-networkd"
|
||||
;;
|
||||
cumebox2)
|
||||
cumebox2_rootfs
|
||||
local arch="aarch64"
|
||||
local device_type="video1"
|
||||
local network_type="" # 默认 NetworkManager
|
||||
NEED_PREPARE_DNS=true
|
||||
;;
|
||||
chainedbox)
|
||||
chainedbox_rootfs_and_fix_dtb
|
||||
local arch="aarch64"
|
||||
local device_type="video1"
|
||||
local network_type=""
|
||||
NEED_PREPARE_DNS=true
|
||||
;;
|
||||
vm)
|
||||
vm_rootfs
|
||||
local arch="amd64"
|
||||
local device_type=""
|
||||
local network_type=""
|
||||
NEED_PREPARE_DNS=true
|
||||
;;
|
||||
e900v22c)
|
||||
e900v22c_rootfs
|
||||
local arch="aarch64"
|
||||
local device_type="video1"
|
||||
local network_type=""
|
||||
NEED_PREPARE_DNS=true
|
||||
;;
|
||||
octopus-flanet)
|
||||
octopus_flanet_rootfs
|
||||
local arch="aarch64"
|
||||
local device_type="video1"
|
||||
local network_type=""
|
||||
NEED_PREPARE_DNS=true
|
||||
;;
|
||||
*)
|
||||
echo "错误:未知或不支持的目标 '$target'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
mount_rootfs
|
||||
|
||||
#服务自启
|
||||
sudo chroot --userspec "root:root" $ROOTFS bash -c " \
|
||||
cat /One-KVM/configs/os/sudoers/v2-hdmiusb >> /etc/sudoers \
|
||||
&& cat /One-KVM/configs/os/udev/v2-hdmiusb-generic.rules > /etc/udev/rules.d/99-kvmd.rules \
|
||||
&& echo 'libcomposite' >> /etc/modules \
|
||||
&& mv /usr/local/bin/kvmd* /usr/bin \
|
||||
&& cp /One-KVM/configs/os/services/* /etc/systemd/system/ \
|
||||
&& cp /One-KVM/configs/os/tmpfiles.conf /usr/lib/tmpfiles.d/ \
|
||||
&& chmod +x /etc/update-motd.d/* \
|
||||
&& echo 'kvmd ALL=(ALL) NOPASSWD: /etc/kvmd/custom_atx/gpio.sh' >> /etc/sudoers \
|
||||
&& echo 'kvmd ALL=(ALL) NOPASSWD: /etc/kvmd/custom_atx/usbrelay_hid.sh' >> /etc/sudoers \
|
||||
&& systemd-sysusers /One-KVM/configs/os/sysusers.conf \
|
||||
&& ln -sf /usr/share/tesseract-ocr/*/tessdata /usr/share/tessdata \
|
||||
&& sed -i 's/ch9329/otg/g' /etc/kvmd/override.yaml \
|
||||
&& sed -i 's/device: \/dev\/ttyUSB0//g' /etc/kvmd/override.yaml \
|
||||
&& sed -i 's/8080/80/g' /etc/kvmd/override.yaml \
|
||||
&& sed -i 's/4430/443/g' /etc/kvmd/override.yaml \
|
||||
&& sed -i 's/#type: otg/type: otg/g' /etc/kvmd/override.yaml \
|
||||
&& chown kvmd -R /var/lib/kvmd/msd/ \
|
||||
&& sed -i 's/localhost.localdomain/onecloud/g' /etc/kvmd/meta.yaml \
|
||||
&& systemctl enable kvmd kvmd-otg kvmd-nginx kvmd-vnc kvmd-ipmi kvmd-webterm kvmd-janus \
|
||||
&& systemctl disable nginx janus \
|
||||
&& rm -r /One-KVM "
|
||||
install_and_configure_kvmd "$arch" "$device_type" "$network_type"
|
||||
|
||||
write_meta "$target"
|
||||
|
||||
unmount_all
|
||||
|
||||
sudo chroot --userspec "root:root" $ROOTFS bash -c " \
|
||||
sed -i '2c ATX=GPIO' /etc/kvmd/atx.sh \
|
||||
&& sed -i 's/SHUTDOWNPIN/gpiochip1 7/g' /etc/kvmd/custom_atx/gpio.sh \
|
||||
&& sed -i 's/REBOOTPIN/gpiochip0 11/g' /etc/kvmd/custom_atx/gpio.sh "
|
||||
case "$target" in
|
||||
onecloud)
|
||||
pack_img_onecloud
|
||||
;;
|
||||
vm)
|
||||
pack_img "Vm"
|
||||
;;
|
||||
cumebox2)
|
||||
pack_img "Cumebox2"
|
||||
;;
|
||||
chainedbox)
|
||||
pack_img "Chainedbox"
|
||||
;;
|
||||
e900v22c)
|
||||
pack_img "E900v22c"
|
||||
;;
|
||||
octopus-flanet)
|
||||
pack_img "Octopus-Flanet"
|
||||
;;
|
||||
*)
|
||||
echo "错误:未知的打包类型 for '$target'" >&2
|
||||
;;
|
||||
esac
|
||||
|
||||
#卸载镜像
|
||||
sudo umount $ROOTFS/sys
|
||||
sudo umount $ROOTFS/dev
|
||||
sudo umount $ROOTFS/proc
|
||||
sudo umount $ROOTFS
|
||||
echo "=================================================="
|
||||
echo "信息:目标 $target 构建完成!"
|
||||
echo "=================================================="
|
||||
}
|
||||
|
||||
#打包镜像
|
||||
sudo rm $SRCPATH/tmp/7.rootfs.PARTITION.sparse
|
||||
sudo img2simg $SRCPATH/tmp/rootfs.img $SRCPATH/tmp/7.rootfs.PARTITION.sparse
|
||||
sudo $SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64 pack $SRCPATH/output/One-KVM_by-SilentWind_Onecloud_241004.burn.img $SRCPATH/tmp/
|
||||
sudo rm $SRCPATH/tmp/*
|
||||
# --- 主逻辑 ---
|
||||
|
||||
# 检查是否提供了目标参数
|
||||
if [ -z "$1" ]; then
|
||||
echo "用法: $0 <target|all>"
|
||||
echo "可用目标: onecloud, cumebox2, chainedbox, vm, e900v22c, octopus-flanet"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 设置脚本立即退出模式
|
||||
set -eo pipefail
|
||||
|
||||
# 检查必要的外部工具
|
||||
check_required_tools "$1"
|
||||
|
||||
# 执行构建
|
||||
if [ "$1" = "all" ]; then
|
||||
echo "信息:开始构建所有目标..."
|
||||
build_target "onecloud"
|
||||
build_target "cumebox2"
|
||||
build_target "chainedbox"
|
||||
build_target "vm"
|
||||
build_target "e900v22c"
|
||||
build_target "octopus-flanet"
|
||||
echo "信息:所有目标构建完成。"
|
||||
else
|
||||
build_target "$1"
|
||||
fi
|
||||
|
||||
exit 0
|
||||
195
build/functions/common.sh
Executable file
195
build/functions/common.sh
Executable file
@@ -0,0 +1,195 @@
|
||||
#!/bin/bash
|
||||
|
||||
# --- 辅助函数 ---
|
||||
|
||||
# 获取 Git 提交 ID
|
||||
get_git_commit_id() {
|
||||
if git rev-parse --is-inside-work-tree &>/dev/null; then
|
||||
git rev-parse --short HEAD 2>/dev/null || echo ""
|
||||
else
|
||||
echo ""
|
||||
fi
|
||||
}
|
||||
|
||||
# 查找并设置一个可用的 loop 设备
|
||||
find_loop_device() {
|
||||
echo "信息:查找可用的 loop 设备..."
|
||||
# 只使用 --find 来获取设备名
|
||||
LOOPDEV=$(sudo losetup --find)
|
||||
if [[ -z "$LOOPDEV" || ! -e "$LOOPDEV" ]]; then
|
||||
echo "错误:再次尝试后仍无法找到可用的 loop 设备。" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo "信息:找到可用 loop 设备名:$LOOPDEV"
|
||||
}
|
||||
|
||||
# 检查并创建目录
|
||||
ensure_dir() {
|
||||
if [[ ! -d "$1" ]]; then
|
||||
echo "信息:创建目录 $1 ..."
|
||||
sudo mkdir -p "$1" || { echo "错误:创建目录 $1 失败" >&2; exit 1; }
|
||||
fi
|
||||
}
|
||||
|
||||
# 执行 chroot 命令
|
||||
run_in_chroot() {
|
||||
echo "信息:在 chroot 环境 ($ROOTFS) 中执行命令..."
|
||||
sudo chroot --userspec "root:root" "$ROOTFS" bash -ec "$1" || { echo "错误:在 chroot 环境中执行命令失败" >&2; exit 1; }
|
||||
echo "信息:chroot 命令执行完成。"
|
||||
}
|
||||
|
||||
# --- 清理函数 ---
|
||||
cleanup() {
|
||||
echo "信息:执行清理操作..."
|
||||
# 尝试卸载 chroot 环境下的挂载点
|
||||
if [[ "$DEV_MOUNTED" -eq 1 ]]; then
|
||||
echo "信息:卸载 $ROOTFS/dev ..."
|
||||
sudo umount "$ROOTFS/dev" || echo "警告:卸载 $ROOTFS/dev 失败,可能已被卸载"
|
||||
DEV_MOUNTED=0
|
||||
fi
|
||||
if [[ "$SYS_MOUNTED" -eq 1 ]]; then
|
||||
echo "信息:卸载 $ROOTFS/sys ..."
|
||||
sudo umount "$ROOTFS/sys" || echo "警告:卸载 $ROOTFS/sys 失败,可能已被卸载"
|
||||
SYS_MOUNTED=0
|
||||
fi
|
||||
if [[ "$PROC_MOUNTED" -eq 1 ]]; then
|
||||
echo "信息:卸载 $ROOTFS/proc ..."
|
||||
sudo umount "$ROOTFS/proc" || echo "警告:卸载 $ROOTFS/proc 失败,可能已被卸载"
|
||||
PROC_MOUNTED=0
|
||||
fi
|
||||
|
||||
# 尝试卸载主根文件系统
|
||||
if [[ "$ROOTFS_MOUNTED" -eq 1 && -d "$ROOTFS" ]]; then
|
||||
echo "信息:卸载 $ROOTFS ..."
|
||||
sudo umount "$ROOTFS" || sudo umount -l "$ROOTFS" || echo "警告:卸载 $ROOTFS 失败"
|
||||
ROOTFS_MOUNTED=0
|
||||
fi
|
||||
# 尝试卸载引导文件系统 (如果使用)
|
||||
if [[ "$BOOTFS_MOUNTED" -eq 1 && -d "$BOOTFS" ]]; then
|
||||
echo "信息:卸载 $BOOTFS ..."
|
||||
sudo umount "$BOOTFS" || sudo umount -l "$BOOTFS" || echo "警告:卸载 $BOOTFS 失败"
|
||||
BOOTFS_MOUNTED=0
|
||||
fi
|
||||
|
||||
# 尝试分离 loop 设备
|
||||
if [[ -n "$LOOPDEV" && -b "$LOOPDEV" ]]; then
|
||||
echo "信息:尝试 zerofree $LOOPDEV ..."
|
||||
sudo zerofree "$LOOPDEV" || echo "警告:zerofree $LOOPDEV 失败,可能文件系统不支持或未干净卸载"
|
||||
echo "信息:分离 loop 设备 $LOOPDEV ..."
|
||||
sudo losetup -d "$LOOPDEV" || echo "警告:分离 $LOOPDEV 失败"
|
||||
LOOPDEV=""
|
||||
fi
|
||||
|
||||
# 尝试删除 Docker 容器
|
||||
echo "信息:检查并删除 Docker 容器 $DOCKER_CONTAINER_NAME ..."
|
||||
if sudo docker ps -a --format '{{.Names}}' | grep -q "^${DOCKER_CONTAINER_NAME}$"; then
|
||||
sudo docker rm -f "$DOCKER_CONTAINER_NAME" || echo "警告:删除 Docker 容器 $DOCKER_CONTAINER_NAME 失败"
|
||||
else
|
||||
echo "信息:Docker 容器 $DOCKER_CONTAINER_NAME 不存在或已被删除。"
|
||||
fi
|
||||
|
||||
# 清理临时目录和挂载点目录
|
||||
echo "信息:清理临时文件和目录..."
|
||||
sudo rm -rf "$PREBUILT_DIR"
|
||||
# 只删除挂载点目录本身
|
||||
if [[ -d "$ROOTFS" ]]; then
|
||||
sudo rmdir "$ROOTFS" || echo "警告:删除目录 $ROOTFS 失败,可能非空"
|
||||
fi
|
||||
if [[ -d "$BOOTFS" ]]; then
|
||||
sudo rmdir "$BOOTFS" || echo "警告:删除目录 $BOOTFS 失败,可能非空"
|
||||
fi
|
||||
|
||||
echo "信息:清理完成。"
|
||||
}
|
||||
|
||||
# 在打包镜像前调用此函数,确保干净卸载所有挂载点和loop设备
|
||||
unmount_all() {
|
||||
echo "信息:执行卸载操作,准备打包..."
|
||||
# 卸载 chroot 环境下的挂载点
|
||||
if [[ "$DEV_MOUNTED" -eq 1 ]]; then
|
||||
echo "信息:卸载 $ROOTFS/dev ..."
|
||||
sudo umount "$ROOTFS/dev" || echo "警告:卸载 $ROOTFS/dev 失败,可能已被卸载"
|
||||
DEV_MOUNTED=0
|
||||
fi
|
||||
if [[ "$SYS_MOUNTED" -eq 1 ]]; then
|
||||
echo "信息:卸载 $ROOTFS/sys ..."
|
||||
sudo umount "$ROOTFS/sys" || echo "警告:卸载 $ROOTFS/sys 失败,可能已被卸载"
|
||||
SYS_MOUNTED=0
|
||||
fi
|
||||
if [[ "$PROC_MOUNTED" -eq 1 ]]; then
|
||||
echo "信息:卸载 $ROOTFS/proc ..."
|
||||
sudo umount "$ROOTFS/proc" || echo "警告:卸载 $ROOTFS/proc 失败,可能已被卸载"
|
||||
PROC_MOUNTED=0
|
||||
fi
|
||||
|
||||
# 卸载主根文件系统
|
||||
if [[ "$ROOTFS_MOUNTED" -eq 1 && -d "$ROOTFS" ]]; then
|
||||
echo "信息:卸载 $ROOTFS ..."
|
||||
sudo umount "$ROOTFS" || sudo umount -l "$ROOTFS" || echo "警告:卸载 $ROOTFS 失败"
|
||||
ROOTFS_MOUNTED=0
|
||||
fi
|
||||
|
||||
# 尝试分离 loop 设备前执行 zerofree(如果文件系统支持)
|
||||
if [[ -n "$LOOPDEV" && -b "$LOOPDEV" ]]; then
|
||||
echo "信息:尝试 zerofree $LOOPDEV ..."
|
||||
sudo zerofree "$LOOPDEV" || echo "警告:zerofree $LOOPDEV 失败,可能文件系统不支持或未干净卸载"
|
||||
echo "信息:分离 loop 设备 $LOOPDEV ..."
|
||||
sudo losetup -d "$LOOPDEV" || echo "警告:分离 $LOOPDEV 失败"
|
||||
LOOPDEV=""
|
||||
fi
|
||||
|
||||
sudo rm -rf "$PREBUILT_DIR"
|
||||
|
||||
echo "信息:卸载操作完成,可以安全打包镜像。"
|
||||
}
|
||||
|
||||
# 挂载根文件系统
|
||||
mount_rootfs() {
|
||||
echo "信息:挂载根文件系统到 $ROOTFS ..."
|
||||
ensure_dir "$ROOTFS"
|
||||
sudo mount "$LOOPDEV" "$ROOTFS" || { echo "错误:挂载 $LOOPDEV 到 $ROOTFS 失败" >&2; exit 1; }
|
||||
ROOTFS_MOUNTED=1
|
||||
|
||||
echo "信息:挂载 proc, sys, dev 到 chroot 环境..."
|
||||
ensure_dir "$ROOTFS/proc"
|
||||
sudo mount -t proc proc "$ROOTFS/proc" || { echo "错误:挂载 proc 到 $ROOTFS/proc 失败" >&2; exit 1; }
|
||||
PROC_MOUNTED=1
|
||||
|
||||
ensure_dir "$ROOTFS/sys"
|
||||
sudo mount -t sysfs sys "$ROOTFS/sys" || { echo "错误:挂载 sys 到 $ROOTFS/sys 失败" >&2; exit 1; }
|
||||
SYS_MOUNTED=1
|
||||
|
||||
ensure_dir "$ROOTFS/dev"
|
||||
sudo mount -o bind /dev "$ROOTFS/dev" || { echo "错误:绑定挂载 /dev 到 $ROOTFS/dev 失败" >&2; exit 1; }
|
||||
DEV_MOUNTED=1
|
||||
echo "信息:根文件系统及虚拟文件系统挂载完成。"
|
||||
}
|
||||
|
||||
# 设置元数据
|
||||
write_meta() {
|
||||
local hostname="$1"
|
||||
echo "信息:在 chroot 环境中设置主机名/元数据为 $hostname ..."
|
||||
run_in_chroot "sed -i 's/localhost.localdomain/$hostname/g' /etc/kvmd/meta.yaml"
|
||||
}
|
||||
|
||||
# 检查必要的外部工具
|
||||
check_required_tools() {
|
||||
local required_tools="sudo docker losetup mount umount parted e2fsck resize2fs qemu-img curl tar python3 pip3 rsync git simg2img img2simg dd cat rm mkdir mv cp sed chmod chown ln grep printf id"
|
||||
|
||||
for cmd in $required_tools; do
|
||||
if ! command -v "$cmd" &> /dev/null; then
|
||||
echo "错误:必需的命令 '$cmd' 未找到。请安装相应软件包。" >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# 检查特定工具 (如果脚本中使用了)
|
||||
if ! command -v "$SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64" &> /dev/null && [[ "$1" == "onecloud" || "$1" == "all" ]]; then
|
||||
if [ -f "$SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64" ]; then
|
||||
echo "信息:找到 AmlImg 工具,尝试设置执行权限..."
|
||||
sudo chmod +x "$SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64" || echo "警告:设置 AmlImg 执行权限失败"
|
||||
else
|
||||
echo "错误:构建 onecloud 需要 '$SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64',但未找到。" >&2
|
||||
fi
|
||||
fi
|
||||
}
|
||||
212
build/functions/devices.sh
Executable file
212
build/functions/devices.sh
Executable file
@@ -0,0 +1,212 @@
|
||||
#!/bin/bash
|
||||
|
||||
# --- 设备特定的 Rootfs 准备函数 ---
|
||||
|
||||
# Prepare the Onecloud root filesystem from the vendor burn image.
# Steps: unpack the Amlogic burn image, convert its sparse partitions to raw,
# patch the boot DTB, append $add_size_mb MB to the rootfs, resize it, and
# leave the rootfs image attached to $LOOPDEV for the caller to mount.
# Uses globals: SRCPATH, TMPDIR, BOOTFS, LOOPDEV, BOOTFS_MOUNTED; helpers:
# ensure_dir, find_loop_device.
onecloud_rootfs() {
    local unpacker="$SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64"
    local source_image="$SRCPATH/image/onecloud/Armbian_by-SilentWind_24.5.0-trunk_Onecloud_bookworm_legacy_5.9.0-rc7_minimal.burn.img"
    local bootfs_img="$TMPDIR/bootfs.img"
    local rootfs_img="$TMPDIR/rootfs.img"
    local bootfs_sparse="$TMPDIR/6.boot.PARTITION.sparse"
    local rootfs_sparse="$TMPDIR/7.rootfs.PARTITION.sparse"
    local bootfs_loopdev="" # loop device temporarily used for the boot partition
    local add_size_mb=400   # extra space appended to the rootfs image

    echo "信息:准备 Onecloud Rootfs..."
    ensure_dir "$TMPDIR"
    ensure_dir "$BOOTFS"

    echo "信息:解包 Onecloud burn 镜像..."
    sudo "$unpacker" unpack "$source_image" "$TMPDIR" || { echo "错误:解包失败" >&2; exit 1; }

    echo "信息:转换 bootfs 和 rootfs sparse 镜像到 raw 格式..."
    sudo simg2img "$bootfs_sparse" "$bootfs_img" || { echo "错误:转换 bootfs sparse 镜像失败" >&2; exit 1; }
    sudo simg2img "$rootfs_sparse" "$rootfs_img" || { echo "错误:转换 rootfs sparse 镜像失败" >&2; exit 1; }

    echo "信息:挂载 bootfs 并修复 DTB..."
    find_loop_device # pick a free loop device for the bootfs (sets $LOOPDEV)
    bootfs_loopdev="$LOOPDEV" # remember it; $LOOPDEV is reused for the rootfs later
    echo "信息:将 $bootfs_img 关联到 $bootfs_loopdev..."
    sudo losetup "$bootfs_loopdev" "$bootfs_img" || { echo "错误:关联 bootfs 镜像到 $bootfs_loopdev 失败" >&2; exit 1; }
    sudo mount "$bootfs_loopdev" "$BOOTFS" || { echo "错误:挂载 bootfs ($bootfs_loopdev) 失败" >&2; exit 1; }
    BOOTFS_MOUNTED=1
    sudo cp "$SRCPATH/image/onecloud/meson8b-onecloud-fix.dtb" "$BOOTFS/dtb/meson8b-onecloud.dtb" || { echo "错误:复制修复后的 DTB 文件失败" >&2; exit 1; }
    sudo umount "$BOOTFS" || { echo "警告:卸载 bootfs ($BOOTFS) 失败" >&2; BOOTFS_MOUNTED=0; } # unmount failure must not abort the build
    BOOTFS_MOUNTED=0
    echo "信息:分离 bootfs loop 设备 $bootfs_loopdev..."
    sudo losetup -d "$bootfs_loopdev" || { echo "警告:分离 bootfs loop 设备 $bootfs_loopdev 失败" >&2; }
    # the device behind bootfs_loopdev is free again at this point

    echo "信息:扩展 rootfs 镜像 (${add_size_mb}MB)..."
    # NOTE(review): the '>>' append redirection is performed by the *calling*
    # shell, not under sudo — this only works when the invoking user can write
    # $rootfs_img (e.g. the whole script runs as root). Confirm.
    sudo dd if=/dev/zero bs=1M count="$add_size_mb" >> "$rootfs_img" || { echo "错误:扩展 rootfs 镜像失败" >&2; exit 1; }

    echo "信息:检查并调整 rootfs 文件系统大小 (在文件上)..."
    # e2fsck/resize2fs operate on the image file directly here, not a loop device.
    # NOTE(review): e2fsck exits 1/2 after *successfully correcting* errors;
    # treating every nonzero status as fatal may abort otherwise-good builds.
    sudo e2fsck -f -y "$rootfs_img" || { echo "警告:e2fsck 检查 rootfs 镜像文件失败" >&2; exit 1; }
    sudo resize2fs "$rootfs_img" || { echo "错误:resize2fs 调整 rootfs 镜像文件大小失败" >&2; exit 1; }

    echo "信息:设置 rootfs loop 设备..."
    find_loop_device # pick a free loop device again (possibly the one just released)
    echo "信息:将 $rootfs_img 关联到 $LOOPDEV..."
    sudo losetup "$LOOPDEV" "$rootfs_img" || { echo "错误:关联 rootfs 镜像到 $LOOPDEV 失败" >&2; exit 1; }

    echo "信息:Onecloud Rootfs 准备完成。 Loop 设备 $LOOPDEV 已关联 $rootfs_img"
}
|
||||
|
||||
# Prepare the Cumebox2 root filesystem: copy the stock Armbian (Khadas VIM1)
# image, grow partition 1 to fill the file, attach the partition at its known
# offset, and resize the filesystem. Leaves $LOOPDEV attached for the caller.
cumebox2_rootfs() {
    local source_image="$SRCPATH/image/cumebox2/Armbian_25.2.2_Khadas-vim1_bookworm_current_6.12.17_minimal.img"
    local target_image="$TMPDIR/rootfs.img"
    local offset=$((8192 * 512))  # root partition starts at sector 8192
    local fsck_status=0

    echo "信息:准备 Cumebox2 Rootfs..."
    ensure_dir "$TMPDIR"
    cp "$source_image" "$target_image" || { echo "错误:复制 Cumebox2 原始镜像失败" >&2; exit 1; }

    echo "信息:调整镜像分区大小..."
    sudo parted -s "$target_image" resizepart 1 100% || { echo "错误:使用 parted 调整分区大小失败" >&2; exit 1; }

    echo "信息:设置带偏移量的 loop 设备..."
    find_loop_device # sets $LOOPDEV
    echo "信息:将 $target_image (偏移 $offset) 关联到 $LOOPDEV..."
    sudo losetup --offset "$offset" "$LOOPDEV" "$target_image" || { echo "错误:设置带偏移量的 loop 设备 $LOOPDEV 失败" >&2; exit 1; }

    echo "信息:检查并调整文件系统大小 (在 loop 设备上)..."
    # BUGFIX: e2fsck exits 1/2 when it *successfully corrected* errors, which
    # the old `|| exit 1` treated as fatal. Only status >= 4 is a real failure.
    sudo e2fsck -f -y "$LOOPDEV"
    fsck_status=$?
    if [ "$fsck_status" -ge 4 ]; then
        echo "错误:e2fsck 检查 $LOOPDEV 失败 (状态码 $fsck_status)" >&2
        exit 1
    fi
    sudo resize2fs "$LOOPDEV" || { echo "错误:resize2fs 调整 $LOOPDEV 大小失败" >&2; exit 1; }

    echo "信息:Cumebox2 Rootfs 准备完成,loop 设备 $LOOPDEV 已就绪。"
}
|
||||
|
||||
# Prepare the Chainedbox (L1 Pro) root filesystem and patch its DTB.
# Mounts the boot partition (sector 32768) to swap in a fixed 1296 MHz DTB,
# then attaches the root partition (sector 1081344) to $LOOPDEV for the caller.
# Uses globals: SRCPATH, TMPDIR, BOOTFS, LOOPDEV, BOOTFS_MOUNTED.
chainedbox_rootfs_and_fix_dtb() {
    local source_image="$SRCPATH/image/chainedbox/Armbian_24.11.0_rockchip_chainedbox_bookworm_6.1.112_server_2024.10.02_add800m.img"
    local target_image="$TMPDIR/rootfs.img"
    local boot_offset=$((32768 * 512))      # boot partition byte offset
    local rootfs_offset=$((1081344 * 512))  # root partition byte offset
    local bootfs_loopdev=""

    echo "信息:准备 Chainedbox Rootfs 并修复 DTB..."
    ensure_dir "$TMPDIR"; ensure_dir "$BOOTFS"
    cp "$source_image" "$target_image" || { echo "错误:复制 Chainedbox 原始镜像失败" >&2; exit 1; }

    echo "信息:挂载 boot 分区并修复 DTB..."
    find_loop_device # pick a loop device for the boot partition (sets $LOOPDEV)
    bootfs_loopdev="$LOOPDEV"
    echo "信息:将 $target_image (偏移 $boot_offset) 关联到 $bootfs_loopdev..."
    sudo losetup --offset "$boot_offset" "$bootfs_loopdev" "$target_image" || { echo "错误:设置 boot 分区 loop 设备 $bootfs_loopdev 失败" >&2; exit 1; }
    sudo mount "$bootfs_loopdev" "$BOOTFS" || { echo "错误:挂载 boot 分区 ($bootfs_loopdev) 失败" >&2; exit 1; }
    BOOTFS_MOUNTED=1
    sudo cp "$SRCPATH/image/chainedbox/rk3328-l1pro-1296mhz-fix.dtb" "$BOOTFS/dtb/rockchip/rk3328-l1pro-1296mhz.dtb" || { echo "错误:复制修复后的 DTB 文件失败" >&2; exit 1; }
    sudo umount "$BOOTFS" || { echo "警告:卸载 boot 分区 ($BOOTFS) 失败" >&2; BOOTFS_MOUNTED=0; }
    BOOTFS_MOUNTED=0
    echo "信息:分离 boot loop 设备 $bootfs_loopdev..."
    sudo losetup -d "$bootfs_loopdev" || { echo "警告:分离 boot 分区 loop 设备 $bootfs_loopdev 失败" >&2; }

    echo "信息:设置 rootfs 分区的 loop 设备..."
    find_loop_device # pick a loop device for the root partition
    echo "信息:将 $target_image (偏移 $rootfs_offset) 关联到 $LOOPDEV..."
    sudo losetup --offset "$rootfs_offset" "$LOOPDEV" "$target_image" || { echo "错误:设置 rootfs 分区 loop 设备 $LOOPDEV 失败" >&2; exit 1; }

    echo "信息:Chainedbox Rootfs 准备完成,loop 设备 $LOOPDEV 已就绪。"
}
|
||||
|
||||
# Prepare the Vm (UEFI x86) root filesystem: copy the stock Armbian image and
# attach its root partition to a free loop device at the known offset.
vm_rootfs() {
    local src="$SRCPATH/image/vm/Armbian_25.2.1_Uefi-x86_bookworm_current_6.12.13_minimal.img"
    local dst="$TMPDIR/rootfs.img"
    local offset=$((540672 * 512)) # root partition starts at sector 540672

    echo "信息:准备 Vm Rootfs..."
    ensure_dir "$TMPDIR"
    if ! cp "$src" "$dst"; then
        echo "错误:复制 Vm 原始镜像失败" >&2
        exit 1
    fi

    echo "信息:设置带偏移量的 loop 设备..."
    find_loop_device # picks a free loop device (sets $LOOPDEV)
    echo "信息:将 $dst (偏移 $offset) 关联到 $LOOPDEV..."
    if ! sudo losetup --offset "$offset" "$LOOPDEV" "$dst"; then
        echo "错误:设置带偏移量的 loop 设备 $LOOPDEV 失败" >&2
        exit 1
    fi

    echo "信息:Vm Rootfs 准备完成,loop 设备 $LOOPDEV 已就绪。"
}
|
||||
|
||||
# Prepare the E900V22C root filesystem: copy the stock image, append
# $add_size_mb MB of zeros, grow partition 2 to fill the file, attach the
# partition at its known offset and resize the filesystem.
# Leaves $LOOPDEV attached for the caller.
e900v22c_rootfs() {
    local source_image="$SRCPATH/image/e900v22c/Armbian_23.08.0_amlogic_s905l3a_bookworm_5.15.123_server_2023.08.01.img"
    local target_image="$TMPDIR/rootfs.img"
    local offset=$((532480 * 512))  # root partition starts at sector 532480
    local add_size_mb=400
    local fsck_status=0

    echo "信息:准备 E900V22C Rootfs..."
    ensure_dir "$TMPDIR"
    cp "$source_image" "$target_image" || { echo "错误:复制 E900V22C 原始镜像失败" >&2; exit 1; }

    echo "信息:扩展镜像文件 (${add_size_mb}MB)..."
    # BUGFIX: `sudo dd ... >> file` performed the append redirection in the
    # *calling* shell, i.e. without sudo's privileges. Let dd itself open the
    # file for appending (oflag=append) without truncating it (conv=notrunc).
    sudo dd if=/dev/zero bs=1M count="$add_size_mb" of="$target_image" oflag=append conv=notrunc || { echo "错误:扩展镜像文件失败" >&2; exit 1; }

    echo "信息:调整镜像分区大小 (分区 2)..."
    sudo parted -s "$target_image" resizepart 2 100% || { echo "错误:使用 parted 调整分区 2 大小失败" >&2; exit 1; }

    echo "信息:设置带偏移量的 loop 设备..."
    find_loop_device # sets $LOOPDEV
    echo "信息:将 $target_image (偏移 $offset) 关联到 $LOOPDEV..."
    sudo losetup --offset "$offset" "$LOOPDEV" "$target_image" || { echo "错误:设置带偏移量的 loop 设备 $LOOPDEV 失败" >&2; exit 1; }

    echo "信息:检查并调整文件系统大小 (在 loop 设备上)..."
    # BUGFIX: e2fsck exits 1/2 after successfully correcting errors; only
    # status >= 4 indicates a real failure.
    sudo e2fsck -f -y "$LOOPDEV"
    fsck_status=$?
    if [ "$fsck_status" -ge 4 ]; then
        echo "错误:e2fsck 检查 $LOOPDEV 失败 (状态码 $fsck_status)" >&2
        exit 1
    fi
    sudo resize2fs "$LOOPDEV" || { echo "错误:resize2fs 调整 $LOOPDEV 大小失败" >&2; exit 1; }

    echo "信息:E900V22C Rootfs 准备完成,loop 设备 $LOOPDEV 已就绪。"
}
|
||||
|
||||
# Prepare the Octopus-Planet (S912) root filesystem.
# Mounts the boot partition (sector 8192) to point uEnv.txt at the Khadas VIM2
# DTB, grows partition 2, then attaches the root partition (sector 1056768) to
# $LOOPDEV and resizes its filesystem.
octopus_flanet_rootfs() {
    local source_image="$SRCPATH/image/octopus-flanet/Armbian_24.11.0_amlogic_s912_bookworm_6.1.114_server_2024.11.01.img"
    local target_image="$TMPDIR/rootfs.img"
    local boot_offset=$((8192 * 512))       # boot partition byte offset
    local rootfs_offset=$((1056768 * 512))  # root partition byte offset
    local add_size_mb=400  # NOTE(review): declared but never used in this function
    local bootfs_loopdev=""

    echo "信息:准备 Octopus-Planet Rootfs..."
    ensure_dir "$TMPDIR"; ensure_dir "$BOOTFS"
    cp "$source_image" "$target_image" || { echo "错误:复制 Octopus-Planet 原始镜像失败" >&2; exit 1; }

    echo "信息:挂载 boot 分区并修改 uEnv.txt (使用 VIM2 DTB)..."
    find_loop_device # pick a loop device for the boot partition (sets $LOOPDEV)
    bootfs_loopdev="$LOOPDEV"
    echo "信息:将 $target_image (偏移 $boot_offset) 关联到 $bootfs_loopdev..."
    sudo losetup --offset "$boot_offset" "$bootfs_loopdev" "$target_image" || { echo "错误:设置 boot 分区 loop 设备 $bootfs_loopdev 失败" >&2; exit 1; }
    sudo mount "$bootfs_loopdev" "$BOOTFS" || { echo "错误:挂载 boot 分区 ($bootfs_loopdev) 失败" >&2; exit 1; }
    BOOTFS_MOUNTED=1
    sudo sed -i "s/meson-gxm-octopus-planet.dtb/meson-gxm-khadas-vim2.dtb/g" "$BOOTFS/uEnv.txt" || { echo "错误:修改 uEnv.txt 失败" >&2; exit 1; }
    sudo umount "$BOOTFS" || { echo "警告:卸载 boot 分区 ($BOOTFS) 失败" >&2; BOOTFS_MOUNTED=0; }
    BOOTFS_MOUNTED=0
    echo "信息:分离 boot loop 设备 $bootfs_loopdev..."
    sudo losetup -d "$bootfs_loopdev" || { echo "警告:分离 boot 分区 loop 设备 $bootfs_loopdev 失败" >&2; }

    echo "信息:调整镜像分区大小 (分区 2)..."
    sudo parted -s "$target_image" resizepart 2 100% || { echo "错误:使用 parted 调整分区 2 大小失败" >&2; exit 1; }

    echo "信息:设置 rootfs 分区的 loop 设备..."
    find_loop_device # pick a loop device for the root partition
    echo "信息:将 $target_image (偏移 $rootfs_offset) 关联到 $LOOPDEV..."
    sudo losetup --offset "$rootfs_offset" "$LOOPDEV" "$target_image" || { echo "错误:设置 rootfs 分区 loop 设备 $LOOPDEV 失败" >&2; exit 1; }

    echo "信息:检查并调整文件系统大小 (在 loop 设备上)..."
    # NOTE(review): e2fsck exits 1/2 after successfully correcting errors;
    # treating any nonzero status as fatal may abort otherwise-good builds.
    sudo e2fsck -f -y "$LOOPDEV" || { echo "警告:e2fsck 检查 $LOOPDEV 失败" >&2; exit 1; }
    sudo resize2fs "$LOOPDEV" || { echo "错误:resize2fs 调整 $LOOPDEV 大小失败" >&2; exit 1; }

    echo "信息:Octopus-Planet Rootfs 准备完成,loop 设备 $LOOPDEV 已就绪。"
}
|
||||
|
||||
# --- 特定设备的文件配置函数 ---
|
||||
|
||||
# Install Cumebox2-specific payload into the mounted rootfs:
# OLED config, the "ssd" display helper, and a patched DTB.
config_cumebox2_files() {
    local src_dir="$SRCPATH/image/cumebox2"

    echo "信息:为 Cumebox2 配置特定文件 (OLED, DTB)..."
    ensure_dir "$ROOTFS/etc/oled"
    # The DTB destination path may need adjusting for other Armbian releases.
    sudo cp "$src_dir/v-fix.dtb" "$ROOTFS/boot/dtb/amlogic/meson-gxl-s905x-khadas-vim.dtb" || echo "警告:复制 Cumebox2 DTB 失败"
    sudo cp "$src_dir/ssd" "$ROOTFS/usr/bin/" || echo "警告:复制 Cumebox2 ssd 脚本失败"
    sudo chmod +x "$ROOTFS/usr/bin/ssd" || echo "警告:设置 ssd 脚本执行权限失败"
    sudo cp "$src_dir/config.json" "$ROOTFS/etc/oled/config.json" || echo "警告:复制 OLED 配置文件失败"
}
|
||||
|
||||
# Install Octopus-Planet-specific payload: the board model database.
config_octopus_flanet_files() {
    echo "信息:为 Octopus-Planet 配置特定文件 (model_database.conf)..."
    if ! sudo cp "$SRCPATH/image/octopus-flanet/model_database.conf" "$ROOTFS/etc/model_database.conf"; then
        echo "警告:复制 model_database.conf 失败"
    fi
}
|
||||
313
build/functions/install.sh
Executable file
313
build/functions/install.sh
Executable file
@@ -0,0 +1,313 @@
|
||||
#!/bin/bash
|
||||
|
||||
# --- 预准备 ---
|
||||
|
||||
# Configure DNS resolution inside the chroot and switch APT to a CN mirror.
# NOTE(review): this pipes a remote script (gitee.com) straight into bash
# inside the chroot — the image build fully trusts that host; consider
# pinning or checksumming the script.
prepare_dns_and_mirrors() {
    echo "信息:在 chroot 环境中准备 DNS 和更换软件源..."
    run_in_chroot "
    mkdir -p /run/systemd/resolve/ \\
    && touch /run/systemd/resolve/stub-resolv.conf \\
    && printf '%s\\n' 'nameserver 1.1.1.1' 'nameserver 1.0.0.1' > /etc/resolv.conf \\
    && echo '信息:尝试更换镜像源...' \\
    && bash <(curl -sSL https://gitee.com/SuperManito/LinuxMirrors/raw/main/ChangeMirrors.sh) \\
    --source mirrors.tuna.tsinghua.edu.cn --upgrade-software false --web-protocol http || echo '警告:更换镜像源脚本执行失败,可能网络不通或脚本已更改'
    "
}
|
||||
|
||||
# Pin the Armbian APT source inside the chroot to the USTC mirror
# (overwrites /etc/apt/sources.list.d/armbian.list).
delete_armbian_verify(){
    local overwrite_cmd="echo 'deb http://mirrors.ustc.edu.cn/armbian bullseye main bullseye-utils bullseye-desktop' > /etc/apt/sources.list.d/armbian.list"

    echo "信息:在 chroot 环境中修改 Armbian 软件源..."
    run_in_chroot "$overwrite_cmd"
}
|
||||
|
||||
# Fetch prebuilt binaries (ustreamer/janus/python wheels) from the staging
# Docker image by exporting a throwaway container into $PREBUILT_DIR.
# $1: docker platform string, e.g. "linux/arm", "linux/arm64", "linux/amd64".
prepare_external_binaries() {
    local platform="$1"
    local docker_image="registry.cn-hangzhou.aliyuncs.com/silentwind/kvmd-stage-0"

    echo "信息:准备外部预编译二进制文件 (平台: $platform)..."
    ensure_dir "$PREBUILT_DIR"

    echo "信息:拉取 Docker 镜像 $docker_image (平台: $platform)..."
    sudo docker pull --platform "$platform" "$docker_image" || { echo "错误:拉取 Docker 镜像 $docker_image 失败" >&2; exit 1; }

    echo "信息:创建 Docker 容器 $DOCKER_CONTAINER_NAME ..."
    sudo docker create --name "$DOCKER_CONTAINER_NAME" "$docker_image" || { echo "错误:创建 Docker 容器 $DOCKER_CONTAINER_NAME 失败" >&2; exit 1; }

    echo "信息:从 Docker 容器导出文件到 $PREBUILT_DIR ..."
    if ! sudo docker export "$DOCKER_CONTAINER_NAME" | sudo tar -xf - -C "$PREBUILT_DIR"; then
        echo "错误:导出并解压 Docker 容器内容失败" >&2
        # BUGFIX: previously the named container leaked on this path, making
        # every re-run fail with a container-name clash at `docker create`.
        sudo docker rm -f "$DOCKER_CONTAINER_NAME" > /dev/null 2>&1
        exit 1
    fi

    echo "信息:预编译二进制文件准备完成,存放于 $PREBUILT_DIR"

    # Remove the throwaway container (kept fatal, as before, to surface Docker issues).
    sudo docker rm -f "$DOCKER_CONTAINER_NAME" || { echo "错误:删除 Docker 容器 $DOCKER_CONTAINER_NAME 失败" >&2; exit 1; }
}
|
||||
|
||||
# Lay down the One-KVM directory structure, sources, configs and prebuilt
# binaries inside the mounted rootfs.
# $1: platform id, e.g. "onecloud", "cumebox2" — selects build/platform/<id>
#     and an optional device-specific rc.local.
# NOTE(review): the relative paths (configs/, web/, build/, testenv/) assume
# the current working directory is the repository root — confirm callers
# guarantee that.
config_base_files() {
    local platform_id="$1" # e.g., "onecloud", "cumebox2"
    echo "信息:配置基础文件和目录结构 ($platform_id)..."

    echo "信息:创建 KVMD 相关目录..."
    ensure_dir "$ROOTFS/etc/kvmd/override.d"
    ensure_dir "$ROOTFS/etc/kvmd/vnc"
    ensure_dir "$ROOTFS/var/lib/kvmd/msd/images"
    ensure_dir "$ROOTFS/var/lib/kvmd/msd/meta"
    ensure_dir "$ROOTFS/opt/vc/bin"
    ensure_dir "$ROOTFS/usr/share/kvmd"
    ensure_dir "$ROOTFS/One-KVM"
    ensure_dir "$ROOTFS/usr/share/janus/javascript"
    ensure_dir "$ROOTFS/usr/lib/ustreamer/janus"
    ensure_dir "$ROOTFS/run/kvmd"
    ensure_dir "$ROOTFS/tmp/wheel/"
    ensure_dir "$ROOTFS/usr/lib/janus/transports/"
    ensure_dir "$ROOTFS/usr/lib/janus/loggers"

    echo "信息:复制 One-KVM 源码..."
    # Brace expansion yields one --exclude per entry (.git, .github, output, tmp).
    sudo rsync -a --exclude={.git,.github,output,tmp} . "$ROOTFS/One-KVM/" || { echo "错误:复制 One-KVM 源码失败" >&2; exit 1; }

    echo "信息:复制配置文件..."
    sudo cp -r configs/kvmd/* configs/nginx configs/janus "$ROOTFS/etc/kvmd/"
    sudo cp -r web extras contrib/keymaps "$ROOTFS/usr/share/kvmd/"
    sudo cp testenv/fakes/vcgencmd "$ROOTFS/usr/bin/"
    sudo cp -r testenv/js/* "$ROOTFS/usr/share/janus/javascript/"
    sudo cp "build/platform/$platform_id" "$ROOTFS/usr/share/kvmd/platform" || { echo "错误:复制平台文件 build/platform/$platform_id 失败" >&2; exit 1; }
    sudo cp scripts/kvmd-gencert scripts/kvmd-bootconfig scripts/kvmd-certbot scripts/kvmd-udev-hdmiusb-check scripts/kvmd-udev-restart-pass build/scripts/kvmd-firstrun.sh "$ROOTFS/usr/bin/"
    sudo chmod +x "$ROOTFS/usr/bin/kvmd-gencert" "$ROOTFS/usr/bin/kvmd-bootconfig" "$ROOTFS/usr/bin/kvmd-certbot" "$ROOTFS/usr/bin/kvmd-udev-hdmiusb-check" "$ROOTFS/usr/bin/kvmd-udev-restart-pass" "$ROOTFS/usr/bin/kvmd-firstrun.sh"

    # Device-specific rc.local, if the device ships one.
    if [ -f "$SRCPATH/image/$platform_id/rc.local" ]; then
        echo "信息:复制设备特定的 rc.local 文件..."
        sudo cp "$SRCPATH/image/$platform_id/rc.local" "$ROOTFS/etc/"
    fi

    echo "信息:从预编译目录复制二进制文件和库..."
    # NOTE(review): the destination glob "$ROOTFS/lib/"*-linux-*/ relies on
    # exactly one multiarch lib directory existing in the target rootfs.
    sudo cp "$PREBUILT_DIR/tmp/lib/"* "$ROOTFS/lib/"*-linux-*/ || echo "警告:复制 /tmp/lib/* 失败,可能源目录或目标目录不存在或不匹配"
    sudo cp "$PREBUILT_DIR/tmp/ustreamer/ustreamer" "$PREBUILT_DIR/tmp/ustreamer/ustreamer-dump" "$PREBUILT_DIR/usr/bin/janus" "$ROOTFS/usr/bin/" || { echo "错误:复制 ustreamer/janus 二进制文件失败" >&2; exit 1; }
    sudo cp "$PREBUILT_DIR/tmp/ustreamer/janus/libjanus_ustreamer.so" "$ROOTFS/usr/lib/ustreamer/janus/" || { echo "错误:复制 libjanus_ustreamer.so 失败" >&2; exit 1; }
    sudo cp "$PREBUILT_DIR/tmp/wheel/"*.whl "$ROOTFS/tmp/wheel/" || { echo "错误:复制 Python wheel 文件失败" >&2; exit 1; }
    sudo cp "$PREBUILT_DIR/usr/lib/janus/transports/"* "$ROOTFS/usr/lib/janus/transports/" || { echo "错误:复制 Janus transports 失败" >&2; exit 1; }

    # Disable the apt-file hook if the base image ships it.
    if [ -f "$ROOTFS/etc/apt/apt.conf.d/50apt-file.conf" ]; then
        echo "信息:禁用 apt-file 配置..."
        sudo mv "$ROOTFS/etc/apt/apt.conf.d/50apt-file.conf" "$ROOTFS/etc/apt/apt.conf.d/50apt-file.conf.disabled"
    fi
    echo "信息:基础文件配置完成。"
}
|
||||
|
||||
# --- KVMD 安装与配置 ---
|
||||
|
||||
# Update APT metadata inside the chroot, install the runtime packages One-KVM
# needs, then trim APT caches to keep the image small.
install_base_packages() {
    echo "信息:在 chroot 环境中更新源并安装基础软件包..."
    # Consistency fix: use apt-get throughout — the `apt` front-end warns that
    # it "does not have a stable CLI interface" when used in scripts.
    run_in_chroot "
    apt-get update && \\
    apt-get install -y --no-install-recommends \\
    libxkbcommon-x11-0 nginx tesseract-ocr tesseract-ocr-eng tesseract-ocr-chi-sim \\
    iptables network-manager curl kmod libmicrohttpd12 libjansson4 libssl3 \\
    libsofia-sip-ua0 libglib2.0-0 libopus0 libogg0 libcurl4 libconfig9 \\
    python3-pip net-tools && \\
    apt-get clean && \\
    rm -rf /var/lib/apt/lists/*
    "
}
|
||||
|
||||
# Configure the image's network stack inside the chroot.
# $1: "systemd-networkd" to switch to networkd; anything else keeps the
#     default NetworkManager.
# Uses globals: TARGET_DEVICE_NAME, SCRIPT_DIR, ROOTFS.
configure_network() {
    local network_type="$1" # "systemd-networkd" or others (default network-manager)
    if [ "$network_type" = "systemd-networkd" ]; then
        echo "信息:在 chroot 环境中配置 systemd-networkd..."

        # Onecloud: install a first-boot random-MAC generator before enabling networkd.
        if [ "$TARGET_DEVICE_NAME" = "onecloud" ]; then
            echo "信息:为onecloud平台配置随机MAC地址生成机制..."

            # Install the MAC generation script.
            sudo cp "$SCRIPT_DIR/scripts/generate-random-mac.sh" "$ROOTFS/usr/local/bin/"
            sudo chmod +x "$ROOTFS/usr/local/bin/generate-random-mac.sh"

            # Install the systemd unit that runs it.
            sudo cp "$SCRIPT_DIR/services/kvmd-generate-mac.service" "$ROOTFS/etc/systemd/system/"

            # Initial network config without a MAC (the script fills it in at boot).
            run_in_chroot "
            echo -e '[Match]\\nName=eth0\\n\\n[Network]\\nDHCP=yes' > /etc/systemd/network/99-eth0.network && \\
            systemctl mask NetworkManager && \\
            systemctl unmask systemd-networkd && \\
            systemctl enable systemd-networkd systemd-resolved && \\
            systemctl enable kvmd-generate-mac.service
            "
            echo "信息:onecloud随机MAC地址生成机制配置完成"
        fi
        # NOTE(review): when network_type is "systemd-networkd" but the device is
        # not onecloud, this branch configures nothing at all — confirm intended.
    else
        echo "信息:使用默认的网络管理器 (NetworkManager)..."
        # Make sure NetworkManager is enabled (usually the distro default).
        run_in_chroot "systemctl enable NetworkManager"
    fi
}
|
||||
|
||||
# Install the pre-staged Python wheels inside the chroot, then drop the pip
# cache and the wheel staging directory to keep the image small.
# --break-system-packages is required on bookworm's PEP 668-managed Python.
install_python_deps() {
    echo "信息:在 chroot 环境中安装 Python 依赖 (wheels)..."
    run_in_chroot "
    pip3 install --no-cache-dir --break-system-packages /tmp/wheel/*.whl && \\
    pip3 cache purge && \\
    rm -rf /tmp/wheel
    "
}
|
||||
|
||||
# Install KVMD from the bundled sources and defer one-time initialization
# (certificate generation etc.) to a first-boot systemd service.
configure_kvmd_core() {
    echo "信息:在 chroot 环境中安装和配置 KVMD 核心..."

    # Install the first-run service unit into the target rootfs.
    echo "信息:配置KVMD首次运行初始化服务..."
    sudo cp "build/services/kvmd-firstrun.service" "$ROOTFS/etc/systemd/system/"

    # Install KVMD itself; skip operations that must happen on first boot.
    run_in_chroot "
    cd /One-KVM && \\
    python3 setup.py install && \\
    systemctl enable kvmd-firstrun.service
    "

    echo "信息:KVMD核心安装完成,证书生成等初始化操作将在首次开机时执行"
}
|
||||
|
||||
# System-level wiring inside the chroot: sudoers entries, udev rules, systemd
# services, tmpfiles, supervisord config, port overrides and MSD ownership.
# NOTE(review): this is one long `&&`/`||` chain; the `cmd || echo ... && next`
# segments deliberately continue past soft failures, but bash evaluates
# `||`/`&&` strictly left-to-right — be careful when reordering lines.
configure_system() {
    echo "信息:在 chroot 环境中配置系统级设置 (sudoers, udev, services)..."
    run_in_chroot "
    cat /One-KVM/configs/os/sudoers/v2-hdmiusb >> /etc/sudoers && \\
    cat /One-KVM/configs/os/udev/v2-hdmiusb-rpi4.rules > /etc/udev/rules.d/99-kvmd.rules && \\
    echo 'libcomposite' >> /etc/modules && \\
    mv /usr/local/bin/kvmd* /usr/bin/ || echo '信息:/usr/local/bin/kvmd* 未找到或移动失败,可能已在/usr/bin' && \\
    cp /One-KVM/configs/os/services/* /etc/systemd/system/ && \\
    cp /One-KVM/configs/os/tmpfiles.conf /usr/lib/tmpfiles.d/ && \\
    mv /etc/kvmd/supervisord.conf /etc/supervisord.conf && \\
    chmod +x /etc/update-motd.d/* || echo '警告:chmod /etc/update-motd.d/* 失败' && \\
    echo 'kvmd ALL=(ALL) NOPASSWD: /etc/kvmd/custom_atx/gpio.sh' >> /etc/sudoers && \\
    echo 'kvmd ALL=(ALL) NOPASSWD: /etc/kvmd/custom_atx/usbrelay_hid.sh' >> /etc/sudoers && \\
    systemd-sysusers /One-KVM/configs/os/sysusers.conf && \\
    systemd-sysusers /One-KVM/configs/os/kvmd-webterm.conf && \\
    ln -sf /usr/share/tesseract-ocr/*/tessdata /usr/share/tessdata || echo '警告:创建 tesseract 链接失败' && \\
    sed -i 's/8080/80/g' /etc/kvmd/override.yaml && \\
    sed -i 's/4430/443/g' /etc/kvmd/override.yaml && \\
    chown kvmd -R /var/lib/kvmd/msd/ && \\
    systemctl enable kvmd kvmd-otg kvmd-nginx kvmd-vnc kvmd-ipmi kvmd-webterm kvmd-janus kvmd-media && \\
    systemctl disable nginx && \\
    rm -rf /One-KVM
    "
}
|
||||
|
||||
# Download the ttyd binary matching the target architecture into the chroot
# and prepare the kvmd-webterm home directory.
# $1: package arch name (armhf, aarch64, x86_64/amd64).
install_webterm() {
    local arch="$1"
    local ttyd_arch

    # Map package arch names to ttyd release asset names.
    case "$arch" in
        armhf) ttyd_arch="armv7" ;;
        amd64) ttyd_arch="x86_64" ;; # ttyd releases use "x86_64"
        *)     ttyd_arch="$arch" ;;
    esac

    echo "信息:在 chroot 环境中下载并安装 ttyd ($ttyd_arch)..."
    run_in_chroot "
    curl -L https://gh.llkk.cc/https://github.com/tsl0922/ttyd/releases/download/1.7.7/ttyd.${ttyd_arch} -o /usr/bin/ttyd && \\
    chmod +x /usr/bin/ttyd && \\
    mkdir -p /home/kvmd-webterm && \\
    chown kvmd-webterm /home/kvmd-webterm
    "
}
|
||||
|
||||
# Adjust KVMD configuration for the target architecture and device features.
# $1: arch (armhf, aarch64, x86_64/amd64) — x86 disables OTG and uses a USB
#     HID relay for ATX; ARM uses OTG HID.
# $2: device type — "gpio" selects GPIO-based ATX power control,
#     "video1"/"kvmd-video" select an alternative capture device node.
apply_kvmd_tweaks() {
    local arch="$1" # armhf, aarch64, x86_64
    local device_type="$2" # "gpio" or "video1" or other
    local atx_setting=""
    local hid_setting=""

    echo "信息:根据架构 ($arch) 和设备类型 ($device_type) 调整 KVMD 配置..."

    if [ "$arch" = "x86_64" ] || [ "$arch" = "amd64" ]; then
        echo "信息:目标平台为 x86_64/amd64 架构,禁用 OTG,设置 ATX 为 USBRELAY_HID..."
        run_in_chroot "
        systemctl disable kvmd-otg && \\
        sed -i 's/^ATX=.*/ATX=USBRELAY_HID/' /etc/kvmd/atx.sh && \\
        sed -i 's/device: \/dev\/ttyUSB0/device: \/dev\/kvmd-hid/g' /etc/kvmd/override.yaml
        "
    else
        echo "信息::目标平台为 ARM 架构 ($arch)..."
        # ARM: switch the HID backend to OTG and drop the serial HID device.
        hid_setting="otg"
        run_in_chroot "
        sed -i 's/#type: otg/type: otg/g' /etc/kvmd/override.yaml && \\
        sed -i 's/device: \/dev\/ttyUSB0/#device: \/dev\/ttyUSB0/g' /etc/kvmd/override.yaml # comment out ttyUSB0
        "
        echo "信息:设置 HID 为 $hid_setting"
        run_in_chroot "sed -i 's/type: ch9329/type: $hid_setting/g' /etc/kvmd/override.yaml"


        # ATX power control backend selection.
        if [ "$device_type" = "gpio" ]; then
            echo "信息:电源控制设备类型为 gpio,设置 ATX 为 GPIO 并配置引脚..."
            atx_setting="GPIO"
            # NOTE(review): the gpiochip/line numbers below are board-specific;
            # confirm they match the target device.
            run_in_chroot "
            sed -i 's/^ATX=.*/ATX=GPIO/' /etc/kvmd/atx.sh && \\
            sed -i 's/SHUTDOWNPIN/gpiochip1 7/g' /etc/kvmd/custom_atx/gpio.sh && \\
            sed -i 's/REBOOTPIN/gpiochip0 11/g' /etc/kvmd/custom_atx/gpio.sh
            "
        else
            echo "信息:电源控制设备类型不是 gpio ($device_type),设置 ATX 为 USBRELAY_HID..."
            atx_setting="USBRELAY_HID"
            run_in_chroot "sed -i 's/^ATX=.*/ATX=USBRELAY_HID/' /etc/kvmd/atx.sh"
        fi

        # Capture (video) device selection.
        if [ "$device_type" = "video1" ]; then
            echo "信息:视频设备类型为 video1,设置视频设备为 /dev/video1..."
            run_in_chroot "sed -i 's|/dev/video0|/dev/video1|g' /etc/kvmd/override.yaml"
        elif [ "$device_type" = "kvmd-video" ]; then
            echo "信息:视频设备类型为 kvmd-video,设置视频设备为 /dev/kvmd-video..."
            run_in_chroot "sed -i 's|/dev/video0|/dev/kvmd-video|g' /etc/kvmd/override.yaml"
        else
            echo "信息:使用默认视频设备 /dev/video0..."
        fi
    fi
    echo "信息:KVMD 配置调整完成。"
}
|
||||
|
||||
# --- 整体安装流程 ---
|
||||
# Top-level installation driver: fetch prebuilt binaries, lay down files,
# optionally prepare DNS/mirrors, then run every install/configure step in
# order inside the chroot.
# $1: arch (armhf, aarch64, x86_64/amd64)
# $2: device feature ("gpio", "video1", "" ...)
# $3: network type ("systemd-networkd" or "" for NetworkManager)
# Uses globals: TARGET_DEVICE_NAME, NEED_PREPARE_DNS.
install_and_configure_kvmd() {
    local arch="$1" # architecture: armhf, aarch64, x86_64/amd64
    local device_type="$2" # device feature: "gpio", "video1", "" (or other)
    local network_type="$3" # network config: "systemd-networkd", "" (default network-manager)
    local host_arch="" # docker platform arch: arm, arm64, amd64

    # Map package arch names to docker platform names.
    case "$arch" in
        armhf) host_arch="arm" ;;
        aarch64) host_arch="arm64" ;; # docker calls aarch64 "arm64"
        x86_64|amd64) host_arch="amd64"; arch="x86_64" ;; # normalize to x86_64 internally
        *) echo "错误:不支持的架构 $arch"; exit 1 ;;
    esac


    prepare_external_binaries "linux/$host_arch"
    config_base_files "$TARGET_DEVICE_NAME" # device name comes from a global

    # Device-specific extra file configuration, when such a function exists.
    if declare -f "config_${TARGET_DEVICE_NAME}_files" > /dev/null; then
        echo "信息:执行特定设备的文件配置函数 config_${TARGET_DEVICE_NAME}_files ..."
        "config_${TARGET_DEVICE_NAME}_files"
    fi

    # Some base images need DNS fixed and mirrors switched first.
    if [[ "$NEED_PREPARE_DNS" = true ]]; then
        prepare_dns_and_mirrors
    fi
    # Optional: force a specific armbian mirror.
    # delete_armbian_verify

    # Installation steps, in dependency order.
    install_base_packages
    configure_network "$network_type"
    install_python_deps
    configure_kvmd_core
    configure_system
    install_webterm "$arch" # pass the original arch name for the ttyd download
    apply_kvmd_tweaks "$arch" "$device_type"

    run_in_chroot "df -h" # show final disk usage
    echo "信息:One-KVM 安装和配置完成。"
}
|
||||
65
build/functions/packaging.sh
Executable file
65
build/functions/packaging.sh
Executable file
@@ -0,0 +1,65 @@
|
||||
#!/bin/bash
|
||||
|
||||
# --- 打包函数 ---
|
||||
|
||||
# Move the finished rootfs image into the output directory and, for the "Vm"
# target, additionally convert it to VMDK and VDI.
# $1: friendly device name used in the artifact filename (e.g. "Vm", "Cumebox2").
# Uses globals: TMPDIR, OUTPUTDIR, DATE, mount-state flags, LOOPDEV.
pack_img() {
    local device_name_friendly="$1" # e.g., "Vm", "Cumebox2"
    local target_img_name="One-KVM_by-SilentWind_${device_name_friendly}_${DATE}.img"
    local source_img="$TMPDIR/rootfs.img"

    echo "信息:开始打包镜像 ($device_name_friendly)..."
    ensure_dir "$OUTPUTDIR"

    # Release any mount points / loop devices still held before packing.
    # NOTE(review): inside [[ ]], && binds tighter than || — the final
    # `-n "$LOOPDEV" && -b "$LOOPDEV"` pair is one combined condition.
    if [[ "$ROOTFS_MOUNTED" -eq 1 || "$DEV_MOUNTED" -eq 1 || "$SYS_MOUNTED" -eq 1 || "$PROC_MOUNTED" -eq 1 || -n "$LOOPDEV" && -b "$LOOPDEV" ]]; then
        echo "警告:发现未卸载的挂载点或loop设备,尝试再次卸载..."
        unmount_all
    fi

    echo "信息:移动镜像文件 $source_img 到 $OUTPUTDIR/$target_img_name ..."
    sudo mv "$source_img" "$OUTPUTDIR/$target_img_name" || { echo "错误:移动镜像文件失败" >&2; exit 1; }

    if [ "$device_name_friendly" = "Vm" ]; then
        echo "信息:为 Vm 目标转换镜像格式 (vmdk, vdi)..."
        local raw_img="$OUTPUTDIR/$target_img_name"
        # NOTE(review): "Vmare" looks like a typo for "Vmware", but renaming
        # would change published artifact names — left untouched.
        local vmdk_img="$OUTPUTDIR/One-KVM_by-SilentWind_Vmare-uefi_${DATE}.vmdk"
        local vdi_img="$OUTPUTDIR/One-KVM_by-SilentWind_Virtualbox-uefi_${DATE}.vdi"

        echo "信息:转换为 VMDK..."
        sudo qemu-img convert -f raw -O vmdk "$raw_img" "$vmdk_img" || echo "警告:转换为 VMDK 失败"
        echo "信息:转换为 VDI..."
        sudo qemu-img convert -f raw -O vdi "$raw_img" "$vdi_img" || echo "警告:转换为 VDI 失败"
    fi
    echo "信息:镜像打包完成: $OUTPUTDIR/$target_img_name"
}
|
||||
|
||||
# Repack the modified Onecloud rootfs into an Amlogic burn image:
# convert the raw rootfs back to a sparse partition image and let the AmlImg
# tool assemble the final *.burn.img from the unpacked partition set in $TMPDIR.
pack_img_onecloud() {
    local target_img_name="One-KVM_by-SilentWind_Onecloud_${DATE}.burn.img"
    local rootfs_raw_img="$TMPDIR/rootfs.img"
    local rootfs_sparse_img="$TMPDIR/7.rootfs.PARTITION.sparse"
    local aml_packer="$SRCPATH/image/onecloud/AmlImg_v0.3.1_linux_amd64"

    echo "信息:开始为 Onecloud 打包 burn 镜像..."
    ensure_dir "$OUTPUTDIR"

    # Release any mount points / loop devices still held before packing.
    if [[ "$ROOTFS_MOUNTED" -eq 1 || "$DEV_MOUNTED" -eq 1 || "$SYS_MOUNTED" -eq 1 || "$PROC_MOUNTED" -eq 1 || -n "$LOOPDEV" && -b "$LOOPDEV" ]]; then
        echo "警告:发现未卸载的挂载点或loop设备,尝试再次卸载..."
        unmount_all
    fi

    echo "信息:将 raw rootfs 转换为 sparse image..."
    # Remove any stale sparse file from the unpack step first.
    sudo rm -f "$rootfs_sparse_img"
    sudo img2simg "$rootfs_raw_img" "$rootfs_sparse_img" || { echo "错误:img2simg 转换失败" >&2; exit 1; }
    sudo rm "$rootfs_raw_img" # raw file no longer needed after conversion

    echo "信息:使用 AmlImg 工具打包..."
    sudo chmod +x "$aml_packer"
    sudo "$aml_packer" pack "$OUTPUTDIR/$target_img_name" "$TMPDIR/" || { echo "错误:AmlImg 打包失败" >&2; exit 1; }

    echo "信息:清理 Onecloud 临时文件..."
    sudo rm -f "$TMPDIR/6.boot.PARTITION.sparse" "$TMPDIR/7.rootfs.PARTITION.sparse" "$TMPDIR/dts.img"

    echo "信息:Onecloud burn 镜像打包完成: $OUTPUTDIR/$target_img_name"
}
|
||||
200
build/init.sh
200
build/init.sh
@@ -1,41 +1,112 @@
|
||||
#!/bin/bash
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
# 定义颜色代码
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[0;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
echo -e "${GREEN}One-KVM pre-starting...${NC}"
|
||||
# 输出日志的函数
|
||||
# Print an informational message in green.
log_info() {
    local msg="$1"
    echo -e "${GREEN}[INFO] ${msg}${NC}"
}
|
||||
|
||||
# Print a warning message in yellow.
log_warn() {
    local msg="$1"
    echo -e "${YELLOW}[WARN] ${msg}${NC}"
}
|
||||
|
||||
# Print an error message in red.
log_error() {
    local msg="$1"
    echo -e "${RED}[ERROR] ${msg}${NC}"
}
|
||||
|
||||
# 初始化检查
|
||||
log_info "One-KVM 正在启动..."
|
||||
|
||||
# 首次初始化配置
|
||||
if [ ! -f /etc/kvmd/.init_flag ]; then
|
||||
echo -e "${GREEN}One-KVM is initializing first...${NC}" \
|
||||
&& mkdir -p /etc/kvmd/ \
|
||||
&& mv /etc/kvmd_backup/* /etc/kvmd/ \
|
||||
&& touch /etc/kvmd/.docker_flag \
|
||||
&& sed -i 's/localhost.localdomain/docker/g' /etc/kvmd/meta.yaml \
|
||||
&& sed -i 's/localhost/localhost:4430/g' /etc/kvmd/kvm_input.sh \
|
||||
&& /usr/share/kvmd/kvmd-gencert --do-the-thing \
|
||||
&& /usr/share/kvmd/kvmd-gencert --do-the-thing --vnc \
|
||||
|| echo -e "${RED}One-KVM config moving and self-signed SSL certificates init failed.${NC}"
|
||||
|
||||
if [ "$NOSSL" == 1 ]; then
|
||||
echo -e "${GREEN}One-KVM self-signed SSL is disabled.${NC}" \
|
||||
&& python -m kvmd.apps.ngxmkconf /etc/kvmd/nginx/nginx.conf.mako /etc/kvmd/nginx/nginx.conf -o nginx/https/enabled=false \
|
||||
|| echo -e "${RED}One-KVM nginx config init failed.${NC}"
|
||||
log_info "首次初始化配置..."
|
||||
|
||||
# 创建必要目录并移动配置文件
|
||||
if mkdir -p /etc/kvmd/ && \
|
||||
mv /etc/kvmd_backup/* /etc/kvmd/ && \
|
||||
touch /etc/kvmd/.docker_flag && \
|
||||
sed -i 's/localhost.localdomain/docker/g' /etc/kvmd/meta.yaml && \
|
||||
sed -i 's/localhost/localhost:4430/g' /etc/kvmd/kvm_input.sh; then
|
||||
log_info "移动配置文件完成"
|
||||
else
|
||||
python -m kvmd.apps.ngxmkconf /etc/kvmd/nginx/nginx.conf.mako /etc/kvmd/nginx/nginx.conf \
|
||||
|| echo -e "${RED}One-KVM nginx config init failed.${NC}"
|
||||
log_error "移动配置文件失败"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# SSL证书配置
|
||||
if ! /usr/share/kvmd/kvmd-gencert --do-the-thing; then
|
||||
log_error "Nginx SSL 证书生成失败"
|
||||
exit 1
|
||||
fi
|
||||
if ! /usr/share/kvmd/kvmd-gencert --do-the-thing --vnc; then
|
||||
log_error "VNC SSL 证书生成失败"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 设置用户名和密码
|
||||
if [ ! -z "$USERNAME" ] && [ ! -z "$PASSWORD" ]; then
|
||||
if python -m kvmd.apps.htpasswd del admin \
|
||||
&& echo "$PASSWORD" | python -m kvmd.apps.htpasswd set -i "$USERNAME" \
|
||||
&& echo "$PASSWORD -> $USERNAME:$PASSWORD" > /etc/kvmd/vncpasswd \
|
||||
&& echo "$USERNAME:$PASSWORD -> $USERNAME:$PASSWORD" > /etc/kvmd/ipmipasswd; then
|
||||
log_info "用户凭据设置成功"
|
||||
else
|
||||
log_error "用户凭据设置失败"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
log_warn "未设置 USERNAME 和 PASSWORD 环境变量,使用默认值(admin/admin)"
|
||||
fi
|
||||
|
||||
# SSL开关配置
|
||||
if [ "$NOSSL" == 1 ]; then
|
||||
log_info "已禁用SSL"
|
||||
if ! python -m kvmd.apps.ngxmkconf /etc/kvmd/nginx/nginx.conf.mako /etc/kvmd/nginx/nginx.conf -o nginx/https/enabled=false; then
|
||||
log_error "Nginx 配置失败"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if ! python -m kvmd.apps.ngxmkconf /etc/kvmd/nginx/nginx.conf.mako /etc/kvmd/nginx/nginx.conf; then
|
||||
log_error "Nginx 配置失败"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# 认证配置
|
||||
if [ "$NOAUTH" == "1" ]; then
|
||||
sed -i "s/enabled: true/enabled: false/g" /etc/kvmd/override.yaml \
|
||||
&& echo -e "${GREEN}One-KVM auth is disabled.${NC}"
|
||||
sed -i "s/enabled: true/enabled: false/g" /etc/kvmd/override.yaml
|
||||
log_info "已禁用认证"
|
||||
fi
|
||||
|
||||
#add supervisord conf
|
||||
if [ "$NOWEBTERM" == "1" ]; then
|
||||
echo -e "${GREEN}One-KVM webterm is disabled.${NC}"
|
||||
log_info "已禁用 WebTerm 功能"
|
||||
rm -r /usr/share/kvmd/extras/webterm
|
||||
else
|
||||
cat >> /etc/kvmd/supervisord.conf << EOF
|
||||
@@ -58,7 +129,7 @@ EOF
|
||||
fi
|
||||
|
||||
if [ "$NOVNC" == "1" ]; then
|
||||
echo -e "${GREEN}One-KVM VNC is disabled.${NC}"
|
||||
log_info "已禁用 VNC 功能"
|
||||
rm -r /usr/share/kvmd/extras/vnc
|
||||
else
|
||||
cat >> /etc/kvmd/supervisord.conf << EOF
|
||||
@@ -77,7 +148,7 @@ EOF
|
||||
fi
|
||||
|
||||
if [ "$NOIPMI" == "1" ]; then
|
||||
echo -e "${GREEN}One-KVM IPMI is disabled.${NC}"
|
||||
log_info "已禁用IPMI功能"
|
||||
rm -r /usr/share/kvmd/extras/ipmi
|
||||
else
|
||||
cat >> /etc/kvmd/supervisord.conf << EOF
|
||||
@@ -97,48 +168,77 @@ EOF
|
||||
|
||||
#switch OTG config
|
||||
if [ "$OTG" == "1" ]; then
|
||||
echo -e "${GREEN}One-KVM OTG is enabled.${NC}"
|
||||
log_info "已启用 OTG 功能"
|
||||
sed -i "s/ch9329/otg/g" /etc/kvmd/override.yaml
|
||||
sed -i "s/device: \/dev\/ttyUSB0//g" /etc/kvmd/override.yaml
|
||||
sed -i "s/device: \/dev\/ttyUSB0//g" /etc/kvmd/override.yaml
|
||||
if [ "$NOMSD" == 1 ]; then
|
||||
log_info "已禁用 MSD 功能"
|
||||
else
|
||||
sed -i "s/#type: otg/type: otg/g" /etc/kvmd/override.yaml
|
||||
fi
|
||||
fi
|
||||
|
||||
#if [ ! -z "$SHUTDOWNPIN" ! -z "$REBOOTPIN" ]; then
|
||||
|
||||
if [ ! -z "$VIDEONUM" ]; then
|
||||
sed -i "s/\/dev\/video0/\/dev\/video$VIDEONUM/g" /etc/kvmd/override.yaml \
|
||||
&& echo -e "${GREEN}One-KVM video device is set to /dev/video$VIDEONUM.${NC}"
|
||||
if sed -i "s/\/dev\/video0/\/dev\/video$VIDEONUM/g" /etc/kvmd/override.yaml && \
|
||||
sed -i "s/\/dev\/video0/\/dev\/video$VIDEONUM/g" /etc/kvmd/janus/janus.plugin.ustreamer.jcfg; then
|
||||
log_info "视频设备已设置为 /dev/video$VIDEONUM"
|
||||
fi
|
||||
fi
|
||||
|
||||
#set htpasswd
|
||||
if [ ! -z "$USERNAME" ] && [ ! -z "$PASSWORD" ]; then
|
||||
python -m kvmd.apps.htpasswd del admin \
|
||||
&& echo $PASSWORD | python -m kvmd.apps.htpasswd set -i "$USERNAME" \
|
||||
&& echo "$PASSWORD -> $USERNAME:$PASSWORD" > /etc/kvmd/vncpasswd \
|
||||
&& echo "$USERNAME:$PASSWORD -> $USERNAME:$PASSWORD" > /etc/kvmd/ipmipasswd \
|
||||
|| echo -e "${RED}One-KVM htpasswd init failed.${NC}"
|
||||
else
|
||||
echo -e "${YELLOW} USERNAME and PASSWORD environment variables is not set, using defalut(admin/admin).${NC}"
|
||||
if [ ! -z "$AUDIONUM" ]; then
|
||||
if sed -i "s/hw:0/hw:$AUDIONUM/g" /etc/kvmd/janus/janus.plugin.ustreamer.jcfg; then
|
||||
log_info "音频设备已设置为 hw:$AUDIONUM"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$NOMSD" == 1 ]; then
|
||||
echo -e "${GREEN}One-KVM MSD is disabled.${NC}"
|
||||
else
|
||||
sed -i "s/#type: otg/type: otg/g" /etc/kvmd/override.yaml
|
||||
if [ ! -z "$CH9329SPEED" ]; then
|
||||
if sed -i "s/speed: 9600/speed: $CH9329SPEED/g" /etc/kvmd/override.yaml; then
|
||||
log_info "CH9329 串口速率已设置为 $CH9329SPEED"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$CH9329TIMEOUT" ]; then
|
||||
if sed -i "s/read_timeout: 0.3/read_timeout: $CH9329TIMEOUT/g" /etc/kvmd/override.yaml; then
|
||||
log_info "CH9329 超时已设置为 $CH9329TIMEOUT 秒"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$H264PRESET" ]; then
|
||||
if sed -i "s/ultrafast/$H264PRESET/g" /etc/kvmd/override.yaml; then
|
||||
log_info "H264 预设已设置为 $H264PRESET"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$VIDEOFORMAT" ]; then
|
||||
if sed -i "s/format=mjpeg/format=$VIDFORMAT/g" /etc/kvmd/override.yaml; then
|
||||
log_info "视频输入格式已设置为 $VIDFORMAT"
|
||||
fi
|
||||
fi
|
||||
|
||||
touch /etc/kvmd/.init_flag
|
||||
log_info "初始化配置完成"
|
||||
fi
|
||||
|
||||
#Trying usb_gadget
|
||||
# OTG设备配置
|
||||
if [ "$OTG" == "1" ]; then
|
||||
echo "Trying OTG Port..."
|
||||
log_info "正在配置 OTG 设备..."
|
||||
rm -r /run/kvmd/otg &> /dev/null
|
||||
modprobe libcomposite || echo -e "${RED}Linux libcomposite module modprobe failed.${NC}"
|
||||
python -m kvmd.apps.otg start \
|
||||
&& ln -s /dev/hidg1 /dev/kvmd-hid-mouse \
|
||||
&& ln -s /dev/hidg0 /dev/kvmd-hid-keyboard \
|
||||
|| echo -e "${RED}OTG Port mount failed.${NC}"
|
||||
|
||||
if ! modprobe libcomposite; then
|
||||
log_error "加载 libcomposite 模块失败"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if python -m kvmd.apps.otg start; then
|
||||
ln -s /dev/hidg1 /dev/kvmd-hid-mouse
|
||||
ln -s /dev/hidg0 /dev/kvmd-hid-keyboard
|
||||
ln -s /dev/hidg2 /dev/kvmd-hid-mouse-alt
|
||||
log_info "OTG 设备配置完成"
|
||||
else
|
||||
log_warn "OTG 设备挂载失败"
|
||||
#exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}One-KVM starting...${NC}"
|
||||
log_info "One-KVM 配置文件准备完成,正在启动服务..."
|
||||
exec supervisord -c /etc/kvmd/supervisord.conf
|
||||
3
build/platform/chainedbox
Normal file
3
build/platform/chainedbox
Normal file
@@ -0,0 +1,3 @@
|
||||
PIKVM_MODEL=v2_model
|
||||
PIKVM_VIDEO=usb_video
|
||||
PIKVM_BOARD=chainedbox
|
||||
3
build/platform/cumebox2
Normal file
3
build/platform/cumebox2
Normal file
@@ -0,0 +1,3 @@
|
||||
PIKVM_MODEL=v2_model
|
||||
PIKVM_VIDEO=usb_video
|
||||
PIKVM_BOARD=cumebox2
|
||||
3
build/platform/e900v22c
Normal file
3
build/platform/e900v22c
Normal file
@@ -0,0 +1,3 @@
|
||||
PIKVM_MODEL=v2_model
|
||||
PIKVM_VIDEO=usb_video
|
||||
PIKVM_BOARD=e900v22c
|
||||
3
build/platform/octopus-flanet
Normal file
3
build/platform/octopus-flanet
Normal file
@@ -0,0 +1,3 @@
|
||||
PIKVM_MODEL=v2_model
|
||||
PIKVM_VIDEO=usb_video
|
||||
PIKVM_BOARD=octopus-flanet
|
||||
3
build/platform/vm
Normal file
3
build/platform/vm
Normal file
@@ -0,0 +1,3 @@
|
||||
PIKVM_MODEL=v2_model
|
||||
PIKVM_VIDEO=usb_video
|
||||
PIKVM_BOARD=vm
|
||||
64
build/scripts/generate-random-mac.sh
Normal file
64
build/scripts/generate-random-mac.sh
Normal file
@@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
# 为onecloud平台生成随机MAC地址的一次性脚本
|
||||
# 此脚本在首次开机时执行,为eth0网卡生成并应用随机MAC地址
|
||||
|
||||
set -e
|
||||
|
||||
NETWORK_CONFIG="/etc/systemd/network/99-eth0.network"
|
||||
LOCK_FILE="/var/lib/kvmd/.mac-generated"
|
||||
|
||||
# 检查是否已经执行过
|
||||
if [ -f "$LOCK_FILE" ]; then
|
||||
echo "MAC地址已经生成过,跳过执行"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# 生成随机MAC地址 (使用本地管理的MAC地址前缀)
|
||||
generate_random_mac() {
|
||||
# 使用本地管理的MAC地址前缀 (第二位设为2、6、A、E中的一个)
|
||||
# 这样可以避免与真实硬件MAC地址冲突
|
||||
printf "02:%02x:%02x:%02x:%02x:%02x\n" \
|
||||
$((RANDOM % 256)) \
|
||||
$((RANDOM % 256)) \
|
||||
$((RANDOM % 256)) \
|
||||
$((RANDOM % 256)) \
|
||||
$((RANDOM % 256))
|
||||
}
|
||||
|
||||
echo "正在为onecloud生成随机MAC地址..."
|
||||
|
||||
# 生成新的MAC地址
|
||||
NEW_MAC=$(generate_random_mac)
|
||||
echo "生成的MAC地址: $NEW_MAC"
|
||||
|
||||
# 备份原配置文件
|
||||
if [ -f "$NETWORK_CONFIG" ]; then
|
||||
cp "$NETWORK_CONFIG" "${NETWORK_CONFIG}.backup"
|
||||
fi
|
||||
|
||||
# 更新网络配置文件
|
||||
cat > "$NETWORK_CONFIG" << EOF
|
||||
[Match]
|
||||
Name=eth0
|
||||
|
||||
[Network]
|
||||
DHCP=yes
|
||||
|
||||
[Link]
|
||||
MACAddress=$NEW_MAC
|
||||
EOF
|
||||
|
||||
echo "已更新网络配置文件: $NETWORK_CONFIG"
|
||||
|
||||
# 创建锁定文件,防止重复执行
|
||||
mkdir -p "$(dirname "$LOCK_FILE")"
|
||||
echo "MAC地址生成时间: $(date)" > "$LOCK_FILE"
|
||||
|
||||
# 禁用此服务,确保只运行一次
|
||||
systemctl disable kvmd-generate-mac.service
|
||||
|
||||
echo "随机MAC地址生成完成: $NEW_MAC"
|
||||
echo "服务已自动禁用,下次开机不会再执行"
|
||||
|
||||
exit 0
|
||||
34
build/scripts/kvmd-firstrun.sh
Normal file
34
build/scripts/kvmd-firstrun.sh
Normal file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
|
||||
# KVMD首次运行初始化脚本
|
||||
# 在首次开机时执行KVMD服务启动前的必要初始化操作
|
||||
|
||||
set -e
|
||||
|
||||
LOCK_FILE="/var/lib/kvmd/.kvmd-firstrun-completed"
|
||||
|
||||
# 检查是否已经执行过
|
||||
[ -f "$LOCK_FILE" ] && { echo "[KVMD-FirstRun] 初始化已完成,跳过执行"; exit 0; }
|
||||
|
||||
echo "[KVMD-FirstRun] 开始KVMD首次运行初始化..."
|
||||
|
||||
# 1. 生成KVMD主证书
|
||||
echo "[KVMD-FirstRun] 生成KVMD主证书..."
|
||||
kvmd-gencert --do-the-thing
|
||||
|
||||
# 2. 生成VNC证书
|
||||
echo "[KVMD-FirstRun] 生成VNC证书..."
|
||||
kvmd-gencert --do-the-thing --vnc
|
||||
|
||||
# 3. 生成nginx配置文件
|
||||
echo "[KVMD-FirstRun] 生成nginx配置文件..."
|
||||
kvmd-nginx-mkconf /etc/kvmd/nginx/nginx.conf.mako /etc/kvmd/nginx/nginx.conf || echo "[KVMD-FirstRun] 警告: nginx配置生成失败"
|
||||
|
||||
# 创建锁定文件
|
||||
mkdir -p "$(dirname "$LOCK_FILE")"
|
||||
echo "KVMD首次运行初始化完成 - $(date)" > "$LOCK_FILE"
|
||||
|
||||
# 禁用服务
|
||||
systemctl disable kvmd-firstrun.service || echo "[KVMD-FirstRun] 警告: 服务禁用失败"
|
||||
|
||||
echo "[KVMD-FirstRun] 初始化完成!"
|
||||
26
build/services/kvmd-firstrun.service
Normal file
26
build/services/kvmd-firstrun.service
Normal file
@@ -0,0 +1,26 @@
|
||||
[Unit]
|
||||
Description=KVMD First Run Initialization (One-time)
|
||||
Documentation=https://github.com/your-repo/One-KVM
|
||||
Before=kvmd.service
|
||||
Before=kvmd-nginx.service
|
||||
Before=kvmd-otg.service
|
||||
Before=kvmd-vnc.service
|
||||
Before=kvmd-ipmi.service
|
||||
Before=kvmd-webterm.service
|
||||
Before=kvmd-janus.service
|
||||
Before=kvmd-media.service
|
||||
After=local-fs.target
|
||||
After=network.target
|
||||
Wants=local-fs.target
|
||||
ConditionPathExists=!/var/lib/kvmd/.kvmd-firstrun-completed
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/usr/bin/kvmd-firstrun.sh
|
||||
RemainAfterExit=yes
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
TimeoutStartSec=300
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
18
build/services/kvmd-generate-mac.service
Normal file
18
build/services/kvmd-generate-mac.service
Normal file
@@ -0,0 +1,18 @@
|
||||
[Unit]
|
||||
Description=Generate Random MAC Address for OneCloud (One-time)
|
||||
Documentation=https://github.com/your-repo/One-KVM
|
||||
Before=systemd-networkd.service
|
||||
Before=network-pre.target
|
||||
Wants=network-pre.target
|
||||
After=local-fs.target
|
||||
ConditionPathExists=!/var/lib/kvmd/.mac-generated
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/usr/local/bin/generate-random-mac.sh
|
||||
RemainAfterExit=yes
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,4 +1,7 @@
|
||||
video: {
|
||||
sink = "kvmd::ustreamer::h264"
|
||||
}
|
||||
|
||||
acap: {
|
||||
device = "hw:0"
|
||||
tc358743 = "/dev/video0"
|
||||
}
|
||||
|
||||
@@ -1,4 +1,24 @@
|
||||
#!/bin/bash
|
||||
#!/bin/bash
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
if [ -e /etc/update-motd.d/10-armbian-header ]; then /etc/update-motd.d/10-armbian-header; fi
|
||||
if [ -e /etc/update-motd.d/30-armbian-sysinfo ]; then /etc/update-motd.d/30-armbian-sysinfo; fi
|
||||
@@ -15,8 +35,6 @@ printf "
|
||||
|
||||
____________________________________________________________________________
|
||||
|
||||
欢迎使用 One-KVM,基于开源程序 PiKVM 的 IP-KVM 应用
|
||||
|
||||
项目链接:
|
||||
* One-KVM:https://github.com/mofeng-git/One-KVM
|
||||
|
||||
|
||||
@@ -1,4 +1,24 @@
|
||||
#!/bin/bash
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
echo $ATX
|
||||
case $ATX in
|
||||
|
||||
@@ -1,4 +1,24 @@
|
||||
#!/bin/bash
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
|
||||
@@ -1,4 +1,24 @@
|
||||
#!/bin/bash
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
case $1 in
|
||||
short)
|
||||
gpioset -m time -s 1 SHUTDOWNPIN=0
|
||||
|
||||
@@ -1,3 +1,24 @@
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
import sys
|
||||
import hid
|
||||
|
||||
|
||||
@@ -1,4 +1,24 @@
|
||||
#!/bin/bash
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
case $1 in
|
||||
short)
|
||||
python3 /etc/kvmd/custom_atx/usbrelay_hid.py 1 on
|
||||
|
||||
100
configs/kvmd/main/v4plus-hdmi-rpi4.yaml
Normal file
100
configs/kvmd/main/v4plus-hdmi-rpi4.yaml
Normal file
@@ -0,0 +1,100 @@
|
||||
# Don't touch this file otherwise your device may stop working.
|
||||
# Use override.yaml to modify required settings.
|
||||
# You can find a working configuration in /usr/share/kvmd/configs.default/kvmd.
|
||||
|
||||
override: !include [override.d, override.yaml]
|
||||
|
||||
logging: !include logging.yaml
|
||||
|
||||
kvmd:
|
||||
auth: !include auth.yaml
|
||||
|
||||
info:
|
||||
hw:
|
||||
ignore_past: true
|
||||
fan:
|
||||
unix: /run/kvmd/fan.sock
|
||||
|
||||
hid:
|
||||
type: otg
|
||||
mouse_alt:
|
||||
device: /dev/kvmd-hid-mouse-alt
|
||||
|
||||
atx:
|
||||
type: gpio
|
||||
power_led_pin: 4
|
||||
hdd_led_pin: 5
|
||||
power_switch_pin: 23
|
||||
reset_switch_pin: 27
|
||||
|
||||
msd:
|
||||
type: otg
|
||||
|
||||
streamer:
|
||||
h264_bitrate:
|
||||
default: 5000
|
||||
cmd:
|
||||
- "/usr/bin/ustreamer"
|
||||
- "--device=/dev/kvmd-video"
|
||||
- "--persistent"
|
||||
- "--dv-timings"
|
||||
- "--format=uyvy"
|
||||
- "--format-swap-rgb"
|
||||
- "--buffers=8"
|
||||
- "--encoder=m2m-image"
|
||||
- "--workers=3"
|
||||
- "--quality={quality}"
|
||||
- "--desired-fps={desired_fps}"
|
||||
- "--drop-same-frames=30"
|
||||
- "--unix={unix}"
|
||||
- "--unix-rm"
|
||||
- "--unix-mode=0660"
|
||||
- "--exit-on-parent-death"
|
||||
- "--process-name-prefix={process_name_prefix}"
|
||||
- "--notify-parent"
|
||||
- "--no-log-colors"
|
||||
- "--jpeg-sink=kvmd::ustreamer::jpeg"
|
||||
- "--jpeg-sink-mode=0660"
|
||||
- "--h264-sink=kvmd::ustreamer::h264"
|
||||
- "--h264-sink-mode=0660"
|
||||
- "--h264-bitrate={h264_bitrate}"
|
||||
- "--h264-gop={h264_gop}"
|
||||
|
||||
gpio:
|
||||
drivers:
|
||||
__v4_locator__:
|
||||
type: locator
|
||||
|
||||
scheme:
|
||||
__v3_usb_breaker__:
|
||||
pin: 22
|
||||
mode: output
|
||||
initial: true
|
||||
pulse: false
|
||||
|
||||
__v4_locator__:
|
||||
driver: __v4_locator__
|
||||
pin: 12
|
||||
mode: output
|
||||
pulse: false
|
||||
|
||||
__v4_const1__:
|
||||
pin: 6
|
||||
mode: output
|
||||
initial: false
|
||||
switch: false
|
||||
pulse: false
|
||||
|
||||
|
||||
media:
|
||||
memsink:
|
||||
h264:
|
||||
sink: "kvmd::ustreamer::h264"
|
||||
|
||||
|
||||
vnc:
|
||||
memsink:
|
||||
jpeg:
|
||||
sink: "kvmd::ustreamer::jpeg"
|
||||
h264:
|
||||
sink: "kvmd::ustreamer::h264"
|
||||
@@ -6,4 +6,9 @@
|
||||
server:
|
||||
host: localhost.localdomain
|
||||
|
||||
kvm: {}
|
||||
kvm: {
|
||||
base_on: PiKVM,
|
||||
app_name: One-KVM,
|
||||
main_version: 241204,
|
||||
author: SilentWind
|
||||
}
|
||||
|
||||
@@ -2,16 +2,14 @@ kvmd:
|
||||
auth:
|
||||
enabled: true
|
||||
|
||||
server:
|
||||
unix_mode: 0666
|
||||
access_log_format: '[%P / %{X-Real-IP}i] ''%r'' => 响应:%s;大小:%b;来源:''%{Referer}i'';用户代理:''%{User-Agent}i'''
|
||||
|
||||
atx:
|
||||
type: disabled
|
||||
|
||||
hid:
|
||||
type: ch9329
|
||||
device: /dev/ttyUSB0
|
||||
speed: 9600
|
||||
read_timeout: 0.3
|
||||
|
||||
jiggler:
|
||||
active: false
|
||||
@@ -23,6 +21,9 @@ kvmd:
|
||||
msd:
|
||||
#type: otg
|
||||
remount_cmd: /bin/true
|
||||
msd_path: /var/lib/kvmd/msd
|
||||
normalfiles_path: NormalFiles
|
||||
normalfiles_size: 256
|
||||
|
||||
ocr:
|
||||
langs:
|
||||
@@ -31,16 +32,16 @@ kvmd:
|
||||
|
||||
streamer:
|
||||
resolution:
|
||||
default: 1280x720
|
||||
default: 1920x1080
|
||||
|
||||
forever: true
|
||||
|
||||
desired_fps:
|
||||
default: 30
|
||||
default: 60
|
||||
max: 60
|
||||
|
||||
h264_bitrate:
|
||||
default: 2000
|
||||
default: 8000
|
||||
|
||||
cmd:
|
||||
- "/usr/bin/ustreamer"
|
||||
@@ -65,6 +66,7 @@ kvmd:
|
||||
- "--jpeg-sink-mode=0660"
|
||||
- "--h264-bitrate={h264_bitrate}"
|
||||
- "--h264-gop={h264_gop}"
|
||||
- "--h264-preset=ultrafast"
|
||||
- "--slowdown"
|
||||
gpio:
|
||||
drivers:
|
||||
@@ -148,6 +150,18 @@ vnc:
|
||||
h264:
|
||||
sink: "kvmd::ustreamer::h264"
|
||||
|
||||
media:
|
||||
memsink:
|
||||
h264:
|
||||
sink: 'kvmd::ustreamer::h264'
|
||||
|
||||
jpeg:
|
||||
sink: 'kvmd::ustreamer::jpeg'
|
||||
janus:
|
||||
stun:
|
||||
host: stun.cloudflare.com
|
||||
port: 3478
|
||||
|
||||
otgnet:
|
||||
commands:
|
||||
post_start_cmd:
|
||||
@@ -159,9 +173,4 @@ nginx:
|
||||
http:
|
||||
port: 8080
|
||||
https:
|
||||
port: 4430
|
||||
|
||||
|
||||
languages:
|
||||
console: zh
|
||||
web: zh
|
||||
port: 4430
|
||||
@@ -32,6 +32,16 @@ stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes = 0
|
||||
redirect_stderr=true
|
||||
|
||||
[program:kvmd-media]
|
||||
command=python -m kvmd.apps.media --run
|
||||
autostart=true
|
||||
autorestart=true
|
||||
priority=13
|
||||
stopasgroup=true
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes = 0
|
||||
redirect_stderr=true
|
||||
|
||||
[program:kvmd-nginx]
|
||||
command=nginx -c /etc/kvmd/nginx/nginx.conf -g 'daemon off;user root; error_log stderr;'
|
||||
autostart=true
|
||||
|
||||
16
configs/os/services/kvmd-media.service
Normal file
16
configs/os/services/kvmd-media.service
Normal file
@@ -0,0 +1,16 @@
|
||||
[Unit]
|
||||
Description=PiKVM - Media proxy server
|
||||
After=kvmd.service
|
||||
|
||||
[Service]
|
||||
User=kvmd-media
|
||||
Group=kvmd-media
|
||||
Type=simple
|
||||
Restart=always
|
||||
RestartSec=3
|
||||
|
||||
ExecStart=/usr/bin/kvmd-media --run
|
||||
TimeoutStopSec=3
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
12
configs/os/services/kvmd-oled-reboot.service
Normal file
12
configs/os/services/kvmd-oled-reboot.service
Normal file
@@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=PiKVM - Display reboot message on the OLED
|
||||
DefaultDependencies=no
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/bin/bash -c "kill -USR1 `systemctl show -P MainPID kvmd-oled`"
|
||||
ExecStop=/bin/true
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=reboot.target
|
||||
14
configs/os/services/kvmd-oled-shutdown.service
Normal file
14
configs/os/services/kvmd-oled-shutdown.service
Normal file
@@ -0,0 +1,14 @@
|
||||
[Unit]
|
||||
Description=PiKVM - Display shutdown message on the OLED
|
||||
Conflicts=reboot.target
|
||||
Before=shutdown.target poweroff.target halt.target
|
||||
DefaultDependencies=no
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/bin/bash -c "kill -USR2 `systemctl show -P MainPID kvmd-oled`"
|
||||
ExecStop=/bin/true
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=shutdown.target
|
||||
15
configs/os/services/kvmd-oled.service
Normal file
15
configs/os/services/kvmd-oled.service
Normal file
@@ -0,0 +1,15 @@
|
||||
[Unit]
|
||||
Description=PiKVM - A small OLED daemon
|
||||
After=systemd-modules-load.service
|
||||
ConditionPathExists=/dev/i2c-1
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
Restart=always
|
||||
RestartSec=3
|
||||
ExecStartPre=/usr/bin/kvmd-oled --interval=3 --clear-on-exit --image=@hello.ppm
|
||||
ExecStart=/usr/bin/kvmd-oled
|
||||
TimeoutStopSec=3
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,15 +0,0 @@
|
||||
[Unit]
|
||||
Description=PiKVM - Video Passthrough on V4 Plus
|
||||
Wants=dev-kvmd\x2dvideo.device
|
||||
After=dev-kvmd\x2dvideo.device systemd-modules-load.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
Restart=always
|
||||
RestartSec=3
|
||||
|
||||
ExecStart=/usr/bin/ustreamer-v4p --unix-follow /run/kvmd/ustreamer.sock
|
||||
TimeoutStopSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -2,11 +2,11 @@
|
||||
Description=PiKVM - EDID loader for TC358743
|
||||
Wants=dev-kvmd\x2dvideo.device
|
||||
After=dev-kvmd\x2dvideo.device systemd-modules-load.service
|
||||
Before=kvmd.service kvmd-pass.service
|
||||
Before=kvmd.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/usr/bin/v4l2-ctl --device=/dev/kvmd-video --set-edid=file=/etc/kvmd/tc358743-edid.hex --fix-edid-checksums --info-edid
|
||||
ExecStart=/usr/bin/v4l2-ctl --device=/dev/kvmd-video --set-edid=file=/etc/kvmd/tc358743-edid.hex --info-edid
|
||||
ExecStop=/usr/bin/v4l2-ctl --device=/dev/kvmd-video --clear-edid
|
||||
RemainAfterExit=true
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
g kvmd - -
|
||||
g kvmd-media - -
|
||||
g kvmd-pst - -
|
||||
g kvmd-ipmi - -
|
||||
g kvmd-vnc - -
|
||||
@@ -7,6 +8,7 @@ g kvmd-janus - -
|
||||
g kvmd-certbot - -
|
||||
|
||||
u kvmd - "PiKVM - The main daemon" -
|
||||
u kvmd-media - "PiKVM - The media proxy"
|
||||
u kvmd-pst - "PiKVM - Persistent storage" -
|
||||
u kvmd-ipmi - "PiKVM - IPMI to KVMD proxy" -
|
||||
u kvmd-vnc - "PiKVM - VNC to KVMD/Streamer proxy" -
|
||||
@@ -19,6 +21,10 @@ m kvmd gpio
|
||||
m kvmd uucp
|
||||
m kvmd spi
|
||||
m kvmd systemd-journal
|
||||
m kvmd kvmd-media
|
||||
m kvmd kvmd-pst
|
||||
|
||||
m kvmd-media kvmd
|
||||
|
||||
m kvmd-pst kvmd
|
||||
|
||||
@@ -31,6 +37,7 @@ m kvmd-janus kvmd
|
||||
m kvmd-janus audio
|
||||
|
||||
m kvmd-nginx kvmd
|
||||
m kvmd-nginx kvmd-media
|
||||
m kvmd-nginx kvmd-janus
|
||||
m kvmd-nginx kvmd-certbot
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# Here are described some bindings for PiKVM devices.
|
||||
# Do not edit this file.
|
||||
KERNEL=="ttyACM[0-9]*", SUBSYSTEM=="tty", SUBSYSTEMS=="usb", ATTRS{idVendor}=="1209", ATTRS{idProduct}=="eda3", SYMLINK+="kvmd-hid-bridge"
|
||||
KERNEL=="ttyACM[0-9]*", SUBSYSTEM=="tty", SUBSYSTEMS=="usb", ATTRS{idVendor}=="2e8a", ATTRS{idProduct}=="1080", SYMLINK+="kvmd-switch"
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
# https://unix.stackexchange.com/questions/66901/how-to-bind-usb-device-under-a-static-name
|
||||
# https://wiki.archlinux.org/index.php/Udev#Setting_static_device_names
|
||||
KERNEL=="video[0-9]*", SUBSYSTEM=="video4linux", SUBSYSTEMS=="usb", ATTR{index}=="0", GROUP="kvmd", SYMLINK+="kvmd-video"
|
||||
KERNEL=="hidg0", GROUP="kvmd", SYMLINK+="kvmd-hid-keyboard"
|
||||
KERNEL=="hidg1", GROUP="kvmd", SYMLINK+="kvmd-hid-mouse"
|
||||
KERNEL=="hidg2", GROUP="kvmd", SYMLINK+="kvmd-hid-mouse-alt"
|
||||
KERNEL=="ttyUSB0", GROUP="kvmd", SYMLINK+="kvmd-hid"
|
||||
5
extras/media/manifest.yaml
Normal file
5
extras/media/manifest.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
name: Media
|
||||
description: KVMD Media Proxy
|
||||
path: media
|
||||
daemon: kvmd-media
|
||||
place: -1
|
||||
3
extras/media/nginx.ctx-http.conf
Normal file
3
extras/media/nginx.ctx-http.conf
Normal file
@@ -0,0 +1,3 @@
|
||||
upstream media {
|
||||
server unix:/run/kvmd/media.sock fail_timeout=0s max_fails=0;
|
||||
}
|
||||
7
extras/media/nginx.ctx-server.conf
Normal file
7
extras/media/nginx.ctx-server.conf
Normal file
@@ -0,0 +1,7 @@
|
||||
location /api/media/ws {
|
||||
rewrite ^/api/media/ws$ /ws break;
|
||||
rewrite ^/api/media/ws\?(.*)$ /ws?$1 break;
|
||||
proxy_pass http://media;
|
||||
include /etc/kvmd/nginx/loc-proxy.conf;
|
||||
include /etc/kvmd/nginx/loc-websocket.conf;
|
||||
}
|
||||
@@ -31,7 +31,7 @@ endef
|
||||
.tinyusb:
|
||||
$(call libdep,tinyusb,hathach/tinyusb,d713571cd44f05d2fc72efc09c670787b74106e0)
|
||||
.ps2x2pico:
|
||||
$(call libdep,ps2x2pico,No0ne/ps2x2pico,404aaf02949d5bee8013e3b5d0b3239abf6e13bd)
|
||||
$(call libdep,ps2x2pico,No0ne/ps2x2pico,26ce89d597e598bb0ac636622e064202d91a9efc)
|
||||
deps: .pico-sdk .tinyusb .ps2x2pico
|
||||
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ target_sources(${target_name} PRIVATE
|
||||
${PS2_PATH}/ps2in.c
|
||||
${PS2_PATH}/ps2kb.c
|
||||
${PS2_PATH}/ps2ms.c
|
||||
${PS2_PATH}/scancodesets.c
|
||||
${PS2_PATH}/scancodes.c
|
||||
)
|
||||
target_link_options(${target_name} PRIVATE -Xlinker --print-memory-usage)
|
||||
target_compile_options(${target_name} PRIVATE -Wall -Wextra)
|
||||
|
||||
@@ -53,7 +53,7 @@ static u8 _kbd_keys[6] = {0};
|
||||
static u8 _mouse_buttons = 0;
|
||||
static s16 _mouse_abs_x = 0;
|
||||
static s16 _mouse_abs_y = 0;
|
||||
#define _MOUSE_CLEAR { _mouse_buttons = 0; _mouse_abs_x = 0; _mouse_abs_y = 0; }
|
||||
#define _MOUSE_CLEAR { _mouse_buttons = 0; }
|
||||
|
||||
|
||||
static void _kbd_sync_report(bool new);
|
||||
@@ -193,7 +193,7 @@ void ph_usb_send_clear(void) {
|
||||
if (PH_O_IS_MOUSE_USB) {
|
||||
_MOUSE_CLEAR;
|
||||
if (PH_O_IS_MOUSE_USB_ABS) {
|
||||
_mouse_abs_send_report(0, 0);
|
||||
_mouse_abs_send_report(_mouse_abs_x, _mouse_abs_y);
|
||||
} else { // PH_O_IS_MOUSE_USB_REL
|
||||
_mouse_rel_send_report(0, 0, 0, 0);
|
||||
}
|
||||
|
||||
22
kvmd.install
22
kvmd.install
@@ -27,7 +27,8 @@ post_upgrade() {
|
||||
done
|
||||
|
||||
chown kvmd /var/lib/kvmd/msd 2>/dev/null || true
|
||||
chown kvmd-pst /var/lib/kvmd/pst 2>/dev/null || true
|
||||
chown kvmd-pst:kvmd-pst /var/lib/kvmd/pst 2>/dev/null || true
|
||||
chmod 1775 /var/lib/kvmd/pst 2>/dev/null || true
|
||||
|
||||
if [ ! -e /etc/kvmd/nginx/ssl/server.crt ]; then
|
||||
echo "==> Generating KVMD-Nginx certificate ..."
|
||||
@@ -92,6 +93,25 @@ disable_overscan=1
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [[ "$(vercmp "$2" 4.4)" -lt 0 ]]; then
|
||||
systemctl disable kvmd-pass || true
|
||||
fi
|
||||
|
||||
if [[ "$(vercmp "$2" 4.5)" -lt 0 ]]; then
|
||||
sed -i 's/X-kvmd\.pst-user=kvmd-pst/X-kvmd.pst-user=kvmd-pst,X-kvmd.pst-group=kvmd-pst/g' /etc/fstab
|
||||
touch -t 200701011000 /etc/fstab
|
||||
fi
|
||||
|
||||
if [[ "$(vercmp "$2" 4.31)" -lt 0 ]]; then
|
||||
if [[ "$(systemctl is-enabled kvmd-janus || true)" = enabled || "$(systemctl is-enabled kvmd-janus-static || true)" = enabled ]]; then
|
||||
systemctl enable kvmd-media || true
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$(vercmp "$2" 4.47)" -lt 0 ]]; then
|
||||
cp /usr/share/kvmd/configs.default/janus/janus.plugin.ustreamer.jcfg /etc/kvmd/janus || true
|
||||
fi
|
||||
|
||||
# Some update deletes /etc/motd, WTF
|
||||
# shellcheck disable=SC2015,SC2166
|
||||
[ ! -f /etc/motd -a -f /etc/motd.pacsave ] && mv /etc/motd.pacsave /etc/motd || true
|
||||
|
||||
@@ -20,4 +20,4 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
__version__ = "4.3"
|
||||
__version__ = "4.49"
|
||||
|
||||
@@ -83,9 +83,9 @@ class AioReader: # pylint: disable=too-many-instance-attributes
|
||||
self.__path,
|
||||
consumer=self.__consumer,
|
||||
config={tuple(pins): gpiod.LineSettings(edge_detection=gpiod.line.Edge.BOTH)},
|
||||
) as line_request:
|
||||
) as line_req:
|
||||
|
||||
line_request.wait_edge_events(0.1)
|
||||
line_req.wait_edge_events(0.1)
|
||||
self.__values = {
|
||||
pin: _DebouncedValue(
|
||||
initial=bool(value.value),
|
||||
@@ -93,14 +93,14 @@ class AioReader: # pylint: disable=too-many-instance-attributes
|
||||
notifier=self.__notifier,
|
||||
loop=self.__loop,
|
||||
)
|
||||
for (pin, value) in zip(pins, line_request.get_values(pins))
|
||||
for (pin, value) in zip(pins, line_req.get_values(pins))
|
||||
}
|
||||
self.__loop.call_soon_threadsafe(self.__notifier.notify)
|
||||
|
||||
while not self.__stop_event.is_set():
|
||||
if line_request.wait_edge_events(1):
|
||||
if line_req.wait_edge_events(1):
|
||||
new: dict[int, bool] = {}
|
||||
for event in line_request.read_edge_events():
|
||||
for event in line_req.read_edge_events():
|
||||
(pin, value) = self.__parse_event(event)
|
||||
new[pin] = value
|
||||
for (pin, value) in new.items():
|
||||
@@ -110,7 +110,7 @@ class AioReader: # pylint: disable=too-many-instance-attributes
|
||||
# Размер буфера ядра - 16 эвентов на линии. При превышении этого числа,
|
||||
# новые эвенты потеряются. Это не баг, это фича, как мне объяснили в LKML.
|
||||
# Штош. Будем с этим жить и синхронизировать состояния при таймауте.
|
||||
for (pin, value) in zip(pins, line_request.get_values(pins)):
|
||||
for (pin, value) in zip(pins, line_req.get_values(pins)):
|
||||
self.__values[pin].set(bool(value.value)) # type: ignore
|
||||
|
||||
def __parse_event(self, event: gpiod.EdgeEvent) -> tuple[int, bool]:
|
||||
|
||||
@@ -22,8 +22,6 @@
|
||||
|
||||
import subprocess
|
||||
|
||||
from .languages import Languages
|
||||
|
||||
from .logging import get_logger
|
||||
|
||||
from . import tools
|
||||
@@ -38,13 +36,13 @@ async def remount(name: str, base_cmd: list[str], rw: bool) -> bool:
|
||||
part.format(mode=mode)
|
||||
for part in base_cmd
|
||||
]
|
||||
logger.info(Languages().gettext("Remounting %s storage to %s: %s ..."), name, mode.upper(), tools.cmdfmt(cmd))
|
||||
logger.info("Remounting %s storage to %s: %s ...", name, mode.upper(), tools.cmdfmt(cmd))
|
||||
try:
|
||||
proc = await aioproc.log_process(cmd, logger)
|
||||
if proc.returncode != 0:
|
||||
assert proc.returncode is not None
|
||||
raise subprocess.CalledProcessError(proc.returncode, cmd)
|
||||
except Exception as err:
|
||||
logger.error(Languages().gettext("Can't remount %s storage: %s"), name, tools.efmt(err))
|
||||
except Exception as ex:
|
||||
logger.error("Can't remount %s storage: %s", name, tools.efmt(ex))
|
||||
return False
|
||||
return True
|
||||
|
||||
@@ -59,14 +59,25 @@ def queue_get_last_sync( # pylint: disable=invalid-name
|
||||
# =====
|
||||
class AioProcessNotifier:
|
||||
def __init__(self) -> None:
|
||||
self.__queue: "multiprocessing.Queue[None]" = multiprocessing.Queue()
|
||||
self.__queue: "multiprocessing.Queue[int]" = multiprocessing.Queue()
|
||||
|
||||
def notify(self) -> None:
|
||||
self.__queue.put_nowait(None)
|
||||
def notify(self, mask: int=0) -> None:
|
||||
self.__queue.put_nowait(mask)
|
||||
|
||||
async def wait(self) -> None:
|
||||
while not (await queue_get_last(self.__queue, 0.1))[0]:
|
||||
pass
|
||||
async def wait(self) -> int:
|
||||
while True:
|
||||
mask = await aiotools.run_async(self.__get)
|
||||
if mask >= 0:
|
||||
return mask
|
||||
|
||||
def __get(self) -> int:
|
||||
try:
|
||||
mask = self.__queue.get(timeout=0.1)
|
||||
while not self.__queue.empty():
|
||||
mask |= self.__queue.get()
|
||||
return mask
|
||||
except queue.Empty:
|
||||
return -1
|
||||
|
||||
|
||||
# =====
|
||||
|
||||
@@ -26,7 +26,6 @@ import asyncio
|
||||
import asyncio.subprocess
|
||||
import logging
|
||||
|
||||
from .languages import Languages
|
||||
import setproctitle
|
||||
|
||||
from .logging import get_logger
|
||||
@@ -86,7 +85,7 @@ async def log_stdout_infinite(proc: asyncio.subprocess.Process, logger: logging.
|
||||
else:
|
||||
empty += 1
|
||||
if empty == 100: # asyncio bug
|
||||
raise RuntimeError(Languages().gettext("Asyncio process: too many empty lines"))
|
||||
raise RuntimeError("Asyncio process: too many empty lines")
|
||||
|
||||
|
||||
async def kill_process(proc: asyncio.subprocess.Process, wait: float, logger: logging.Logger) -> None: # pylint: disable=no-member
|
||||
@@ -101,14 +100,14 @@ async def kill_process(proc: asyncio.subprocess.Process, wait: float, logger: lo
|
||||
if proc.returncode is not None:
|
||||
raise
|
||||
await proc.wait()
|
||||
logger.info(Languages().gettext("Process killed: retcode=%d"), proc.returncode)
|
||||
logger.info("Process killed: retcode=%d", proc.returncode)
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
except Exception:
|
||||
if proc.returncode is None:
|
||||
logger.exception(Languages().gettext("Can't kill process pid=%d"), proc.pid)
|
||||
logger.exception("Can't kill process pid=%d", proc.pid)
|
||||
else:
|
||||
logger.info(Languages().gettext("Process killed: retcode=%d"), proc.returncode)
|
||||
logger.info("Process killed: retcode=%d", proc.returncode)
|
||||
|
||||
|
||||
def rename_process(suffix: str, prefix: str="kvmd") -> None:
|
||||
@@ -117,7 +116,7 @@ def rename_process(suffix: str, prefix: str="kvmd") -> None:
|
||||
|
||||
def settle(name: str, suffix: str, prefix: str="kvmd") -> logging.Logger:
|
||||
logger = get_logger(1)
|
||||
logger.info(Languages().gettext("Started %s pid=%d"), name, os.getpid())
|
||||
logger.info("Started %s pid=%d", name, os.getpid())
|
||||
os.setpgrp()
|
||||
rename_process(suffix, prefix)
|
||||
return logger
|
||||
|
||||
@@ -45,6 +45,11 @@ async def read_file(path: str) -> str:
|
||||
return (await file.read())
|
||||
|
||||
|
||||
async def write_file(path: str, text: str) -> None:
|
||||
async with aiofiles.open(path, "w") as file:
|
||||
await file.write(text)
|
||||
|
||||
|
||||
# =====
|
||||
def run(coro: Coroutine, final: (Coroutine | None)=None) -> None:
|
||||
# https://github.com/aio-libs/aiohttp/blob/a1d4dac1d/aiohttp/web.py#L515
|
||||
@@ -112,9 +117,9 @@ def shield_fg(aw: Awaitable): # type: ignore
|
||||
if inner.cancelled():
|
||||
outer.forced_cancel()
|
||||
else:
|
||||
err = inner.exception()
|
||||
if err is not None:
|
||||
outer.set_exception(err)
|
||||
ex = inner.exception()
|
||||
if ex is not None:
|
||||
outer.set_exception(ex)
|
||||
else:
|
||||
outer.set_result(inner.result())
|
||||
|
||||
@@ -166,7 +171,7 @@ def create_deadly_task(name: str, coro: Coroutine) -> asyncio.Task:
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
except Exception:
|
||||
logger.exception("Unhandled exception in deadly task, killing myself ...")
|
||||
logger.exception("Unhandled exception in deadly task %r, killing myself ...", name)
|
||||
pid = os.getpid()
|
||||
if pid == 1:
|
||||
os._exit(1) # Docker workaround # pylint: disable=protected-access
|
||||
@@ -232,25 +237,26 @@ async def close_writer(writer: asyncio.StreamWriter) -> bool:
|
||||
# =====
|
||||
class AioNotifier:
|
||||
def __init__(self) -> None:
|
||||
self.__queue: "asyncio.Queue[None]" = asyncio.Queue()
|
||||
self.__queue: "asyncio.Queue[int]" = asyncio.Queue()
|
||||
|
||||
def notify(self) -> None:
|
||||
self.__queue.put_nowait(None)
|
||||
def notify(self, mask: int=0) -> None:
|
||||
self.__queue.put_nowait(mask)
|
||||
|
||||
async def wait(self, timeout: (float | None)=None) -> None:
|
||||
async def wait(self, timeout: (float | None)=None) -> int:
|
||||
mask = 0
|
||||
if timeout is None:
|
||||
await self.__queue.get()
|
||||
mask = await self.__queue.get()
|
||||
else:
|
||||
try:
|
||||
await asyncio.wait_for(
|
||||
mask = await asyncio.wait_for(
|
||||
asyncio.ensure_future(self.__queue.get()),
|
||||
timeout=timeout,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
return # False
|
||||
return -1
|
||||
while not self.__queue.empty():
|
||||
await self.__queue.get()
|
||||
# return True
|
||||
mask |= await self.__queue.get()
|
||||
return mask
|
||||
|
||||
|
||||
# =====
|
||||
@@ -296,7 +302,7 @@ class AioExclusiveRegion:
|
||||
def is_busy(self) -> bool:
|
||||
return self.__busy
|
||||
|
||||
async def enter(self) -> None:
|
||||
def enter(self) -> None:
|
||||
if not self.__busy:
|
||||
self.__busy = True
|
||||
try:
|
||||
@@ -308,22 +314,22 @@ class AioExclusiveRegion:
|
||||
return
|
||||
raise self.__exc_type()
|
||||
|
||||
async def exit(self) -> None:
|
||||
def exit(self) -> None:
|
||||
self.__busy = False
|
||||
if self.__notifier:
|
||||
self.__notifier.notify()
|
||||
|
||||
async def __aenter__(self) -> None:
|
||||
await self.enter()
|
||||
def __enter__(self) -> None:
|
||||
self.enter()
|
||||
|
||||
async def __aexit__(
|
||||
def __exit__(
|
||||
self,
|
||||
_exc_type: type[BaseException],
|
||||
_exc: BaseException,
|
||||
_tb: types.TracebackType,
|
||||
) -> None:
|
||||
|
||||
await self.exit()
|
||||
self.exit()
|
||||
|
||||
|
||||
async def run_region_task(
|
||||
@@ -338,7 +344,7 @@ async def run_region_task(
|
||||
|
||||
async def wrapper() -> None:
|
||||
try:
|
||||
async with region:
|
||||
with region:
|
||||
entered.set_result(None)
|
||||
await func(*args, **kwargs)
|
||||
except region.get_exc_type():
|
||||
|
||||
@@ -31,12 +31,8 @@ import pygments
|
||||
import pygments.lexers.data
|
||||
import pygments.formatters
|
||||
|
||||
from gettext import translation
|
||||
|
||||
from .. import tools
|
||||
|
||||
from ..mouse import MouseRange
|
||||
|
||||
from ..plugins import UnknownPluginError
|
||||
from ..plugins.auth import get_auth_service_class
|
||||
from ..plugins.hid import get_hid_class
|
||||
@@ -105,9 +101,6 @@ from ..validators.hw import valid_otg_gadget
|
||||
from ..validators.hw import valid_otg_id
|
||||
from ..validators.hw import valid_otg_ethernet
|
||||
|
||||
from ..validators.languages import valid_languages
|
||||
|
||||
from ..languages import Languages
|
||||
|
||||
# =====
|
||||
def init(
|
||||
@@ -129,7 +122,6 @@ def init(
|
||||
add_help=add_help,
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
)
|
||||
|
||||
parser.add_argument("-c", "--config", default="/etc/kvmd/main.yaml", type=valid_abs_file,
|
||||
help="Set config file path", metavar="<file>")
|
||||
parser.add_argument("-o", "--set-options", default=[], nargs="+",
|
||||
@@ -153,18 +145,9 @@ def init(
|
||||
))
|
||||
raise SystemExit()
|
||||
config = _init_config(options.config, options.set_options, **load)
|
||||
|
||||
logging.captureWarnings(True)
|
||||
logging.config.dictConfig(config.logging)
|
||||
|
||||
if isinstance(config.get("languages"), dict) and isinstance(config["languages"].get("console"), str):
|
||||
i18n_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+"/i18n"
|
||||
Languages.init("message", i18n_path, config["languages"]["console"])
|
||||
gettext = Languages().gettext
|
||||
|
||||
logging.addLevelName(20, gettext("INFO"))
|
||||
logging.addLevelName(30, gettext("WARNING"))
|
||||
logging.addLevelName(40, gettext("ERROR"))
|
||||
|
||||
if cli_logging:
|
||||
logging.getLogger().handlers[0].setFormatter(logging.Formatter(
|
||||
"-- {levelname:>7} -- {message}",
|
||||
@@ -173,7 +156,10 @@ def init(
|
||||
|
||||
if check_run and not options.run:
|
||||
raise SystemExit(
|
||||
gettext("To prevent accidental startup, you must specify the --run option to start.\n")+gettext("Try the --help option to find out what this service does.\n")+gettext("Make sure you understand exactly what you are doing!"))
|
||||
"To prevent accidental startup, you must specify the --run option to start.\n"
|
||||
"Try the --help option to find out what this service does.\n"
|
||||
"Make sure you understand exactly what you are doing!"
|
||||
)
|
||||
|
||||
return (parser, remaining, config)
|
||||
|
||||
@@ -183,8 +169,8 @@ def _init_config(config_path: str, override_options: list[str], **load_flags: bo
|
||||
config_path = os.path.expanduser(config_path)
|
||||
try:
|
||||
raw_config: dict = load_yaml_file(config_path)
|
||||
except Exception as err:
|
||||
raise SystemExit(f"ConfigError: Can't read config file {config_path!r}:\n{tools.efmt(err)}")
|
||||
except Exception as ex:
|
||||
raise SystemExit(f"ConfigError: Can't read config file {config_path!r}:\n{tools.efmt(ex)}")
|
||||
if not isinstance(raw_config, dict):
|
||||
raise SystemExit(f"ConfigError: Top-level of the file {config_path!r} must be a dictionary")
|
||||
|
||||
@@ -199,8 +185,8 @@ def _init_config(config_path: str, override_options: list[str], **load_flags: bo
|
||||
config = make_config(raw_config, scheme)
|
||||
|
||||
return config
|
||||
except (ConfigError, UnknownPluginError) as err:
|
||||
raise SystemExit(f"ConfigError: {err}")
|
||||
except (ConfigError, UnknownPluginError) as ex:
|
||||
raise SystemExit(f"ConfigError: {ex}")
|
||||
|
||||
|
||||
def _patch_raw(raw_config: dict) -> None: # pylint: disable=too-many-branches
|
||||
@@ -419,19 +405,7 @@ def _get_config_scheme() -> dict:
|
||||
|
||||
"hid": {
|
||||
"type": Option("", type=valid_stripped_string_not_empty),
|
||||
|
||||
"keymap": Option("/usr/share/kvmd/keymaps/en-us", type=valid_abs_file),
|
||||
"ignore_keys": Option([], type=functools.partial(valid_string_list, subval=valid_hid_key)),
|
||||
|
||||
"mouse_x_range": {
|
||||
"min": Option(MouseRange.MIN, type=valid_hid_mouse_move),
|
||||
"max": Option(MouseRange.MAX, type=valid_hid_mouse_move),
|
||||
},
|
||||
"mouse_y_range": {
|
||||
"min": Option(MouseRange.MIN, type=valid_hid_mouse_move),
|
||||
"max": Option(MouseRange.MAX, type=valid_hid_mouse_move),
|
||||
},
|
||||
|
||||
"keymap": Option("/usr/share/kvmd/keymaps/en-us", type=valid_abs_file),
|
||||
# Dynamic content
|
||||
},
|
||||
|
||||
@@ -528,6 +502,37 @@ def _get_config_scheme() -> dict:
|
||||
"table": Option([], type=valid_ugpio_view_table),
|
||||
},
|
||||
},
|
||||
|
||||
"switch": {
|
||||
"device": Option("/dev/kvmd-switch", type=valid_abs_path, unpack_as="device_path"),
|
||||
"default_edid": Option("/etc/kvmd/switch-edid.hex", type=valid_abs_path, unpack_as="default_edid_path"),
|
||||
},
|
||||
},
|
||||
|
||||
"media": {
|
||||
"server": {
|
||||
"unix": Option("/run/kvmd/media.sock", type=valid_abs_path, unpack_as="unix_path"),
|
||||
"unix_rm": Option(True, type=valid_bool),
|
||||
"unix_mode": Option(0o660, type=valid_unix_mode),
|
||||
"heartbeat": Option(15.0, type=valid_float_f01),
|
||||
"access_log_format": Option("[%P / %{X-Real-IP}i] '%r' => %s; size=%b ---"
|
||||
" referer='%{Referer}i'; user_agent='%{User-Agent}i'"),
|
||||
},
|
||||
|
||||
"memsink": {
|
||||
"jpeg": {
|
||||
"sink": Option("", unpack_as="obj"),
|
||||
"lock_timeout": Option(1.0, type=valid_float_f01),
|
||||
"wait_timeout": Option(1.0, type=valid_float_f01),
|
||||
"drop_same_frames": Option(0.0, type=valid_float_f0),
|
||||
},
|
||||
"h264": {
|
||||
"sink": Option("", unpack_as="obj"),
|
||||
"lock_timeout": Option(1.0, type=valid_float_f01),
|
||||
"wait_timeout": Option(1.0, type=valid_float_f01),
|
||||
"drop_same_frames": Option(0.0, type=valid_float_f0),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"pst": {
|
||||
@@ -558,11 +563,12 @@ def _get_config_scheme() -> dict:
|
||||
"device_version": Option(-1, type=functools.partial(valid_number, min=-1, max=0xFFFF)),
|
||||
"usb_version": Option(0x0200, type=valid_otg_id),
|
||||
"max_power": Option(250, type=functools.partial(valid_number, min=50, max=500)),
|
||||
"remote_wakeup": Option(False, type=valid_bool),
|
||||
"remote_wakeup": Option(True, type=valid_bool),
|
||||
|
||||
"gadget": Option("kvmd", type=valid_otg_gadget),
|
||||
"config": Option("PiKVM device", type=valid_stripped_string_not_empty),
|
||||
"udc": Option("", type=valid_stripped_string),
|
||||
"endpoints": Option(9, type=valid_int_f0),
|
||||
"init_delay": Option(3.0, type=valid_float_f01),
|
||||
|
||||
"user": Option("kvmd", type=valid_user),
|
||||
@@ -576,6 +582,9 @@ def _get_config_scheme() -> dict:
|
||||
"mouse": {
|
||||
"start": Option(True, type=valid_bool),
|
||||
},
|
||||
"mouse_alt": {
|
||||
"start": Option(True, type=valid_bool),
|
||||
},
|
||||
},
|
||||
|
||||
"msd": {
|
||||
@@ -586,6 +595,18 @@ def _get_config_scheme() -> dict:
|
||||
"rw": Option(False, type=valid_bool),
|
||||
"removable": Option(True, type=valid_bool),
|
||||
"fua": Option(True, type=valid_bool),
|
||||
"inquiry_string": {
|
||||
"cdrom": {
|
||||
"vendor": Option("PiKVM", type=valid_stripped_string),
|
||||
"product": Option("Optical Drive", type=valid_stripped_string),
|
||||
"revision": Option("1.00", type=valid_stripped_string),
|
||||
},
|
||||
"flash": {
|
||||
"vendor": Option("PiKVM", type=valid_stripped_string),
|
||||
"product": Option("Flash Drive", type=valid_stripped_string),
|
||||
"revision": Option("1.00", type=valid_stripped_string),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -602,6 +623,11 @@ def _get_config_scheme() -> dict:
|
||||
"kvm_mac": Option("", type=valid_mac, if_empty=""),
|
||||
},
|
||||
|
||||
"audio": {
|
||||
"enabled": Option(False, type=valid_bool),
|
||||
"start": Option(True, type=valid_bool),
|
||||
},
|
||||
|
||||
"drives": {
|
||||
"enabled": Option(False, type=valid_bool),
|
||||
"start": Option(True, type=valid_bool),
|
||||
@@ -612,6 +638,18 @@ def _get_config_scheme() -> dict:
|
||||
"rw": Option(True, type=valid_bool),
|
||||
"removable": Option(True, type=valid_bool),
|
||||
"fua": Option(True, type=valid_bool),
|
||||
"inquiry_string": {
|
||||
"cdrom": {
|
||||
"vendor": Option("PiKVM", type=valid_stripped_string),
|
||||
"product": Option("Optical Drive", type=valid_stripped_string),
|
||||
"revision": Option("1.00", type=valid_stripped_string),
|
||||
},
|
||||
"flash": {
|
||||
"vendor": Option("PiKVM", type=valid_stripped_string),
|
||||
"product": Option("Flash Drive", type=valid_stripped_string),
|
||||
"revision": Option("1.00", type=valid_stripped_string),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -693,9 +731,10 @@ def _get_config_scheme() -> dict:
|
||||
},
|
||||
|
||||
"vnc": {
|
||||
"desired_fps": Option(30, type=valid_stream_fps),
|
||||
"mouse_output": Option("usb", type=valid_hid_mouse_output),
|
||||
"keymap": Option("/usr/share/kvmd/keymaps/en-us", type=valid_abs_file),
|
||||
"desired_fps": Option(30, type=valid_stream_fps),
|
||||
"mouse_output": Option("usb", type=valid_hid_mouse_output),
|
||||
"keymap": Option("/usr/share/kvmd/keymaps/en-us", type=valid_abs_file),
|
||||
"allow_cut_after": Option(3.0, type=valid_float_f0),
|
||||
|
||||
"server": {
|
||||
"host": Option("", type=valid_ip_or_host, if_empty=""),
|
||||
@@ -798,9 +837,4 @@ def _get_config_scheme() -> dict:
|
||||
"timeout": Option(300, type=valid_int_f1),
|
||||
"interval": Option(30, type=valid_int_f1),
|
||||
},
|
||||
|
||||
"languages": {
|
||||
"console": Option("default", type=valid_languages),
|
||||
"web": Option("default", type=valid_languages),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -22,259 +22,22 @@
|
||||
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import dataclasses
|
||||
import contextlib
|
||||
import subprocess
|
||||
import argparse
|
||||
import time
|
||||
|
||||
from typing import IO
|
||||
from typing import Generator
|
||||
from typing import Callable
|
||||
|
||||
from ...validators.basic import valid_bool
|
||||
from ...validators.basic import valid_int_f0
|
||||
|
||||
from ...edid import EdidNoBlockError
|
||||
from ...edid import Edid
|
||||
|
||||
# from .. import init
|
||||
|
||||
|
||||
# =====
|
||||
class NoBlockError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _smart_open(path: str, mode: str) -> Generator[IO, None, None]:
|
||||
fd = (0 if "r" in mode else 1)
|
||||
with (os.fdopen(fd, mode, closefd=False) if path == "-" else open(path, mode)) as file:
|
||||
yield file
|
||||
if "w" in mode:
|
||||
file.flush()
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CeaBlock:
|
||||
tag: int
|
||||
data: bytes
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
assert 0 < self.tag <= 0b111
|
||||
assert 0 < len(self.data) <= 0b11111
|
||||
|
||||
@property
|
||||
def size(self) -> int:
|
||||
return len(self.data) + 1
|
||||
|
||||
def pack(self) -> bytes:
|
||||
header = (self.tag << 5) | len(self.data)
|
||||
return header.to_bytes() + self.data
|
||||
|
||||
@classmethod
|
||||
def first_from_raw(cls, raw: (bytes | list[int])) -> "_CeaBlock":
|
||||
assert 0 < raw[0] <= 0xFF
|
||||
tag = (raw[0] & 0b11100000) >> 5
|
||||
data_size = (raw[0] & 0b00011111)
|
||||
data = bytes(raw[1:data_size + 1])
|
||||
return _CeaBlock(tag, data)
|
||||
|
||||
|
||||
_CEA = 128
|
||||
_CEA_AUDIO = 1
|
||||
_CEA_SPEAKERS = 4
|
||||
|
||||
|
||||
class _Edid:
|
||||
# https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
|
||||
|
||||
def __init__(self, path: str) -> None:
|
||||
with _smart_open(path, "rb") as file:
|
||||
data = file.read()
|
||||
if data.startswith(b"\x00\xFF\xFF\xFF\xFF\xFF\xFF\x00"):
|
||||
self.__data = list(data)
|
||||
else:
|
||||
text = re.sub(r"\s", "", data.decode())
|
||||
self.__data = [
|
||||
int(text[index:index + 2], 16)
|
||||
for index in range(0, len(text), 2)
|
||||
]
|
||||
assert len(self.__data) == 256, f"Invalid EDID length: {len(self.__data)}, should be 256 bytes"
|
||||
assert self.__data[126] == 1, "Zero extensions number"
|
||||
assert (self.__data[_CEA + 0], self.__data[_CEA + 1]) == (0x02, 0x03), "Can't find CEA extension"
|
||||
|
||||
def write_hex(self, path: str) -> None:
|
||||
self.__update_checksums()
|
||||
text = "\n".join(
|
||||
"".join(
|
||||
f"{item:0{2}X}"
|
||||
for item in self.__data[index:index + 16]
|
||||
)
|
||||
for index in range(0, len(self.__data), 16)
|
||||
) + "\n"
|
||||
with _smart_open(path, "w") as file:
|
||||
file.write(text)
|
||||
|
||||
def write_bin(self, path: str) -> None:
|
||||
self.__update_checksums()
|
||||
with _smart_open(path, "wb") as file:
|
||||
file.write(bytes(self.__data))
|
||||
|
||||
def __update_checksums(self) -> None:
|
||||
self.__data[127] = 256 - (sum(self.__data[:127]) % 256)
|
||||
self.__data[255] = 256 - (sum(self.__data[128:255]) % 256)
|
||||
|
||||
# =====
|
||||
|
||||
def get_mfc_id(self) -> str:
|
||||
raw = self.__data[8] << 8 | self.__data[9]
|
||||
return bytes([
|
||||
((raw >> 10) & 0b11111) + 0x40,
|
||||
((raw >> 5) & 0b11111) + 0x40,
|
||||
(raw & 0b11111) + 0x40,
|
||||
]).decode("ascii")
|
||||
|
||||
def set_mfc_id(self, mfc_id: str) -> None:
|
||||
assert len(mfc_id) == 3, "Mfc ID must be 3 characters long"
|
||||
data = mfc_id.upper().encode("ascii")
|
||||
for ch in data:
|
||||
assert 0x41 <= ch <= 0x5A, "Mfc ID must contain only A-Z characters"
|
||||
raw = (
|
||||
(data[2] - 0x40)
|
||||
| ((data[1] - 0x40) << 5)
|
||||
| ((data[0] - 0x40) << 10)
|
||||
)
|
||||
self.__data[8] = (raw >> 8) & 0xFF
|
||||
self.__data[9] = raw & 0xFF
|
||||
|
||||
# =====
|
||||
|
||||
def get_product_id(self) -> int:
|
||||
return (self.__data[10] | self.__data[11] << 8)
|
||||
|
||||
def set_product_id(self, product_id: int) -> None:
|
||||
assert 0 <= product_id <= 0xFFFF, f"Product ID should be from 0 to {0xFFFF}"
|
||||
self.__data[10] = product_id & 0xFF
|
||||
self.__data[11] = (product_id >> 8) & 0xFF
|
||||
|
||||
# =====
|
||||
|
||||
def get_serial(self) -> int:
|
||||
return (
|
||||
self.__data[12]
|
||||
| self.__data[13] << 8
|
||||
| self.__data[14] << 16
|
||||
| self.__data[15] << 24
|
||||
)
|
||||
|
||||
def set_serial(self, serial: int) -> None:
|
||||
assert 0 <= serial <= 0xFFFFFFFF, f"Serial should be from 0 to {0xFFFFFFFF}"
|
||||
self.__data[12] = serial & 0xFF
|
||||
self.__data[13] = (serial >> 8) & 0xFF
|
||||
self.__data[14] = (serial >> 16) & 0xFF
|
||||
self.__data[15] = (serial >> 24) & 0xFF
|
||||
|
||||
# =====
|
||||
|
||||
def get_monitor_name(self) -> str:
|
||||
return self.__get_dtd_text(0xFC, "Monitor Name")
|
||||
|
||||
def set_monitor_name(self, text: str) -> None:
|
||||
self.__set_dtd_text(0xFC, "Monitor Name", text)
|
||||
|
||||
def get_monitor_serial(self) -> str:
|
||||
return self.__get_dtd_text(0xFF, "Monitor Serial")
|
||||
|
||||
def set_monitor_serial(self, text: str) -> None:
|
||||
self.__set_dtd_text(0xFF, "Monitor Serial", text)
|
||||
|
||||
def __get_dtd_text(self, d_type: int, name: str) -> str:
|
||||
index = self.__find_dtd_text(d_type, name)
|
||||
return bytes(self.__data[index:index + 13]).decode("cp437").strip()
|
||||
|
||||
def __set_dtd_text(self, d_type: int, name: str, text: str) -> None:
|
||||
index = self.__find_dtd_text(d_type, name)
|
||||
encoded = (text[:13] + "\n" + " " * 12)[:13].encode("cp437")
|
||||
for (offset, ch) in enumerate(encoded):
|
||||
self.__data[index + offset] = ch
|
||||
|
||||
def __find_dtd_text(self, d_type: int, name: str) -> int:
|
||||
for index in [54, 72, 90, 108]:
|
||||
if self.__data[index + 3] == d_type:
|
||||
return index + 5
|
||||
raise NoBlockError(f"Can't find DTD {name}")
|
||||
|
||||
# ===== CEA =====
|
||||
|
||||
def get_audio(self) -> bool:
|
||||
(cbs, _) = self.__parse_cea()
|
||||
audio = False
|
||||
speakers = False
|
||||
for cb in cbs:
|
||||
if cb.tag == _CEA_AUDIO:
|
||||
audio = True
|
||||
elif cb.tag == _CEA_SPEAKERS:
|
||||
speakers = True
|
||||
return (audio and speakers and self.__get_basic_audio())
|
||||
|
||||
def set_audio(self, enabled: bool) -> None:
|
||||
(cbs, dtds) = self.__parse_cea()
|
||||
cbs = [cb for cb in cbs if cb.tag not in [_CEA_AUDIO, _CEA_SPEAKERS]]
|
||||
if enabled:
|
||||
cbs.append(_CeaBlock(_CEA_AUDIO, b"\x09\x7f\x07"))
|
||||
cbs.append(_CeaBlock(_CEA_SPEAKERS, b"\x01\x00\x00"))
|
||||
self.__replace_cea(cbs, dtds)
|
||||
self.__set_basic_audio(enabled)
|
||||
|
||||
def __get_basic_audio(self) -> bool:
|
||||
return bool(self.__data[_CEA + 3] & 0b01000000)
|
||||
|
||||
def __set_basic_audio(self, enabled: bool) -> None:
|
||||
if enabled:
|
||||
self.__data[_CEA + 3] |= 0b01000000
|
||||
else:
|
||||
self.__data[_CEA + 3] &= (0xFF - 0b01000000) # ~X
|
||||
|
||||
def __parse_cea(self) -> tuple[list[_CeaBlock], bytes]:
|
||||
cea = self.__data[_CEA:]
|
||||
dtd_begin = cea[2]
|
||||
if dtd_begin == 0:
|
||||
return ([], b"")
|
||||
|
||||
cbs: list[_CeaBlock] = []
|
||||
if dtd_begin > 4:
|
||||
raw = cea[4:dtd_begin]
|
||||
while len(raw) != 0:
|
||||
cb = _CeaBlock.first_from_raw(raw)
|
||||
cbs.append(cb)
|
||||
raw = raw[cb.size:]
|
||||
|
||||
dtds = b""
|
||||
assert dtd_begin >= 4
|
||||
raw = cea[dtd_begin:]
|
||||
while len(raw) > (18 + 1) and raw[0] != 0:
|
||||
dtds += bytes(raw[:18])
|
||||
raw = raw[18:]
|
||||
|
||||
return (cbs, dtds)
|
||||
|
||||
def __replace_cea(self, cbs: list[_CeaBlock], dtds: bytes) -> None:
|
||||
cbs_packed = b""
|
||||
for cb in cbs:
|
||||
cbs_packed += cb.pack()
|
||||
|
||||
raw = cbs_packed + dtds
|
||||
assert len(raw) <= (128 - 4 - 1), "Too many CEA blocks or DTDs"
|
||||
|
||||
self.__data[_CEA + 2] = (0 if len(raw) == 0 else (len(cbs_packed) + 4))
|
||||
|
||||
for index in range(4, 127):
|
||||
try:
|
||||
ch = raw[index - 4]
|
||||
except IndexError:
|
||||
ch = 0
|
||||
self.__data[_CEA + index] = ch
|
||||
|
||||
|
||||
def _format_bool(value: bool) -> str:
|
||||
return ("yes" if value else "no")
|
||||
|
||||
@@ -283,7 +46,7 @@ def _make_format_hex(size: int) -> Callable[[int], str]:
|
||||
return (lambda value: ("0x{:0%dX} ({})" % (size * 2)).format(value, value))
|
||||
|
||||
|
||||
def _print_edid(edid: _Edid) -> None:
|
||||
def _print_edid(edid: Edid) -> None:
|
||||
for (key, get, fmt) in [
|
||||
("Manufacturer ID:", edid.get_mfc_id, str),
|
||||
("Product ID: ", edid.get_product_id, _make_format_hex(2)),
|
||||
@@ -294,7 +57,7 @@ def _print_edid(edid: _Edid) -> None:
|
||||
]:
|
||||
try:
|
||||
print(key, fmt(get()), file=sys.stderr) # type: ignore
|
||||
except NoBlockError:
|
||||
except EdidNoBlockError:
|
||||
pass
|
||||
|
||||
|
||||
@@ -348,12 +111,12 @@ def main(argv: (list[str] | None)=None) -> None: # pylint: disable=too-many-bra
|
||||
help="Presets directory", metavar="<dir>")
|
||||
options = parser.parse_args(argv[1:])
|
||||
|
||||
base: (_Edid | None) = None
|
||||
base: (Edid | None) = None
|
||||
if options.import_preset:
|
||||
imp = options.import_preset
|
||||
if "." in imp:
|
||||
(base_name, imp) = imp.split(".", 1) # v3.1080p-by-default
|
||||
base = _Edid(os.path.join(options.presets_path, f"{base_name}.hex"))
|
||||
base = Edid.from_file(os.path.join(options.presets_path, f"{base_name}.hex"))
|
||||
imp = f"_{imp}"
|
||||
options.imp = os.path.join(options.presets_path, f"{imp}.hex")
|
||||
|
||||
@@ -362,16 +125,16 @@ def main(argv: (list[str] | None)=None) -> None: # pylint: disable=too-many-bra
|
||||
options.export_hex = options.edid_path
|
||||
options.edid_path = options.imp
|
||||
|
||||
edid = _Edid(options.edid_path)
|
||||
edid = Edid.from_file(options.edid_path)
|
||||
changed = False
|
||||
|
||||
for cmd in dir(_Edid):
|
||||
for cmd in dir(Edid):
|
||||
if cmd.startswith("set_"):
|
||||
value = getattr(options, cmd)
|
||||
if value is None and base is not None:
|
||||
try:
|
||||
value = getattr(base, cmd.replace("set_", "get_"))()
|
||||
except NoBlockError:
|
||||
except EdidNoBlockError:
|
||||
pass
|
||||
if value is not None:
|
||||
getattr(edid, cmd)(value)
|
||||
@@ -400,8 +163,7 @@ def main(argv: (list[str] | None)=None) -> None: # pylint: disable=too-many-bra
|
||||
"/usr/bin/v4l2-ctl",
|
||||
f"--device={options.device_path}",
|
||||
f"--set-edid=file={orig_edid_path}",
|
||||
"--fix-edid-checksums",
|
||||
"--info-edid",
|
||||
], stdout=sys.stderr, check=True)
|
||||
except subprocess.CalledProcessError as err:
|
||||
raise SystemExit(str(err))
|
||||
except subprocess.CalledProcessError as ex:
|
||||
raise SystemExit(str(ex))
|
||||
|
||||
@@ -155,5 +155,5 @@ def main(argv: (list[str] | None)=None) -> None:
|
||||
options = parser.parse_args(argv[1:])
|
||||
try:
|
||||
options.cmd(config, options)
|
||||
except ValidatorError as err:
|
||||
raise SystemExit(str(err))
|
||||
except ValidatorError as ex:
|
||||
raise SystemExit(str(ex))
|
||||
|
||||
@@ -101,6 +101,7 @@ class IpmiServer(BaseIpmiServer): # pylint: disable=too-many-instance-attribute
|
||||
# =====
|
||||
|
||||
def handle_raw_request(self, request: dict, session: IpmiServerSession) -> None:
|
||||
# Parameter 'request' has been renamed to 'req' in overriding method
|
||||
handler = {
|
||||
(6, 1): (lambda _, session: self.send_device_id(session)), # Get device ID
|
||||
(6, 7): self.__get_power_state_handler, # Power state
|
||||
@@ -145,13 +146,13 @@ class IpmiServer(BaseIpmiServer): # pylint: disable=too-many-instance-attribute
|
||||
data = [int(result["leds"]["power"]), 0, 0]
|
||||
session.send_ipmi_response(data=data)
|
||||
|
||||
def __chassis_control_handler(self, request: dict, session: IpmiServerSession) -> None:
|
||||
def __chassis_control_handler(self, req: dict, session: IpmiServerSession) -> None:
|
||||
action = {
|
||||
0: "off_hard",
|
||||
1: "on",
|
||||
3: "reset_hard",
|
||||
5: "off",
|
||||
}.get(request["data"][0], "")
|
||||
}.get(req["data"][0], "")
|
||||
if action:
|
||||
if not self.__make_request(session, f"atx.switch_power({action})", "atx.switch_power", action=action):
|
||||
code = 0xC0 # Try again later
|
||||
@@ -171,8 +172,8 @@ class IpmiServer(BaseIpmiServer): # pylint: disable=too-many-instance-attribute
|
||||
async with self.__kvmd.make_session(credentials.kvmd_user, credentials.kvmd_passwd) as kvmd_session:
|
||||
func = functools.reduce(getattr, func_path.split("."), kvmd_session)
|
||||
return (await func(**kwargs))
|
||||
except (aiohttp.ClientError, asyncio.TimeoutError) as err:
|
||||
logger.error("[%s]: Can't perform request %s: %s", session.sockaddr[0], name, err)
|
||||
except (aiohttp.ClientError, asyncio.TimeoutError) as ex:
|
||||
logger.error("[%s]: Can't perform request %s: %s", session.sockaddr[0], name, ex)
|
||||
raise
|
||||
|
||||
return aiotools.run_sync(runner())
|
||||
|
||||
@@ -11,16 +11,17 @@ from ... import aioproc
|
||||
|
||||
from ...logging import get_logger
|
||||
|
||||
from .stun import StunNatType
|
||||
from .stun import Stun
|
||||
|
||||
|
||||
# =====
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _Netcfg:
|
||||
nat_type: str = dataclasses.field(default="")
|
||||
src_ip: str = dataclasses.field(default="")
|
||||
ext_ip: str = dataclasses.field(default="")
|
||||
stun_host: str = dataclasses.field(default="")
|
||||
nat_type: StunNatType = dataclasses.field(default=StunNatType.ERROR)
|
||||
src_ip: str = dataclasses.field(default="")
|
||||
ext_ip: str = dataclasses.field(default="")
|
||||
stun_ip: str = dataclasses.field(default="")
|
||||
stun_port: int = dataclasses.field(default=0)
|
||||
|
||||
|
||||
@@ -92,8 +93,9 @@ class JanusRunner: # pylint: disable=too-many-instance-attributes
|
||||
|
||||
async def __get_netcfg(self) -> _Netcfg:
|
||||
src_ip = (self.__get_default_ip() or "0.0.0.0")
|
||||
(stun, (nat_type, ext_ip)) = await self.__get_stun_info(src_ip)
|
||||
return _Netcfg(nat_type, src_ip, ext_ip, stun.host, stun.port)
|
||||
info = await self.__stun.get_info(src_ip, 0)
|
||||
# В текущей реализации _Netcfg() это копия StunInfo()
|
||||
return _Netcfg(**dataclasses.asdict(info))
|
||||
|
||||
def __get_default_ip(self) -> str:
|
||||
try:
|
||||
@@ -111,17 +113,10 @@ class JanusRunner: # pylint: disable=too-many-instance-attributes
|
||||
for proto in [socket.AF_INET, socket.AF_INET6]:
|
||||
if proto in addrs:
|
||||
return addrs[proto][0]["addr"]
|
||||
except Exception as err:
|
||||
get_logger().error("Can't get default IP: %s", tools.efmt(err))
|
||||
except Exception as ex:
|
||||
get_logger().error("Can't get default IP: %s", tools.efmt(ex))
|
||||
return ""
|
||||
|
||||
async def __get_stun_info(self, src_ip: str) -> tuple[Stun, tuple[str, str]]:
|
||||
try:
|
||||
return (self.__stun, (await self.__stun.get_info(src_ip, 0)))
|
||||
except Exception as err:
|
||||
get_logger().error("Can't get STUN info: %s", tools.efmt(err))
|
||||
return (self.__stun, ("", ""))
|
||||
|
||||
# =====
|
||||
|
||||
@aiotools.atomic_fg
|
||||
@@ -162,7 +157,7 @@ class JanusRunner: # pylint: disable=too-many-instance-attributes
|
||||
async def __start_janus_proc(self, netcfg: _Netcfg) -> None:
|
||||
assert self.__janus_proc is None
|
||||
placeholders = {
|
||||
"o_stun_server": f"--stun-server={netcfg.stun_host}:{netcfg.stun_port}",
|
||||
"o_stun_server": f"--stun-server={netcfg.stun_ip}:{netcfg.stun_port}",
|
||||
**{
|
||||
key: str(value)
|
||||
for (key, value) in dataclasses.asdict(netcfg).items()
|
||||
|
||||
@@ -4,6 +4,7 @@ import ipaddress
|
||||
import struct
|
||||
import secrets
|
||||
import dataclasses
|
||||
import enum
|
||||
|
||||
from ... import tools
|
||||
from ... import aiotools
|
||||
@@ -12,29 +13,39 @@ from ...logging import get_logger
|
||||
|
||||
|
||||
# =====
|
||||
class StunNatType(enum.Enum):
|
||||
ERROR = ""
|
||||
BLOCKED = "Blocked"
|
||||
OPEN_INTERNET = "Open Internet"
|
||||
SYMMETRIC_UDP_FW = "Symmetric UDP Firewall"
|
||||
FULL_CONE_NAT = "Full Cone NAT"
|
||||
RESTRICTED_NAT = "Restricted NAT"
|
||||
RESTRICTED_PORT_NAT = "Restricted Port NAT"
|
||||
SYMMETRIC_NAT = "Symmetric NAT"
|
||||
CHANGED_ADDR_ERROR = "Error when testing on Changed-IP and Port"
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class StunAddress:
|
||||
ip: str
|
||||
class StunInfo:
|
||||
nat_type: StunNatType
|
||||
src_ip: str
|
||||
ext_ip: str
|
||||
stun_ip: str
|
||||
stun_port: int
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _StunAddress:
|
||||
ip: str
|
||||
port: int
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class StunResponse:
|
||||
ok: bool
|
||||
ext: (StunAddress | None) = dataclasses.field(default=None)
|
||||
src: (StunAddress | None) = dataclasses.field(default=None)
|
||||
changed: (StunAddress | None) = dataclasses.field(default=None)
|
||||
|
||||
|
||||
class StunNatType:
|
||||
BLOCKED = "Blocked"
|
||||
OPEN_INTERNET = "Open Internet"
|
||||
SYMMETRIC_UDP_FW = "Symmetric UDP Firewall"
|
||||
FULL_CONE_NAT = "Full Cone NAT"
|
||||
RESTRICTED_NAT = "Restricted NAT"
|
||||
RESTRICTED_PORT_NAT = "Restricted Port NAT"
|
||||
SYMMETRIC_NAT = "Symmetric NAT"
|
||||
CHANGED_ADDR_ERROR = "Error when testing on Changed-IP and Port"
|
||||
class _StunResponse:
|
||||
ok: bool
|
||||
ext: (_StunAddress | None) = dataclasses.field(default=None)
|
||||
src: (_StunAddress | None) = dataclasses.field(default=None)
|
||||
changed: (_StunAddress | None) = dataclasses.field(default=None)
|
||||
|
||||
|
||||
# =====
|
||||
@@ -50,58 +61,94 @@ class Stun:
|
||||
retries_delay: float,
|
||||
) -> None:
|
||||
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.__host = host
|
||||
self.__port = port
|
||||
self.__timeout = timeout
|
||||
self.__retries = retries
|
||||
self.__retries_delay = retries_delay
|
||||
|
||||
self.__stun_ip = ""
|
||||
self.__sock: (socket.socket | None) = None
|
||||
|
||||
async def get_info(self, src_ip: str, src_port: int) -> tuple[str, str]:
|
||||
(family, _, _, _, addr) = socket.getaddrinfo(src_ip, src_port, type=socket.SOCK_DGRAM)[0]
|
||||
async def get_info(self, src_ip: str, src_port: int) -> StunInfo:
|
||||
nat_type = StunNatType.ERROR
|
||||
ext_ip = ""
|
||||
try:
|
||||
with socket.socket(family, socket.SOCK_DGRAM) as self.__sock:
|
||||
(src_fam, _, _, _, src_addr) = (await self.__retried_getaddrinfo_udp(src_ip, src_port))[0]
|
||||
|
||||
stun_ips = [
|
||||
stun_addr[0]
|
||||
for (stun_fam, _, _, _, stun_addr) in (await self.__retried_getaddrinfo_udp(self.__host, self.__port))
|
||||
if stun_fam == src_fam
|
||||
]
|
||||
if not stun_ips:
|
||||
raise RuntimeError(f"Can't resolve {src_fam.name} address for STUN")
|
||||
if not self.__stun_ip or self.__stun_ip not in stun_ips:
|
||||
# On new IP, changed family, etc.
|
||||
self.__stun_ip = stun_ips[0]
|
||||
|
||||
with socket.socket(src_fam, socket.SOCK_DGRAM) as self.__sock:
|
||||
self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
self.__sock.settimeout(self.__timeout)
|
||||
self.__sock.bind(addr)
|
||||
(nat_type, response) = await self.__get_nat_type(src_ip)
|
||||
return (nat_type, (response.ext.ip if response.ext is not None else ""))
|
||||
self.__sock.bind(src_addr)
|
||||
(nat_type, resp) = await self.__get_nat_type(src_ip)
|
||||
ext_ip = (resp.ext.ip if resp.ext is not None else "")
|
||||
except Exception as ex:
|
||||
get_logger(0).error("Can't get STUN info: %s", tools.efmt(ex))
|
||||
finally:
|
||||
self.__sock = None
|
||||
|
||||
async def __get_nat_type(self, src_ip: str) -> tuple[str, StunResponse]: # pylint: disable=too-many-return-statements
|
||||
first = await self.__make_request("First probe")
|
||||
return StunInfo(
|
||||
nat_type=nat_type,
|
||||
src_ip=src_ip,
|
||||
ext_ip=ext_ip,
|
||||
stun_ip=self.__stun_ip,
|
||||
stun_port=self.__port,
|
||||
)
|
||||
|
||||
async def __retried_getaddrinfo_udp(self, host: str, port: int) -> list:
|
||||
retries = self.__retries
|
||||
while True:
|
||||
try:
|
||||
return socket.getaddrinfo(host, port, type=socket.SOCK_DGRAM)
|
||||
except Exception:
|
||||
retries -= 1
|
||||
if retries == 0:
|
||||
raise
|
||||
await asyncio.sleep(self.__retries_delay)
|
||||
|
||||
async def __get_nat_type(self, src_ip: str) -> tuple[StunNatType, _StunResponse]: # pylint: disable=too-many-return-statements
|
||||
first = await self.__make_request("First probe", self.__stun_ip, b"")
|
||||
if not first.ok:
|
||||
return (StunNatType.BLOCKED, first)
|
||||
|
||||
request = struct.pack(">HHI", 0x0003, 0x0004, 0x00000006) # Change-Request
|
||||
response = await self.__make_request("Change request [ext_ip == src_ip]", request)
|
||||
req = struct.pack(">HHI", 0x0003, 0x0004, 0x00000006) # Change-Request
|
||||
resp = await self.__make_request("Change request [ext_ip == src_ip]", self.__stun_ip, req)
|
||||
|
||||
if first.ext is not None and first.ext.ip == src_ip:
|
||||
if response.ok:
|
||||
return (StunNatType.OPEN_INTERNET, response)
|
||||
return (StunNatType.SYMMETRIC_UDP_FW, response)
|
||||
if resp.ok:
|
||||
return (StunNatType.OPEN_INTERNET, resp)
|
||||
return (StunNatType.SYMMETRIC_UDP_FW, resp)
|
||||
|
||||
if response.ok:
|
||||
return (StunNatType.FULL_CONE_NAT, response)
|
||||
if resp.ok:
|
||||
return (StunNatType.FULL_CONE_NAT, resp)
|
||||
|
||||
if first.changed is None:
|
||||
raise RuntimeError(f"Changed addr is None: {first}")
|
||||
response = await self.__make_request("Change request [ext_ip != src_ip]", addr=first.changed)
|
||||
if not response.ok:
|
||||
return (StunNatType.CHANGED_ADDR_ERROR, response)
|
||||
resp = await self.__make_request("Change request [ext_ip != src_ip]", first.changed, b"")
|
||||
if not resp.ok:
|
||||
return (StunNatType.CHANGED_ADDR_ERROR, resp)
|
||||
|
||||
if response.ext == first.ext:
|
||||
request = struct.pack(">HHI", 0x0003, 0x0004, 0x00000002)
|
||||
response = await self.__make_request("Change port", request, addr=first.changed.ip)
|
||||
if response.ok:
|
||||
return (StunNatType.RESTRICTED_NAT, response)
|
||||
return (StunNatType.RESTRICTED_PORT_NAT, response)
|
||||
if resp.ext == first.ext:
|
||||
req = struct.pack(">HHI", 0x0003, 0x0004, 0x00000002)
|
||||
resp = await self.__make_request("Change port", first.changed.ip, req)
|
||||
if resp.ok:
|
||||
return (StunNatType.RESTRICTED_NAT, resp)
|
||||
return (StunNatType.RESTRICTED_PORT_NAT, resp)
|
||||
|
||||
return (StunNatType.SYMMETRIC_NAT, response)
|
||||
return (StunNatType.SYMMETRIC_NAT, resp)
|
||||
|
||||
async def __make_request(self, ctx: str, request: bytes=b"", addr: (StunAddress | str | None)=None) -> StunResponse:
|
||||
async def __make_request(self, ctx: str, addr: (_StunAddress | str), req: bytes) -> _StunResponse:
|
||||
# TODO: Support IPv6 and RFC 5389
|
||||
# The first 4 bytes of the response are the Type (2) and Length (2)
|
||||
# The 5th byte is Reserved
|
||||
@@ -111,32 +158,29 @@ class Stun:
|
||||
# More info at: https://tools.ietf.org/html/rfc3489#section-11.2.1
|
||||
# And at: https://tools.ietf.org/html/rfc5389#section-15.1
|
||||
|
||||
if isinstance(addr, StunAddress):
|
||||
if isinstance(addr, _StunAddress):
|
||||
addr_t = (addr.ip, addr.port)
|
||||
elif isinstance(addr, str):
|
||||
addr_t = (addr, self.port)
|
||||
else:
|
||||
assert addr is None
|
||||
addr_t = (self.host, self.port)
|
||||
else: # str
|
||||
addr_t = (addr, self.__port)
|
||||
|
||||
# https://datatracker.ietf.org/doc/html/rfc5389#section-6
|
||||
trans_id = b"\x21\x12\xA4\x42" + secrets.token_bytes(12)
|
||||
(response, error) = (b"", "")
|
||||
(resp, error) = (b"", "")
|
||||
for _ in range(self.__retries):
|
||||
(response, error) = await self.__inner_make_request(trans_id, request, addr_t)
|
||||
(resp, error) = await self.__inner_make_request(trans_id, req, addr_t)
|
||||
if not error:
|
||||
break
|
||||
await asyncio.sleep(self.__retries_delay)
|
||||
if error:
|
||||
get_logger(0).error("%s: Can't perform STUN request after %d retries; last error: %s",
|
||||
ctx, self.__retries, error)
|
||||
return StunResponse(ok=False)
|
||||
return _StunResponse(ok=False)
|
||||
|
||||
parsed: dict[str, StunAddress] = {}
|
||||
parsed: dict[str, _StunAddress] = {}
|
||||
offset = 0
|
||||
remaining = len(response)
|
||||
remaining = len(resp)
|
||||
while remaining > 0:
|
||||
(attr_type, attr_len) = struct.unpack(">HH", response[offset : offset + 4]) # noqa: E203
|
||||
(attr_type, attr_len) = struct.unpack(">HH", resp[offset : offset + 4]) # noqa: E203
|
||||
offset += 4
|
||||
field = {
|
||||
0x0001: "ext", # MAPPED-ADDRESS
|
||||
@@ -145,40 +189,40 @@ class Stun:
|
||||
0x0005: "changed", # CHANGED-ADDRESS
|
||||
}.get(attr_type)
|
||||
if field is not None:
|
||||
parsed[field] = self.__parse_address(response[offset:], (trans_id if attr_type == 0x0020 else b""))
|
||||
parsed[field] = self.__parse_address(resp[offset:], (trans_id if attr_type == 0x0020 else b""))
|
||||
offset += attr_len
|
||||
remaining -= (4 + attr_len)
|
||||
return StunResponse(ok=True, **parsed)
|
||||
return _StunResponse(ok=True, **parsed)
|
||||
|
||||
async def __inner_make_request(self, trans_id: bytes, request: bytes, addr: tuple[str, int]) -> tuple[bytes, str]:
|
||||
async def __inner_make_request(self, trans_id: bytes, req: bytes, addr: tuple[str, int]) -> tuple[bytes, str]:
|
||||
assert self.__sock is not None
|
||||
|
||||
request = struct.pack(">HH", 0x0001, len(request)) + trans_id + request # Bind Request
|
||||
req = struct.pack(">HH", 0x0001, len(req)) + trans_id + req # Bind Request
|
||||
|
||||
try:
|
||||
await aiotools.run_async(self.__sock.sendto, request, addr)
|
||||
except Exception as err:
|
||||
return (b"", f"Send error: {tools.efmt(err)}")
|
||||
await aiotools.run_async(self.__sock.sendto, req, addr)
|
||||
except Exception as ex:
|
||||
return (b"", f"Send error: {tools.efmt(ex)}")
|
||||
try:
|
||||
response = (await aiotools.run_async(self.__sock.recvfrom, 2048))[0]
|
||||
except Exception as err:
|
||||
return (b"", f"Recv error: {tools.efmt(err)}")
|
||||
resp = (await aiotools.run_async(self.__sock.recvfrom, 2048))[0]
|
||||
except Exception as ex:
|
||||
return (b"", f"Recv error: {tools.efmt(ex)}")
|
||||
|
||||
(response_type, payload_len) = struct.unpack(">HH", response[:4])
|
||||
if response_type != 0x0101:
|
||||
return (b"", f"Invalid response type: {response_type:#06x}")
|
||||
if trans_id != response[4:20]:
|
||||
(resp_type, payload_len) = struct.unpack(">HH", resp[:4])
|
||||
if resp_type != 0x0101:
|
||||
return (b"", f"Invalid response type: {resp_type:#06x}")
|
||||
if trans_id != resp[4:20]:
|
||||
return (b"", "Transaction ID mismatch")
|
||||
|
||||
return (response[20 : 20 + payload_len], "") # noqa: E203
|
||||
return (resp[20 : 20 + payload_len], "") # noqa: E203
|
||||
|
||||
def __parse_address(self, data: bytes, trans_id: bytes) -> StunAddress:
|
||||
def __parse_address(self, data: bytes, trans_id: bytes) -> _StunAddress:
|
||||
family = data[1]
|
||||
port = struct.unpack(">H", self.__trans_xor(data[2:4], trans_id))[0]
|
||||
if family == 0x01:
|
||||
return StunAddress(str(ipaddress.IPv4Address(self.__trans_xor(data[4:8], trans_id))), port)
|
||||
return _StunAddress(str(ipaddress.IPv4Address(self.__trans_xor(data[4:8], trans_id))), port)
|
||||
elif family == 0x02:
|
||||
return StunAddress(str(ipaddress.IPv6Address(self.__trans_xor(data[4:20], trans_id))), port)
|
||||
return _StunAddress(str(ipaddress.IPv6Address(self.__trans_xor(data[4:20], trans_id))), port)
|
||||
raise RuntimeError(f"Unknown family; received: {family}")
|
||||
|
||||
def __trans_xor(self, data: bytes, trans_id: bytes) -> bytes:
|
||||
|
||||
@@ -26,8 +26,6 @@ from ...plugins.hid import get_hid_class
|
||||
from ...plugins.atx import get_atx_class
|
||||
from ...plugins.msd import get_msd_class
|
||||
|
||||
from ...languages import Languages
|
||||
|
||||
from .. import init
|
||||
|
||||
from .auth import AuthManager
|
||||
@@ -37,6 +35,7 @@ from .ugpio import UserGpio
|
||||
from .streamer import Streamer
|
||||
from .snapshoter import Snapshoter
|
||||
from .ocr import Ocr
|
||||
from .switch import Switch
|
||||
from .server import KvmdServer
|
||||
|
||||
|
||||
@@ -58,7 +57,7 @@ def main(argv: (list[str] | None)=None) -> None:
|
||||
if config.kvmd.msd.type == "otg":
|
||||
msd_kwargs["gadget"] = config.otg.gadget # XXX: Small crutch to pass gadget name to the plugin
|
||||
|
||||
hid_kwargs = config.kvmd.hid._unpack(ignore=["type", "keymap", "ignore_keys", "mouse_x_range", "mouse_y_range"])
|
||||
hid_kwargs = config.kvmd.hid._unpack(ignore=["type", "keymap"])
|
||||
if config.kvmd.hid.type == "otg":
|
||||
hid_kwargs["udc"] = config.otg.udc # XXX: Small crutch to pass UDC to the plugin
|
||||
|
||||
@@ -92,6 +91,10 @@ def main(argv: (list[str] | None)=None) -> None:
|
||||
log_reader=(LogReader() if config.log_reader.enabled else None),
|
||||
user_gpio=UserGpio(config.gpio, global_config.otg),
|
||||
ocr=Ocr(**config.ocr._unpack()),
|
||||
switch=Switch(
|
||||
pst_unix_path=global_config.pst.server.unix,
|
||||
**config.switch._unpack(),
|
||||
),
|
||||
|
||||
hid=hid,
|
||||
atx=get_atx_class(config.atx.type)(**config.atx._unpack(ignore=["type"])),
|
||||
@@ -105,11 +108,8 @@ def main(argv: (list[str] | None)=None) -> None:
|
||||
),
|
||||
|
||||
keymap_path=config.hid.keymap,
|
||||
ignore_keys=config.hid.ignore_keys,
|
||||
mouse_x_range=(config.hid.mouse_x_range.min, config.hid.mouse_x_range.max),
|
||||
mouse_y_range=(config.hid.mouse_y_range.min, config.hid.mouse_y_range.max),
|
||||
|
||||
stream_forever=config.streamer.forever,
|
||||
).run(**config.server._unpack())
|
||||
|
||||
get_logger(0).info(Languages().gettext("Bye-bye"))
|
||||
get_logger(0).info("Bye-bye")
|
||||
|
||||
@@ -45,9 +45,9 @@ class AtxApi:
|
||||
return make_json_response(await self.__atx.get_state())
|
||||
|
||||
@exposed_http("POST", "/atx/power")
|
||||
async def __power_handler(self, request: Request) -> Response:
|
||||
action = valid_atx_power_action(request.query.get("action"))
|
||||
wait = valid_bool(request.query.get("wait", False))
|
||||
async def __power_handler(self, req: Request) -> Response:
|
||||
action = valid_atx_power_action(req.query.get("action"))
|
||||
wait = valid_bool(req.query.get("wait", False))
|
||||
await ({
|
||||
"on": self.__atx.power_on,
|
||||
"off": self.__atx.power_off,
|
||||
@@ -57,9 +57,9 @@ class AtxApi:
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/atx/click")
|
||||
async def __click_handler(self, request: Request) -> Response:
|
||||
button = valid_atx_button(request.query.get("button"))
|
||||
wait = valid_bool(request.query.get("wait", False))
|
||||
async def __click_handler(self, req: Request) -> Response:
|
||||
button = valid_atx_button(req.query.get("button"))
|
||||
wait = valid_bool(req.query.get("wait", False))
|
||||
await ({
|
||||
"power": self.__atx.click_power,
|
||||
"power_long": self.__atx.click_power_long,
|
||||
|
||||
@@ -43,34 +43,34 @@ from ..auth import AuthManager
|
||||
_COOKIE_AUTH_TOKEN = "auth_token"
|
||||
|
||||
|
||||
async def check_request_auth(auth_manager: AuthManager, exposed: HttpExposed, request: Request) -> None:
|
||||
async def check_request_auth(auth_manager: AuthManager, exposed: HttpExposed, req: Request) -> None:
|
||||
if auth_manager.is_auth_required(exposed):
|
||||
user = request.headers.get("X-KVMD-User", "")
|
||||
user = req.headers.get("X-KVMD-User", "")
|
||||
if user:
|
||||
user = valid_user(user)
|
||||
passwd = request.headers.get("X-KVMD-Passwd", "")
|
||||
set_request_auth_info(request, f"{user} (xhdr)")
|
||||
passwd = req.headers.get("X-KVMD-Passwd", "")
|
||||
set_request_auth_info(req, f"{user} (xhdr)")
|
||||
if not (await auth_manager.authorize(user, valid_passwd(passwd))):
|
||||
raise ForbiddenError()
|
||||
return
|
||||
|
||||
token = request.cookies.get(_COOKIE_AUTH_TOKEN, "")
|
||||
token = req.cookies.get(_COOKIE_AUTH_TOKEN, "")
|
||||
if token:
|
||||
user = auth_manager.check(valid_auth_token(token)) # type: ignore
|
||||
if not user:
|
||||
set_request_auth_info(request, "- (token)")
|
||||
set_request_auth_info(req, "- (token)")
|
||||
raise ForbiddenError()
|
||||
set_request_auth_info(request, f"{user} (token)")
|
||||
set_request_auth_info(req, f"{user} (token)")
|
||||
return
|
||||
|
||||
basic_auth = request.headers.get("Authorization", "")
|
||||
basic_auth = req.headers.get("Authorization", "")
|
||||
if basic_auth and basic_auth[:6].lower() == "basic ":
|
||||
try:
|
||||
(user, passwd) = base64.b64decode(basic_auth[6:]).decode("utf-8").split(":")
|
||||
except Exception:
|
||||
raise UnauthorizedError()
|
||||
user = valid_user(user)
|
||||
set_request_auth_info(request, f"{user} (basic)")
|
||||
set_request_auth_info(req, f"{user} (basic)")
|
||||
if not (await auth_manager.authorize(user, valid_passwd(passwd))):
|
||||
raise ForbiddenError()
|
||||
return
|
||||
@@ -85,9 +85,9 @@ class AuthApi:
|
||||
# =====
|
||||
|
||||
@exposed_http("POST", "/auth/login", auth_required=False)
|
||||
async def __login_handler(self, request: Request) -> Response:
|
||||
async def __login_handler(self, req: Request) -> Response:
|
||||
if self.__auth_manager.is_auth_enabled():
|
||||
credentials = await request.post()
|
||||
credentials = await req.post()
|
||||
token = await self.__auth_manager.login(
|
||||
user=valid_user(credentials.get("user", "")),
|
||||
passwd=valid_passwd(credentials.get("passwd", "")),
|
||||
@@ -98,9 +98,9 @@ class AuthApi:
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/auth/logout")
|
||||
async def __logout_handler(self, request: Request) -> Response:
|
||||
async def __logout_handler(self, req: Request) -> Response:
|
||||
if self.__auth_manager.is_auth_enabled():
|
||||
token = valid_auth_token(request.cookies.get(_COOKIE_AUTH_TOKEN, ""))
|
||||
token = valid_auth_token(req.cookies.get(_COOKIE_AUTH_TOKEN, ""))
|
||||
self.__auth_manager.logout(token)
|
||||
return make_json_response()
|
||||
|
||||
|
||||
@@ -55,10 +55,9 @@ class ExportApi:
|
||||
|
||||
@async_lru.alru_cache(maxsize=1, ttl=5)
|
||||
async def __get_prometheus_metrics(self) -> str:
|
||||
(atx_state, hw_state, fan_state, gpio_state) = await asyncio.gather(*[
|
||||
(atx_state, info_state, gpio_state) = await asyncio.gather(*[
|
||||
self.__atx.get_state(),
|
||||
self.__info_manager.get_submanager("hw").get_state(),
|
||||
self.__info_manager.get_submanager("fan").get_state(),
|
||||
self.__info_manager.get_state(["hw", "fan"]),
|
||||
self.__user_gpio.get_state(),
|
||||
])
|
||||
rows: list[str] = []
|
||||
@@ -67,13 +66,13 @@ class ExportApi:
|
||||
self.__append_prometheus_rows(rows, atx_state["leds"]["power"], "pikvm_atx_power") # type: ignore
|
||||
|
||||
for mode in sorted(UserGpioModes.ALL):
|
||||
for (channel, ch_state) in gpio_state[f"{mode}s"].items(): # type: ignore
|
||||
for (channel, ch_state) in gpio_state["state"][f"{mode}s"].items(): # type: ignore
|
||||
if not channel.startswith("__"): # Hide special GPIOs
|
||||
for key in ["online", "state"]:
|
||||
self.__append_prometheus_rows(rows, ch_state["state"], f"pikvm_gpio_{mode}_{key}_{channel}")
|
||||
|
||||
self.__append_prometheus_rows(rows, hw_state["health"], "pikvm_hw") # type: ignore
|
||||
self.__append_prometheus_rows(rows, fan_state, "pikvm_fan")
|
||||
self.__append_prometheus_rows(rows, info_state["hw"]["health"], "pikvm_hw") # type: ignore
|
||||
self.__append_prometheus_rows(rows, info_state["fan"], "pikvm_fan")
|
||||
|
||||
return "\n".join(rows)
|
||||
|
||||
|
||||
@@ -25,13 +25,12 @@ import stat
|
||||
import functools
|
||||
import struct
|
||||
|
||||
from typing import Iterable
|
||||
from typing import Callable
|
||||
|
||||
from aiohttp.web import Request
|
||||
from aiohttp.web import Response
|
||||
|
||||
from ....mouse import MouseRange
|
||||
|
||||
from ....keyboard.keysym import build_symmap
|
||||
from ....keyboard.printer import text_to_web_keys
|
||||
|
||||
@@ -59,12 +58,7 @@ class HidApi:
|
||||
def __init__(
|
||||
self,
|
||||
hid: BaseHid,
|
||||
|
||||
keymap_path: str,
|
||||
ignore_keys: list[str],
|
||||
|
||||
mouse_x_range: tuple[int, int],
|
||||
mouse_y_range: tuple[int, int],
|
||||
) -> None:
|
||||
|
||||
self.__hid = hid
|
||||
@@ -73,11 +67,6 @@ class HidApi:
|
||||
self.__default_keymap_name = os.path.basename(keymap_path)
|
||||
self.__ensure_symmap(self.__default_keymap_name)
|
||||
|
||||
self.__ignore_keys = ignore_keys
|
||||
|
||||
self.__mouse_x_range = mouse_x_range
|
||||
self.__mouse_y_range = mouse_y_range
|
||||
|
||||
# =====
|
||||
|
||||
@exposed_http("GET", "/hid")
|
||||
@@ -85,22 +74,22 @@ class HidApi:
|
||||
return make_json_response(await self.__hid.get_state())
|
||||
|
||||
@exposed_http("POST", "/hid/set_params")
|
||||
async def __set_params_handler(self, request: Request) -> Response:
|
||||
async def __set_params_handler(self, req: Request) -> Response:
|
||||
params = {
|
||||
key: validator(request.query.get(key))
|
||||
key: validator(req.query.get(key))
|
||||
for (key, validator) in [
|
||||
("keyboard_output", valid_hid_keyboard_output),
|
||||
("mouse_output", valid_hid_mouse_output),
|
||||
("jiggler", valid_bool),
|
||||
]
|
||||
if request.query.get(key) is not None
|
||||
if req.query.get(key) is not None
|
||||
}
|
||||
self.__hid.set_params(**params) # type: ignore
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/hid/set_connected")
|
||||
async def __set_connected_handler(self, request: Request) -> Response:
|
||||
self.__hid.set_connected(valid_bool(request.query.get("connected")))
|
||||
async def __set_connected_handler(self, req: Request) -> Response:
|
||||
self.__hid.set_connected(valid_bool(req.query.get("connected")))
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/hid/reset")
|
||||
@@ -128,13 +117,14 @@ class HidApi:
|
||||
return make_json_response(await self.get_keymaps())
|
||||
|
||||
@exposed_http("POST", "/hid/print")
|
||||
async def __print_handler(self, request: Request) -> Response:
|
||||
text = await request.text()
|
||||
limit = int(valid_int_f0(request.query.get("limit", 1024)))
|
||||
async def __print_handler(self, req: Request) -> Response:
|
||||
text = await req.text()
|
||||
limit = int(valid_int_f0(req.query.get("limit", 1024)))
|
||||
if limit > 0:
|
||||
text = text[:limit]
|
||||
symmap = self.__ensure_symmap(request.query.get("keymap", self.__default_keymap_name))
|
||||
self.__hid.send_key_events(text_to_web_keys(text, symmap))
|
||||
symmap = self.__ensure_symmap(req.query.get("keymap", self.__default_keymap_name))
|
||||
slow = valid_bool(req.query.get("slow", False))
|
||||
await self.__hid.send_key_events(text_to_web_keys(text, symmap), no_ignore_keys=True, slow=slow)
|
||||
return make_json_response()
|
||||
|
||||
def __ensure_symmap(self, keymap_name: str) -> dict[int, dict[int, str]]:
|
||||
@@ -159,17 +149,17 @@ class HidApi:
|
||||
async def __ws_bin_key_handler(self, _: WsSession, data: bytes) -> None:
|
||||
try:
|
||||
key = valid_hid_key(data[1:].decode("ascii"))
|
||||
state = valid_bool(data[0])
|
||||
state = bool(data[0] & 0b01)
|
||||
finish = bool(data[0] & 0b10)
|
||||
except Exception:
|
||||
return
|
||||
if key not in self.__ignore_keys:
|
||||
self.__hid.send_key_events([(key, state)])
|
||||
self.__hid.send_key_event(key, state, finish)
|
||||
|
||||
@exposed_ws(2)
|
||||
async def __ws_bin_mouse_button_handler(self, _: WsSession, data: bytes) -> None:
|
||||
try:
|
||||
button = valid_hid_mouse_button(data[1:].decode("ascii"))
|
||||
state = valid_bool(data[0])
|
||||
state = bool(data[0] & 0b01)
|
||||
except Exception:
|
||||
return
|
||||
self.__hid.send_mouse_button_event(button, state)
|
||||
@@ -182,19 +172,19 @@ class HidApi:
|
||||
to_y = valid_hid_mouse_move(to_y)
|
||||
except Exception:
|
||||
return
|
||||
self.__send_mouse_move_event(to_x, to_y)
|
||||
self.__hid.send_mouse_move_event(to_x, to_y)
|
||||
|
||||
@exposed_ws(4)
|
||||
async def __ws_bin_mouse_relative_handler(self, _: WsSession, data: bytes) -> None:
|
||||
self.__process_ws_bin_delta_request(data, self.__hid.send_mouse_relative_event)
|
||||
self.__process_ws_bin_delta_request(data, self.__hid.send_mouse_relative_events)
|
||||
|
||||
@exposed_ws(5)
|
||||
async def __ws_bin_mouse_wheel_handler(self, _: WsSession, data: bytes) -> None:
|
||||
self.__process_ws_bin_delta_request(data, self.__hid.send_mouse_wheel_event)
|
||||
self.__process_ws_bin_delta_request(data, self.__hid.send_mouse_wheel_events)
|
||||
|
||||
def __process_ws_bin_delta_request(self, data: bytes, handler: Callable[[int, int], None]) -> None:
|
||||
def __process_ws_bin_delta_request(self, data: bytes, handler: Callable[[Iterable[tuple[int, int]], bool], None]) -> None:
|
||||
try:
|
||||
squash = valid_bool(data[0])
|
||||
squash = bool(data[0] & 0b01)
|
||||
data = data[1:]
|
||||
deltas: list[tuple[int, int]] = []
|
||||
for index in range(0, len(data), 2):
|
||||
@@ -202,7 +192,7 @@ class HidApi:
|
||||
deltas.append((valid_hid_mouse_delta(delta_x), valid_hid_mouse_delta(delta_y)))
|
||||
except Exception:
|
||||
return
|
||||
self.__send_mouse_delta_event(deltas, squash, handler)
|
||||
handler(deltas, squash)
|
||||
|
||||
# =====
|
||||
|
||||
@@ -211,10 +201,10 @@ class HidApi:
|
||||
try:
|
||||
key = valid_hid_key(event["key"])
|
||||
state = valid_bool(event["state"])
|
||||
finish = valid_bool(event.get("finish", False))
|
||||
except Exception:
|
||||
return
|
||||
if key not in self.__ignore_keys:
|
||||
self.__hid.send_key_events([(key, state)])
|
||||
self.__hid.send_key_event(key, state, finish)
|
||||
|
||||
@exposed_ws("mouse_button")
|
||||
async def __ws_mouse_button_handler(self, _: WsSession, event: dict) -> None:
|
||||
@@ -232,17 +222,17 @@ class HidApi:
|
||||
to_y = valid_hid_mouse_move(event["to"]["y"])
|
||||
except Exception:
|
||||
return
|
||||
self.__send_mouse_move_event(to_x, to_y)
|
||||
self.__hid.send_mouse_move_event(to_x, to_y)
|
||||
|
||||
@exposed_ws("mouse_relative")
|
||||
async def __ws_mouse_relative_handler(self, _: WsSession, event: dict) -> None:
|
||||
self.__process_ws_delta_event(event, self.__hid.send_mouse_relative_event)
|
||||
self.__process_ws_delta_event(event, self.__hid.send_mouse_relative_events)
|
||||
|
||||
@exposed_ws("mouse_wheel")
|
||||
async def __ws_mouse_wheel_handler(self, _: WsSession, event: dict) -> None:
|
||||
self.__process_ws_delta_event(event, self.__hid.send_mouse_wheel_event)
|
||||
self.__process_ws_delta_event(event, self.__hid.send_mouse_wheel_events)
|
||||
|
||||
def __process_ws_delta_event(self, event: dict, handler: Callable[[int, int], None]) -> None:
|
||||
def __process_ws_delta_event(self, event: dict, handler: Callable[[Iterable[tuple[int, int]], bool], None]) -> None:
|
||||
try:
|
||||
raw_delta = event["delta"]
|
||||
deltas = [
|
||||
@@ -252,26 +242,26 @@ class HidApi:
|
||||
squash = valid_bool(event.get("squash", False))
|
||||
except Exception:
|
||||
return
|
||||
self.__send_mouse_delta_event(deltas, squash, handler)
|
||||
handler(deltas, squash)
|
||||
|
||||
# =====
|
||||
|
||||
@exposed_http("POST", "/hid/events/send_key")
|
||||
async def __events_send_key_handler(self, request: Request) -> Response:
|
||||
key = valid_hid_key(request.query.get("key"))
|
||||
if key not in self.__ignore_keys:
|
||||
if "state" in request.query:
|
||||
state = valid_bool(request.query["state"])
|
||||
self.__hid.send_key_events([(key, state)])
|
||||
else:
|
||||
self.__hid.send_key_events([(key, True), (key, False)])
|
||||
async def __events_send_key_handler(self, req: Request) -> Response:
|
||||
key = valid_hid_key(req.query.get("key"))
|
||||
if "state" in req.query:
|
||||
state = valid_bool(req.query["state"])
|
||||
finish = valid_bool(req.query.get("finish", False))
|
||||
self.__hid.send_key_event(key, state, finish)
|
||||
else:
|
||||
self.__hid.send_key_event(key, True, True)
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/hid/events/send_mouse_button")
|
||||
async def __events_send_mouse_button_handler(self, request: Request) -> Response:
|
||||
button = valid_hid_mouse_button(request.query.get("button"))
|
||||
if "state" in request.query:
|
||||
state = valid_bool(request.query["state"])
|
||||
async def __events_send_mouse_button_handler(self, req: Request) -> Response:
|
||||
button = valid_hid_mouse_button(req.query.get("button"))
|
||||
if "state" in req.query:
|
||||
state = valid_bool(req.query["state"])
|
||||
self.__hid.send_mouse_button_event(button, state)
|
||||
else:
|
||||
self.__hid.send_mouse_button_event(button, True)
|
||||
@@ -279,52 +269,22 @@ class HidApi:
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/hid/events/send_mouse_move")
|
||||
async def __events_send_mouse_move_handler(self, request: Request) -> Response:
|
||||
to_x = valid_hid_mouse_move(request.query.get("to_x"))
|
||||
to_y = valid_hid_mouse_move(request.query.get("to_y"))
|
||||
self.__send_mouse_move_event(to_x, to_y)
|
||||
async def __events_send_mouse_move_handler(self, req: Request) -> Response:
|
||||
to_x = valid_hid_mouse_move(req.query.get("to_x"))
|
||||
to_y = valid_hid_mouse_move(req.query.get("to_y"))
|
||||
self.__hid.send_mouse_move_event(to_x, to_y)
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/hid/events/send_mouse_relative")
|
||||
async def __events_send_mouse_relative_handler(self, request: Request) -> Response:
|
||||
return self.__process_http_delta_event(request, self.__hid.send_mouse_relative_event)
|
||||
async def __events_send_mouse_relative_handler(self, req: Request) -> Response:
|
||||
return self.__process_http_delta_event(req, self.__hid.send_mouse_relative_event)
|
||||
|
||||
@exposed_http("POST", "/hid/events/send_mouse_wheel")
|
||||
async def __events_send_mouse_wheel_handler(self, request: Request) -> Response:
|
||||
return self.__process_http_delta_event(request, self.__hid.send_mouse_wheel_event)
|
||||
async def __events_send_mouse_wheel_handler(self, req: Request) -> Response:
|
||||
return self.__process_http_delta_event(req, self.__hid.send_mouse_wheel_event)
|
||||
|
||||
def __process_http_delta_event(self, request: Request, handler: Callable[[int, int], None]) -> Response:
|
||||
delta_x = valid_hid_mouse_delta(request.query.get("delta_x"))
|
||||
delta_y = valid_hid_mouse_delta(request.query.get("delta_y"))
|
||||
def __process_http_delta_event(self, req: Request, handler: Callable[[int, int], None]) -> Response:
|
||||
delta_x = valid_hid_mouse_delta(req.query.get("delta_x"))
|
||||
delta_y = valid_hid_mouse_delta(req.query.get("delta_y"))
|
||||
handler(delta_x, delta_y)
|
||||
return make_json_response()
|
||||
|
||||
# =====
|
||||
|
||||
def __send_mouse_move_event(self, to_x: int, to_y: int) -> None:
|
||||
if self.__mouse_x_range != MouseRange.RANGE:
|
||||
to_x = MouseRange.remap(to_x, *self.__mouse_x_range)
|
||||
if self.__mouse_y_range != MouseRange.RANGE:
|
||||
to_y = MouseRange.remap(to_y, *self.__mouse_y_range)
|
||||
self.__hid.send_mouse_move_event(to_x, to_y)
|
||||
|
||||
def __send_mouse_delta_event(
|
||||
self,
|
||||
deltas: list[tuple[int, int]],
|
||||
squash: bool,
|
||||
handler: Callable[[int, int], None],
|
||||
) -> None:
|
||||
|
||||
if squash:
|
||||
prev = (0, 0)
|
||||
for cur in deltas:
|
||||
if abs(prev[0] + cur[0]) > 127 or abs(prev[1] + cur[1]) > 127:
|
||||
handler(*prev)
|
||||
prev = cur
|
||||
else:
|
||||
prev = (prev[0] + cur[0], prev[1] + cur[1])
|
||||
if prev[0] or prev[1]:
|
||||
handler(*prev)
|
||||
else:
|
||||
for xy in deltas:
|
||||
handler(*xy)
|
||||
|
||||
@@ -20,8 +20,6 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
import asyncio
|
||||
|
||||
from aiohttp.web import Request
|
||||
from aiohttp.web import Response
|
||||
|
||||
@@ -41,17 +39,13 @@ class InfoApi:
|
||||
# =====
|
||||
|
||||
@exposed_http("GET", "/info")
|
||||
async def __common_state_handler(self, request: Request) -> Response:
|
||||
fields = self.__valid_info_fields(request)
|
||||
results = dict(zip(fields, await asyncio.gather(*[
|
||||
self.__info_manager.get_submanager(field).get_state()
|
||||
for field in fields
|
||||
])))
|
||||
return make_json_response(results)
|
||||
async def __common_state_handler(self, req: Request) -> Response:
|
||||
fields = self.__valid_info_fields(req)
|
||||
return make_json_response(await self.__info_manager.get_state(fields))
|
||||
|
||||
def __valid_info_fields(self, request: Request) -> list[str]:
|
||||
subs = self.__info_manager.get_subs()
|
||||
def __valid_info_fields(self, req: Request) -> list[str]:
|
||||
available = self.__info_manager.get_subs()
|
||||
return sorted(valid_info_fields(
|
||||
arg=request.query.get("fields", ",".join(subs)),
|
||||
variants=subs,
|
||||
) or subs)
|
||||
arg=req.query.get("fields", ",".join(available)),
|
||||
variants=available,
|
||||
) or available)
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
@@ -19,7 +20,6 @@
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
from aiohttp.web import Request
|
||||
from aiohttp.web import StreamResponse
|
||||
|
||||
@@ -47,12 +47,12 @@ class LogApi:
|
||||
# =====
|
||||
|
||||
@exposed_http("GET", "/log")
|
||||
async def __log_handler(self, request: Request) -> StreamResponse:
|
||||
async def __log_handler(self, req: Request) -> StreamResponse:
|
||||
if self.__log_reader is None:
|
||||
raise LogReaderDisabledError()
|
||||
seek = valid_log_seek(request.query.get("seek", 0))
|
||||
follow = valid_bool(request.query.get("follow", False))
|
||||
response = await start_streaming(request, "text/plain")
|
||||
seek = valid_log_seek(req.query.get("seek", 0))
|
||||
follow = valid_bool(req.query.get("follow", False))
|
||||
response = await start_streaming(req, "text/plain")
|
||||
try:
|
||||
async for record in self.__log_reader.poll_log(seek, follow):
|
||||
await response.write(("[%s %s] --- %s" % (
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
@@ -66,29 +67,34 @@ class MsdApi:
|
||||
return make_json_response(await self.__msd.get_state())
|
||||
|
||||
@exposed_http("POST", "/msd/set_params")
|
||||
async def __set_params_handler(self, request: Request) -> Response:
|
||||
async def __set_params_handler(self, req: Request) -> Response:
|
||||
params = {
|
||||
key: validator(request.query.get(param))
|
||||
key: validator(req.query.get(param))
|
||||
for (param, key, validator) in [
|
||||
("image", "name", (lambda arg: str(arg).strip() and valid_msd_image_name(arg))),
|
||||
("cdrom", "cdrom", valid_bool),
|
||||
("rw", "rw", valid_bool),
|
||||
]
|
||||
if request.query.get(param) is not None
|
||||
if req.query.get(param) is not None
|
||||
}
|
||||
await self.__msd.set_params(**params) # type: ignore
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/msd/set_connected")
|
||||
async def __set_connected_handler(self, request: Request) -> Response:
|
||||
await self.__msd.set_connected(valid_bool(request.query.get("connected")))
|
||||
async def __set_connected_handler(self, req: Request) -> Response:
|
||||
await self.__msd.set_connected(valid_bool(req.query.get("connected")))
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/msd/make_image")
|
||||
async def __set_zipped_handler(self, req: Request) -> Response:
|
||||
await self.__msd.make_image(valid_bool(req.query.get("zipped")))
|
||||
return make_json_response()
|
||||
|
||||
# =====
|
||||
|
||||
@exposed_http("GET", "/msd/read")
|
||||
async def __read_handler(self, request: Request) -> StreamResponse:
|
||||
name = valid_msd_image_name(request.query.get("image"))
|
||||
async def __read_handler(self, req: Request) -> StreamResponse:
|
||||
name = valid_msd_image_name(req.query.get("image"))
|
||||
compressors = {
|
||||
"": ("", None),
|
||||
"none": ("", None),
|
||||
@@ -96,7 +102,7 @@ class MsdApi:
|
||||
"zstd": (".zst", (lambda: zstandard.ZstdCompressor().compressobj())), # pylint: disable=unnecessary-lambda
|
||||
}
|
||||
(suffix, make_compressor) = compressors[check_string_in_list(
|
||||
arg=request.query.get("compress", ""),
|
||||
arg=req.query.get("compress", ""),
|
||||
name="Compression mode",
|
||||
variants=set(compressors),
|
||||
)]
|
||||
@@ -127,7 +133,7 @@ class MsdApi:
|
||||
src = compressed()
|
||||
size = -1
|
||||
|
||||
response = await start_streaming(request, "application/octet-stream", size, name + suffix)
|
||||
response = await start_streaming(req, "application/octet-stream", size, name + suffix)
|
||||
async for chunk in src:
|
||||
await response.write(chunk)
|
||||
return response
|
||||
@@ -135,28 +141,28 @@ class MsdApi:
|
||||
# =====
|
||||
|
||||
@exposed_http("POST", "/msd/write")
|
||||
async def __write_handler(self, request: Request) -> Response:
|
||||
unsafe_prefix = request.query.get("prefix", "") + "/"
|
||||
name = valid_msd_image_name(unsafe_prefix + request.query.get("image", ""))
|
||||
size = valid_int_f0(request.content_length)
|
||||
remove_incomplete = self.__get_remove_incomplete(request)
|
||||
async def __write_handler(self, req: Request) -> Response:
|
||||
unsafe_prefix = req.query.get("prefix", "") + "/"
|
||||
name = valid_msd_image_name(unsafe_prefix + req.query.get("image", ""))
|
||||
size = valid_int_f0(req.content_length)
|
||||
remove_incomplete = self.__get_remove_incomplete(req)
|
||||
written = 0
|
||||
async with self.__msd.write_image(name, size, remove_incomplete) as writer:
|
||||
chunk_size = writer.get_chunk_size()
|
||||
while True:
|
||||
chunk = await request.content.read(chunk_size)
|
||||
chunk = await req.content.read(chunk_size)
|
||||
if not chunk:
|
||||
break
|
||||
written = await writer.write_chunk(chunk)
|
||||
return make_json_response(self.__make_write_info(name, size, written))
|
||||
|
||||
@exposed_http("POST", "/msd/write_remote")
|
||||
async def __write_remote_handler(self, request: Request) -> (Response | StreamResponse): # pylint: disable=too-many-locals
|
||||
unsafe_prefix = request.query.get("prefix", "") + "/"
|
||||
url = valid_url(request.query.get("url"))
|
||||
insecure = valid_bool(request.query.get("insecure", False))
|
||||
timeout = valid_float_f01(request.query.get("timeout", 10.0))
|
||||
remove_incomplete = self.__get_remove_incomplete(request)
|
||||
async def __write_remote_handler(self, req: Request) -> (Response | StreamResponse): # pylint: disable=too-many-locals
|
||||
unsafe_prefix = req.query.get("prefix", "") + "/"
|
||||
url = valid_url(req.query.get("url"))
|
||||
insecure = valid_bool(req.query.get("insecure", False))
|
||||
timeout = valid_float_f01(req.query.get("timeout", 10.0))
|
||||
remove_incomplete = self.__get_remove_incomplete(req)
|
||||
|
||||
name = ""
|
||||
size = written = 0
|
||||
@@ -174,7 +180,7 @@ class MsdApi:
|
||||
read_timeout=(7 * 24 * 3600),
|
||||
) as remote:
|
||||
|
||||
name = str(request.query.get("image", "")).strip()
|
||||
name = str(req.query.get("image", "")).strip()
|
||||
if len(name) == 0:
|
||||
name = htclient.get_filename(remote)
|
||||
name = valid_msd_image_name(unsafe_prefix + name)
|
||||
@@ -184,7 +190,7 @@ class MsdApi:
|
||||
get_logger(0).info("Downloading image %r as %r to MSD ...", url, name)
|
||||
async with self.__msd.write_image(name, size, remove_incomplete) as writer:
|
||||
chunk_size = writer.get_chunk_size()
|
||||
response = await start_streaming(request, "application/x-ndjson")
|
||||
response = await start_streaming(req, "application/x-ndjson")
|
||||
await stream_write_info()
|
||||
last_report_ts = 0
|
||||
async for chunk in remote.content.iter_chunked(chunk_size):
|
||||
@@ -197,16 +203,16 @@ class MsdApi:
|
||||
await stream_write_info()
|
||||
return response
|
||||
|
||||
except Exception as err:
|
||||
except Exception as ex:
|
||||
if response is not None:
|
||||
await stream_write_info()
|
||||
await stream_json_exception(response, err)
|
||||
elif isinstance(err, aiohttp.ClientError):
|
||||
return make_json_exception(err, 400)
|
||||
await stream_json_exception(response, ex)
|
||||
elif isinstance(ex, aiohttp.ClientError):
|
||||
return make_json_exception(ex, 400)
|
||||
raise
|
||||
|
||||
def __get_remove_incomplete(self, request: Request) -> (bool | None):
|
||||
flag: (str | None) = request.query.get("remove_incomplete")
|
||||
def __get_remove_incomplete(self, req: Request) -> (bool | None):
|
||||
flag: (str | None) = req.query.get("remove_incomplete")
|
||||
return (valid_bool(flag) if flag is not None else None)
|
||||
|
||||
def __make_write_info(self, name: str, size: int, written: int) -> dict:
|
||||
@@ -215,8 +221,8 @@ class MsdApi:
|
||||
# =====
|
||||
|
||||
@exposed_http("POST", "/msd/remove")
|
||||
async def __remove_handler(self, request: Request) -> Response:
|
||||
await self.__msd.remove(valid_msd_image_name(request.query.get("image")))
|
||||
async def __remove_handler(self, req: Request) -> Response:
|
||||
await self.__msd.remove(valid_msd_image_name(req.query.get("image")))
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/msd/reset")
|
||||
|
||||
@@ -88,12 +88,12 @@ class RedfishApi:
|
||||
|
||||
@exposed_http("GET", "/redfish/v1/Systems/0")
|
||||
async def __server_handler(self, _: Request) -> Response:
|
||||
(atx_state, meta_state) = await asyncio.gather(*[
|
||||
(atx_state, info_state) = await asyncio.gather(*[
|
||||
self.__atx.get_state(),
|
||||
self.__info_manager.get_submanager("meta").get_state(),
|
||||
self.__info_manager.get_state(["meta"]),
|
||||
])
|
||||
try:
|
||||
host = str(meta_state.get("server", {})["host"]) # type: ignore
|
||||
host = str(info_state["meta"].get("server", {})["host"]) # type: ignore
|
||||
except Exception:
|
||||
host = ""
|
||||
return make_json_response({
|
||||
@@ -111,10 +111,10 @@ class RedfishApi:
|
||||
}, wrap_result=False)
|
||||
|
||||
@exposed_http("POST", "/redfish/v1/Systems/0/Actions/ComputerSystem.Reset")
|
||||
async def __power_handler(self, request: Request) -> Response:
|
||||
async def __power_handler(self, req: Request) -> Response:
|
||||
try:
|
||||
action = check_string_in_list(
|
||||
arg=(await request.json())["ResetType"],
|
||||
arg=(await req.json()).get("ResetType"),
|
||||
name="Redfish ResetType",
|
||||
variants=set(self.__actions),
|
||||
lower=False,
|
||||
|
||||
@@ -52,36 +52,36 @@ class StreamerApi:
|
||||
return make_json_response(await self.__streamer.get_state())
|
||||
|
||||
@exposed_http("GET", "/streamer/snapshot")
|
||||
async def __take_snapshot_handler(self, request: Request) -> Response:
|
||||
async def __take_snapshot_handler(self, req: Request) -> Response:
|
||||
snapshot = await self.__streamer.take_snapshot(
|
||||
save=valid_bool(request.query.get("save", False)),
|
||||
load=valid_bool(request.query.get("load", False)),
|
||||
allow_offline=valid_bool(request.query.get("allow_offline", False)),
|
||||
save=valid_bool(req.query.get("save", False)),
|
||||
load=valid_bool(req.query.get("load", False)),
|
||||
allow_offline=valid_bool(req.query.get("allow_offline", False)),
|
||||
)
|
||||
if snapshot:
|
||||
if valid_bool(request.query.get("ocr", False)):
|
||||
if valid_bool(req.query.get("ocr", False)):
|
||||
langs = self.__ocr.get_available_langs()
|
||||
return Response(
|
||||
body=(await self.__ocr.recognize(
|
||||
data=snapshot.data,
|
||||
langs=valid_string_list(
|
||||
arg=str(request.query.get("ocr_langs", "")).strip(),
|
||||
arg=str(req.query.get("ocr_langs", "")).strip(),
|
||||
subval=(lambda lang: check_string_in_list(lang, "OCR lang", langs)),
|
||||
name="OCR langs list",
|
||||
),
|
||||
left=int(valid_number(request.query.get("ocr_left", -1))),
|
||||
top=int(valid_number(request.query.get("ocr_top", -1))),
|
||||
right=int(valid_number(request.query.get("ocr_right", -1))),
|
||||
bottom=int(valid_number(request.query.get("ocr_bottom", -1))),
|
||||
left=int(valid_number(req.query.get("ocr_left", -1))),
|
||||
top=int(valid_number(req.query.get("ocr_top", -1))),
|
||||
right=int(valid_number(req.query.get("ocr_right", -1))),
|
||||
bottom=int(valid_number(req.query.get("ocr_bottom", -1))),
|
||||
)),
|
||||
headers=dict(snapshot.headers),
|
||||
content_type="text/plain",
|
||||
)
|
||||
elif valid_bool(request.query.get("preview", False)):
|
||||
elif valid_bool(req.query.get("preview", False)):
|
||||
data = await snapshot.make_preview(
|
||||
max_width=valid_int_f0(request.query.get("preview_max_width", 0)),
|
||||
max_height=valid_int_f0(request.query.get("preview_max_height", 0)),
|
||||
quality=valid_stream_quality(request.query.get("preview_quality", 80)),
|
||||
max_width=valid_int_f0(req.query.get("preview_max_width", 0)),
|
||||
max_height=valid_int_f0(req.query.get("preview_max_height", 0)),
|
||||
quality=valid_stream_quality(req.query.get("preview_quality", 80)),
|
||||
)
|
||||
else:
|
||||
data = snapshot.data
|
||||
@@ -97,25 +97,6 @@ class StreamerApi:
|
||||
self.__streamer.remove_snapshot()
|
||||
return make_json_response()
|
||||
|
||||
# =====
|
||||
|
||||
async def get_ocr(self) -> dict: # XXX: Ugly hack
|
||||
enabled = self.__ocr.is_available()
|
||||
default: list[str] = []
|
||||
available: list[str] = []
|
||||
if enabled:
|
||||
default = self.__ocr.get_default_langs()
|
||||
available = self.__ocr.get_available_langs()
|
||||
return {
|
||||
"ocr": {
|
||||
"enabled": enabled,
|
||||
"langs": {
|
||||
"default": default,
|
||||
"available": available,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@exposed_http("GET", "/streamer/ocr")
|
||||
async def __ocr_handler(self, _: Request) -> Response:
|
||||
return make_json_response(await self.get_ocr())
|
||||
return make_json_response({"ocr": (await self.__ocr.get_state())})
|
||||
|
||||
164
kvmd/apps/kvmd/api/switch.py
Normal file
164
kvmd/apps/kvmd/api/switch.py
Normal file
@@ -0,0 +1,164 @@
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
from aiohttp.web import Request
|
||||
from aiohttp.web import Response
|
||||
|
||||
from ....htserver import exposed_http
|
||||
from ....htserver import make_json_response
|
||||
|
||||
from ....validators.basic import valid_bool
|
||||
from ....validators.basic import valid_int_f0
|
||||
from ....validators.basic import valid_stripped_string_not_empty
|
||||
from ....validators.kvm import valid_atx_power_action
|
||||
from ....validators.kvm import valid_atx_button
|
||||
from ....validators.switch import valid_switch_port_name
|
||||
from ....validators.switch import valid_switch_edid_id
|
||||
from ....validators.switch import valid_switch_edid_data
|
||||
from ....validators.switch import valid_switch_color
|
||||
from ....validators.switch import valid_switch_atx_click_delay
|
||||
|
||||
from ..switch import Switch
|
||||
from ..switch import Colors
|
||||
|
||||
|
||||
# =====
|
||||
class SwitchApi:
|
||||
def __init__(self, switch: Switch) -> None:
|
||||
self.__switch = switch
|
||||
|
||||
# =====
|
||||
|
||||
@exposed_http("GET", "/switch")
|
||||
async def __state_handler(self, _: Request) -> Response:
|
||||
return make_json_response(await self.__switch.get_state())
|
||||
|
||||
@exposed_http("POST", "/switch/set_active")
|
||||
async def __set_active_port_handler(self, req: Request) -> Response:
|
||||
port = valid_int_f0(req.query.get("port"))
|
||||
await self.__switch.set_active_port(port)
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/switch/set_beacon")
|
||||
async def __set_beacon_handler(self, req: Request) -> Response:
|
||||
on = valid_bool(req.query.get("state"))
|
||||
if "port" in req.query:
|
||||
port = valid_int_f0(req.query.get("port"))
|
||||
await self.__switch.set_port_beacon(port, on)
|
||||
elif "uplink" in req.query:
|
||||
unit = valid_int_f0(req.query.get("uplink"))
|
||||
await self.__switch.set_uplink_beacon(unit, on)
|
||||
else: # Downlink
|
||||
unit = valid_int_f0(req.query.get("downlink"))
|
||||
await self.__switch.set_downlink_beacon(unit, on)
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/switch/set_port_params")
|
||||
async def __set_port_params(self, req: Request) -> Response:
|
||||
port = valid_int_f0(req.query.get("port"))
|
||||
params = {
|
||||
param: validator(req.query.get(param))
|
||||
for (param, validator) in [
|
||||
("edid_id", (lambda arg: valid_switch_edid_id(arg, allow_default=True))),
|
||||
("name", valid_switch_port_name),
|
||||
("atx_click_power_delay", valid_switch_atx_click_delay),
|
||||
("atx_click_power_long_delay", valid_switch_atx_click_delay),
|
||||
("atx_click_reset_delay", valid_switch_atx_click_delay),
|
||||
]
|
||||
if req.query.get(param) is not None
|
||||
}
|
||||
await self.__switch.set_port_params(port, **params) # type: ignore
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/switch/set_colors")
|
||||
async def __set_colors(self, req: Request) -> Response:
|
||||
params = {
|
||||
param: valid_switch_color(req.query.get(param), allow_default=True)
|
||||
for param in Colors.ROLES
|
||||
if req.query.get(param) is not None
|
||||
}
|
||||
await self.__switch.set_colors(**params)
|
||||
return make_json_response()
|
||||
|
||||
# =====
|
||||
|
||||
@exposed_http("POST", "/switch/reset")
|
||||
async def __reset(self, req: Request) -> Response:
|
||||
unit = valid_int_f0(req.query.get("unit"))
|
||||
bootloader = valid_bool(req.query.get("bootloader", False))
|
||||
await self.__switch.reboot_unit(unit, bootloader)
|
||||
return make_json_response()
|
||||
|
||||
# =====
|
||||
|
||||
@exposed_http("POST", "/switch/edids/create")
|
||||
async def __create_edid(self, req: Request) -> Response:
|
||||
name = valid_stripped_string_not_empty(req.query.get("name"))
|
||||
data_hex = valid_switch_edid_data(req.query.get("data"))
|
||||
edid_id = await self.__switch.create_edid(name, data_hex)
|
||||
return make_json_response({"id": edid_id})
|
||||
|
||||
@exposed_http("POST", "/switch/edids/change")
|
||||
async def __change_edid(self, req: Request) -> Response:
|
||||
edid_id = valid_switch_edid_id(req.query.get("id"), allow_default=False)
|
||||
params = {
|
||||
param: validator(req.query.get(param))
|
||||
for (param, validator) in [
|
||||
("name", valid_switch_port_name),
|
||||
("data", valid_switch_edid_data),
|
||||
]
|
||||
if req.query.get(param) is not None
|
||||
}
|
||||
if params:
|
||||
await self.__switch.change_edid(edid_id, **params)
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/switch/edids/remove")
|
||||
async def __remove_edid(self, req: Request) -> Response:
|
||||
edid_id = valid_switch_edid_id(req.query.get("id"), allow_default=False)
|
||||
await self.__switch.remove_edid(edid_id)
|
||||
return make_json_response()
|
||||
|
||||
# =====
|
||||
|
||||
@exposed_http("POST", "/switch/atx/power")
|
||||
async def __power_handler(self, req: Request) -> Response:
|
||||
port = valid_int_f0(req.query.get("port"))
|
||||
action = valid_atx_power_action(req.query.get("action"))
|
||||
await ({
|
||||
"on": self.__switch.atx_power_on,
|
||||
"off": self.__switch.atx_power_off,
|
||||
"off_hard": self.__switch.atx_power_off_hard,
|
||||
"reset_hard": self.__switch.atx_power_reset_hard,
|
||||
}[action])(port)
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/switch/atx/click")
|
||||
async def __click_handler(self, req: Request) -> Response:
|
||||
port = valid_int_f0(req.query.get("port"))
|
||||
button = valid_atx_button(req.query.get("button"))
|
||||
await ({
|
||||
"power": self.__switch.atx_click_power,
|
||||
"power_long": self.__switch.atx_click_power_long,
|
||||
"reset": self.__switch.atx_click_reset,
|
||||
}[button])(port)
|
||||
return make_json_response()
|
||||
@@ -42,23 +42,20 @@ class UserGpioApi:
|
||||
|
||||
@exposed_http("GET", "/gpio")
|
||||
async def __state_handler(self, _: Request) -> Response:
|
||||
return make_json_response({
|
||||
"model": (await self.__user_gpio.get_model()),
|
||||
"state": (await self.__user_gpio.get_state()),
|
||||
})
|
||||
return make_json_response(await self.__user_gpio.get_state())
|
||||
|
||||
@exposed_http("POST", "/gpio/switch")
|
||||
async def __switch_handler(self, request: Request) -> Response:
|
||||
channel = valid_ugpio_channel(request.query.get("channel"))
|
||||
state = valid_bool(request.query.get("state"))
|
||||
wait = valid_bool(request.query.get("wait", False))
|
||||
async def __switch_handler(self, req: Request) -> Response:
|
||||
channel = valid_ugpio_channel(req.query.get("channel"))
|
||||
state = valid_bool(req.query.get("state"))
|
||||
wait = valid_bool(req.query.get("wait", False))
|
||||
await self.__user_gpio.switch(channel, state, wait)
|
||||
return make_json_response()
|
||||
|
||||
@exposed_http("POST", "/gpio/pulse")
|
||||
async def __pulse_handler(self, request: Request) -> Response:
|
||||
channel = valid_ugpio_channel(request.query.get("channel"))
|
||||
delay = valid_float_f0(request.query.get("delay", 0.0))
|
||||
wait = valid_bool(request.query.get("wait", False))
|
||||
async def __pulse_handler(self, req: Request) -> Response:
|
||||
channel = valid_ugpio_channel(req.query.get("channel"))
|
||||
delay = valid_float_f0(req.query.get("delay", 0.0))
|
||||
wait = valid_bool(req.query.get("wait", False))
|
||||
await self.__user_gpio.pulse(channel, delay, wait)
|
||||
return make_json_response()
|
||||
|
||||
@@ -23,8 +23,6 @@
|
||||
import secrets
|
||||
import pyotp
|
||||
|
||||
from gettext import translation
|
||||
|
||||
from ...logging import get_logger
|
||||
|
||||
from ... import aiotools
|
||||
@@ -34,7 +32,6 @@ from ...plugins.auth import get_auth_service_class
|
||||
|
||||
from ...htserver import HttpExposed
|
||||
|
||||
from ...languages import Languages
|
||||
|
||||
# =====
|
||||
class AuthManager:
|
||||
@@ -52,32 +49,31 @@ class AuthManager:
|
||||
|
||||
totp_secret_path: str,
|
||||
) -> None:
|
||||
self.gettext=Languages().gettext
|
||||
|
||||
self.__enabled = enabled
|
||||
if not enabled:
|
||||
get_logger().warning(self.gettext("AUTHORIZATION IS DISABLED"))
|
||||
get_logger().warning("AUTHORIZATION IS DISABLED")
|
||||
|
||||
self.__unauth_paths = frozenset(unauth_paths) # To speed up
|
||||
for path in self.__unauth_paths:
|
||||
get_logger().warning(self.gettext("Authorization is disabled for API %r"), path)
|
||||
get_logger().warning("Authorization is disabled for API %r", path)
|
||||
|
||||
self.__internal_service: (BaseAuthService | None) = None
|
||||
if enabled:
|
||||
self.__internal_service = get_auth_service_class(internal_type)(**internal_kwargs)
|
||||
get_logger().info(self.gettext("Using internal auth service %r"), self.__internal_service.get_plugin_name())
|
||||
get_logger().info("Using internal auth service %r", self.__internal_service.get_plugin_name())
|
||||
|
||||
self.__force_internal_users = force_internal_users
|
||||
|
||||
self.__external_service: (BaseAuthService | None) = None
|
||||
if enabled and external_type:
|
||||
self.__external_service = get_auth_service_class(external_type)(**external_kwargs)
|
||||
get_logger().info(self.gettext("Using external auth service %r"), self.__external_service.get_plugin_name())
|
||||
get_logger().info("Using external auth service %r", self.__external_service.get_plugin_name())
|
||||
|
||||
self.__totp_secret_path = totp_secret_path
|
||||
|
||||
self.__tokens: dict[str, str] = {} # {token: user}
|
||||
|
||||
|
||||
def is_auth_enabled(self) -> bool:
|
||||
return self.__enabled
|
||||
|
||||
@@ -99,8 +95,8 @@ class AuthManager:
|
||||
secret = file.read().strip()
|
||||
if secret:
|
||||
code = passwd[-6:]
|
||||
if not pyotp.TOTP(secret).verify(code):
|
||||
get_logger().error(self.gettext("Got access denied for user %r by TOTP"), user)
|
||||
if not pyotp.TOTP(secret).verify(code, valid_window=1):
|
||||
get_logger().error("Got access denied for user %r by TOTP", user)
|
||||
return False
|
||||
passwd = passwd[:-6]
|
||||
|
||||
@@ -111,9 +107,9 @@ class AuthManager:
|
||||
|
||||
ok = (await service.authorize(user, passwd))
|
||||
if ok:
|
||||
get_logger().info(self.gettext("Authorized user %r via auth service %r"), user, service.get_plugin_name())
|
||||
get_logger().info("Authorized user %r via auth service %r", user, service.get_plugin_name())
|
||||
else:
|
||||
get_logger().error(self.gettext("Got access denied for user %r from auth service %r"), user, service.get_plugin_name())
|
||||
get_logger().error("Got access denied for user %r from auth service %r", user, service.get_plugin_name())
|
||||
return ok
|
||||
|
||||
async def login(self, user: str, passwd: str) -> (str | None):
|
||||
@@ -123,7 +119,7 @@ class AuthManager:
|
||||
if (await self.authorize(user, passwd)):
|
||||
token = self.__make_new_token()
|
||||
self.__tokens[token] = user
|
||||
get_logger().info(self.gettext("Logged in user %r"), user)
|
||||
get_logger().info("Logged in user %r", user)
|
||||
return token
|
||||
else:
|
||||
return None
|
||||
@@ -133,7 +129,7 @@ class AuthManager:
|
||||
token = secrets.token_hex(32)
|
||||
if token not in self.__tokens:
|
||||
return token
|
||||
raise AssertionError(self.gettext("Can't generate new unique token"))
|
||||
raise AssertionError("Can't generate new unique token")
|
||||
|
||||
def logout(self, token: str) -> None:
|
||||
assert self.__enabled
|
||||
@@ -144,7 +140,7 @@ class AuthManager:
|
||||
if r_user == user:
|
||||
count += 1
|
||||
del self.__tokens[r_token]
|
||||
get_logger().info(self.gettext("Logged out user %r (%d)"), user, count)
|
||||
get_logger().info("Logged out user %r (%d)", user, count)
|
||||
|
||||
def check(self, token: str) -> (str | None):
|
||||
assert self.__enabled
|
||||
|
||||
@@ -20,6 +20,10 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
import asyncio
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from ....yamlconf import Section
|
||||
|
||||
from .base import BaseInfoSubmanager
|
||||
@@ -34,17 +38,59 @@ from .fan import FanInfoSubmanager
|
||||
# =====
|
||||
class InfoManager:
|
||||
def __init__(self, config: Section) -> None:
|
||||
self.__subs = {
|
||||
self.__subs: dict[str, BaseInfoSubmanager] = {
|
||||
"system": SystemInfoSubmanager(config.kvmd.streamer.cmd),
|
||||
"auth": AuthInfoSubmanager(config.kvmd.auth.enabled),
|
||||
"meta": MetaInfoSubmanager(config.kvmd.info.meta),
|
||||
"auth": AuthInfoSubmanager(config.kvmd.auth.enabled),
|
||||
"meta": MetaInfoSubmanager(config.kvmd.info.meta),
|
||||
"extras": ExtrasInfoSubmanager(config),
|
||||
"hw": HwInfoSubmanager(**config.kvmd.info.hw._unpack()),
|
||||
"fan": FanInfoSubmanager(**config.kvmd.info.fan._unpack()),
|
||||
"hw": HwInfoSubmanager(**config.kvmd.info.hw._unpack()),
|
||||
"fan": FanInfoSubmanager(**config.kvmd.info.fan._unpack()),
|
||||
}
|
||||
self.__queue: "asyncio.Queue[tuple[str, (dict | None)]]" = asyncio.Queue()
|
||||
|
||||
def get_subs(self) -> set[str]:
|
||||
return set(self.__subs)
|
||||
|
||||
def get_submanager(self, name: str) -> BaseInfoSubmanager:
|
||||
return self.__subs[name]
|
||||
async def get_state(self, fields: (list[str] | None)=None) -> dict:
|
||||
fields = (fields or list(self.__subs))
|
||||
return dict(zip(fields, await asyncio.gather(*[
|
||||
self.__subs[field].get_state()
|
||||
for field in fields
|
||||
])))
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
await asyncio.gather(*[
|
||||
sub.trigger_state()
|
||||
for sub in self.__subs.values()
|
||||
])
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[dict, None]:
|
||||
# ==== Granularity table ====
|
||||
# - system -- Partial
|
||||
# - auth -- Partial
|
||||
# - meta -- Partial, nullable
|
||||
# - extras -- Partial, nullable
|
||||
# - hw -- Partial
|
||||
# - fan -- Partial
|
||||
# ===========================
|
||||
|
||||
while True:
|
||||
(field, value) = await self.__queue.get()
|
||||
yield {field: value}
|
||||
|
||||
async def systask(self) -> None:
|
||||
tasks = [
|
||||
asyncio.create_task(self.__poller(field))
|
||||
for field in self.__subs
|
||||
]
|
||||
try:
|
||||
await asyncio.gather(*tasks)
|
||||
except Exception:
|
||||
for task in tasks:
|
||||
task.cancel()
|
||||
await asyncio.gather(*tasks, return_exceptions=True)
|
||||
raise
|
||||
|
||||
async def __poller(self, field: str) -> None:
|
||||
async for state in self.__subs[field].poll_state():
|
||||
self.__queue.put_nowait((field, state))
|
||||
|
||||
@@ -20,6 +20,10 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from .... import aiotools
|
||||
|
||||
from .base import BaseInfoSubmanager
|
||||
|
||||
|
||||
@@ -27,6 +31,15 @@ from .base import BaseInfoSubmanager
|
||||
class AuthInfoSubmanager(BaseInfoSubmanager):
|
||||
def __init__(self, enabled: bool) -> None:
|
||||
self.__enabled = enabled
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
async def get_state(self) -> dict:
|
||||
return {"enabled": self.__enabled}
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify()
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[(dict | None), None]:
|
||||
while True:
|
||||
await self.__notifier.wait()
|
||||
yield (await self.get_state())
|
||||
|
||||
@@ -20,7 +20,17 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
|
||||
# =====
|
||||
class BaseInfoSubmanager:
|
||||
async def get_state(self) -> (dict | None):
|
||||
raise NotImplementedError
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
raise NotImplementedError
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[(dict | None), None]:
|
||||
yield None
|
||||
raise NotImplementedError
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
@@ -24,6 +25,8 @@ import os
|
||||
import re
|
||||
import asyncio
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from ....logging import get_logger
|
||||
|
||||
from ....yamlconf import Section
|
||||
@@ -42,13 +45,15 @@ from .base import BaseInfoSubmanager
|
||||
class ExtrasInfoSubmanager(BaseInfoSubmanager):
|
||||
def __init__(self, global_config: Section) -> None:
|
||||
self.__global_config = global_config
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
async def get_state(self) -> (dict | None):
|
||||
try:
|
||||
sui = sysunit.SystemdUnitInfo()
|
||||
await sui.open()
|
||||
except Exception as err:
|
||||
get_logger(0).error("Can't open systemd bus to get extras state: %s", tools.efmt(err))
|
||||
except Exception as ex:
|
||||
if not os.path.exists("/etc/kvmd/.docker_flag"):
|
||||
get_logger(0).error("Can't open systemd bus to get extras state: %s", tools.efmt(ex))
|
||||
sui = None
|
||||
try:
|
||||
extras: dict[str, dict] = {}
|
||||
@@ -66,6 +71,14 @@ class ExtrasInfoSubmanager(BaseInfoSubmanager):
|
||||
if sui is not None:
|
||||
await aiotools.shield_fg(sui.close())
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify()
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[(dict | None), None]:
|
||||
while True:
|
||||
await self.__notifier.wait()
|
||||
yield (await self.get_state())
|
||||
|
||||
def __get_extras_path(self, *parts: str) -> str:
|
||||
return os.path.join(self.__global_config.kvmd.info.extras, *parts)
|
||||
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
|
||||
|
||||
import copy
|
||||
import asyncio
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
@@ -53,6 +52,8 @@ class FanInfoSubmanager(BaseInfoSubmanager):
|
||||
self.__timeout = timeout
|
||||
self.__state_poll = state_poll
|
||||
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
async def get_state(self) -> dict:
|
||||
monitored = await self.__get_monitored()
|
||||
return {
|
||||
@@ -60,24 +61,28 @@ class FanInfoSubmanager(BaseInfoSubmanager):
|
||||
"state": ((await self.__get_fan_state() if monitored else None)),
|
||||
}
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[dict, None]:
|
||||
prev_state: dict = {}
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify(1)
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[(dict | None), None]:
|
||||
prev: dict = {}
|
||||
while True:
|
||||
if self.__unix_path:
|
||||
pure = state = await self.get_state()
|
||||
if (await self.__notifier.wait(timeout=self.__state_poll)) > 0:
|
||||
prev = {}
|
||||
new = await self.get_state()
|
||||
pure = copy.deepcopy(new)
|
||||
if pure["state"] is not None:
|
||||
try:
|
||||
pure = copy.deepcopy(state)
|
||||
pure["state"]["service"]["now_ts"] = 0
|
||||
except Exception:
|
||||
pass
|
||||
if pure != prev_state:
|
||||
yield state
|
||||
prev_state = pure
|
||||
await asyncio.sleep(self.__state_poll)
|
||||
if pure != prev:
|
||||
prev = pure
|
||||
yield new
|
||||
else:
|
||||
await self.__notifier.wait()
|
||||
yield (await self.get_state())
|
||||
await aiotools.wait_infinite()
|
||||
|
||||
# =====
|
||||
|
||||
@@ -87,8 +92,8 @@ class FanInfoSubmanager(BaseInfoSubmanager):
|
||||
async with sysunit.SystemdUnitInfo() as sui:
|
||||
status = await sui.get_status(self.__daemon)
|
||||
return (status[0] or status[1])
|
||||
except Exception as err:
|
||||
get_logger(0).error("Can't get info about the service %r: %s", self.__daemon, tools.efmt(err))
|
||||
except Exception as ex:
|
||||
get_logger(0).error("Can't get info about the service %r: %s", self.__daemon, tools.efmt(ex))
|
||||
return False
|
||||
|
||||
async def __get_fan_state(self) -> (dict | None):
|
||||
@@ -97,8 +102,8 @@ class FanInfoSubmanager(BaseInfoSubmanager):
|
||||
async with session.get("http://localhost/state") as response:
|
||||
htclient.raise_not_200(response)
|
||||
return (await response.json())["result"]
|
||||
except Exception as err:
|
||||
get_logger(0).error("Can't read fan state: %s", err)
|
||||
except Exception as ex:
|
||||
get_logger(0).error("Can't read fan state: %s", ex)
|
||||
return None
|
||||
|
||||
def __make_http_session(self) -> aiohttp.ClientSession:
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import copy
|
||||
|
||||
from typing import Callable
|
||||
from typing import AsyncGenerator
|
||||
@@ -60,6 +61,8 @@ class HwInfoSubmanager(BaseInfoSubmanager):
|
||||
|
||||
self.__dt_cache: dict[str, str] = {}
|
||||
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
async def get_state(self) -> dict:
|
||||
(
|
||||
base,
|
||||
@@ -70,8 +73,8 @@ class HwInfoSubmanager(BaseInfoSubmanager):
|
||||
cpu_temp,
|
||||
mem,
|
||||
) = await asyncio.gather(
|
||||
self.__read_dt_file("model"),
|
||||
self.__read_dt_file("serial-number"),
|
||||
self.__read_dt_file("model", upper=False),
|
||||
self.__read_dt_file("serial-number", upper=True),
|
||||
self.__read_platform_file(),
|
||||
self.__get_throttling(),
|
||||
self.__get_cpu_percent(),
|
||||
@@ -97,18 +100,22 @@ class HwInfoSubmanager(BaseInfoSubmanager):
|
||||
},
|
||||
}
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify(1)
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[dict, None]:
|
||||
prev_state: dict = {}
|
||||
prev: dict = {}
|
||||
while True:
|
||||
state = await self.get_state()
|
||||
if state != prev_state:
|
||||
yield state
|
||||
prev_state = state
|
||||
await asyncio.sleep(self.__state_poll)
|
||||
if (await self.__notifier.wait(timeout=self.__state_poll)) > 0:
|
||||
prev = {}
|
||||
new = await self.get_state()
|
||||
if new != prev:
|
||||
prev = copy.deepcopy(new)
|
||||
yield new
|
||||
|
||||
# =====
|
||||
|
||||
async def __read_dt_file(self, name: str) -> (str | None):
|
||||
async def __read_dt_file(self, name: str, upper: bool) -> (str | None):
|
||||
if name not in self.__dt_cache:
|
||||
path = os.path.join(f"{env.PROCFS_PREFIX}/proc/device-tree", name)
|
||||
if not os.path.exists(path):
|
||||
@@ -161,8 +168,8 @@ class HwInfoSubmanager(BaseInfoSubmanager):
|
||||
+ system_all / total * 100
|
||||
+ (st.steal + st.guest) / total * 100
|
||||
)
|
||||
except Exception as err:
|
||||
get_logger(0).error("Can't get CPU percent: %s", err)
|
||||
except Exception as ex:
|
||||
get_logger(0).error("Can't get CPU percent: %s", ex)
|
||||
return None
|
||||
|
||||
async def __get_mem(self) -> dict:
|
||||
@@ -173,8 +180,8 @@ class HwInfoSubmanager(BaseInfoSubmanager):
|
||||
"total": st.total,
|
||||
"available": st.available,
|
||||
}
|
||||
except Exception as err:
|
||||
get_logger(0).error("Can't get memory info: %s", err)
|
||||
except Exception as ex:
|
||||
get_logger(0).error("Can't get memory info: %s", ex)
|
||||
return {
|
||||
"percent": None,
|
||||
"total": None,
|
||||
@@ -217,6 +224,6 @@ class HwInfoSubmanager(BaseInfoSubmanager):
|
||||
return None
|
||||
try:
|
||||
return parser(text)
|
||||
except Exception as err:
|
||||
get_logger(0).error("Can't parse [ %s ] output: %r: %s", tools.cmdfmt(cmd), text, tools.efmt(err))
|
||||
except Exception as ex:
|
||||
get_logger(0).error("Can't parse [ %s ] output: %r: %s", tools.cmdfmt(cmd), text, tools.efmt(ex))
|
||||
return None
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from ....logging import get_logger
|
||||
|
||||
from ....yamlconf.loader import load_yaml_file
|
||||
@@ -33,6 +35,7 @@ from .base import BaseInfoSubmanager
|
||||
class MetaInfoSubmanager(BaseInfoSubmanager):
|
||||
def __init__(self, meta_path: str) -> None:
|
||||
self.__meta_path = meta_path
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
async def get_state(self) -> (dict | None):
|
||||
try:
|
||||
@@ -40,3 +43,11 @@ class MetaInfoSubmanager(BaseInfoSubmanager):
|
||||
except Exception:
|
||||
get_logger(0).exception("Can't parse meta")
|
||||
return None
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify()
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[(dict | None), None]:
|
||||
while True:
|
||||
await self.__notifier.wait()
|
||||
yield (await self.get_state())
|
||||
|
||||
@@ -24,8 +24,11 @@ import os
|
||||
import asyncio
|
||||
import platform
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from ....logging import get_logger
|
||||
|
||||
from .... import aiotools
|
||||
from .... import aioproc
|
||||
|
||||
from .... import __version__
|
||||
@@ -37,6 +40,7 @@ from .base import BaseInfoSubmanager
|
||||
class SystemInfoSubmanager(BaseInfoSubmanager):
|
||||
def __init__(self, streamer_cmd: list[str]) -> None:
|
||||
self.__streamer_cmd = streamer_cmd
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
async def get_state(self) -> dict:
|
||||
streamer_info = await self.__get_streamer_info()
|
||||
@@ -50,6 +54,14 @@ class SystemInfoSubmanager(BaseInfoSubmanager):
|
||||
},
|
||||
}
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify()
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[(dict | None), None]:
|
||||
while True:
|
||||
await self.__notifier.wait()
|
||||
yield (await self.get_state())
|
||||
|
||||
# =====
|
||||
|
||||
async def __get_streamer_info(self) -> dict:
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# Copyright (C) 2023-2025 SilentWind <mofeng654321@hotmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
@@ -30,17 +31,12 @@ from xmlrpc.client import ServerProxy
|
||||
|
||||
from ...logging import get_logger
|
||||
|
||||
us_systemd_journal = True
|
||||
try:
|
||||
import systemd.journal
|
||||
except ImportError as e:
|
||||
get_logger(0).error("Failed to import module: %s", "systemd.journal")
|
||||
us_systemd_journal = False
|
||||
|
||||
try:
|
||||
except ImportError:
|
||||
import supervisor.xmlrpc
|
||||
except ImportError as e:
|
||||
get_logger(0).info("Failed to import module: %s", "supervisor.xmlrpc")
|
||||
us_systemd_journal = True
|
||||
us_systemd_journal = False
|
||||
|
||||
|
||||
# =====
|
||||
|
||||
@@ -37,6 +37,7 @@ from ctypes import c_void_p
|
||||
from ctypes import c_char
|
||||
|
||||
from typing import Generator
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from PIL import ImageOps
|
||||
from PIL import Image as PilImage
|
||||
@@ -76,8 +77,8 @@ def _load_libtesseract() -> (ctypes.CDLL | None):
|
||||
setattr(func, "restype", restype)
|
||||
setattr(func, "argtypes", argtypes)
|
||||
return lib
|
||||
except Exception as err:
|
||||
warnings.warn(f"Can't load libtesseract: {err}", RuntimeWarning)
|
||||
except Exception as ex:
|
||||
warnings.warn(f"Can't load libtesseract: {ex}", RuntimeWarning)
|
||||
return None
|
||||
|
||||
|
||||
@@ -107,9 +108,37 @@ class Ocr:
|
||||
def __init__(self, data_dir_path: str, default_langs: list[str]) -> None:
|
||||
self.__data_dir_path = data_dir_path
|
||||
self.__default_langs = default_langs
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
def is_available(self) -> bool:
|
||||
return bool(_libtess)
|
||||
async def get_state(self) -> dict:
|
||||
enabled = bool(_libtess)
|
||||
default: list[str] = []
|
||||
available: list[str] = []
|
||||
if enabled:
|
||||
default = self.get_default_langs()
|
||||
available = self.get_available_langs()
|
||||
return {
|
||||
"enabled": enabled,
|
||||
"langs": {
|
||||
"default": default,
|
||||
"available": available,
|
||||
},
|
||||
}
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify()
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[dict, None]:
|
||||
# ===== Granularity table =====
|
||||
# - enabled -- Full
|
||||
# - langs -- Partial
|
||||
# =============================
|
||||
|
||||
while True:
|
||||
await self.__notifier.wait()
|
||||
yield (await self.get_state())
|
||||
|
||||
# =====
|
||||
|
||||
def get_default_langs(self) -> list[str]:
|
||||
return list(self.__default_langs)
|
||||
|
||||
@@ -20,8 +20,6 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
import asyncio
|
||||
import operator
|
||||
import dataclasses
|
||||
|
||||
from typing import Callable
|
||||
@@ -33,7 +31,7 @@ from aiohttp.web import Request
|
||||
from aiohttp.web import Response
|
||||
from aiohttp.web import WebSocketResponse
|
||||
|
||||
from ...languages import Languages
|
||||
from ... import __version__
|
||||
|
||||
from ...logging import get_logger
|
||||
|
||||
@@ -68,6 +66,7 @@ from .ugpio import UserGpio
|
||||
from .streamer import Streamer
|
||||
from .snapshoter import Snapshoter
|
||||
from .ocr import Ocr
|
||||
from .switch import Switch
|
||||
|
||||
from .api.auth import AuthApi
|
||||
from .api.auth import check_request_auth
|
||||
@@ -79,6 +78,7 @@ from .api.hid import HidApi
|
||||
from .api.atx import AtxApi
|
||||
from .api.msd import MsdApi
|
||||
from .api.streamer import StreamerApi
|
||||
from .api.switch import SwitchApi
|
||||
from .api.export import ExportApi
|
||||
from .api.redfish import RedfishApi
|
||||
|
||||
@@ -86,68 +86,61 @@ from .api.redfish import RedfishApi
|
||||
# =====
|
||||
class StreamerQualityNotSupported(OperationError):
|
||||
def __init__(self) -> None:
|
||||
super().__init__(Languages().gettext("This streamer does not support quality settings"))
|
||||
super().__init__("This streamer does not support quality settings")
|
||||
|
||||
|
||||
class StreamerResolutionNotSupported(OperationError):
|
||||
def __init__(self) -> None:
|
||||
super().__init__(Languages().gettext("This streamer does not support resolution settings"))
|
||||
super().__init__("This streamer does not support resolution settings")
|
||||
|
||||
|
||||
class StreamerH264NotSupported(OperationError):
|
||||
def __init__(self) -> None:
|
||||
super().__init__(Languages().gettext("This streamer does not support H264"))
|
||||
super().__init__("This streamer does not support H264")
|
||||
|
||||
|
||||
# =====
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _SubsystemEventSource:
|
||||
get_state: (Callable[[], Coroutine[Any, Any, dict]] | None) = None
|
||||
poll_state: (Callable[[], AsyncGenerator[dict, None]] | None) = None
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class _Subsystem:
|
||||
name: str
|
||||
sysprep: (Callable[[], None] | None)
|
||||
systask: (Callable[[], Coroutine[Any, Any, None]] | None)
|
||||
cleanup: (Callable[[], Coroutine[Any, Any, dict]] | None)
|
||||
sources: dict[str, _SubsystemEventSource]
|
||||
name: str
|
||||
event_type: str
|
||||
sysprep: (Callable[[], None] | None)
|
||||
systask: (Callable[[], Coroutine[Any, Any, None]] | None)
|
||||
cleanup: (Callable[[], Coroutine[Any, Any, dict]] | None)
|
||||
trigger_state: (Callable[[], Coroutine[Any, Any, None]] | None) = None
|
||||
poll_state: (Callable[[], AsyncGenerator[dict, None]] | None) = None
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if self.event_type:
|
||||
assert self.trigger_state
|
||||
assert self.poll_state
|
||||
|
||||
@classmethod
|
||||
def make(cls, obj: object, name: str, event_type: str="") -> "_Subsystem":
|
||||
if isinstance(obj, BasePlugin):
|
||||
name = f"{name} ({obj.get_plugin_name()})"
|
||||
sub = _Subsystem(
|
||||
return _Subsystem(
|
||||
name=name,
|
||||
event_type=event_type,
|
||||
sysprep=getattr(obj, "sysprep", None),
|
||||
systask=getattr(obj, "systask", None),
|
||||
cleanup=getattr(obj, "cleanup", None),
|
||||
sources={},
|
||||
trigger_state=getattr(obj, "trigger_state", None),
|
||||
poll_state=getattr(obj, "poll_state", None),
|
||||
)
|
||||
if event_type:
|
||||
sub.add_source(
|
||||
event_type=event_type,
|
||||
get_state=getattr(obj, "get_state", None),
|
||||
poll_state=getattr(obj, "poll_state", None),
|
||||
)
|
||||
return sub
|
||||
|
||||
def add_source(
|
||||
self,
|
||||
event_type: str,
|
||||
get_state: (Callable[[], Coroutine[Any, Any, dict]] | None),
|
||||
poll_state: (Callable[[], AsyncGenerator[dict, None]] | None),
|
||||
) -> "_Subsystem":
|
||||
|
||||
assert event_type
|
||||
assert event_type not in self.sources, (self, event_type)
|
||||
assert get_state or poll_state, (self, event_type)
|
||||
self.sources[event_type] = _SubsystemEventSource(get_state, poll_state)
|
||||
return self
|
||||
|
||||
|
||||
class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-instance-attributes
|
||||
__EV_GPIO_STATE = "gpio"
|
||||
__EV_HID_STATE = "hid"
|
||||
__EV_HID_KEYMAPS_STATE = "hid_keymaps" # FIXME
|
||||
__EV_ATX_STATE = "atx"
|
||||
__EV_MSD_STATE = "msd"
|
||||
__EV_STREAMER_STATE = "streamer"
|
||||
__EV_OCR_STATE = "ocr"
|
||||
__EV_INFO_STATE = "info"
|
||||
__EV_SWITCH_STATE = "switch"
|
||||
|
||||
def __init__( # pylint: disable=too-many-arguments,too-many-locals
|
||||
self,
|
||||
auth_manager: AuthManager,
|
||||
@@ -155,6 +148,7 @@ class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-ins
|
||||
log_reader: (LogReader | None),
|
||||
user_gpio: UserGpio,
|
||||
ocr: Ocr,
|
||||
switch: Switch,
|
||||
|
||||
hid: BaseHid,
|
||||
atx: BaseAtx,
|
||||
@@ -163,9 +157,6 @@ class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-ins
|
||||
snapshoter: Snapshoter,
|
||||
|
||||
keymap_path: str,
|
||||
ignore_keys: list[str],
|
||||
mouse_x_range: tuple[int, int],
|
||||
mouse_y_range: tuple[int, int],
|
||||
|
||||
stream_forever: bool,
|
||||
) -> None:
|
||||
@@ -179,8 +170,7 @@ class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-ins
|
||||
|
||||
self.__stream_forever = stream_forever
|
||||
|
||||
self.__hid_api = HidApi(hid, keymap_path, ignore_keys, mouse_x_range, mouse_y_range) # Ugly hack to get keymaps state
|
||||
self.__streamer_api = StreamerApi(streamer, ocr) # Same hack to get ocr langs state
|
||||
self.__hid_api = HidApi(hid, keymap_path) # Ugly hack to get keymaps state
|
||||
self.__apis: list[object] = [
|
||||
self,
|
||||
AuthApi(auth_manager),
|
||||
@@ -190,43 +180,40 @@ class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-ins
|
||||
self.__hid_api,
|
||||
AtxApi(atx),
|
||||
MsdApi(msd),
|
||||
self.__streamer_api,
|
||||
StreamerApi(streamer, ocr),
|
||||
SwitchApi(switch),
|
||||
ExportApi(info_manager, atx, user_gpio),
|
||||
RedfishApi(info_manager, atx),
|
||||
]
|
||||
|
||||
self.__subsystems = [
|
||||
_Subsystem.make(auth_manager, "Auth manager"),
|
||||
_Subsystem.make(user_gpio, "User-GPIO", "gpio_state").add_source("gpio_model_state", user_gpio.get_model, None),
|
||||
_Subsystem.make(hid, "HID", "hid_state").add_source("hid_keymaps_state", self.__hid_api.get_keymaps, None),
|
||||
_Subsystem.make(atx, "ATX", "atx_state"),
|
||||
_Subsystem.make(msd, "MSD", "msd_state"),
|
||||
_Subsystem.make(streamer, "Streamer", "streamer_state").add_source("streamer_ocr_state", self.__streamer_api.get_ocr, None),
|
||||
*[
|
||||
_Subsystem.make(info_manager.get_submanager(sub), f"Info manager ({sub})", f"info_{sub}_state",)
|
||||
for sub in sorted(info_manager.get_subs())
|
||||
],
|
||||
_Subsystem.make(user_gpio, "User-GPIO", self.__EV_GPIO_STATE),
|
||||
_Subsystem.make(hid, "HID", self.__EV_HID_STATE),
|
||||
_Subsystem.make(atx, "ATX", self.__EV_ATX_STATE),
|
||||
_Subsystem.make(msd, "MSD", self.__EV_MSD_STATE),
|
||||
_Subsystem.make(streamer, "Streamer", self.__EV_STREAMER_STATE),
|
||||
_Subsystem.make(ocr, "OCR", self.__EV_OCR_STATE),
|
||||
_Subsystem.make(info_manager, "Info manager", self.__EV_INFO_STATE),
|
||||
_Subsystem.make(switch, "Switch", self.__EV_SWITCH_STATE),
|
||||
]
|
||||
|
||||
self.__streamer_notifier = aiotools.AioNotifier()
|
||||
self.__reset_streamer = False
|
||||
self.__new_streamer_params: dict = {}
|
||||
|
||||
self.gettext=Languages().gettext
|
||||
|
||||
# ===== STREAMER CONTROLLER
|
||||
|
||||
@exposed_http("POST", "/streamer/set_params")
|
||||
async def __streamer_set_params_handler(self, request: Request) -> Response:
|
||||
async def __streamer_set_params_handler(self, req: Request) -> Response:
|
||||
current_params = self.__streamer.get_params()
|
||||
for (name, validator, exc_cls) in [
|
||||
("quality", valid_stream_quality, StreamerQualityNotSupported),
|
||||
("desired_fps", valid_stream_fps, None),
|
||||
("resolution", valid_stream_resolution, StreamerResolutionNotSupported),
|
||||
("quality", valid_stream_quality, StreamerQualityNotSupported),
|
||||
("desired_fps", valid_stream_fps, None),
|
||||
("resolution", valid_stream_resolution, StreamerResolutionNotSupported),
|
||||
("h264_bitrate", valid_stream_h264_bitrate, StreamerH264NotSupported),
|
||||
("h264_gop", valid_stream_h264_gop, StreamerH264NotSupported),
|
||||
("h264_gop", valid_stream_h264_gop, StreamerH264NotSupported),
|
||||
]:
|
||||
value = request.query.get(name)
|
||||
value = req.query.get(name)
|
||||
if value:
|
||||
if name not in current_params:
|
||||
assert exc_cls is not None, name
|
||||
@@ -246,24 +233,21 @@ class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-ins
|
||||
# ===== WEBSOCKET
|
||||
|
||||
@exposed_http("GET", "/ws")
|
||||
async def __ws_handler(self, request: Request) -> WebSocketResponse:
|
||||
stream = valid_bool(request.query.get("stream", True))
|
||||
async with self._ws_session(request, stream=stream) as ws:
|
||||
states = [
|
||||
(event_type, src.get_state())
|
||||
for sub in self.__subsystems
|
||||
for (event_type, src) in sub.sources.items()
|
||||
if src.get_state
|
||||
]
|
||||
events = dict(zip(
|
||||
map(operator.itemgetter(0), states),
|
||||
await asyncio.gather(*map(operator.itemgetter(1), states)),
|
||||
))
|
||||
await asyncio.gather(*[
|
||||
ws.send_event(event_type, events.pop(event_type))
|
||||
for (event_type, _) in states
|
||||
])
|
||||
await ws.send_event("loop", {})
|
||||
async def __ws_handler(self, req: Request) -> WebSocketResponse:
|
||||
stream = valid_bool(req.query.get("stream", True))
|
||||
async with self._ws_session(req, stream=stream) as ws:
|
||||
(major, minor) = __version__.split(".")
|
||||
await ws.send_event("loop", {
|
||||
"version": {
|
||||
"major": int(major),
|
||||
"minor": int(minor),
|
||||
},
|
||||
})
|
||||
for sub in self.__subsystems:
|
||||
if sub.event_type:
|
||||
assert sub.trigger_state
|
||||
await sub.trigger_state()
|
||||
await self._broadcast_ws_event(self.__EV_HID_KEYMAPS_STATE, await self.__hid_api.get_keymaps()) # FIXME
|
||||
return (await self._ws_loop(ws))
|
||||
|
||||
@exposed_ws("ping")
|
||||
@@ -279,45 +263,45 @@ class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-ins
|
||||
aioproc.rename_process("main")
|
||||
super().run(**kwargs)
|
||||
|
||||
async def _check_request_auth(self, exposed: HttpExposed, request: Request) -> None:
|
||||
await check_request_auth(self.__auth_manager, exposed, request)
|
||||
async def _check_request_auth(self, exposed: HttpExposed, req: Request) -> None:
|
||||
await check_request_auth(self.__auth_manager, exposed, req)
|
||||
|
||||
async def _init_app(self) -> None:
|
||||
aiotools.create_deadly_task("Stream controller", self.__stream_controller())
|
||||
for sub in self.__subsystems:
|
||||
if sub.systask:
|
||||
aiotools.create_deadly_task(sub.name, sub.systask())
|
||||
for (event_type, src) in sub.sources.items():
|
||||
if src.poll_state:
|
||||
aiotools.create_deadly_task(f"{sub.name} [poller]", self.__poll_state(event_type, src.poll_state()))
|
||||
if sub.event_type:
|
||||
assert sub.poll_state
|
||||
aiotools.create_deadly_task(f"{sub.name} [poller]", self.__poll_state(sub.event_type, sub.poll_state()))
|
||||
aiotools.create_deadly_task("Stream snapshoter", self.__stream_snapshoter())
|
||||
self._add_exposed(*self.__apis)
|
||||
|
||||
async def _on_shutdown(self) -> None:
|
||||
logger = get_logger(0)
|
||||
logger.info(self.gettext("Waiting short tasks ..."))
|
||||
logger.info("Waiting short tasks ...")
|
||||
await aiotools.wait_all_short_tasks()
|
||||
logger.info(self.gettext("Stopping system tasks ..."))
|
||||
logger.info("Stopping system tasks ...")
|
||||
await aiotools.stop_all_deadly_tasks()
|
||||
logger.info(self.gettext("Disconnecting clients ..."))
|
||||
logger.info("Disconnecting clients ...")
|
||||
await self._close_all_wss()
|
||||
logger.info(self.gettext("On-Shutdown complete"))
|
||||
logger.info("On-Shutdown complete")
|
||||
|
||||
async def _on_cleanup(self) -> None:
|
||||
logger = get_logger(0)
|
||||
for sub in self.__subsystems:
|
||||
if sub.cleanup:
|
||||
logger.info(self.gettext("Cleaning up %s ..."), sub.name)
|
||||
logger.info("Cleaning up %s ...", sub.name)
|
||||
try:
|
||||
await sub.cleanup() # type: ignore
|
||||
except Exception:
|
||||
logger.exception(self.gettext("Cleanup error on %s"), sub.name)
|
||||
logger.info(self.gettext("On-Cleanup complete"))
|
||||
logger.exception("Cleanup error on %s", sub.name)
|
||||
logger.info("On-Cleanup complete")
|
||||
|
||||
async def _on_ws_opened(self) -> None:
|
||||
async def _on_ws_opened(self, _: WsSession) -> None:
|
||||
self.__streamer_notifier.notify()
|
||||
|
||||
async def _on_ws_closed(self) -> None:
|
||||
async def _on_ws_closed(self, _: WsSession) -> None:
|
||||
self.__hid.clear_events()
|
||||
self.__streamer_notifier.notify()
|
||||
|
||||
@@ -351,12 +335,12 @@ class KvmdServer(HttpServer): # pylint: disable=too-many-arguments,too-many-ins
|
||||
prev = cur
|
||||
await self.__streamer_notifier.wait()
|
||||
|
||||
async def __poll_state(self, event_type: str, poller: AsyncGenerator[dict, None]) -> None:
|
||||
async for state in poller:
|
||||
await self._broadcast_ws_event(event_type, state)
|
||||
|
||||
async def __stream_snapshoter(self) -> None:
|
||||
await self.__snapshoter.run(
|
||||
is_live=self.__has_stream_clients,
|
||||
notifier=self.__streamer_notifier,
|
||||
)
|
||||
|
||||
async def __poll_state(self, event_type: str, poller: AsyncGenerator[dict, None]) -> None:
|
||||
async for state in poller:
|
||||
await self._broadcast_ws_event(event_type, state)
|
||||
|
||||
@@ -123,10 +123,10 @@ class Snapshoter: # pylint: disable=too-many-instance-attributes
|
||||
|
||||
if self.__wakeup_key:
|
||||
logger.info("Waking up using key %r ...", self.__wakeup_key)
|
||||
self.__hid.send_key_events([
|
||||
(self.__wakeup_key, True),
|
||||
(self.__wakeup_key, False),
|
||||
])
|
||||
await self.__hid.send_key_events(
|
||||
keys=[(self.__wakeup_key, True), (self.__wakeup_key, False)],
|
||||
no_ignore_keys=True,
|
||||
)
|
||||
|
||||
if self.__wakeup_move:
|
||||
logger.info("Waking up using mouse move for %d units ...", self.__wakeup_move)
|
||||
|
||||
@@ -20,24 +20,23 @@
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
import io
|
||||
import signal
|
||||
import asyncio
|
||||
import asyncio.subprocess
|
||||
import dataclasses
|
||||
import functools
|
||||
import copy
|
||||
|
||||
from typing import AsyncGenerator
|
||||
from typing import Any
|
||||
|
||||
import aiohttp
|
||||
|
||||
from PIL import Image as PilImage
|
||||
|
||||
from ...languages import Languages
|
||||
|
||||
from ...logging import get_logger
|
||||
|
||||
from ...clients.streamer import StreamerSnapshot
|
||||
from ...clients.streamer import HttpStreamerClient
|
||||
from ...clients.streamer import HttpStreamerClientSession
|
||||
|
||||
from ... import tools
|
||||
from ... import aiotools
|
||||
from ... import aioproc
|
||||
@@ -45,40 +44,6 @@ from ... import htclient
|
||||
|
||||
|
||||
# =====
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class StreamerSnapshot:
|
||||
online: bool
|
||||
width: int
|
||||
height: int
|
||||
headers: tuple[tuple[str, str], ...]
|
||||
data: bytes
|
||||
|
||||
async def make_preview(self, max_width: int, max_height: int, quality: int) -> bytes:
|
||||
assert max_width >= 0
|
||||
assert max_height >= 0
|
||||
assert quality > 0
|
||||
|
||||
if max_width == 0 and max_height == 0:
|
||||
max_width = self.width // 5
|
||||
max_height = self.height // 5
|
||||
else:
|
||||
max_width = min((max_width or self.width), self.width)
|
||||
max_height = min((max_height or self.height), self.height)
|
||||
|
||||
if (max_width, max_height) == (self.width, self.height):
|
||||
return self.data
|
||||
return (await aiotools.run_async(self.__inner_make_preview, max_width, max_height, quality))
|
||||
|
||||
@functools.lru_cache(maxsize=1)
|
||||
def __inner_make_preview(self, max_width: int, max_height: int, quality: int) -> bytes:
|
||||
with io.BytesIO(self.data) as snapshot_bio:
|
||||
with io.BytesIO() as preview_bio:
|
||||
with PilImage.open(snapshot_bio) as image:
|
||||
image.thumbnail((max_width, max_height), PilImage.Resampling.LANCZOS)
|
||||
image.save(preview_bio, format="jpeg", quality=quality)
|
||||
return preview_bio.getvalue()
|
||||
|
||||
|
||||
class _StreamerParams:
|
||||
__DESIRED_FPS = "desired_fps"
|
||||
|
||||
@@ -138,7 +103,7 @@ class _StreamerParams:
|
||||
}
|
||||
|
||||
def get_limits(self) -> dict:
|
||||
limits = dict(self.__limits)
|
||||
limits = copy.deepcopy(self.__limits)
|
||||
if self.__has_resolution:
|
||||
limits[self.__AVAILABLE_RESOLUTIONS] = list(limits[self.__AVAILABLE_RESOLUTIONS])
|
||||
return limits
|
||||
@@ -172,6 +137,11 @@ class _StreamerParams:
|
||||
|
||||
|
||||
class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
__ST_FULL = 0xFF
|
||||
__ST_PARAMS = 0x01
|
||||
__ST_STREAMER = 0x02
|
||||
__ST_SNAPSHOT = 0x04
|
||||
|
||||
def __init__( # pylint: disable=too-many-arguments,too-many-locals
|
||||
self,
|
||||
|
||||
@@ -205,7 +175,6 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
self.__state_poll = state_poll
|
||||
|
||||
self.__unix_path = unix_path
|
||||
self.__timeout = timeout
|
||||
self.__snapshot_timeout = snapshot_timeout
|
||||
|
||||
self.__process_name_prefix = process_name_prefix
|
||||
@@ -222,15 +191,18 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
self.__streamer_task: (asyncio.Task | None) = None
|
||||
self.__streamer_proc: (asyncio.subprocess.Process | None) = None # pylint: disable=no-member
|
||||
|
||||
self.__http_session: (aiohttp.ClientSession | None) = None
|
||||
self.__client = HttpStreamerClient(
|
||||
name="jpeg",
|
||||
unix_path=self.__unix_path,
|
||||
timeout=timeout,
|
||||
user_agent=htclient.make_user_agent("KVMD"),
|
||||
)
|
||||
self.__client_session: (HttpStreamerClientSession | None) = None
|
||||
|
||||
self.__snapshot: (StreamerSnapshot | None) = None
|
||||
|
||||
self.__notifier = aiotools.AioNotifier()
|
||||
|
||||
self.gettext=Languages().gettext
|
||||
|
||||
|
||||
# =====
|
||||
|
||||
@aiotools.atomic_fg
|
||||
@@ -242,15 +214,15 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
if not self.__stop_wip:
|
||||
self.__stop_task.cancel()
|
||||
await asyncio.gather(self.__stop_task, return_exceptions=True)
|
||||
logger.info(self.gettext("Streamer stop cancelled"))
|
||||
logger.info("Streamer stop cancelled")
|
||||
return
|
||||
else:
|
||||
await asyncio.gather(self.__stop_task, return_exceptions=True)
|
||||
|
||||
if reset and self.__reset_delay > 0:
|
||||
logger.info(self.gettext("Waiting %.2f seconds for reset delay ..."), self.__reset_delay)
|
||||
logger.info("Waiting %.2f seconds for reset delay ...", self.__reset_delay)
|
||||
await asyncio.sleep(self.__reset_delay)
|
||||
logger.info(self.gettext("Starting streamer ..."))
|
||||
logger.info("Starting streamer ...")
|
||||
await self.__inner_start()
|
||||
|
||||
@aiotools.atomic_fg
|
||||
@@ -263,12 +235,12 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
if not self.__stop_wip:
|
||||
self.__stop_task.cancel()
|
||||
await asyncio.gather(self.__stop_task, return_exceptions=True)
|
||||
logger.info(self.gettext("Stopping streamer immediately ..."))
|
||||
logger.info("Stopping streamer immediately ...")
|
||||
await self.__inner_stop()
|
||||
else:
|
||||
await asyncio.gather(self.__stop_task, return_exceptions=True)
|
||||
else:
|
||||
logger.info(self.gettext("Stopping streamer immediately ..."))
|
||||
logger.info("Stopping streamer immediately ...")
|
||||
await self.__inner_stop()
|
||||
|
||||
elif not self.__stop_task:
|
||||
@@ -277,13 +249,13 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
try:
|
||||
await asyncio.sleep(self.__shutdown_delay)
|
||||
self.__stop_wip = True
|
||||
logger.info(self.gettext("Stopping streamer after delay ..."))
|
||||
logger.info("Stopping streamer after delay ...")
|
||||
await self.__inner_stop()
|
||||
finally:
|
||||
self.__stop_task = None
|
||||
self.__stop_wip = False
|
||||
|
||||
logger.info(self.gettext("Planning to stop streamer in %.2f seconds ..."), self.__shutdown_delay)
|
||||
logger.info("Planning to stop streamer in %.2f seconds ...", self.__shutdown_delay)
|
||||
self.__stop_task = asyncio.create_task(delayed_stop())
|
||||
|
||||
def is_working(self) -> bool:
|
||||
@@ -294,6 +266,7 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
|
||||
def set_params(self, params: dict) -> None:
|
||||
assert not self.__streamer_task
|
||||
self.__notifier.notify(self.__ST_PARAMS)
|
||||
return self.__params.set_params(params)
|
||||
|
||||
def get_params(self) -> dict:
|
||||
@@ -302,55 +275,80 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
# =====
|
||||
|
||||
async def get_state(self) -> dict:
|
||||
streamer_state = None
|
||||
return {
|
||||
"features": self.__params.get_features(),
|
||||
"limits": self.__params.get_limits(),
|
||||
"params": self.__params.get_params(),
|
||||
"streamer": (await self.__get_streamer_state()),
|
||||
"snapshot": self.__get_snapshot_state(),
|
||||
}
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
self.__notifier.notify(self.__ST_FULL)
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[dict, None]:
|
||||
# ==== Granularity table ====
|
||||
# - features -- Full
|
||||
# - limits -- Partial, paired with params
|
||||
# - params -- Partial, paired with limits
|
||||
# - streamer -- Partial, nullable
|
||||
# - snapshot -- Partial
|
||||
# ===========================
|
||||
|
||||
def signal_handler(*_: Any) -> None:
|
||||
get_logger(0).info("Got SIGUSR2, checking the stream state ...")
|
||||
self.__notifier.notify(self.__ST_STREAMER)
|
||||
|
||||
get_logger(0).info("Installing SIGUSR2 streamer handler ...")
|
||||
asyncio.get_event_loop().add_signal_handler(signal.SIGUSR2, signal_handler)
|
||||
|
||||
prev: dict = {}
|
||||
while True:
|
||||
new: dict = {}
|
||||
|
||||
mask = await self.__notifier.wait(timeout=self.__state_poll)
|
||||
if mask == self.__ST_FULL:
|
||||
new = await self.get_state()
|
||||
prev = copy.deepcopy(new)
|
||||
yield new
|
||||
continue
|
||||
|
||||
if mask < 0:
|
||||
mask = self.__ST_STREAMER
|
||||
|
||||
def check_update(key: str, value: (dict | None)) -> None:
|
||||
if prev.get(key) != value:
|
||||
new[key] = value
|
||||
|
||||
if mask & self.__ST_PARAMS:
|
||||
check_update("params", self.__params.get_params())
|
||||
if mask & self.__ST_STREAMER:
|
||||
check_update("streamer", await self.__get_streamer_state())
|
||||
if mask & self.__ST_SNAPSHOT:
|
||||
check_update("snapshot", self.__get_snapshot_state())
|
||||
|
||||
if new and prev != new:
|
||||
prev.update(copy.deepcopy(new))
|
||||
yield new
|
||||
|
||||
async def __get_streamer_state(self) -> (dict | None):
|
||||
if self.__streamer_task:
|
||||
session = self.__ensure_http_session()
|
||||
session = self.__ensure_client_session()
|
||||
try:
|
||||
async with session.get(self.__make_url("state")) as response:
|
||||
htclient.raise_not_200(response)
|
||||
streamer_state = (await response.json())["result"]
|
||||
return (await session.get_state())
|
||||
except (aiohttp.ClientConnectionError, aiohttp.ServerConnectionError):
|
||||
pass
|
||||
except Exception:
|
||||
get_logger().exception(self.gettext("Invalid streamer response from /state"))
|
||||
get_logger().exception("Invalid streamer response from /state")
|
||||
return None
|
||||
|
||||
snapshot: (dict | None) = None
|
||||
def __get_snapshot_state(self) -> dict:
|
||||
if self.__snapshot:
|
||||
snapshot = dataclasses.asdict(self.__snapshot)
|
||||
del snapshot["headers"]
|
||||
del snapshot["data"]
|
||||
|
||||
return {
|
||||
"limits": self.__params.get_limits(),
|
||||
"params": self.__params.get_params(),
|
||||
"snapshot": {"saved": snapshot},
|
||||
"streamer": streamer_state,
|
||||
"features": self.__params.get_features(),
|
||||
}
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[dict, None]:
|
||||
def signal_handler(*_: Any) -> None:
|
||||
get_logger(0).info(self.gettext("Got SIGUSR2, checking the stream state ..."))
|
||||
self.__notifier.notify()
|
||||
|
||||
get_logger(0).info(self.gettext("Installing SIGUSR2 streamer handler ..."))
|
||||
asyncio.get_event_loop().add_signal_handler(signal.SIGUSR2, signal_handler)
|
||||
|
||||
waiter_task: (asyncio.Task | None) = None
|
||||
prev_state: dict = {}
|
||||
while True:
|
||||
state = await self.get_state()
|
||||
if state != prev_state:
|
||||
yield state
|
||||
prev_state = state
|
||||
|
||||
if waiter_task is None:
|
||||
waiter_task = asyncio.create_task(self.__notifier.wait())
|
||||
if waiter_task in (await aiotools.wait_first(
|
||||
asyncio.ensure_future(asyncio.sleep(self.__state_poll)),
|
||||
waiter_task,
|
||||
))[0]:
|
||||
waiter_task = None
|
||||
return {"saved": snapshot}
|
||||
return {"saved": None}
|
||||
|
||||
# =====
|
||||
|
||||
@@ -358,43 +356,19 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
if load:
|
||||
return self.__snapshot
|
||||
logger = get_logger()
|
||||
session = self.__ensure_http_session()
|
||||
session = self.__ensure_client_session()
|
||||
try:
|
||||
async with session.get(
|
||||
self.__make_url("snapshot"),
|
||||
timeout=self.__snapshot_timeout,
|
||||
) as response:
|
||||
|
||||
htclient.raise_not_200(response)
|
||||
online = (response.headers["X-UStreamer-Online"] == "true")
|
||||
if online or allow_offline:
|
||||
snapshot = StreamerSnapshot(
|
||||
online=online,
|
||||
width=int(response.headers["X-UStreamer-Width"]),
|
||||
height=int(response.headers["X-UStreamer-Height"]),
|
||||
headers=tuple(
|
||||
(key, value)
|
||||
for (key, value) in tools.sorted_kvs(dict(response.headers))
|
||||
if key.lower().startswith("x-ustreamer-") or key.lower() in [
|
||||
"x-timestamp",
|
||||
"access-control-allow-origin",
|
||||
"cache-control",
|
||||
"pragma",
|
||||
"expires",
|
||||
]
|
||||
),
|
||||
data=bytes(await response.read()),
|
||||
)
|
||||
if save:
|
||||
self.__snapshot = snapshot
|
||||
self.__notifier.notify()
|
||||
return snapshot
|
||||
logger.error(self.gettext("Stream is offline, no signal or so"))
|
||||
|
||||
except (aiohttp.ClientConnectionError, aiohttp.ServerConnectionError) as err:
|
||||
logger.error(self.gettext("Can't connect to streamer: %s"), tools.efmt(err))
|
||||
snapshot = await session.take_snapshot(self.__snapshot_timeout)
|
||||
if snapshot.online or allow_offline:
|
||||
if save:
|
||||
self.__snapshot = snapshot
|
||||
self.__notifier.notify(self.__ST_SNAPSHOT)
|
||||
return snapshot
|
||||
logger.error("Stream is offline, no signal or so")
|
||||
except (aiohttp.ClientConnectionError, aiohttp.ServerConnectionError) as ex:
|
||||
logger.error("Can't connect to streamer: %s", tools.efmt(ex))
|
||||
except Exception:
|
||||
logger.exception(self.gettext("Invalid streamer response from /snapshot"))
|
||||
logger.exception("Invalid streamer response from /snapshot")
|
||||
return None
|
||||
|
||||
def remove_snapshot(self) -> None:
|
||||
@@ -405,25 +379,14 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
@aiotools.atomic_fg
|
||||
async def cleanup(self) -> None:
|
||||
await self.ensure_stop(immediately=True)
|
||||
if self.__http_session:
|
||||
await self.__http_session.close()
|
||||
self.__http_session = None
|
||||
if self.__client_session:
|
||||
await self.__client_session.close()
|
||||
self.__client_session = None
|
||||
|
||||
# =====
|
||||
|
||||
def __ensure_http_session(self) -> aiohttp.ClientSession:
|
||||
if not self.__http_session:
|
||||
kwargs: dict = {
|
||||
"headers": {"User-Agent": htclient.make_user_agent("KVMD")},
|
||||
"connector": aiohttp.UnixConnector(path=self.__unix_path),
|
||||
"timeout": aiohttp.ClientTimeout(total=self.__timeout),
|
||||
}
|
||||
self.__http_session = aiohttp.ClientSession(**kwargs)
|
||||
return self.__http_session
|
||||
|
||||
def __make_url(self, handle: str) -> str:
|
||||
assert not handle.startswith("/"), handle
|
||||
return f"http://localhost:0/{handle}"
|
||||
def __ensure_client_session(self) -> HttpStreamerClientSession:
|
||||
if not self.__client_session:
|
||||
self.__client_session = self.__client.make_session()
|
||||
return self.__client_session
|
||||
|
||||
# =====
|
||||
|
||||
@@ -451,14 +414,14 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
await self.__start_streamer_proc()
|
||||
assert self.__streamer_proc is not None
|
||||
await aioproc.log_stdout_infinite(self.__streamer_proc, logger)
|
||||
raise RuntimeError(self.gettext("Streamer unexpectedly died"))
|
||||
raise RuntimeError("Streamer unexpectedly died")
|
||||
except asyncio.CancelledError:
|
||||
break
|
||||
except Exception:
|
||||
if self.__streamer_proc:
|
||||
logger.exception(self.gettext("Unexpected streamer error: pid=%d"), self.__streamer_proc.pid)
|
||||
logger.exception("Unexpected streamer error: pid=%d", self.__streamer_proc.pid)
|
||||
else:
|
||||
logger.exception(self.gettext("Can't start streamer"))
|
||||
logger.exception("Can't start streamer")
|
||||
await self.__kill_streamer_proc()
|
||||
await asyncio.sleep(1)
|
||||
|
||||
@@ -478,14 +441,14 @@ class Streamer: # pylint: disable=too-many-instance-attributes
|
||||
logger.info("%s: %s", name, tools.cmdfmt(cmd))
|
||||
try:
|
||||
await aioproc.log_process(cmd, logger, prefix=name)
|
||||
except Exception as err:
|
||||
logger.exception(self.gettext("Can't execute command: %s"), err)
|
||||
except Exception as ex:
|
||||
logger.exception("Can't execute command: %s", ex)
|
||||
|
||||
async def __start_streamer_proc(self) -> None:
|
||||
assert self.__streamer_proc is None
|
||||
cmd = self.__make_cmd(self.__cmd)
|
||||
self.__streamer_proc = await aioproc.run_process(cmd)
|
||||
get_logger(0).info(self.gettext("Started streamer pid=%d: %s"), self.__streamer_proc.pid, tools.cmdfmt(cmd))
|
||||
get_logger(0).info("Started streamer pid=%d: %s", self.__streamer_proc.pid, tools.cmdfmt(cmd))
|
||||
|
||||
async def __kill_streamer_proc(self) -> None:
|
||||
if self.__streamer_proc:
|
||||
|
||||
400
kvmd/apps/kvmd/switch/__init__.py
Normal file
400
kvmd/apps/kvmd/switch/__init__.py
Normal file
@@ -0,0 +1,400 @@
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from .lib import OperationError
|
||||
from .lib import get_logger
|
||||
from .lib import aiotools
|
||||
from .lib import Inotify
|
||||
|
||||
from .types import Edid
|
||||
from .types import Edids
|
||||
from .types import Color
|
||||
from .types import Colors
|
||||
from .types import PortNames
|
||||
from .types import AtxClickPowerDelays
|
||||
from .types import AtxClickPowerLongDelays
|
||||
from .types import AtxClickResetDelays
|
||||
|
||||
from .chain import DeviceFoundEvent
|
||||
from .chain import ChainTruncatedEvent
|
||||
from .chain import PortActivatedEvent
|
||||
from .chain import UnitStateEvent
|
||||
from .chain import UnitAtxLedsEvent
|
||||
from .chain import Chain
|
||||
|
||||
from .state import StateCache
|
||||
|
||||
from .storage import Storage
|
||||
|
||||
|
||||
# =====
|
||||
class SwitchError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class SwitchOperationError(OperationError, SwitchError):
|
||||
pass
|
||||
|
||||
|
||||
class SwitchUnknownEdidError(SwitchOperationError):
|
||||
def __init__(self) -> None:
|
||||
super().__init__("No specified EDID ID found")
|
||||
|
||||
|
||||
# =====
|
||||
class Switch: # pylint: disable=too-many-public-methods
|
||||
__X_EDIDS = "edids"
|
||||
__X_COLORS = "colors"
|
||||
__X_PORT_NAMES = "port_names"
|
||||
__X_ATX_CP_DELAYS = "atx_cp_delays"
|
||||
__X_ATX_CPL_DELAYS = "atx_cpl_delays"
|
||||
__X_ATX_CR_DELAYS = "atx_cr_delays"
|
||||
|
||||
__X_ALL = frozenset([
|
||||
__X_EDIDS, __X_COLORS, __X_PORT_NAMES,
|
||||
__X_ATX_CP_DELAYS, __X_ATX_CPL_DELAYS, __X_ATX_CR_DELAYS,
|
||||
])
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
device_path: str,
|
||||
default_edid_path: str,
|
||||
pst_unix_path: str,
|
||||
) -> None:
|
||||
|
||||
self.__default_edid_path = default_edid_path
|
||||
|
||||
self.__chain = Chain(device_path)
|
||||
self.__cache = StateCache()
|
||||
self.__storage = Storage(pst_unix_path)
|
||||
|
||||
self.__lock = asyncio.Lock()
|
||||
|
||||
self.__save_notifier = aiotools.AioNotifier()
|
||||
|
||||
# =====
|
||||
|
||||
def __x_set_edids(self, edids: Edids, save: bool=True) -> None:
|
||||
self.__chain.set_edids(edids)
|
||||
self.__cache.set_edids(edids)
|
||||
if save:
|
||||
self.__save_notifier.notify()
|
||||
|
||||
def __x_set_colors(self, colors: Colors, save: bool=True) -> None:
|
||||
self.__chain.set_colors(colors)
|
||||
self.__cache.set_colors(colors)
|
||||
if save:
|
||||
self.__save_notifier.notify()
|
||||
|
||||
def __x_set_port_names(self, port_names: PortNames, save: bool=True) -> None:
|
||||
self.__cache.set_port_names(port_names)
|
||||
if save:
|
||||
self.__save_notifier.notify()
|
||||
|
||||
def __x_set_atx_cp_delays(self, delays: AtxClickPowerDelays, save: bool=True) -> None:
|
||||
self.__cache.set_atx_cp_delays(delays)
|
||||
if save:
|
||||
self.__save_notifier.notify()
|
||||
|
||||
def __x_set_atx_cpl_delays(self, delays: AtxClickPowerLongDelays, save: bool=True) -> None:
|
||||
self.__cache.set_atx_cpl_delays(delays)
|
||||
if save:
|
||||
self.__save_notifier.notify()
|
||||
|
||||
def __x_set_atx_cr_delays(self, delays: AtxClickResetDelays, save: bool=True) -> None:
|
||||
self.__cache.set_atx_cr_delays(delays)
|
||||
if save:
|
||||
self.__save_notifier.notify()
|
||||
|
||||
# =====
|
||||
|
||||
async def set_active_port(self, port: int) -> None:
|
||||
self.__chain.set_active_port(port)
|
||||
|
||||
# =====
|
||||
|
||||
async def set_port_beacon(self, port: int, on: bool) -> None:
|
||||
self.__chain.set_port_beacon(port, on)
|
||||
|
||||
async def set_uplink_beacon(self, unit: int, on: bool) -> None:
|
||||
self.__chain.set_uplink_beacon(unit, on)
|
||||
|
||||
async def set_downlink_beacon(self, unit: int, on: bool) -> None:
|
||||
self.__chain.set_downlink_beacon(unit, on)
|
||||
|
||||
# =====
|
||||
|
||||
async def atx_power_on(self, port: int) -> None:
|
||||
self.__inner_atx_cp(port, False, self.__X_ATX_CP_DELAYS)
|
||||
|
||||
async def atx_power_off(self, port: int) -> None:
|
||||
self.__inner_atx_cp(port, True, self.__X_ATX_CP_DELAYS)
|
||||
|
||||
async def atx_power_off_hard(self, port: int) -> None:
|
||||
self.__inner_atx_cp(port, True, self.__X_ATX_CPL_DELAYS)
|
||||
|
||||
async def atx_power_reset_hard(self, port: int) -> None:
|
||||
self.__inner_atx_cr(port, True)
|
||||
|
||||
async def atx_click_power(self, port: int) -> None:
|
||||
self.__inner_atx_cp(port, None, self.__X_ATX_CP_DELAYS)
|
||||
|
||||
async def atx_click_power_long(self, port: int) -> None:
|
||||
self.__inner_atx_cp(port, None, self.__X_ATX_CPL_DELAYS)
|
||||
|
||||
async def atx_click_reset(self, port: int) -> None:
|
||||
self.__inner_atx_cr(port, None)
|
||||
|
||||
def __inner_atx_cp(self, port: int, if_powered: (bool | None), x_delay: str) -> None:
|
||||
assert x_delay in [self.__X_ATX_CP_DELAYS, self.__X_ATX_CPL_DELAYS]
|
||||
delay = getattr(self.__cache, f"get_{x_delay}")()[port]
|
||||
self.__chain.click_power(port, delay, if_powered)
|
||||
|
||||
def __inner_atx_cr(self, port: int, if_powered: (bool | None)) -> None:
|
||||
delay = self.__cache.get_atx_cr_delays()[port]
|
||||
self.__chain.click_reset(port, delay, if_powered)
|
||||
|
||||
# =====
|
||||
|
||||
async def create_edid(self, name: str, data_hex: str) -> str:
|
||||
async with self.__lock:
|
||||
edids = self.__cache.get_edids()
|
||||
edid_id = edids.add(Edid.from_data(name, data_hex))
|
||||
self.__x_set_edids(edids)
|
||||
return edid_id
|
||||
|
||||
async def change_edid(
|
||||
self,
|
||||
edid_id: str,
|
||||
name: (str | None)=None,
|
||||
data_hex: (str | None)=None,
|
||||
) -> None:
|
||||
|
||||
assert edid_id != Edids.DEFAULT_ID
|
||||
async with self.__lock:
|
||||
edids = self.__cache.get_edids()
|
||||
if not edids.has_id(edid_id):
|
||||
raise SwitchUnknownEdidError()
|
||||
old = edids.get(edid_id)
|
||||
name = (name or old.name)
|
||||
data_hex = (data_hex or old.as_text())
|
||||
edids.set(edid_id, Edid.from_data(name, data_hex))
|
||||
self.__x_set_edids(edids)
|
||||
|
||||
async def remove_edid(self, edid_id: str) -> None:
|
||||
assert edid_id != Edids.DEFAULT_ID
|
||||
async with self.__lock:
|
||||
edids = self.__cache.get_edids()
|
||||
if not edids.has_id(edid_id):
|
||||
raise SwitchUnknownEdidError()
|
||||
edids.remove(edid_id)
|
||||
self.__x_set_edids(edids)
|
||||
|
||||
# =====
|
||||
|
||||
async def set_colors(self, **values: str) -> None:
|
||||
async with self.__lock:
|
||||
old = self.__cache.get_colors()
|
||||
new = {}
|
||||
for role in Colors.ROLES:
|
||||
if role in values:
|
||||
if values[role] != "default":
|
||||
new[role] = Color.from_text(values[role])
|
||||
# else reset to default
|
||||
else:
|
||||
new[role] = getattr(old, role)
|
||||
self.__x_set_colors(Colors(**new)) # type: ignore
|
||||
|
||||
# =====
|
||||
|
||||
async def set_port_params(
|
||||
self,
|
||||
port: int,
|
||||
edid_id: (str | None)=None,
|
||||
name: (str | None)=None,
|
||||
atx_click_power_delay: (float | None)=None,
|
||||
atx_click_power_long_delay: (float | None)=None,
|
||||
atx_click_reset_delay: (float | None)=None,
|
||||
) -> None:
|
||||
|
||||
async with self.__lock:
|
||||
if edid_id is not None:
|
||||
edids = self.__cache.get_edids()
|
||||
if not edids.has_id(edid_id):
|
||||
raise SwitchUnknownEdidError()
|
||||
edids.assign(port, edid_id)
|
||||
self.__x_set_edids(edids)
|
||||
|
||||
for (key, value) in [
|
||||
(self.__X_PORT_NAMES, name),
|
||||
(self.__X_ATX_CP_DELAYS, atx_click_power_delay),
|
||||
(self.__X_ATX_CPL_DELAYS, atx_click_power_long_delay),
|
||||
(self.__X_ATX_CR_DELAYS, atx_click_reset_delay),
|
||||
]:
|
||||
if value is not None:
|
||||
new = getattr(self.__cache, f"get_{key}")()
|
||||
new[port] = (value or None) # None == reset to default
|
||||
getattr(self, f"_Switch__x_set_{key}")(new)
|
||||
|
||||
# =====
|
||||
|
||||
async def reboot_unit(self, unit: int, bootloader: bool) -> None:
|
||||
self.__chain.reboot_unit(unit, bootloader)
|
||||
|
||||
# =====
|
||||
|
||||
async def get_state(self) -> dict:
|
||||
return self.__cache.get_state()
|
||||
|
||||
async def trigger_state(self) -> None:
|
||||
await self.__cache.trigger_state()
|
||||
|
||||
async def poll_state(self) -> AsyncGenerator[dict, None]:
|
||||
async for state in self.__cache.poll_state():
|
||||
yield state
|
||||
|
||||
# =====
|
||||
|
||||
async def systask(self) -> None:
|
||||
tasks = [
|
||||
asyncio.create_task(self.__systask_events()),
|
||||
asyncio.create_task(self.__systask_default_edid()),
|
||||
asyncio.create_task(self.__systask_storage()),
|
||||
]
|
||||
try:
|
||||
await asyncio.gather(*tasks)
|
||||
except Exception:
|
||||
for task in tasks:
|
||||
task.cancel()
|
||||
await asyncio.gather(*tasks, return_exceptions=True)
|
||||
raise
|
||||
|
||||
async def __systask_events(self) -> None:
|
||||
async for event in self.__chain.poll_events():
|
||||
match event:
|
||||
case DeviceFoundEvent():
|
||||
await self.__load_configs()
|
||||
case ChainTruncatedEvent():
|
||||
self.__cache.truncate(event.units)
|
||||
case PortActivatedEvent():
|
||||
self.__cache.update_active_port(event.port)
|
||||
case UnitStateEvent():
|
||||
self.__cache.update_unit_state(event.unit, event.state)
|
||||
case UnitAtxLedsEvent():
|
||||
self.__cache.update_unit_atx_leds(event.unit, event.atx_leds)
|
||||
|
||||
async def __load_configs(self) -> None:
|
||||
async with self.__lock:
|
||||
try:
|
||||
async with self.__storage.readable() as ctx:
|
||||
values = {
|
||||
key: await getattr(ctx, f"read_{key}")()
|
||||
for key in self.__X_ALL
|
||||
}
|
||||
data_hex = await aiotools.read_file(self.__default_edid_path)
|
||||
values["edids"].set_default(data_hex)
|
||||
except Exception:
|
||||
get_logger(0).exception("Can't load configs")
|
||||
else:
|
||||
for (key, value) in values.items():
|
||||
func = getattr(self, f"_Switch__x_set_{key}")
|
||||
if isinstance(value, tuple):
|
||||
func(*value, save=False)
|
||||
else:
|
||||
func(value, save=False)
|
||||
self.__chain.set_actual(True)
|
||||
|
||||
async def __systask_default_edid(self) -> None:
|
||||
logger = get_logger(0)
|
||||
async for _ in self.__poll_default_edid():
|
||||
async with self.__lock:
|
||||
edids = self.__cache.get_edids()
|
||||
try:
|
||||
data_hex = await aiotools.read_file(self.__default_edid_path)
|
||||
edids.set_default(data_hex)
|
||||
except Exception:
|
||||
logger.exception("Can't read default EDID, ignoring ...")
|
||||
else:
|
||||
self.__x_set_edids(edids, save=False)
|
||||
|
||||
async def __poll_default_edid(self) -> AsyncGenerator[None, None]:
|
||||
logger = get_logger(0)
|
||||
while True:
|
||||
while not os.path.exists(self.__default_edid_path):
|
||||
await asyncio.sleep(5)
|
||||
try:
|
||||
with Inotify() as inotify:
|
||||
await inotify.watch_all_changes(self.__default_edid_path)
|
||||
if os.path.islink(self.__default_edid_path):
|
||||
await inotify.watch_all_changes(os.path.realpath(self.__default_edid_path))
|
||||
yield None
|
||||
while True:
|
||||
need_restart = False
|
||||
need_notify = False
|
||||
for event in (await inotify.get_series(timeout=1)):
|
||||
need_notify = True
|
||||
if event.restart:
|
||||
logger.warning("Got fatal inotify event: %s; reinitializing ...", event)
|
||||
need_restart = True
|
||||
break
|
||||
if need_restart:
|
||||
break
|
||||
if need_notify:
|
||||
yield None
|
||||
except Exception:
|
||||
logger.exception("Unexpected watcher error")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
async def __systask_storage(self) -> None:
|
||||
# При остановке KVMD можем не успеть записать, ну да пофиг
|
||||
prevs = dict.fromkeys(self.__X_ALL)
|
||||
while True:
|
||||
await self.__save_notifier.wait()
|
||||
while (await self.__save_notifier.wait(5)):
|
||||
pass
|
||||
while True:
|
||||
try:
|
||||
async with self.__lock:
|
||||
write = {
|
||||
key: new
|
||||
for (key, old) in prevs.items()
|
||||
if (new := getattr(self.__cache, f"get_{key}")()) != old
|
||||
}
|
||||
if write:
|
||||
async with self.__storage.writable() as ctx:
|
||||
for (key, new) in write.items():
|
||||
func = getattr(ctx, f"write_{key}")
|
||||
if isinstance(new, tuple):
|
||||
await func(*new)
|
||||
else:
|
||||
await func(new)
|
||||
prevs[key] = new
|
||||
except Exception:
|
||||
get_logger(0).exception("Unexpected storage error")
|
||||
await asyncio.sleep(5)
|
||||
else:
|
||||
break
|
||||
440
kvmd/apps/kvmd/switch/chain.py
Normal file
440
kvmd/apps/kvmd/switch/chain.py
Normal file
@@ -0,0 +1,440 @@
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
import multiprocessing
|
||||
import queue
|
||||
import select
|
||||
import dataclasses
|
||||
import time
|
||||
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from .lib import get_logger
|
||||
from .lib import tools
|
||||
from .lib import aiotools
|
||||
from .lib import aioproc
|
||||
|
||||
from .types import Edids
|
||||
from .types import Colors
|
||||
|
||||
from .proto import Response
|
||||
from .proto import UnitState
|
||||
from .proto import UnitAtxLeds
|
||||
|
||||
from .device import Device
|
||||
from .device import DeviceError
|
||||
|
||||
|
||||
# =====
|
||||
class _BaseCmd:
|
||||
pass
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CmdSetActual(_BaseCmd):
|
||||
actual: bool
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CmdSetActivePort(_BaseCmd):
|
||||
port: int
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
assert self.port >= 0
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CmdSetPortBeacon(_BaseCmd):
|
||||
port: int
|
||||
on: bool
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CmdSetUnitBeacon(_BaseCmd):
|
||||
unit: int
|
||||
on: bool
|
||||
downlink: bool
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CmdSetEdids(_BaseCmd):
|
||||
edids: Edids
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CmdSetColors(_BaseCmd):
|
||||
colors: Colors
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class _CmdAtxClick(_BaseCmd):
|
||||
port: int
|
||||
delay: float
|
||||
reset: bool
|
||||
if_powered: (bool | None)
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
assert self.port >= 0
|
||||
assert 0.001 <= self.delay <= (0xFFFF / 1000)
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
class _CmdRebootUnit(_BaseCmd):
    """Command: reboot a unit, optionally into its bootloader."""

    unit: int         # unit index in the chain
    bootloader: bool  # True -> reboot into the bootloader

    def __post_init__(self) -> None:
        assert self.unit >= 0
|
||||
|
||||
|
||||
class _UnitContext:
    """Tracked state of a single unit in the switch chain.

    Besides the last reported state/LEDs, it remembers the request id (rid)
    of an in-flight change request; the rid auto-expires after a timeout so
    a lost response can't block the unit forever.
    """

    __RID_TTL = 5.0  # seconds before a pending request id expires

    def __init__(self) -> None:
        # None until the first report from the unit arrives
        self.state: (UnitState | None) = None
        self.atx_leds: (UnitAtxLeds | None) = None
        self.__req_id = -1        # rid of an in-flight change, -1 if none
        self.__expires_ts = -1.0  # monotonic deadline for that rid

    def can_be_changed(self) -> bool:
        """True when the unit reported state, is not busy and has no pending change."""
        if self.state is None:
            return False
        if self.state.flags.changing_busy:
            return False
        return self.changing_rid < 0

    # =====

    @property
    def changing_rid(self) -> int:
        """Request id of the pending change, or -1 (auto-expires after __RID_TTL)."""
        if 0 <= self.__expires_ts < time.monotonic():
            # The request timed out without a matching response
            self.__req_id = -1
            self.__expires_ts = -1
        return self.__req_id

    @changing_rid.setter
    def changing_rid(self, rid: int) -> None:
        self.__req_id = rid
        if rid >= 0:
            self.__expires_ts = time.monotonic() + self.__RID_TTL
        else:
            self.__expires_ts = -1

    # =====

    def is_atx_allowed(self, ch: int) -> tuple[bool, bool]:
        """Return (allowed, power_led) for the given channel."""
        if self.state is None or self.atx_leds is None:
            return (False, False)
        allowed = not self.state.atx_busy[ch]
        powered = self.atx_leds.power[ch]
        return (allowed, powered)
|
||||
|
||||
|
||||
# =====
|
||||
class BaseEvent:
    """Marker base class for events yielded by Chain.poll_events()."""
    pass
|
||||
|
||||
|
||||
class DeviceFoundEvent(BaseEvent):
    """The switch device node appeared and was opened successfully."""
    pass
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
class ChainTruncatedEvent(BaseEvent):
    """The chain shrank to this many units (0 = no device at all)."""

    units: int  # new number of units in the chain
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
class PortActivatedEvent(BaseEvent):
    """The given virtual port became the active one."""

    port: int
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
class UnitStateEvent(BaseEvent):
    """A unit reported its general state."""

    unit: int  # unit index in the chain
    state: UnitState
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
class UnitAtxLedsEvent(BaseEvent):
    """A unit reported its ATX LED states."""

    unit: int  # unit index in the chain
    atx_leds: UnitAtxLeds
|
||||
|
||||
|
||||
# =====
|
||||
class Chain:  # pylint: disable=too-many-instance-attributes
    """Asynchronous controller for a chain of switch units on one serial device.

    Public methods are non-blocking: they only put command objects on an
    internal queue. All device I/O runs in a worker subprocess (started by
    poll_events()), which pushes BaseEvent objects back via an events queue.
    """

    def __init__(self, device_path: str) -> None:
        self.__device = Device(device_path)

        self.__actual = False  # sync permission flag, see set_actual()

        self.__edids = Edids()  # desired per-port EDIDs

        self.__colors = Colors()  # desired LED colors

        self.__units: list[_UnitContext] = []  # tracked state per unit
        self.__active_port = -1  # selected virtual port; -1 = not known yet

        self.__cmd_queue: "multiprocessing.Queue[_BaseCmd]" = multiprocessing.Queue()
        self.__events_queue: "multiprocessing.Queue[BaseEvent]" = multiprocessing.Queue()

        self.__stop_event = multiprocessing.Event()

    def set_actual(self, actual: bool) -> None:
        # Permission flag for syncing EDIDs and other sensitive things
        self.__queue_cmd(_CmdSetActual(actual))

    # =====

    def set_active_port(self, port: int) -> None:
        """Request switching the chain to the given virtual port."""
        self.__queue_cmd(_CmdSetActivePort(port))

    # =====

    def set_port_beacon(self, port: int, on: bool) -> None:
        """Toggle the beacon LED of a single port."""
        self.__queue_cmd(_CmdSetPortBeacon(port, on))

    def set_uplink_beacon(self, unit: int, on: bool) -> None:
        """Toggle the uplink beacon LED of a unit."""
        self.__queue_cmd(_CmdSetUnitBeacon(unit, on, downlink=False))

    def set_downlink_beacon(self, unit: int, on: bool) -> None:
        """Toggle the downlink beacon LED of a unit."""
        self.__queue_cmd(_CmdSetUnitBeacon(unit, on, downlink=True))

    # =====

    def set_edids(self, edids: Edids) -> None:
        """Replace the desired EDID set."""
        self.__queue_cmd(_CmdSetEdids(edids))  # Will be copied because of multiprocessing.Queue()

    def set_colors(self, colors: Colors) -> None:
        """Replace the desired LED colors."""
        self.__queue_cmd(_CmdSetColors(colors))

    # =====

    def click_power(self, port: int, delay: float, if_powered: (bool | None)) -> None:
        """Click the ATX power button on the port; delay is the press time in seconds."""
        self.__queue_cmd(_CmdAtxClick(port, delay, reset=False, if_powered=if_powered))

    def click_reset(self, port: int, delay: float, if_powered: (bool | None)) -> None:
        """Click the ATX reset button on the port; delay is the press time in seconds."""
        self.__queue_cmd(_CmdAtxClick(port, delay, reset=True, if_powered=if_powered))

    # =====

    def reboot_unit(self, unit: int, bootloader: bool) -> None:
        """Reboot a unit, optionally into its bootloader."""
        self.__queue_cmd(_CmdRebootUnit(unit, bootloader))

    # =====

    async def poll_events(self) -> AsyncGenerator[BaseEvent, None]:
        """Start the worker subprocess and yield its events forever.

        When the generator is closed, the subprocess is signaled to stop
        and joined.
        """
        proc = multiprocessing.Process(target=self.__subprocess, daemon=True)
        try:
            proc.start()
            while True:
                try:
                    # The short timeout keeps this loop responsive to cancellation
                    yield (await aiotools.run_async(self.__events_queue.get, True, 0.1))
                except queue.Empty:
                    pass
        finally:
            if proc.is_alive():
                self.__stop_event.set()
            if proc.is_alive() or proc.exitcode is not None:
                await aiotools.run_async(proc.join)

    # =====

    def __queue_cmd(self, cmd: _BaseCmd) -> None:
        # Drop commands once shutdown has begun
        if not self.__stop_event.is_set():
            self.__cmd_queue.put_nowait(cmd)

    def __queue_event(self, event: BaseEvent) -> None:
        # Drop events once shutdown has begun
        if not self.__stop_event.is_set():
            self.__events_queue.put_nowait(event)

    def __subprocess(self) -> None:
        """Worker process entry point: (re)connect to the device forever."""
        logger = aioproc.settle("Switch", "switch")
        no_device_reported = False
        while True:
            try:
                if self.__device.has_device():
                    no_device_reported = False
                    with self.__device:
                        logger.info("Switch found")
                        self.__queue_event(DeviceFoundEvent())
                        self.__main_loop()
                elif not no_device_reported:
                    # Report the empty chain once until the device reappears
                    self.__queue_event(ChainTruncatedEvent(0))
                    logger.info("Switch is missing")
                    no_device_reported = True
            except DeviceError as ex:
                logger.error("%s", tools.efmt(ex))
            except Exception:
                logger.exception("Unexpected error in the Switch loop")
            # Stale commands are useless without a connected device
            tools.clear_queue(self.__cmd_queue)
            if self.__stop_event.is_set():
                break
            time.sleep(1)

    def __main_loop(self) -> None:
        """Poll the device, apply commands and sync the config until stopped."""
        self.__device.request_state()
        self.__device.request_atx_leds()
        while not self.__stop_event.is_set():
            if self.__select():
                for resp in self.__device.read_all():
                    self.__update_units(resp)
                    self.__adjust_start_port()
                    self.__finish_changing_request(resp)
            self.__consume_commands()
            self.__ensure_config()

    def __select(self) -> bool:
        """Wait up to 1s for data on the device fd or the command queue pipe."""
        try:
            return bool(select.select([
                self.__device.get_fd(),
                self.__cmd_queue._reader,  # type: ignore # pylint: disable=protected-access
            ], [], [], 1)[0])
        except Exception as ex:
            raise DeviceError(ex)

    def __consume_commands(self) -> None:
        """Apply all pending commands from the public API."""
        while not self.__cmd_queue.empty():
            cmd = self.__cmd_queue.get()
            match cmd:
                case _CmdSetActual():
                    self.__actual = cmd.actual

                case _CmdSetActivePort():
                    # May also be triggered internally while syncing
                    # (see __adjust_start_port())
                    self.__active_port = cmd.port
                    self.__queue_event(PortActivatedEvent(self.__active_port))

                case _CmdSetPortBeacon():
                    (unit, ch) = self.get_real_unit_channel(cmd.port)
                    self.__device.request_beacon(unit, ch, cmd.on)

                case _CmdSetUnitBeacon():
                    ch = (4 if cmd.downlink else 5)
                    self.__device.request_beacon(cmd.unit, ch, cmd.on)

                case _CmdAtxClick():
                    (unit, ch) = self.get_real_unit_channel(cmd.port)
                    if unit < len(self.__units):
                        (allowed, powered) = self.__units[unit].is_atx_allowed(ch)
                        if allowed and (cmd.if_powered is None or cmd.if_powered == powered):
                            delay_ms = min(int(cmd.delay * 1000), 0xFFFF)
                            if cmd.reset:
                                self.__device.request_atx_cr(unit, ch, delay_ms)
                            else:
                                self.__device.request_atx_cp(unit, ch, delay_ms)

                case _CmdSetEdids():
                    self.__edids = cmd.edids

                case _CmdSetColors():
                    self.__colors = cmd.colors

                case _CmdRebootUnit():
                    self.__device.request_reboot(cmd.unit, cmd.bootloader)

    def __update_units(self, resp: Response) -> None:
        """Integrate one unit response into the tracked chain state."""
        units = resp.header.unit + 1
        # Grow the chain up to the reporting unit
        while len(self.__units) < units:
            self.__units.append(_UnitContext())

        match resp.body:
            case UnitState():
                if not resp.body.flags.has_downlink and len(self.__units) > units:
                    # The unit reports no downlink: drop everything after it
                    del self.__units[units:]
                    self.__queue_event(ChainTruncatedEvent(units))
                self.__units[resp.header.unit].state = resp.body
                self.__queue_event(UnitStateEvent(resp.header.unit, resp.body))

            case UnitAtxLeds():
                self.__units[resp.header.unit].atx_leds = resp.body
                self.__queue_event(UnitAtxLedsEvent(resp.header.unit, resp.body))

    def __adjust_start_port(self) -> None:
        """If no active port is known yet, adopt the first one the chain reports."""
        if self.__active_port < 0:
            for (unit, ctx) in enumerate(self.__units):
                if ctx.state is not None and ctx.state.ch < 4:
                    # Trigger queue select()
                    port = self.get_virtual_port(unit, ctx.state.ch)
                    get_logger().info("Found an active port %d on [%d:%d]: Syncing ...",
                                      port, unit, ctx.state.ch)
                    self.set_active_port(port)
                    break

    def __finish_changing_request(self, resp: Response) -> None:
        # A response with a matching rid closes the unit's pending change request
        if self.__units[resp.header.unit].changing_rid == resp.header.rid:
            self.__units[resp.header.unit].changing_rid = -1

    # =====

    def __ensure_config(self) -> None:
        """Drive every reported unit towards the desired configuration."""
        for (unit, ctx) in enumerate(self.__units):
            if ctx.state is not None:
                self.__ensure_config_port(unit, ctx)
                if self.__actual:
                    self.__ensure_config_edids(unit, ctx)
                    self.__ensure_config_colors(unit, ctx)

    def __ensure_config_port(self, unit: int, ctx: _UnitContext) -> None:
        """Switch the unit's channel to match the active port."""
        assert ctx.state is not None
        if self.__active_port >= 0 and ctx.can_be_changed():
            ch = self.get_unit_target_channel(unit, self.__active_port)
            if ctx.state.ch != ch:
                get_logger().info("Switching for active port %d: [%d:%d] -> [%d:%d] ...",
                                  self.__active_port, unit, ctx.state.ch, unit, ch)
                ctx.changing_rid = self.__device.request_switch(unit, ch)

    def __ensure_config_edids(self, unit: int, ctx: _UnitContext) -> None:
        """Upload the desired EDID to any channel whose current EDID differs."""
        assert self.__actual
        assert ctx.state is not None
        if ctx.can_be_changed():
            for ch in range(4):
                port = self.get_virtual_port(unit, ch)
                edid = self.__edids.get_edid_for_port(port)
                if not ctx.state.compare_edid(ch, edid):
                    get_logger().info("Changing EDID on port %d on [%d:%d]: %d/%d -> %d/%d (%s) ...",
                                      port, unit, ch,
                                      ctx.state.video_crc[ch], ctx.state.video_edid[ch],
                                      edid.crc, edid.valid, edid.name)
                    ctx.changing_rid = self.__device.request_set_edid(unit, ch, edid)
                    break  # Busy globally

    def __ensure_config_colors(self, unit: int, ctx: _UnitContext) -> None:
        """Push the desired colors to each of the 6 'np' LED slots whose CRC differs."""
        assert self.__actual
        assert ctx.state is not None
        for np in range(6):
            if self.__colors.crc != ctx.state.np_crc[np]:
                # get_logger().info("Changing colors on NP [%d:%d]: %d -> %d ...",
                #                   unit, np, ctx.state.np_crc[np], self.__colors.crc)
                self.__device.request_set_colors(unit, np, self.__colors)

    # =====

    @classmethod
    def get_real_unit_channel(cls, port: int) -> tuple[int, int]:
        """Map a virtual port to its (unit, channel) pair (4 channels per unit)."""
        return (port // 4, port % 4)

    @classmethod
    def get_unit_target_channel(cls, unit: int, port: int) -> int:
        """Channel this unit must select for the port: its own channel, or 4 to route downstream."""
        (t_unit, t_ch) = cls.get_real_unit_channel(port)
        if unit != t_unit:
            t_ch = 4
        return t_ch

    @classmethod
    def get_virtual_port(cls, unit: int, ch: int) -> int:
        """Map a (unit, channel) pair back to a virtual port number."""
        return (unit * 4) + ch
|
||||
196
kvmd/apps/kvmd/switch/device.py
Normal file
196
kvmd/apps/kvmd/switch/device.py
Normal file
@@ -0,0 +1,196 @@
|
||||
# ========================================================================== #
|
||||
# #
|
||||
# KVMD - The main PiKVM daemon. #
|
||||
# #
|
||||
# Copyright (C) 2018-2024 Maxim Devaev <mdevaev@gmail.com> #
|
||||
# #
|
||||
# This program is free software: you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation, either version 3 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>. #
|
||||
# #
|
||||
# ========================================================================== #
|
||||
|
||||
|
||||
import os
|
||||
import random
|
||||
import types
|
||||
|
||||
import serial
|
||||
|
||||
from .lib import tools
|
||||
|
||||
from .types import Edid
|
||||
from .types import Colors
|
||||
|
||||
from .proto import Packable
|
||||
from .proto import Request
|
||||
from .proto import Response
|
||||
from .proto import Header
|
||||
|
||||
from .proto import BodySwitch
|
||||
from .proto import BodySetBeacon
|
||||
from .proto import BodyAtxClick
|
||||
from .proto import BodySetEdid
|
||||
from .proto import BodyClearEdid
|
||||
from .proto import BodySetColors
|
||||
|
||||
|
||||
# =====
|
||||
class DeviceError(Exception):
    """Wrapper for any low-level error from the switch serial device."""

    def __init__(self, ex: Exception):
        # Only the formatted message is kept; the cause is not chained here
        super().__init__(tools.efmt(ex))
|
||||
|
||||
|
||||
class Device:
    """Low-level serial transport for the switch protocol.

    Wire framing (see read_all() and __send_request()): each message is
    wrapped as 0xF1 <payload> 0xF2, and payload bytes in 0xF0..0xF2 are
    escaped as 0xF0 followed by (byte ^ 0xFF).
    """

    __SPEED = 115200  # serial baudrate
    __TIMEOUT = 5.0   # serial timeout, seconds

    def __init__(self, device_path: str) -> None:
        self.__device_path = device_path
        # Rolling request id in 1..0xFFFF, started at a random point
        self.__rid = random.randint(1, 0xFFFF)
        self.__tty: (serial.Serial | None) = None
        self.__buf: bytes = b""  # RX accumulator parsed by read_all()

    def __enter__(self) -> "Device":
        """Open the serial port; raises DeviceError on failure."""
        try:
            self.__tty = serial.Serial(
                self.__device_path,
                baudrate=self.__SPEED,
                timeout=self.__TIMEOUT,
            )
        except Exception as ex:
            raise DeviceError(ex)
        return self

    def __exit__(
        self,
        _exc_type: type[BaseException],
        _exc: BaseException,
        _tb: types.TracebackType,
    ) -> None:

        if self.__tty is not None:
            try:
                self.__tty.close()
            except Exception:
                pass  # Best-effort close
            self.__tty = None

    def has_device(self) -> bool:
        """True if the device node currently exists on the filesystem."""
        return os.path.exists(self.__device_path)

    def get_fd(self) -> int:
        """File descriptor of the open serial port (for select())."""
        assert self.__tty is not None
        return self.__tty.fd

    def read_all(self) -> list[Response]:
        """Drain the serial input and return all complete parsed responses.

        Incomplete frames stay buffered for the next call. A frame with a
        stray 0xF1 inside stops parsing until more data arrives.
        Raises DeviceError on serial I/O failure.
        """
        assert self.__tty is not None
        try:
            if not self.__tty.in_waiting:
                return []
            self.__buf += self.__tty.read_all()
        except Exception as ex:
            raise DeviceError(ex)

        results: list[Response] = []
        while True:
            try:
                begin = self.__buf.index(0xF1)  # Frame start marker
            except ValueError:
                break
            try:
                end = self.__buf.index(0xF2, begin)  # Frame end marker
            except ValueError:
                break
            msg = self.__buf[begin + 1:end]
            if 0xF1 in msg:
                # raise RuntimeError(f"Found 0xF1 inside the message: {msg!r}")
                break
            self.__buf = self.__buf[end + 1:]
            msg = self.__unescape(msg)
            resp = Response.unpack(msg)
            if resp is not None:  # None = unparsable/unknown message, skipped
                results.append(resp)
        return results

    def __unescape(self, msg: bytes) -> bytes:
        """Undo the payload escaping: 0xF0 X -> (X ^ 0xFF)."""
        if 0xF0 not in msg:
            return msg  # Fast path: nothing escaped
        unesc: list[int] = []
        esc = False
        for ch in msg:
            if ch == 0xF0:
                esc = True
            else:
                if esc:
                    ch ^= 0xFF
                    esc = False
                unesc.append(ch)
        return bytes(unesc)

    def request_reboot(self, unit: int, bootloader: bool) -> int:
        """Ask a unit to reboot (optionally into the bootloader); returns the rid."""
        return self.__send_request((Header.BOOTLOADER if bootloader else Header.REBOOT), unit, None)

    def request_state(self) -> int:
        """Request a state report (unit 0xFF — presumably all units); returns the rid."""
        return self.__send_request(Header.STATE, 0xFF, None)

    def request_switch(self, unit: int, ch: int) -> int:
        """Ask a unit to switch to the given channel; returns the rid."""
        return self.__send_request(Header.SWITCH, unit, BodySwitch(ch))

    def request_beacon(self, unit: int, ch: int, on: bool) -> int:
        """Toggle a beacon LED on a unit channel; returns the rid."""
        return self.__send_request(Header.BEACON, unit, BodySetBeacon(ch, on))

    def request_atx_leds(self) -> int:
        """Request an ATX LEDs report (unit 0xFF — presumably all units); returns the rid."""
        return self.__send_request(Header.ATX_LEDS, 0xFF, None)

    def request_atx_cp(self, unit: int, ch: int, delay_ms: int) -> int:
        """Click the ATX power button for delay_ms milliseconds; returns the rid."""
        return self.__send_request(Header.ATX_CLICK, unit, BodyAtxClick(ch, BodyAtxClick.POWER, delay_ms))

    def request_atx_cr(self, unit: int, ch: int, delay_ms: int) -> int:
        """Click the ATX reset button for delay_ms milliseconds; returns the rid."""
        return self.__send_request(Header.ATX_CLICK, unit, BodyAtxClick(ch, BodyAtxClick.RESET, delay_ms))

    def request_set_edid(self, unit: int, ch: int, edid: Edid) -> int:
        """Set (or clear, if invalid) the EDID of a unit channel; returns the rid."""
        if edid.valid:
            return self.__send_request(Header.SET_EDID, unit, BodySetEdid(ch, edid))
        return self.__send_request(Header.CLEAR_EDID, unit, BodyClearEdid(ch))

    def request_set_colors(self, unit: int, ch: int, colors: Colors) -> int:
        """Set the LED colors of a unit channel; returns the rid."""
        return self.__send_request(Header.SET_COLORS, unit, BodySetColors(ch, colors))

    def __send_request(self, op: int, unit: int, body: (Packable | None)) -> int:
        """Pack, escape, frame and write one request; returns its rid.

        Raises DeviceError on serial I/O failure.
        """
        assert self.__tty is not None
        req = Request(Header(
            proto=1,
            rid=self.__get_next_rid(),
            op=op,
            unit=unit,
        ), body)
        data: list[int] = [0xF1]  # Frame start
        for ch in req.pack():
            if 0xF0 <= ch <= 0xF2:
                # Escape the framing/escape bytes themselves
                data.append(0xF0)
                ch ^= 0xFF
            data.append(ch)
        data.append(0xF2)  # Frame end
        try:
            self.__tty.write(bytes(data))
            self.__tty.flush()
        except Exception as ex:
            raise DeviceError(ex)
        return req.header.rid

    def __get_next_rid(self) -> int:
        """Return the current rid and advance it, wrapping from 0xFFFF back to 1."""
        rid = self.__rid
        self.__rid += 1
        if self.__rid > 0xFFFF:
            self.__rid = 1
        return rid
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user