mirror of
https://github.com/1Panel-dev/MaxKB.git
synced 2025-12-26 01:33:05 +00:00
Compare commits
1096 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
847755b1c2 | ||
|
|
b57455d0ee | ||
|
|
2a257edff9 | ||
|
|
d47699331c | ||
|
|
90c64d77dd | ||
|
|
e1ada3ffe2 | ||
|
|
b62c79fda6 | ||
|
|
3d9e7dd4b1 | ||
|
|
8ff15865a7 | ||
|
|
48899d55d1 | ||
|
|
1cc4107bfe | ||
|
|
b13cd03706 | ||
|
|
69f024492b | ||
|
|
a9c46cd7e0 | ||
|
|
a9e9f5b085 | ||
|
|
e12b1fe14e | ||
|
|
7ce66a7bf3 | ||
|
|
decd3395db | ||
|
|
9d7a383348 | ||
|
|
8f7d91798b | ||
|
|
81a3af2c8b | ||
|
|
2ec0d22b14 | ||
|
|
27a77dc657 | ||
|
|
187e9c1e4e | ||
|
|
e5bab10824 | ||
|
|
347f4a0b03 | ||
|
|
289ebf42a6 | ||
|
|
71fdce08d7 | ||
|
|
adc5af9cef | ||
|
|
ce2ab322f6 | ||
|
|
a7e31b94c7 | ||
|
|
8498687794 | ||
|
|
190ca3e198 | ||
|
|
c1ddec1a61 | ||
|
|
1ba8077e95 | ||
|
|
9a42bd2302 | ||
|
|
35b662a52d | ||
|
|
a4faf52261 | ||
|
|
a30316d87a | ||
|
|
a4d10cbe3b | ||
|
|
ceccf9f1fa | ||
|
|
3964db20dc | ||
|
|
57ada0708f | ||
|
|
a1a92a833a | ||
|
|
5e0d8048f9 | ||
|
|
8903b35aec | ||
|
|
fa4f7e99fd | ||
|
|
b0630b3ddd | ||
|
|
b2bf69740c | ||
|
|
8cf66b9eca | ||
|
|
1db8577ca6 | ||
|
|
949e4dea9e | ||
|
|
c12988bc8a | ||
|
|
2728453f6c | ||
|
|
ccf43bbcd9 | ||
|
|
8d8de53e38 | ||
|
|
7faf556771 | ||
|
|
76ec8ad6f6 | ||
|
|
0cf05a76a0 | ||
|
|
357edbfbe0 | ||
|
|
0609a9afc8 | ||
|
|
96e59a018f | ||
|
|
79b2de8893 | ||
|
|
0c7cca035e | ||
|
|
00a3e5ddc3 | ||
|
|
a6533c0db7 | ||
|
|
704077d066 | ||
|
|
59ee0c1270 | ||
|
|
b37cc3ba1c | ||
|
|
dfe6d0a91b | ||
|
|
5813eedd4f | ||
|
|
363150380d | ||
|
|
47f9c04664 | ||
|
|
17cd88edda | ||
|
|
d33b620dc8 | ||
|
|
b95cb20704 | ||
|
|
4ff1944b60 | ||
|
|
1c6b0f8a86 | ||
|
|
d85801fe58 | ||
|
|
e79e7d505d | ||
|
|
33b1cd65b0 | ||
|
|
8d503c8bf8 | ||
|
|
df7f922013 | ||
|
|
a0541203e4 | ||
|
|
fb64731cd8 | ||
|
|
d960a18711 | ||
|
|
ee35cc21e9 | ||
|
|
e4a60daa17 | ||
|
|
7bcb770ee5 | ||
|
|
b5b09dc8b4 | ||
|
|
b1f6092620 | ||
|
|
c8441cfd73 | ||
|
|
7e4b147576 | ||
|
|
9cd082089a | ||
|
|
d4541e23f9 | ||
|
|
5e7e91cced | ||
|
|
d9787bb548 | ||
|
|
131b5b3bbe | ||
|
|
e58f95832b | ||
|
|
f24337d5f3 | ||
|
|
d6f1d25b59 | ||
|
|
0ec198fa43 | ||
|
|
77e96624ee | ||
|
|
5e02809db2 | ||
|
|
6fe001fcf8 | ||
|
|
f646102262 | ||
|
|
b97f4e16ba | ||
|
|
0c14306889 | ||
|
|
f1d043f67b | ||
|
|
c39a6e81d7 | ||
|
|
9c56c7e198 | ||
|
|
6484fef8ea | ||
|
|
8a194481ac | ||
|
|
072b817792 | ||
|
|
d32f7d36a6 | ||
|
|
54c9d4e725 | ||
|
|
d2637c3de2 | ||
|
|
2550324003 | ||
|
|
bf52dd8174 | ||
|
|
2ecec57d2f | ||
|
|
b5fda0e020 | ||
|
|
45a60cd9a7 | ||
|
|
2b82675853 | ||
|
|
1d3bf1ca73 | ||
|
|
04cb9c96fe | ||
|
|
45d8ac2eee | ||
|
|
eb60331c88 | ||
|
|
8fc9b0a22d | ||
|
|
ec5fd9e343 | ||
|
|
1e49939c38 | ||
|
|
5a7b23aa00 | ||
|
|
791505b7b8 | ||
|
|
da10649adb | ||
|
|
a025e3960d | ||
|
|
98ed348de9 | ||
|
|
b9dcc36b31 | ||
|
|
7a3d3844ae | ||
|
|
6ea2cf149a | ||
|
|
c30677d8b0 | ||
|
|
a1a2fb5628 | ||
|
|
f9cb0e24d6 | ||
|
|
2dc42183cb | ||
|
|
c781c11d26 | ||
|
|
e178cfe5c0 | ||
|
|
3b24373cd0 | ||
|
|
125ed8aa7a | ||
|
|
0b60a03e5d | ||
|
|
bb3f17ebfe | ||
|
|
ce7efd4758 | ||
|
|
1695710cbe | ||
|
|
b0366b18b6 | ||
|
|
a4f27249ed | ||
|
|
ebe8506c67 | ||
|
|
5243e42100 | ||
|
|
e5738f3b31 | ||
|
|
3c3bd9884f | ||
|
|
0213aff12a | ||
|
|
8dc793a128 | ||
|
|
0861eb4cdc | ||
|
|
d78c1459b7 | ||
|
|
f188383fea | ||
|
|
7421caba9b | ||
|
|
bbab359813 | ||
|
|
deb3844b4c | ||
|
|
26f36ccee1 | ||
|
|
8381ca5287 | ||
|
|
e8c1cdf959 | ||
|
|
675d2366da | ||
|
|
bbb63a5928 | ||
|
|
f5282bf1e7 | ||
|
|
4ae02c8d3e | ||
|
|
c0ffc0aaf5 | ||
|
|
3557ea50fa | ||
|
|
43702e42b8 | ||
|
|
7a3a975645 | ||
|
|
9da7a553bf | ||
|
|
2d6d16e046 | ||
|
|
919a3eee5d | ||
|
|
1a704f1c25 | ||
|
|
7ca0a7bd02 | ||
|
|
2681d02728 | ||
|
|
5e43bb9d2a | ||
|
|
ecd5fafbaa | ||
|
|
23b47657c0 | ||
|
|
76d050bea4 | ||
|
|
add9d1bab8 | ||
|
|
9c36d8f30a | ||
|
|
189e2a6c63 | ||
|
|
867c53984b | ||
|
|
560890f717 | ||
|
|
675adeeb63 | ||
|
|
6bc00eb869 | ||
|
|
1eccb54199 | ||
|
|
86e11baeb2 | ||
|
|
69ae1cafab | ||
|
|
9d6451b95b | ||
|
|
add1cba8cb | ||
|
|
927f0c784a | ||
|
|
ac5a9d01a8 | ||
|
|
7b213f547d | ||
|
|
7e4e2e98bb | ||
|
|
21d2a44090 | ||
|
|
e364d6e373 | ||
|
|
15feca802a | ||
|
|
2686e76c8a | ||
|
|
6cf91098d6 | ||
|
|
678a5ae4a5 | ||
|
|
a71c844ef4 | ||
|
|
74d10b61bc | ||
|
|
27bc01d442 | ||
|
|
bd900118f4 | ||
|
|
44c1d35b1f | ||
|
|
f6994e16b9 | ||
|
|
6d7b5eb219 | ||
|
|
27d4603b02 | ||
|
|
339e18d837 | ||
|
|
fb0fdb9c85 | ||
|
|
5b2baaf04d | ||
|
|
739b977ce3 | ||
|
|
0315fe91df | ||
|
|
02611588fc | ||
|
|
2991f0b640 | ||
|
|
6fde8ec80f | ||
|
|
081fcab7eb | ||
|
|
0e98c7783b | ||
|
|
812dc142c8 | ||
|
|
26946d0afb | ||
|
|
ec6657177a | ||
|
|
4b9cecd4d1 | ||
|
|
06867d33cb | ||
|
|
b53a933327 | ||
|
|
596b13711f | ||
|
|
e7c3169898 | ||
|
|
2612894557 | ||
|
|
7afc1da0af | ||
|
|
6aa0e9b5e4 | ||
|
|
24763427eb | ||
|
|
da03442be2 | ||
|
|
9750c6d605 | ||
|
|
a2b6620b10 | ||
|
|
b0a4e9e78f | ||
|
|
36809b3314 | ||
|
|
165c27164e | ||
|
|
1566ae7fbe | ||
|
|
b3feb243d3 | ||
|
|
cb104cc211 | ||
|
|
7a99c78840 | ||
|
|
a07df46f9d | ||
|
|
4fa3fec103 | ||
|
|
a2265cf357 | ||
|
|
0b1990f8b3 | ||
|
|
bf91579b4e | ||
|
|
cc2789f37f | ||
|
|
3fe47e0fb3 | ||
|
|
46e464b126 | ||
|
|
54c4293482 | ||
|
|
a799026d52 | ||
|
|
5ceb8a7ff9 | ||
|
|
e7e4570aeb | ||
|
|
f69346f2bc | ||
|
|
dcc80a4dca | ||
|
|
2fe4248785 | ||
|
|
bdaeb1bec4 | ||
|
|
89be3e317d | ||
|
|
2b079a4144 | ||
|
|
4ffec85e9b | ||
|
|
bd098e68e5 | ||
|
|
d282795644 | ||
|
|
5ba802482f | ||
|
|
378de21fa2 | ||
|
|
5eee6bfb6c | ||
|
|
d47295aa36 | ||
|
|
0a3b9ee02b | ||
|
|
97fb4a5cea | ||
|
|
80cd3ff7ec | ||
|
|
c66f79ad5a | ||
|
|
3797613182 | ||
|
|
7145f303da | ||
|
|
754f7cb87c | ||
|
|
8a7e41be61 | ||
|
|
532ea4941a | ||
|
|
286552d54b | ||
|
|
f82ba3c4b8 | ||
|
|
0e29ce28cf | ||
|
|
eed4d6857e | ||
|
|
601b03d84e | ||
|
|
8252febe22 | ||
|
|
1a57e5897d | ||
|
|
1e8e3a90aa | ||
|
|
2971406909 | ||
|
|
db772b1d1c | ||
|
|
ca12d653a6 | ||
|
|
a0ee5c9441 | ||
|
|
b1aa1f7a53 | ||
|
|
579604bd81 | ||
|
|
a303f24974 | ||
|
|
fadbd2fde0 | ||
|
|
bd7dbb13f3 | ||
|
|
394d96980b | ||
|
|
d27e5a4c01 | ||
|
|
a6703c9889 | ||
|
|
545e80d655 | ||
|
|
436e43dd04 | ||
|
|
b8562ec736 | ||
|
|
1b6b021226 | ||
|
|
55cdd0a708 | ||
|
|
f9d536f5a2 | ||
|
|
ba4e55d3e8 | ||
|
|
3594fdadfa | ||
|
|
83140b5f1d | ||
|
|
26cb55adfb | ||
|
|
440e2ba695 | ||
|
|
f19316639e | ||
|
|
564e781ba2 | ||
|
|
9b46d29e73 | ||
|
|
6630589e8e | ||
|
|
2f20868ca6 | ||
|
|
64df9cf437 | ||
|
|
66b84a77b9 | ||
|
|
d058d6d176 | ||
|
|
5f289885f7 | ||
|
|
82b06d130a | ||
|
|
0f0b6b976e | ||
|
|
f681cb9b23 | ||
|
|
ee5c8a455d | ||
|
|
b01172b242 | ||
|
|
19b9e52a45 | ||
|
|
563516f835 | ||
|
|
2d6ac806ff | ||
|
|
f9c4e96f97 | ||
|
|
66f674f651 | ||
|
|
98ef6e5775 | ||
|
|
ad452afa52 | ||
|
|
0a148bff4b | ||
|
|
9189a2ff2b | ||
|
|
c0255beca2 | ||
|
|
c526a27796 | ||
|
|
13ce966caa | ||
|
|
dcee1b6d55 | ||
|
|
0ce6dd0795 | ||
|
|
259d1c872b | ||
|
|
e7c2c9710a | ||
|
|
0eebbb094c | ||
|
|
2faabbe392 | ||
|
|
e22fc95ee9 | ||
|
|
72398408c5 | ||
|
|
18e4647211 | ||
|
|
642a284b33 | ||
|
|
3951a15e9d | ||
|
|
27561410e5 | ||
|
|
47849fc1a5 | ||
|
|
f19ad24907 | ||
|
|
5baa141b89 | ||
|
|
ea0ab5e3d2 | ||
|
|
e995a663c8 | ||
|
|
4a681297e0 | ||
|
|
a65fc39ae4 | ||
|
|
bcd41d0c19 | ||
|
|
96562b9f16 | ||
|
|
470105f895 | ||
|
|
7b51d08ea5 | ||
|
|
9e62e81158 | ||
|
|
b7accc54e7 | ||
|
|
4b4b84a220 | ||
|
|
5ec94860b2 | ||
|
|
263c18ebca | ||
|
|
4f06cfe1ab | ||
|
|
8b52927b4f | ||
|
|
1a7f484a62 | ||
|
|
aab6a2cdf3 | ||
|
|
3709d34343 | ||
|
|
bf6cfface6 | ||
|
|
075646481d | ||
|
|
f6586a481b | ||
|
|
a3cd92c503 | ||
|
|
11f63b4556 | ||
|
|
000a3970e0 | ||
|
|
be31989ab9 | ||
|
|
460de60019 | ||
|
|
3a9780dc4f | ||
|
|
9620817a8f | ||
|
|
03a705fc93 | ||
|
|
d7ef3ba67b | ||
|
|
80e7dac0c8 | ||
|
|
03274d9ee5 | ||
|
|
6a8d2c1f9c | ||
|
|
510fae6bf1 | ||
|
|
ad0b032384 | ||
|
|
a09f5c0577 | ||
|
|
b8960d57c8 | ||
|
|
f6e089daee | ||
|
|
7bd1dfbdaa | ||
|
|
175a80191e | ||
|
|
518202ae0e | ||
|
|
f1a1c40724 | ||
|
|
e11c550fc2 | ||
|
|
82b566d580 | ||
|
|
8b9998a53d | ||
|
|
96ab89adf3 | ||
|
|
d99c9ad1ee | ||
|
|
d918f96c66 | ||
|
|
8b17afc6b3 | ||
|
|
c5e4bbf2ce | ||
|
|
bc3dcda1ec | ||
|
|
b10147ea36 | ||
|
|
a738932e68 | ||
|
|
ea9c5e0ee8 | ||
|
|
83d51ea866 | ||
|
|
e36f6e08f7 | ||
|
|
80d4442cd0 | ||
|
|
24f67a3c0f | ||
|
|
13f374e262 | ||
|
|
2fc883dedc | ||
|
|
d60836798b | ||
|
|
e420a01e0d | ||
|
|
b6da5fb79b | ||
|
|
2157f84c4f | ||
|
|
801911d765 | ||
|
|
a75eb9fc86 | ||
|
|
ed8173e34d | ||
|
|
7e3f631dfc | ||
|
|
9deb3f6c49 | ||
|
|
1bfc005203 | ||
|
|
d413287ebc | ||
|
|
d582507523 | ||
|
|
509055423a | ||
|
|
7faa79d361 | ||
|
|
9d5f9fde62 | ||
|
|
c65ef97301 | ||
|
|
13ce64e51a | ||
|
|
5e563054f9 | ||
|
|
6805ebe38f | ||
|
|
e7f13871f8 | ||
|
|
4fc429a8d1 | ||
|
|
3c3f47bb5b | ||
|
|
685b01be01 | ||
|
|
e8c559580a | ||
|
|
dea8c73cdd | ||
|
|
dc79a22ba3 | ||
|
|
7497c1b7cd | ||
|
|
7eff1c919b | ||
|
|
7cdd188b06 | ||
|
|
bd5d129778 | ||
|
|
89792d5d3d | ||
|
|
19ab61a1d4 | ||
|
|
aa4a834c85 | ||
|
|
ed8f8f8a3e | ||
|
|
a64adc2504 | ||
|
|
a528de752b | ||
|
|
8a02f62c70 | ||
|
|
463ad49c9f | ||
|
|
96b8898c05 | ||
|
|
ee7cc8058f | ||
|
|
dd84da4add | ||
|
|
2abf05f11a | ||
|
|
72db45bcc0 | ||
|
|
7d47f97354 | ||
|
|
badd722f9d | ||
|
|
880e171933 | ||
|
|
18fa06678c | ||
|
|
54ebfc30f2 | ||
|
|
d5f867c76c | ||
|
|
f38af7f2e8 | ||
|
|
51a29a997b | ||
|
|
b988d5abee | ||
|
|
afaf3d6e26 | ||
|
|
c433c03fc0 | ||
|
|
c92ad06092 | ||
|
|
01729157b7 | ||
|
|
1811a80ecc | ||
|
|
dc0ae4dc42 | ||
|
|
32b7aa99c5 | ||
|
|
9b93cca790 | ||
|
|
f65bfbe83c | ||
|
|
66164e6cde | ||
|
|
9a69d23e5a | ||
|
|
68b9225818 | ||
|
|
df3c0800f0 | ||
|
|
34cda84064 | ||
|
|
c6a3024807 | ||
|
|
62ab02ec0e | ||
|
|
eb6f4e8cb8 | ||
|
|
50dd8fae41 | ||
|
|
effe37fa6c | ||
|
|
413fa6f0c2 | ||
|
|
c6c3799d08 | ||
|
|
62ae8d124b | ||
|
|
fdb2cbd9ab | ||
|
|
e1f0f39987 | ||
|
|
218a247684 | ||
|
|
5da758e8a1 | ||
|
|
666d58659e | ||
|
|
9dbbe26b17 | ||
|
|
8dc5b94198 | ||
|
|
09a80188ac | ||
|
|
8c45e92ee4 | ||
|
|
dfcb724502 | ||
|
|
a01f21e3ac | ||
|
|
52fb9be576 | ||
|
|
647c660476 | ||
|
|
3aa5dd3694 | ||
|
|
5a3acc8649 | ||
|
|
9185515660 | ||
|
|
f02b40b830 | ||
|
|
261915db29 | ||
|
|
5eec0f7975 | ||
|
|
76e6b6e276 | ||
|
|
4367b1c650 | ||
|
|
6b72611b72 | ||
|
|
2d4deda6b4 | ||
|
|
3c6b65baa1 | ||
|
|
fa1886a17e | ||
|
|
f434dcfaf6 | ||
|
|
8be93626c0 | ||
|
|
0ea642521c | ||
|
|
9ed8155c95 | ||
|
|
91dda3a84d | ||
|
|
d6f999a5c8 | ||
|
|
121c3e95c8 | ||
|
|
d3902a51ca | ||
|
|
8477e957bd | ||
|
|
ad29a0d85b | ||
|
|
a2e5180236 | ||
|
|
df940686e9 | ||
|
|
bb557fd187 | ||
|
|
f5155e7f7e | ||
|
|
66539a75dc | ||
|
|
6e4990167a | ||
|
|
cbc6fc4710 | ||
|
|
4df183c5a3 | ||
|
|
097a24fbbd | ||
|
|
33c07fdd33 | ||
|
|
cc7f49fa8b | ||
|
|
2a84c58d4b | ||
|
|
a06c5c097e | ||
|
|
0d96f797f1 | ||
|
|
168f822416 | ||
|
|
ff3dec28d2 | ||
|
|
dd047747f6 | ||
|
|
7f6c528291 | ||
|
|
08c734b242 | ||
|
|
a16968d6e5 | ||
|
|
c44fd8a40b | ||
|
|
f00c9ca611 | ||
|
|
237dd8c209 | ||
|
|
7f597b6409 | ||
|
|
a071d7c89b | ||
|
|
b917b72fe6 | ||
|
|
9249c1756f | ||
|
|
83cd69e5b7 | ||
|
|
f45855c34b | ||
|
|
a1fca58864 | ||
|
|
125e7b47e8 | ||
|
|
e2728ce8f7 | ||
|
|
9c67f6bfe1 | ||
|
|
a8d79c5e60 | ||
|
|
dd5db3eaa6 | ||
|
|
c524fbc0e4 | ||
|
|
0640d4c6c6 | ||
|
|
0d29972849 | ||
|
|
6f5bed1e0f | ||
|
|
dd79c3ca0d | ||
|
|
df7aa633ad | ||
|
|
8d10e09602 | ||
|
|
b86054d943 | ||
|
|
60d7032b9f | ||
|
|
93fc7df159 | ||
|
|
051dc13322 | ||
|
|
dac0a9c3f3 | ||
|
|
20260df9a1 | ||
|
|
c59a19bbcb | ||
|
|
d8170c3897 | ||
|
|
f4f47a8f33 | ||
|
|
c1efc721e2 | ||
|
|
d44bdd2dfd | ||
|
|
a22c2e10b0 | ||
|
|
4874c0e4b0 | ||
|
|
6a5ec866c0 | ||
|
|
2ba7a24f95 | ||
|
|
3de5daccbc | ||
|
|
02d9f8970e | ||
|
|
72a72b90fb | ||
|
|
13b2b6069e | ||
|
|
d28c5befdf | ||
|
|
3336f30112 | ||
|
|
9705f6c99c | ||
|
|
c4b8e74f74 | ||
|
|
7ab5314665 | ||
|
|
282dabd722 | ||
|
|
18742b52e5 | ||
|
|
a282b8373a | ||
|
|
73ec4e542a | ||
|
|
1ace2c8d54 | ||
|
|
b6a1781a9c | ||
|
|
9cd89ef01b | ||
|
|
5fde98d317 | ||
|
|
de1d6bda5b | ||
|
|
89c08b4bb0 | ||
|
|
f333658ac6 | ||
|
|
10a35876bf | ||
|
|
d31fed92d7 | ||
|
|
415748a0c1 | ||
|
|
07d5258551 | ||
|
|
abd422b5cf | ||
|
|
f51d1493e6 | ||
|
|
c3484e5ebc | ||
|
|
af7ba9669e | ||
|
|
458ef48a84 | ||
|
|
7db0fa44a9 | ||
|
|
41aafdd1cc | ||
|
|
35147287fb | ||
|
|
0de1cb395f | ||
|
|
62a8bdb602 | ||
|
|
50f2c9629d | ||
|
|
0e68f4a852 | ||
|
|
a9b2a1cd26 | ||
|
|
91135260c0 | ||
|
|
9937b6156d | ||
|
|
6482e030ee | ||
|
|
3e90a9f102 | ||
|
|
fa7e544a15 | ||
|
|
d6d8a9f178 | ||
|
|
2a316bc92d | ||
|
|
6ecc1c6aa8 | ||
|
|
be27e15885 | ||
|
|
041cf09011 | ||
|
|
a325b26704 | ||
|
|
8530237e3c | ||
|
|
cdb804bb88 | ||
|
|
d4fbf0997a | ||
|
|
f16f417bd5 | ||
|
|
ce79e3947d | ||
|
|
883a3b6d27 | ||
|
|
de95b6c925 | ||
|
|
80a700b890 | ||
|
|
44dd952d3b | ||
|
|
e1f65022a4 | ||
|
|
d9abe8d675 | ||
|
|
761b686214 | ||
|
|
96c79f44ed | ||
|
|
8668796c5d | ||
|
|
89e23198e0 | ||
|
|
061a41c4a1 | ||
|
|
0cc1d00140 | ||
|
|
3480a8fda4 | ||
|
|
6c6821df20 | ||
|
|
d2cd48a86a | ||
|
|
0e817e1e06 | ||
|
|
ccd23eabb1 | ||
|
|
621c0812d5 | ||
|
|
b90995d3aa | ||
|
|
2c03b2859d | ||
|
|
bf12d2bfd3 | ||
|
|
88718c7108 | ||
|
|
4a85d2302b | ||
|
|
5ec072ef17 | ||
|
|
bf24f195f2 | ||
|
|
950f4a7ec6 | ||
|
|
4f9cc96eb6 | ||
|
|
b45cd86edb | ||
|
|
dd80d8614a | ||
|
|
96d688318a | ||
|
|
06feeecedc | ||
|
|
a3d6083188 | ||
|
|
808fc7cb58 | ||
|
|
b52b01c7e5 | ||
|
|
b4d3939d95 | ||
|
|
d9e18f13ef | ||
|
|
5e75b2510d | ||
|
|
fbe899e326 | ||
|
|
8b32944692 | ||
|
|
7d9afb76b1 | ||
|
|
d3a6a6080c | ||
|
|
84f0d7c314 | ||
|
|
c5880b8d13 | ||
|
|
20865d53c3 | ||
|
|
5453aa7380 | ||
|
|
1db5696bc1 | ||
|
|
fec705fe67 | ||
|
|
9d842f40cb | ||
|
|
cad2e3c8c5 | ||
|
|
1010de5cfb | ||
|
|
4ddc491224 | ||
|
|
1234096678 | ||
|
|
8957b77d55 | ||
|
|
4c515318af | ||
|
|
603a1a5314 | ||
|
|
80a63c18c1 | ||
|
|
4da513a1ca | ||
|
|
37d65b4125 | ||
|
|
1a4c1b26ea | ||
|
|
8be91b128f | ||
|
|
a2cb4fc0ee | ||
|
|
194cce2914 | ||
|
|
a8d8c188eb | ||
|
|
c5c9ab32bf | ||
|
|
7285912356 | ||
|
|
03d19afe37 | ||
|
|
04c153277b | ||
|
|
48dce6c37b | ||
|
|
b97ffa927a | ||
|
|
70cd619b6c | ||
|
|
99f77e8637 | ||
|
|
bbb570fee6 | ||
|
|
66741fab51 | ||
|
|
f3b0bea93f | ||
|
|
6dca9b41a0 | ||
|
|
aa184aab94 | ||
|
|
a58d1a708d | ||
|
|
6f6e823117 | ||
|
|
cabd6fb7f5 | ||
|
|
663a6cad43 | ||
|
|
2ca4502303 | ||
|
|
3801d482cd | ||
|
|
2732323c6d | ||
|
|
28f06222de | ||
|
|
bbf96aa41c | ||
|
|
62dd0eb628 | ||
|
|
83c5404826 | ||
|
|
1e479270d7 | ||
|
|
2c4d1fa3db | ||
|
|
45fe93d2b2 | ||
|
|
37ebe3e68f | ||
|
|
78e3c14019 | ||
|
|
f2b7c9229b | ||
|
|
4610be9745 | ||
|
|
5ebcad7cde | ||
|
|
40cfa33556 | ||
|
|
b3ba4e482e | ||
|
|
787462a124 | ||
|
|
73ed74b714 | ||
|
|
39e7119d9a | ||
|
|
bb77448231 | ||
|
|
5772b02280 | ||
|
|
0711ed5f60 | ||
|
|
f29c702700 | ||
|
|
34b626d425 | ||
|
|
21d505a8d0 | ||
|
|
fb7cab4b51 | ||
|
|
ef75386f3f | ||
|
|
84475b32f9 | ||
|
|
cd3d645ece | ||
|
|
13571ef615 | ||
|
|
f924827c7c | ||
|
|
3c859a6b7b | ||
|
|
b189b8d4dd | ||
|
|
9e50c8824c | ||
|
|
e9eb7a847e | ||
|
|
05d06fae11 | ||
|
|
6009d929cf | ||
|
|
495ac39498 | ||
|
|
9bd5a221c8 | ||
|
|
a9cc34103a | ||
|
|
2972353fcf | ||
|
|
a9797ca2c7 | ||
|
|
47bcb7f2e5 | ||
|
|
4dc2036530 | ||
|
|
7779dbe1b0 | ||
|
|
5009a28853 | ||
|
|
7ac65aa342 | ||
|
|
70b5353d6f | ||
|
|
d1f7a82661 | ||
|
|
6b9dd1ce96 | ||
|
|
8fc326e011 | ||
|
|
dedc8c00ce | ||
|
|
557c077feb | ||
|
|
f44f910c80 | ||
|
|
d9c6b6bdb6 | ||
|
|
1389c581b7 | ||
|
|
27a8faccd1 | ||
|
|
cc7990f780 | ||
|
|
4bb3a53e9a | ||
|
|
f3045933df | ||
|
|
e789825852 | ||
|
|
ac13c6a6f2 | ||
|
|
b1cd9578fc | ||
|
|
5874c8a9b2 | ||
|
|
0b4ae7c7a1 | ||
|
|
059ffc1b01 | ||
|
|
9e5352b8d7 | ||
|
|
b758c9ef91 | ||
|
|
b3d937e695 | ||
|
|
29ddee8d06 | ||
|
|
fd7383c519 | ||
|
|
c4de677c86 | ||
|
|
7ea082427b | ||
|
|
86330c443e | ||
|
|
2301190c4d | ||
|
|
82f2d7e23f | ||
|
|
8ea896b261 | ||
|
|
7f09609ca0 | ||
|
|
defe853728 | ||
|
|
e350a221c9 | ||
|
|
9267002baa | ||
|
|
eca33e463a | ||
|
|
765c24dd3f | ||
|
|
d8995e996d | ||
|
|
96f467fa21 | ||
|
|
0e7f466723 | ||
|
|
ff3f51179b | ||
|
|
5c4d61c45c | ||
|
|
8fcf935201 | ||
|
|
7eb17bab2f | ||
|
|
ef26e59388 | ||
|
|
e159cc4a10 | ||
|
|
d32e6d0e19 | ||
|
|
04a255af23 | ||
|
|
3dcc31b3b9 | ||
|
|
77173de5c0 | ||
|
|
255b4fdde4 | ||
|
|
90f4d59ec5 | ||
|
|
6c18ae1ebe | ||
|
|
c5585da57d | ||
|
|
bedbeac9f7 | ||
|
|
5cddada3be | ||
|
|
8e1a0e678c | ||
|
|
3177ab6404 | ||
|
|
29688774c6 | ||
|
|
c8dfe0fab0 | ||
|
|
2f017082d2 | ||
|
|
426efc789c | ||
|
|
a28de6feaf | ||
|
|
41c7ed90c1 | ||
|
|
9c8d7fc269 | ||
|
|
6e281f6242 | ||
|
|
de85895ad6 | ||
|
|
2a63cd6bea | ||
|
|
3e327d52d6 | ||
|
|
b07641cf66 | ||
|
|
c46b7ab094 | ||
|
|
bf279898b9 | ||
|
|
8db35c45c1 | ||
|
|
923c75512b | ||
|
|
d744fb4106 | ||
|
|
4bddd1aab1 | ||
|
|
54381ffaf3 | ||
|
|
9a310bfb98 | ||
|
|
56b243c0ce | ||
|
|
b7bcca6562 | ||
|
|
3f063ae791 | ||
|
|
ff7f9f7876 | ||
|
|
ee6afffc6a | ||
|
|
e175864699 | ||
|
|
9b57655255 | ||
|
|
6b23fcd11c | ||
|
|
d9df013e33 | ||
|
|
00591a5b25 | ||
|
|
c307f6b0af | ||
|
|
301c60ee92 | ||
|
|
d1b1aaa5e0 | ||
|
|
6c44622e95 | ||
|
|
b2879da541 | ||
|
|
f9b1f2d927 | ||
|
|
892878396d | ||
|
|
4db8c9c397 | ||
|
|
b6c676f4dc | ||
|
|
d4c605f671 | ||
|
|
9d96ed347f | ||
|
|
e0882de139 | ||
|
|
efa5c191e0 | ||
|
|
154cc7290f | ||
|
|
c785dfa47b | ||
|
|
d8b3fd74b0 | ||
|
|
06f591d404 | ||
|
|
4ae21bcf1f | ||
|
|
95d36656b7 | ||
|
|
a4f8d59e28 | ||
|
|
7055cbcbda | ||
|
|
b51d55805e | ||
|
|
edb842679d | ||
|
|
0f8fb1693a | ||
|
|
7f9a11dbb8 | ||
|
|
1e56d7877f | ||
|
|
da251a5f7c | ||
|
|
046aa2aed0 | ||
|
|
62fe32195f | ||
|
|
1cf59cd49f | ||
|
|
5faa28ec4a | ||
|
|
ad9c837d43 | ||
|
|
cb6e4b7295 | ||
|
|
4bff2b4f38 | ||
|
|
694b3cd9c2 | ||
|
|
dad08b9cd7 | ||
|
|
ce5bbc1ff7 | ||
|
|
c13427a0b1 | ||
|
|
c5b56dec38 | ||
|
|
dc424b3720 | ||
|
|
1067cc5157 | ||
|
|
eb76450cdc | ||
|
|
7f0547a8b8 | ||
|
|
3ea41c3297 | ||
|
|
ef64c9dd08 | ||
|
|
4c5e39fe6b | ||
|
|
cc3813e97e | ||
|
|
de97de64b2 | ||
|
|
c92c8f9a1a | ||
|
|
32b9e9c068 | ||
|
|
2fd1464ccb | ||
|
|
d39d4804a3 | ||
|
|
116ceefd0a | ||
|
|
17616205b9 | ||
|
|
c6f821ce53 | ||
|
|
75a7630be8 | ||
|
|
415b2e1967 | ||
|
|
441fb1a0b7 | ||
|
|
e379b8c7e1 | ||
|
|
5299f1262f | ||
|
|
5942f847a1 | ||
|
|
f111ae1200 | ||
|
|
74edbd0a4e | ||
|
|
2529011df9 | ||
|
|
549df741fa | ||
|
|
4f4b64f2b1 | ||
|
|
24bac65f03 | ||
|
|
a0b03d3433 | ||
|
|
68897b9ebf | ||
|
|
0459eedd1a | ||
|
|
2033a3457f | ||
|
|
982be95435 | ||
|
|
0ba733eeee | ||
|
|
506187e3c2 | ||
|
|
b4834b1228 | ||
|
|
44116c5e52 | ||
|
|
1d04b0bde0 | ||
|
|
f1c7ae9974 | ||
|
|
d9dcd33425 | ||
|
|
d42442052b | ||
|
|
7df98e2865 | ||
|
|
6fab88e891 | ||
|
|
91afbcd71a | ||
|
|
efa73c827f | ||
|
|
bb58ac6f2c | ||
|
|
a1f950899a | ||
|
|
3bae4c5511 | ||
|
|
9f012fd43a | ||
|
|
6412825d30 | ||
|
|
628cf705ce | ||
|
|
bd8d848321 | ||
|
|
ae7c446e81 | ||
|
|
9e1b7fc3ab | ||
|
|
f43d700fd8 | ||
|
|
2c3132f660 | ||
|
|
df633689b6 | ||
|
|
d3d20ac2cd | ||
|
|
28ceb94417 | ||
|
|
2db86169fa | ||
|
|
0cae0c865b | ||
|
|
f1cca66012 | ||
|
|
af3266b979 | ||
|
|
89206c9fa6 | ||
|
|
4550f72b6d | ||
|
|
32e5c8195a | ||
|
|
037268179d | ||
|
|
0cd9c8fe00 | ||
|
|
24b77e220b | ||
|
|
d403640f7f | ||
|
|
5d6fc8adc9 | ||
|
|
91dac247c9 | ||
|
|
4920e8787b | ||
|
|
418c87093d | ||
|
|
42bd7e60a7 | ||
|
|
25b6acb0db | ||
|
|
8666973519 | ||
|
|
53f4e11145 | ||
|
|
1113e1ffec | ||
|
|
f9548dd7f2 | ||
|
|
a00af1e288 | ||
|
|
abef79efa6 | ||
|
|
bfdf7f7498 | ||
|
|
6c7ae3f79f | ||
|
|
77aa83d829 | ||
|
|
b327bd8fed | ||
|
|
64fb77132a | ||
|
|
2cb2e6d805 | ||
|
|
f533bb5350 | ||
|
|
24bb7d5cfa | ||
|
|
5ad2a63ba1 | ||
|
|
6169870c5b | ||
|
|
cc078ec353 | ||
|
|
c000ee4a82 | ||
|
|
c98874e22f | ||
|
|
82f263bc23 | ||
|
|
4c1f0f7b0f | ||
|
|
10cb004a5c | ||
|
|
bcdc2fd883 | ||
|
|
832b0dbd63 | ||
|
|
854d74bbe5 | ||
|
|
097cabbf5b | ||
|
|
003f3d13e0 | ||
|
|
88deedad6c | ||
|
|
bdea68c4e6 | ||
|
|
b2829963ab | ||
|
|
0724fde7da | ||
|
|
dd6eee6e26 | ||
|
|
b819e6634e | ||
|
|
2a88651610 | ||
|
|
902f5dd2d5 | ||
|
|
c6a4a6a8f5 | ||
|
|
d1135dc7bc | ||
|
|
26ba893877 | ||
|
|
b9013d72ad | ||
|
|
c58514765d | ||
|
|
28acc2e48b | ||
|
|
a827396d07 | ||
|
|
5b96c78ed9 | ||
|
|
89d17adc05 | ||
|
|
b7cac7defc | ||
|
|
a42848d183 | ||
|
|
982a4194d9 | ||
|
|
6308bd93c5 | ||
|
|
9b9a56d9a6 | ||
|
|
93b2c6acdd | ||
|
|
f2373da1b9 | ||
|
|
a447c8efc8 | ||
|
|
33036400d3 | ||
|
|
c816c95224 | ||
|
|
6a55750e8d | ||
|
|
64443ee136 | ||
|
|
390014fa1b | ||
|
|
5b969ef861 | ||
|
|
7bd791f8f5 | ||
|
|
7de58de42a | ||
|
|
4d977fd765 | ||
|
|
8b33c99235 | ||
|
|
add99fabc6 | ||
|
|
9e859be5ff | ||
|
|
1ea95ec39f | ||
|
|
36f1a3ba64 | ||
|
|
37c963b4ad | ||
|
|
eaf31fd3e7 | ||
|
|
cb21678f67 | ||
|
|
0f65670f99 | ||
|
|
ad59522a46 | ||
|
|
400e06eaca | ||
|
|
05c18c9096 | ||
|
|
7a76f73682 | ||
|
|
f9437ca9b8 | ||
|
|
daf27a76ab | ||
|
|
2a64f63073 | ||
|
|
06a5f6a0de | ||
|
|
2178a81c65 | ||
|
|
ec27e57f2c | ||
|
|
5e42dcad09 | ||
|
|
fa3817574b | ||
|
|
8d9a14a15f | ||
|
|
ea1650251c | ||
|
|
adad526a4f | ||
|
|
672cb7dd34 | ||
|
|
997771744f | ||
|
|
cacdfbcb62 | ||
|
|
aa94f66492 | ||
|
|
95a420ba82 | ||
|
|
04f34f796e | ||
|
|
dc400b506d | ||
|
|
562f3f43df | ||
|
|
f65546a619 | ||
|
|
18216401fd | ||
|
|
5c64d630a0 | ||
|
|
28a2c9897d | ||
|
|
acebb7b366 | ||
|
|
88b565fe6d | ||
|
|
b87c214cb5 | ||
|
|
40a1070b81 | ||
|
|
89e7581517 | ||
|
|
fb8b96779c | ||
|
|
7c529c281c | ||
|
|
e13a304d76 | ||
|
|
7afbaba4ce | ||
|
|
d6a629369f | ||
|
|
7811e8cdf3 | ||
|
|
4412e94f19 | ||
|
|
1e1e98e155 | ||
|
|
a3014c338a | ||
|
|
f48ce1fcdf | ||
|
|
18b0e861a7 | ||
|
|
051415bb5e | ||
|
|
f9636f63b3 | ||
|
|
a480ab0b6b | ||
|
|
8727de92d7 | ||
|
|
514618bddb | ||
|
|
58c0c34b2b | ||
|
|
c0a04eee6e | ||
|
|
1a8544a03f | ||
|
|
5974bd7986 | ||
|
|
c06d5cc8b4 | ||
|
|
7923be3c68 | ||
|
|
ba2223c944 | ||
|
|
390cec485b | ||
|
|
df5478a909 | ||
|
|
444d4fc55d | ||
|
|
a234dabd83 | ||
|
|
3ac5477556 | ||
|
|
b32bc73e63 | ||
|
|
92305f5713 | ||
|
|
30934223a2 | ||
|
|
19ae06d4ec |
|
|
@ -1,61 +1,50 @@
|
|||
name: BUG 提交
|
||||
description: 提交产品缺陷帮助我们更好的改进
|
||||
title: "[BUG]"
|
||||
labels: "类型: 缺陷"
|
||||
name: 'Bug Report'
|
||||
description: 'Report an Bug'
|
||||
title: "[Bug] "
|
||||
assignees: zyyfit
|
||||
body:
|
||||
- type: markdown
|
||||
id: contacts_title
|
||||
attributes:
|
||||
value: "## 联系方式"
|
||||
value: "## Contact Information"
|
||||
- type: input
|
||||
id: contacts
|
||||
validations:
|
||||
required: false
|
||||
attributes:
|
||||
label: "联系方式"
|
||||
description: "可以快速联系到您的方式:交流群号及昵称、邮箱等"
|
||||
label: "Contact Information"
|
||||
description: "The ways to quickly contact you: WeChat group number and nickname, email, etc."
|
||||
- type: markdown
|
||||
id: environment
|
||||
attributes:
|
||||
value: "## 环境信息"
|
||||
value: "## Environment Information"
|
||||
- type: input
|
||||
id: version
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: "MaxKB 版本"
|
||||
description: "登录 MaxKB Web 控制台,在右上角关于页面查看当前版本。"
|
||||
label: "MaxKB Version"
|
||||
description: "Log in to the MaxKB Web Console and check the current version on the `About` page in the top right corner."
|
||||
- type: markdown
|
||||
id: details
|
||||
attributes:
|
||||
value: "## 详细信息"
|
||||
value: "## Detailed information"
|
||||
- type: textarea
|
||||
id: what-happened
|
||||
attributes:
|
||||
label: "问题描述"
|
||||
description: "简要描述您碰到的问题"
|
||||
label: "Problem Description"
|
||||
description: "Briefly describe the issue you’ve encountered."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: how-happened
|
||||
attributes:
|
||||
label: "重现步骤"
|
||||
description: "如果操作可以重现该问题"
|
||||
label: "Steps to Reproduce"
|
||||
description: "How can this issue be reproduced."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: expect
|
||||
attributes:
|
||||
label: "期待的正确结果"
|
||||
label: "The expected correct result"
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: "相关日志输出"
|
||||
description: "请复制并粘贴任何相关的日志输出。 这将自动格式化为代码,因此无需反引号。"
|
||||
label: "Related log output"
|
||||
description: "Please paste any relevant log output here. It will automatically be formatted as code, so no backticks are necessary."
|
||||
render: shell
|
||||
- type: textarea
|
||||
id: additional-information
|
||||
attributes:
|
||||
label: "附加信息"
|
||||
description: "如果你还有其他需要提供的信息,可以在这里填写(可以提供截图、视频等)。"
|
||||
label: "Additional Information"
|
||||
description: "If you have any additional information to provide, you can include it here (screenshots, videos, etc., are welcome)."
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: 对 MaxKB 项目有其他问题
|
||||
url: https://bbs.fit2cloud.com/c/mk/11
|
||||
about: 如果你对 MaxKB 有其他想要提问的,我们欢迎到我们的官方社区进行提问。
|
||||
- name: Questions & Discussions
|
||||
url: https://github.com/1Panel-dev/MaxKB/discussions
|
||||
about: Raise questions about the installation, deployment, use and other aspects of the project.
|
||||
|
|
@ -1,36 +1,29 @@
|
|||
name: 需求建议
|
||||
description: 提出针对本项目的想法和建议
|
||||
title: "[FEATURE]"
|
||||
labels: enhancement
|
||||
name: 'Feature Request'
|
||||
description: 'Suggest an idea'
|
||||
title: '[Feature] '
|
||||
assignees: baixin513
|
||||
body:
|
||||
- type: markdown
|
||||
id: environment
|
||||
attributes:
|
||||
value: "## 环境信息"
|
||||
value: "## Environment Information"
|
||||
- type: input
|
||||
id: version
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: "MaxKB 版本"
|
||||
description: "登录 MaxKB Web 控制台,在右上角关于页面查看当前版本。"
|
||||
label: "MaxKB Version"
|
||||
description: "Log in to the MaxKB Web Console and check the current version on the `About` page in the top right corner."
|
||||
- type: markdown
|
||||
id: details
|
||||
attributes:
|
||||
value: "## 详细信息"
|
||||
value: "## Detailed information"
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: "请描述您的需求或者改进建议"
|
||||
label: "Please describe your needs or suggestions for improvements"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: solution
|
||||
attributes:
|
||||
label: "请描述你建议的实现方案"
|
||||
label: "Please describe the solution you suggest"
|
||||
- type: textarea
|
||||
id: additional-information
|
||||
attributes:
|
||||
label: "附加信息"
|
||||
description: "如果你还有其他需要提供的信息,可以在这里填写(可以提供截图、视频等)。"
|
||||
label: "Additional Information"
|
||||
description: "If you have any additional information to provide, you can include it here (screenshots, videos, etc., are welcome)."
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "pip"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
timezone: "Asia/Shanghai"
|
||||
day: "friday"
|
||||
target-branch: "v2"
|
||||
groups:
|
||||
python-dependencies:
|
||||
patterns:
|
||||
- "*"
|
||||
# ignore:
|
||||
# - dependency-name: "pymupdf"
|
||||
# versions: ["*"]
|
||||
|
||||
|
|
@ -14,7 +14,7 @@ on:
|
|||
- linux/amd64,linux/arm64
|
||||
jobs:
|
||||
build-and-push-python-pg-to-ghcr:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Check Disk Space
|
||||
run: df -h
|
||||
|
|
@ -50,6 +50,9 @@ jobs:
|
|||
${DOCKER_IMAGE_TAGS} .
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
# Until https://github.com/tonistiigi/binfmt/issues/215
|
||||
image: tonistiigi/binfmt:qemu-v7.0.0-28
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to GitHub Container Registry
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ on:
|
|||
|
||||
jobs:
|
||||
build-and-push-vector-model-to-ghcr:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Check Disk Space
|
||||
run: df -h
|
||||
|
|
@ -55,6 +55,9 @@ jobs:
|
|||
${DOCKER_IMAGE_TAGS} .
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
# Until https://github.com/tonistiigi/binfmt/issues/215
|
||||
image: tonistiigi/binfmt:qemu-v7.0.0-28
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to GitHub Container Registry
|
||||
|
|
|
|||
|
|
@ -6,9 +6,14 @@ on:
|
|||
workflow_dispatch:
|
||||
inputs:
|
||||
dockerImageTag:
|
||||
description: 'Docker Image Tag'
|
||||
default: 'v1.6.0-dev'
|
||||
description: 'Image Tag'
|
||||
default: 'v1.10.7-dev'
|
||||
required: true
|
||||
dockerImageTagWithLatest:
|
||||
description: '是否发布latest tag(正式发版时选择,测试版本切勿选择)'
|
||||
default: false
|
||||
required: true
|
||||
type: boolean
|
||||
architecture:
|
||||
description: 'Architecture'
|
||||
required: true
|
||||
|
|
@ -57,13 +62,14 @@ jobs:
|
|||
DOCKER_IMAGE=${{ secrets.FIT2CLOUD_REGISTRY_HOST }}/maxkb/maxkb
|
||||
DOCKER_PLATFORMS=${{ github.event.inputs.architecture }}
|
||||
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
|
||||
if [[ ${TAG_NAME} == *dev* ]]; then
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
|
||||
TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
|
||||
if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
|
||||
else
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
|
||||
fi
|
||||
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} \
|
||||
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
|
||||
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
|
||||
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
|
||||
${DOCKER_IMAGE_TAGS} .
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
|
@ -83,6 +89,7 @@ jobs:
|
|||
password: ${{ secrets.FIT2CLOUD_REGISTRY_PASSWORD }}
|
||||
- name: Docker Buildx (build-and-push)
|
||||
run: |
|
||||
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
|
||||
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
|
||||
|
||||
build-and-push-to-dockerhub:
|
||||
|
|
@ -112,14 +119,15 @@ jobs:
|
|||
run: |
|
||||
DOCKER_IMAGE=1panel/maxkb
|
||||
DOCKER_PLATFORMS=${{ github.event.inputs.architecture }}
|
||||
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
|
||||
if [[ ${TAG_NAME} == *dev* ]]; then
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
|
||||
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
|
||||
TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
|
||||
if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
|
||||
else
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
|
||||
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
|
||||
fi
|
||||
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} \
|
||||
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
|
||||
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
|
||||
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
|
||||
${DOCKER_IMAGE_TAGS} .
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
|
@ -138,4 +146,5 @@ jobs:
|
|||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Docker Buildx (build-and-push)
|
||||
run: |
|
||||
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
|
||||
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
|
||||
|
|
|
|||
|
|
@ -0,0 +1,14 @@
|
|||
name: Issue Translator
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
types: [opened]
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: usthe/issues-translate-action@v2.7
|
||||
with:
|
||||
IS_MODIFY_TITLE: true
|
||||
BOT_GITHUB_TOKEN: ${{ secrets.FIT2CLOUDRD_LLM_CODE_REVIEW_TOKEN }}
|
||||
|
|
@ -16,10 +16,10 @@ jobs:
|
|||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.FIT2CLOUDRD_LLM_CODE_REVIEW_TOKEN }}
|
||||
OPENAI_API_KEY: ${{ secrets.ALIYUN_LLM_API_KEY }}
|
||||
LANGUAGE: Chinese
|
||||
LANGUAGE: English
|
||||
OPENAI_API_ENDPOINT: https://dashscope.aliyuncs.com/compatible-mode/v1
|
||||
MODEL: qwen2-1.5b-instruct
|
||||
PROMPT: "请检查下面的代码差异是否有不规范、潜在的问题或者优化建议,用中文回答。"
|
||||
MODEL: qwen2.5-coder-3b-instruct
|
||||
PROMPT: "Please check the following code for any irregularities, potential issues, or optimization suggestions, and provide your answers in English."
|
||||
top_p: 1
|
||||
temperature: 1
|
||||
# max_tokens: 10000
|
||||
|
|
|
|||
|
|
@ -178,8 +178,10 @@ ui/node_modules
|
|||
ui/dist
|
||||
apps/static
|
||||
models/
|
||||
apps/xpack
|
||||
!apps/**/models/
|
||||
data
|
||||
.dev
|
||||
poetry.lock
|
||||
apps/setting/models_provider/impl/*/icon/
|
||||
apps/setting/models_provider/impl/*/icon/
|
||||
tmp/
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
[files]
|
||||
extend-exclude = [
|
||||
'apps/setting/models_provider/impl/*/icon/*'
|
||||
]
|
||||
]
|
||||
|
|
|
|||
155
README.md
155
README.md
|
|
@ -1,97 +1,126 @@
|
|||
[English](README_EN.md) | [中文](README.md)
|
||||
|
||||
<p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
|
||||
<h3 align="center">基于大语言模型和 RAG 的知识库问答系统</h3>
|
||||
<p align="center">
|
||||
<a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: auto;" /></a>
|
||||
<a href="https://market.aliyun.com/products/53690006/cmjj00067609.html?userCode=kmemb8jp" target="_blank"><img src="https://img.alicdn.com/imgextra/i2/O1CN01H5JIwY1rZ0OobDjnJ_!!6000000005644-2-tps-1000-216.png" alt="1Panel-dev%2FMaxKB | Aliyun" style="width: 250px; height: auto;" /></a>
|
||||
</p>
|
||||
<h3 align="center">Open-source platform for building enterprise-grade agents</h3>
|
||||
<h3 align="center">强大易用的企业级智能体平台</h3>
|
||||
<p align="center"><a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a></p>
|
||||
<p align="center">
|
||||
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
|
||||
<a href="https://app.codacy.com/gh/1Panel-dev/maxkb?utm_source=github.com&utm_medium=referral&utm_content=1Panel-dev/maxkb&utm_campaign=Badge_Grade_Dashboard"><img src="https://app.codacy.com/project/badge/Grade/da67574fd82b473992781d1386b937ef" alt="Codacy"></a>
|
||||
<a href="https://github.com/1Panel-dev/maxkb/releases/latest"><img src="https://img.shields.io/github/v/release/1Panel-dev/maxkb" alt="Latest release"></a>
|
||||
<a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?color=%231890FF&style=flat-square" alt="Stars"></a>
|
||||
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
|
||||
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a><br/>
|
||||
[<a href="/README_CN.md">中文(简体)</a>] | [<a href="/README.md">English</a>]
|
||||
</p>
|
||||
<hr/>
|
||||
|
||||
MaxKB = Max Knowledge Base,是一款基于大语言模型和 RAG 的开源知识库问答系统,广泛应用于智能客服、企业内部知识库、学术研究与教育等场景。
|
||||
MaxKB = Max Knowledge Brain, it is an open-source platform for building enterprise-grade agents. MaxKB integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
|
||||
|
||||
- **开箱即用**:支持直接上传文档 / 自动爬取在线文档,支持文本自动拆分、向量化和 RAG(检索增强生成),有效减少大模型幻觉,智能问答交互体验好;
|
||||
- **模型中立**:支持对接各种大模型,包括本地私有大模型(Llama 3 / Qwen 2 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等)和国外公共大模型(OpenAI / Claude / Gemini 等);
|
||||
- **灵活编排**:内置强大的工作流引擎和函数库,支持编排 AI 工作过程,满足复杂业务场景下的需求;
|
||||
- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统,让已有系统快速拥有智能问答能力,提高用户满意度。
|
||||
- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting, vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
|
||||
- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
|
||||
- **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction.
|
||||
- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.).
|
||||
- **Multi Modal**: Native support for input and output text, image, audio and video.
|
||||
|
||||
三分钟视频介绍:https://www.bilibili.com/video/BV18JypYeEkj/
|
||||
## Quick start
|
||||
|
||||
## 快速开始
|
||||
Execute the script below to start a MaxKB container using Docker:
|
||||
|
||||
```
|
||||
# Linux 机器
|
||||
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages cr2.fit2cloud.com/1panel/maxkb
|
||||
|
||||
# Windows 机器
|
||||
docker run -d --name=maxkb --restart=always -p 8080:8080 -v C:/maxkb:/var/lib/postgresql/data -v C:/python-packages:/opt/maxkb/app/sandbox/python-packages cr2.fit2cloud.com/1panel/maxkb
|
||||
|
||||
# 用户名: admin
|
||||
# 密码: MaxKB@123..
|
||||
```bash
|
||||
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages 1panel/maxkb
|
||||
```
|
||||
|
||||
- 你也可以通过 [1Panel 应用商店](https://apps.fit2cloud.com/1panel) 快速部署 MaxKB;
|
||||
- 如果是内网环境,推荐使用 [离线安装包](https://community.fit2cloud.com/#/products/maxkb/downloads) 进行安装部署;
|
||||
- MaxKB 产品版本分为社区版和专业版,详情请参见:[MaxKB 产品版本对比](https://maxkb.cn/pricing.html);
|
||||
- 如果您需要向团队介绍 MaxKB,可以使用这个 [官方 PPT 材料](https://maxkb.cn/download/introduce-maxkb_202411.pdf)。
|
||||
Access MaxKB web interface at `http://your_server_ip:8080` with default admin credentials:
|
||||
|
||||
如你有更多问题,可以查看使用手册,或者通过论坛与我们交流。
|
||||
- username: admin
|
||||
- password: MaxKB@123..
|
||||
|
||||
- [使用手册](https://maxkb.cn/docs/)
|
||||
- [论坛求助](https://bbs.fit2cloud.com/c/mk/11)
|
||||
- 技术交流群
|
||||
中国用户如遇到 Docker 镜像 Pull 失败问题,请参照该 [离线安装文档](https://maxkb.cn/docs/installation/offline_installtion/) 进行安装。
|
||||
|
||||
<image height="150px" width="150px" src="https://github.com/1Panel-dev/MaxKB/assets/52996290/a083d214-02be-4178-a1db-4f428124153a"/>
|
||||
|
||||
## 案例展示
|
||||
|
||||
MaxKB 自发布以来,日均安装下载超过 1000 次,被广泛应用于智能客服、企业内部知识库、学术研究与教育等场景。
|
||||
|
||||
- [华莱士智能客服](https://ai.cnhls.com/ui/chat/1fc0f6a9b5a6fb27)
|
||||
- [JumpServer 小助手](https://maxkb.fit2cloud.com/ui/chat/b4e27a6e72d349a3)
|
||||
- [信用深圳](https://www.szcredit.org.cn/#/index)
|
||||
- [重庆交通大学教务在线](http://jwc.anyquestion.cn/ui/chat/b75496390f7d935d)
|
||||
|
||||
## UI 展示
|
||||
## Screenshots
|
||||
|
||||
<table style="border-collapse: collapse; border: 1px solid black;">
|
||||
<tr>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/d87395fa-a8d7-401c-82bf-c6e475d10ae9" alt="MaxKB Demo1" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/47c35ee4-3a3b-4bd4-9f4f-ee20788b2b9a" alt="MaxKB Demo2" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/overview.png" alt="MaxKB Demo1" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/screenshot-models.png" alt="MaxKB Demo2" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/user-attachments/assets/9a1043cb-fa62-4f71-b9a3-0b46fa59a70e" alt="MaxKB Demo3" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/user-attachments/assets/3407ce9a-779c-4eb4-858e-9441a2ddc664" alt="MaxKB Demo4" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/screenshot-knowledge.png" alt="MaxKB Demo3" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/screenshot-function.png" alt="MaxKB Demo4" /></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## 技术栈
|
||||
## Technical stack
|
||||
|
||||
- 前端:[Vue.js](https://cn.vuejs.org/)
|
||||
- 后端:[Python / Django](https://www.djangoproject.com/)
|
||||
- LangChain:[LangChain](https://www.langchain.com/)
|
||||
- 向量数据库:[PostgreSQL / pgvector](https://www.postgresql.org/)
|
||||
- 大模型:各种本地私有或者公共大模型
|
||||
- Frontend:[Vue.js](https://vuejs.org/)
|
||||
- Backend:[Python / Django](https://www.djangoproject.com/)
|
||||
- LLM Framework:[LangChain](https://www.langchain.com/)
|
||||
- Database:[PostgreSQL + pgvector](https://www.postgresql.org/)
|
||||
|
||||
## 飞致云的其他明星项目
|
||||
## Feature Comparison
|
||||
|
||||
- [1Panel](https://github.com/1panel-dev/1panel/) - 现代化、开源的 Linux 服务器运维管理面板
|
||||
- [JumpServer](https://github.com/jumpserver/jumpserver/) - 广受欢迎的开源堡垒机
|
||||
- [DataEase](https://github.com/dataease/dataease/) - 人人可用的开源数据可视化分析工具
|
||||
- [MeterSphere](https://github.com/metersphere/metersphere/) - 新一代的开源持续测试工具
|
||||
- [Halo](https://github.com/halo-dev/halo/) - 强大易用的开源建站工具
|
||||
<table style="width: 100%;">
|
||||
<tr>
|
||||
<th align="center">Feature</th>
|
||||
<th align="center">LangChain</th>
|
||||
<th align="center">Dify.AI</th>
|
||||
<th align="center">Flowise</th>
|
||||
<th align="center">MaxKB <br>(Built upon LangChain)</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">Supported LLMs</td>
|
||||
<td align="center">Rich Variety</td>
|
||||
<td align="center">Rich Variety</td>
|
||||
<td align="center">Rich Variety</td>
|
||||
<td align="center">Rich Variety</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">RAG Engine</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">Agent</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">✅</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">Workflow</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">Observability</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">✅</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">SSO/Access control</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">✅ (Pro)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">On-premise Deployment</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#1Panel-dev/MaxKB&Date)
|
||||
|
||||
## License
|
||||
|
||||
Copyright (c) 2014-2024 飞致云 FIT2CLOUD, All rights reserved.
|
||||
|
||||
Licensed under The GNU General Public License version 3 (GPLv3) (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
|
||||
|
||||
<https://www.gnu.org/licenses/gpl-3.0.html>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,89 @@
|
|||
<p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
|
||||
<h3 align="center">强大易用的企业级智能体平台</h3>
|
||||
<p align="center">
|
||||
<a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: auto;" /></a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="README_EN.md"><img src="https://img.shields.io/badge/English_README-blue" alt="English README"></a>
|
||||
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
|
||||
<a href="https://github.com/1Panel-dev/maxkb/releases/latest"><img src="https://img.shields.io/github/v/release/1Panel-dev/maxkb" alt="Latest release"></a>
|
||||
<a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?style=flat-square" alt="Stars"></a>
|
||||
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
|
||||
<a href="https://gitee.com/fit2cloud-feizhiyun/MaxKB"><img src="https://gitee.com/fit2cloud-feizhiyun/MaxKB/badge/star.svg?theme=gvp" alt="Gitee Stars"></a>
|
||||
<a href="https://gitcode.com/feizhiyun/MaxKB"><img src="https://gitcode.com/feizhiyun/MaxKB/star/badge.svg" alt="GitCode Stars"></a>
|
||||
</p>
|
||||
<hr/>
|
||||
|
||||
MaxKB = Max Knowledge Brain,是一款强大易用的企业级智能体平台,支持 RAG 检索增强生成、工作流编排、MCP 工具调用能力。MaxKB 支持对接各种主流大语言模型,广泛应用于智能客服、企业内部知识库问答、员工助手、学术研究与教育等场景。
|
||||
|
||||
- **RAG 检索增强生成**:高效搭建本地 AI 知识库,支持直接上传文档 / 自动爬取在线文档,支持文本自动拆分、向量化,有效减少大模型幻觉,提升问答效果;
|
||||
- **灵活编排**:内置强大的工作流引擎、函数库和 MCP 工具调用能力,支持编排 AI 工作过程,满足复杂业务场景下的需求;
|
||||
- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统,让已有系统快速拥有智能问答能力,提高用户满意度;
|
||||
- **模型中立**:支持对接各种大模型,包括本地私有大模型(DeepSeek R1 / Llama 3 / Qwen 2 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等)和国外公共大模型(OpenAI / Claude / Gemini 等)。
|
||||
|
||||
MaxKB 三分钟视频介绍:https://www.bilibili.com/video/BV18JypYeEkj/
|
||||
|
||||
## 快速开始
|
||||
|
||||
```
|
||||
# Linux 机器
|
||||
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb
|
||||
|
||||
# Windows 机器
|
||||
docker run -d --name=maxkb --restart=always -p 8080:8080 -v C:/maxkb:/var/lib/postgresql/data -v C:/python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb
|
||||
|
||||
# 用户名: admin
|
||||
# 密码: MaxKB@123..
|
||||
```
|
||||
|
||||
- 你也可以通过 [1Panel 应用商店](https://apps.fit2cloud.com/1panel) 快速部署 MaxKB;
|
||||
- 如果是内网环境,推荐使用 [离线安装包](https://community.fit2cloud.com/#/products/maxkb/downloads) 进行安装部署;
|
||||
- MaxKB 产品版本分为社区版和专业版,详情请参见:[MaxKB 产品版本对比](https://maxkb.cn/pricing.html);
|
||||
- 如果您需要向团队介绍 MaxKB,可以使用这个 [官方 PPT 材料](https://maxkb.cn/download/introduce-maxkb_202503.pdf)。
|
||||
|
||||
如你有更多问题,可以查看使用手册,或者通过论坛与我们交流。
|
||||
|
||||
- [案例展示](USE-CASES.md)
|
||||
- [使用手册](https://maxkb.cn/docs/)
|
||||
- [论坛求助](https://bbs.fit2cloud.com/c/mk/11)
|
||||
- 技术交流群
|
||||
|
||||
<image height="150px" width="150px" src="https://github.com/1Panel-dev/MaxKB/assets/52996290/a083d214-02be-4178-a1db-4f428124153a"/>
|
||||
|
||||
## UI 展示
|
||||
|
||||
<table style="border-collapse: collapse; border: 1px solid black;">
|
||||
<tr>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/d87395fa-a8d7-401c-82bf-c6e475d10ae9" alt="MaxKB Demo1" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/47c35ee4-3a3b-4bd4-9f4f-ee20788b2b9a" alt="MaxKB Demo2" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/user-attachments/assets/9a1043cb-fa62-4f71-b9a3-0b46fa59a70e" alt="MaxKB Demo3" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/user-attachments/assets/3407ce9a-779c-4eb4-858e-9441a2ddc664" alt="MaxKB Demo4" /></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## 技术栈
|
||||
|
||||
- 前端:[Vue.js](https://cn.vuejs.org/)
|
||||
- 后端:[Python / Django](https://www.djangoproject.com/)
|
||||
- LangChain:[LangChain](https://www.langchain.com/)
|
||||
- 向量数据库:[PostgreSQL / pgvector](https://www.postgresql.org/)
|
||||
|
||||
## 飞致云的其他明星项目
|
||||
|
||||
- [1Panel](https://github.com/1panel-dev/1panel/) - 现代化、开源的 Linux 服务器运维管理面板
|
||||
- [JumpServer](https://github.com/jumpserver/jumpserver/) - 广受欢迎的开源堡垒机
|
||||
- [DataEase](https://github.com/dataease/dataease/) - 人人可用的开源数据可视化分析工具
|
||||
- [MeterSphere](https://github.com/metersphere/metersphere/) - 新一代的开源持续测试工具
|
||||
- [Halo](https://github.com/halo-dev/halo/) - 强大易用的开源建站工具
|
||||
|
||||
## License
|
||||
|
||||
Copyright (c) 2014-2025 飞致云 FIT2CLOUD, All rights reserved.
|
||||
|
||||
Licensed under The GNU General Public License version 3 (GPLv3) (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
|
||||
|
||||
<https://www.gnu.org/licenses/gpl-3.0.html>
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
78
README_EN.md
78
README_EN.md
|
|
@ -1,78 +0,0 @@
|
|||
<p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
|
||||
<h3 align="center">Knowledge base, question answering system, based on LLM large language models</h3>
|
||||
<p align="center"><a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a></p>
|
||||
<p align="center">
|
||||
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
|
||||
<a href="https://app.codacy.com/gh/1Panel-dev/maxkb?utm_source=github.com&utm_medium=referral&utm_content=1Panel-dev/maxkb&utm_campaign=Badge_Grade_Dashboard"><img src="https://app.codacy.com/project/badge/Grade/da67574fd82b473992781d1386b937ef" alt="Codacy"></a>
|
||||
<a href="https://github.com/1Panel-dev/maxkb/releases/latest"><img src="https://img.shields.io/github/v/release/1Panel-dev/maxkb" alt="Latest release"></a>
|
||||
<a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?color=%231890FF&style=flat-square" alt="Stars"></a>
|
||||
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
|
||||
</p>
|
||||
<hr/>
|
||||
|
||||
MaxKB = Max Knowledge Base,It is an open source knowledge base question and answer system based on the LLM large language model. It is widely used in enterprise internal knowledge bases, customer services, academic research and education and other scenarios.
|
||||
|
||||
- **Out-of-the-box**: Supports direct uploading of documents, automatic crawling of online documents, automatic text splitting, vectorization, RAG (retrieval enhancement generation), and a good interactive experience in intelligent question and answer;
|
||||
- **Model neutral**: Supports docking with various large language models, including local private large models (Llama 3/Qwen 2, etc.), domestic public large models (Tongyi Qianwen/Zhipu AI/Baidu Qianfan/Kimi/DeepSeek, etc.) and foreign public models Large models (OpenAI / Azure OpenAI / Gemini, etc.);
|
||||
- **Flexible Orchestration**: Built-in powerful workflow engine supports the orchestration of AI work processes to meet the needs of complex business scenarios;
|
||||
- **Seamless Embedding**: Supports rapid embedding into third-party business systems with zero coding, allowing existing systems to quickly have intelligent question and answer capabilities and improve user satisfaction
|
||||
## Quick start
|
||||
|
||||
```
|
||||
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages cr2.fit2cloud.com/1panel/maxkb
|
||||
|
||||
# username: admin
|
||||
# pass: MaxKB@123..
|
||||
```
|
||||
|
||||
- You can also quickly deploy MaxKB + Ollama + Llama 3 through [1Panel App Store](https://apps.fit2cloud.com/1panel). A knowledge base question and answer system based on a local large model can be launched within 30 minutes and embedded into In third-party business systems;
|
||||
- If it is an intranet environment, it is recommended to use [offline installation package](https://community.fit2cloud.com/#/products/maxkb/downloads) for installation and deployment;
|
||||
- You can also experience it online: [DataEase Assistant](https://dataease.io/docs/v2/), which is an intelligent question and answer system based on MaxKB and has been embedded in DataEase products and online documents.;
|
||||
- MaxKB's product version is divided into community version and professional version. For details, please see: [MaxKB product version comparison](https://maxkb.cn/pricing.html).
|
||||
|
||||
If you have more questions, you can check the user manual or communicate with us through the forum. If you need to build a technical blog or knowledge base, it is recommended to use [Halo open source website building tool](https://github.com/halo-dev/halo/). You can experience Feizhiyun’s official [Technical Blog](https://blog.fit2cloud.com/) and [Knowledge Base](https://kb.fit2cloud.com) cases.
|
||||
- [Docs](https://maxkb.cn/docs/)
|
||||
- [Demo Vid](https://www.bilibili.com/video/BV1BE421M7YM/)
|
||||
- [Forum](https://bbs.fit2cloud.com/c/mk/11)
|
||||
- Technical exchange group
|
||||
|
||||
<image height="150px" width="150px" src="https://github.com/1Panel-dev/MaxKB/assets/52996290/a083d214-02be-4178-a1db-4f428124153a"/>
|
||||
|
||||
## UI Screenshots
|
||||
|
||||
<table style="border-collapse: collapse; border: 1px solid black;">
|
||||
<tr>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/d87395fa-a8d7-401c-82bf-c6e475d10ae9" alt="MaxKB Demo1" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/47c35ee4-3a3b-4bd4-9f4f-ee20788b2b9a" alt="MaxKB Demo2" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/1c0c5e32-6194-47f9-bc32-487996349d9c" alt="MaxKB Demo3" /></td>
|
||||
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/f32f5fe9-a769-488c-ae0e-783bc2b89b3e" alt="MaxKB Demo4" /></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## Stack Used
|
||||
|
||||
- Frontend:[Vue.js](https://cn.vuejs.org/)
|
||||
- Backend:[Python / Django](https://www.djangoproject.com/)
|
||||
- LangChain:[LangChain](https://www.langchain.com/)
|
||||
- Vector DB:[PostgreSQL / pgvector](https://www.postgresql.org/)
|
||||
- Large models: various local private or public large models
|
||||
|
||||
## Other Projects From Feizhiyun
|
||||
|
||||
- [1Panel](https://github.com/1panel-dev/1panel/) - Modern, open source Linux server operation and maintenance management panel
|
||||
- [JumpServer](https://github.com/jumpserver/jumpserver/) - Popular open source bastion host
|
||||
- [DataEase](https://github.com/dataease/dataease/) - Open source data visualization analysis tools available to everyone
|
||||
- [MeterSphere](https://github.com/metersphere/metersphere/) - New generation of open-source test tools
|
||||
- [Halo](https://github.com/halo-dev/halo/) - Powerful and easy-to-use open source website building tool
|
||||
|
||||
## License
|
||||
|
||||
Copyright (c) 2014-2024 Feizhiyun FIT2CLOUD, All rights reserved.
|
||||
|
||||
Licensed under The GNU General Public License version 3 (GPLv3) (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
|
||||
|
||||
<https://www.gnu.org/licenses/gpl-3.0.html>
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
<h3 align="center">MaxKB 应用案例,持续更新中...</h3>
|
||||
|
||||
------------------------------
|
||||
|
||||
- [MaxKB 应用案例:中国农业大学-小鹉哥](https://mp.weixin.qq.com/s/4g_gySMBQZCJ9OZ-yBkmvw)
|
||||
- [MaxKB 应用案例:东北财经大学-小银杏](https://mp.weixin.qq.com/s/3BoxkY7EMomMmmvFYxvDIA)
|
||||
- [MaxKB 应用案例:中铁水务](https://mp.weixin.qq.com/s/voNAddbK2CJOrJJs1ewZ8g)
|
||||
- [MaxKB 应用案例:解放军总医院](https://mp.weixin.qq.com/s/ETrZC-vrA4Aap0eF-15EeQ)
|
||||
- [MaxKB 应用案例:无锡市数据局](https://mp.weixin.qq.com/s/enfUFLevvL_La74PQ0kIXw)
|
||||
- [MaxKB 应用案例:中核西仪研究院-西仪睿答](https://mp.weixin.qq.com/s/CbKr4mev8qahKLAtV6Dxdg)
|
||||
- [MaxKB 应用案例:南京中医药大学](https://mp.weixin.qq.com/s/WUmAKYbZjp3272HIecpRFA)
|
||||
- [MaxKB 应用案例:西北电力设计院-AI数字助理Memex](https://mp.weixin.qq.com/s/ezHFdB7C7AVL9MTtDwYGSA)
|
||||
- [MaxKB 应用案例:西安国际医院中心医院-国医小助](https://mp.weixin.qq.com/s/DSOUvwrQrxbqQxKBilTCFQ)
|
||||
- [MaxKB 应用案例:华莱士智能AI客服助手上线啦!](https://www.bilibili.com/video/BV1hQtVeXEBL)
|
||||
- [MaxKB 应用案例:把医疗行业知识转化为知识库问答助手!](https://www.bilibili.com/video/BV157wme9EgB)
|
||||
- [MaxKB 应用案例:会展AI智能客服体验](https://www.bilibili.com/video/BV1J7BqY6EKA)
|
||||
- [MaxKB 应用案例:孩子要上幼儿园了,AI 智能助手择校好帮手](https://www.bilibili.com/video/BV1wKrhYvEer)
|
||||
- [MaxKB 应用案例:产品使用指南AI助手,新手小白也能轻松搞定!](https://www.bilibili.com/video/BV1Yz6gYtEqX)
|
||||
- [MaxKB 应用案例:生物医药AI客服智能体验!](https://www.bilibili.com/video/BV13JzvYsE3e)
|
||||
- [MaxKB 应用案例:高校行政管理AI小助手](https://www.bilibili.com/video/BV1yvBMYvEdy)
|
||||
- [MaxKB 应用案例:岳阳市人民医院-OA小助手](https://mp.weixin.qq.com/s/O94Qo3UH-MiUtDdWCVg8sQ)
|
||||
- [MaxKB 应用案例:常熟市第一人民医院](https://mp.weixin.qq.com/s/s5XXGTR3_MUo41NbJ8WzZQ)
|
||||
- [MaxKB 应用案例:华北水利水电大学](https://mp.weixin.qq.com/s/PoOFAcMCr9qJdvSj8c08qg)
|
||||
- [MaxKB 应用案例:唐山海事局-“小海”AI语音助手](https://news.qq.com/rain/a/20250223A030BE00)
|
||||
- [MaxKB 应用案例:湖南汉寿政务](http://hsds.hsdj.gov.cn:19999/ui/chat/a2c976736739aadc)
|
||||
- [MaxKB 应用案例:广州市妇女儿童医疗中心-AI医疗数据分类分级小助手](https://mp.weixin.qq.com/s/YHUMkUOAaUomBV8bswpK3g)
|
||||
- [MaxKB 应用案例:苏州热工研究院有限公司-维修大纲评估质量自查AI小助手](https://mp.weixin.qq.com/s/Ts5FQdnv7Tu9Jp7bvofCVA)
|
||||
- [MaxKB 应用案例:国核自仪系统工程有限公司-NuCON AI帮](https://mp.weixin.qq.com/s/HNPc7u5xVfGLJr8IQz3vjQ)
|
||||
- [MaxKB 应用案例:深圳通开启Deep Seek智能应用新篇章](https://mp.weixin.qq.com/s/SILN0GSescH9LyeQqYP0VQ)
|
||||
- [MaxKB 应用案例:南通智慧出行领跑长三角!首款接入DeepSeek的"畅行南通"APP上线AI新场景](https://mp.weixin.qq.com/s/WEC9UQ6msY0VS8LhTZh-Ew)
|
||||
- [MaxKB 应用案例:中船动力人工智能"智慧动力云助手"及首批数字员工正式上线](https://mp.weixin.qq.com/s/OGcEkjh9DzGO1Tkc9nr7qg)
|
||||
- [MaxKB 应用案例:AI+矿山:DeepSeek助力绿色智慧矿山智慧“升级”](https://mp.weixin.qq.com/s/SZstxTvVoLZg0ECbZbfpIA)
|
||||
- [MaxKB 应用案例:DeepSeek落地弘盛铜业:国产大模型点亮"黑灯工厂"新引擎](https://mp.weixin.qq.com/s/Eczdx574MS5RMF7WfHN7_A)
|
||||
- [MaxKB 应用案例:拥抱智能时代!中国五矿以 “AI+”赋能企业发展](https://mp.weixin.qq.com/s/D5vBtlX2E81pWE3_2OgWSw)
|
||||
- [MaxKB 应用案例:DeepSeek赋能中冶武勘AI智能体](https://mp.weixin.qq.com/s/8m0vxGcWXNdZazziQrLyxg)
|
||||
- [MaxKB 应用案例:重磅!陕西广电网络“秦岭云”平台实现DeepSeek本地化部署](https://mp.weixin.qq.com/s/ZKmEU_wWShK1YDomKJHQeA)
|
||||
- [MaxKB 应用案例:粤海集团完成DeepSeek私有化部署,助力集团智能化管理](https://mp.weixin.qq.com/s/2JbVp0-kr9Hfp-0whH4cvg)
|
||||
- [MaxKB 应用案例:建筑材料工业信息中心完成DeepSeek本地化部署,推动行业数智化转型新发展](https://mp.weixin.qq.com/s/HThGSnND3qDF8ySEqiM4jw)
|
||||
- [MaxKB 应用案例:一起DeepSeek!福建设计以AI大模型开启新篇章](https://mp.weixin.qq.com/s/m67e-H7iQBg3d24NM82UjA)
|
||||
|
|
@ -9,6 +9,7 @@
|
|||
from abc import abstractmethod
|
||||
from typing import Type, List
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from langchain.chat_models.base import BaseChatModel
|
||||
from langchain.schema import BaseMessage
|
||||
from rest_framework import serializers
|
||||
|
|
@ -23,7 +24,7 @@ from common.util.field_message import ErrMessage
|
|||
class ModelField(serializers.Field):
|
||||
def to_internal_value(self, data):
|
||||
if not isinstance(data, BaseChatModel):
|
||||
self.fail('模型类型错误', value=data)
|
||||
self.fail(_('Model type error'), value=data)
|
||||
return data
|
||||
|
||||
def to_representation(self, value):
|
||||
|
|
@ -33,7 +34,7 @@ class ModelField(serializers.Field):
|
|||
class MessageField(serializers.Field):
|
||||
def to_internal_value(self, data):
|
||||
if not isinstance(data, BaseMessage):
|
||||
self.fail('message类型错误', value=data)
|
||||
self.fail(_('Message type error'), value=data)
|
||||
return data
|
||||
|
||||
def to_representation(self, value):
|
||||
|
|
@ -52,37 +53,42 @@ class IChatStep(IBaseChatPipelineStep):
|
|||
class InstanceSerializer(serializers.Serializer):
|
||||
# 对话列表
|
||||
message_list = serializers.ListField(required=True, child=MessageField(required=True),
|
||||
error_messages=ErrMessage.list("对话列表"))
|
||||
model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid("模型id"))
|
||||
error_messages=ErrMessage.list(_("Conversation list")))
|
||||
model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid(_("Model id")))
|
||||
# 段落列表
|
||||
paragraph_list = serializers.ListField(error_messages=ErrMessage.list("段落列表"))
|
||||
paragraph_list = serializers.ListField(error_messages=ErrMessage.list(_("Paragraph List")))
|
||||
# 对话id
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
# 用户问题
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.uuid("用户问题"))
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("User Questions")))
|
||||
# 后置处理器
|
||||
post_response_handler = InstanceField(model_type=PostResponseHandler,
|
||||
error_messages=ErrMessage.base("用户问题"))
|
||||
error_messages=ErrMessage.base(_("Post-processor")))
|
||||
# 补全问题
|
||||
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.base("补全问题"))
|
||||
padding_problem_text = serializers.CharField(required=False,
|
||||
error_messages=ErrMessage.base(_("Completion Question")))
|
||||
# 是否使用流的形式输出
|
||||
stream = serializers.BooleanField(required=False, error_messages=ErrMessage.base("流式输出"))
|
||||
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端id"))
|
||||
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端类型"))
|
||||
stream = serializers.BooleanField(required=False, error_messages=ErrMessage.base(_("Streaming Output")))
|
||||
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id")))
|
||||
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type")))
|
||||
# 未查询到引用分段
|
||||
no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base("无引用分段设置"))
|
||||
no_references_setting = NoReferencesSetting(required=True,
|
||||
error_messages=ErrMessage.base(_("No reference segment settings")))
|
||||
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
model_setting = serializers.DictField(required=True, allow_null=True,
|
||||
error_messages=ErrMessage.dict(_("Model settings")))
|
||||
|
||||
model_params_setting = serializers.DictField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.dict("模型参数设置"))
|
||||
error_messages=ErrMessage.dict(_("Model parameter settings")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
message_list: List = self.initial_data.get('message_list')
|
||||
for message in message_list:
|
||||
if not isinstance(message, BaseMessage):
|
||||
raise Exception("message 类型错误")
|
||||
raise Exception(_("message type error"))
|
||||
|
||||
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
|
||||
return self.InstanceSerializer
|
||||
|
|
@ -100,5 +106,5 @@ class IChatStep(IBaseChatPipelineStep):
|
|||
paragraph_list=None,
|
||||
manage: PipelineManage = None,
|
||||
padding_problem_text: str = None, stream: bool = True, client_id=None, client_type=None,
|
||||
no_references_setting=None, model_params_setting=None, **kwargs):
|
||||
no_references_setting=None, model_params_setting=None, model_setting=None, **kwargs):
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ from typing import List
|
|||
|
||||
from django.db.models import QuerySet
|
||||
from django.http import StreamingHttpResponse
|
||||
from django.utils.translation import gettext as _
|
||||
from langchain.chat_models.base import BaseChatModel
|
||||
from langchain.schema import BaseMessage
|
||||
from langchain.schema.messages import HumanMessage, AIMessage
|
||||
|
|
@ -23,14 +24,17 @@ from rest_framework import status
|
|||
from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
|
||||
from application.chat_pipeline.pipeline_manage import PipelineManage
|
||||
from application.chat_pipeline.step.chat_step.i_chat_step import IChatStep, PostResponseHandler
|
||||
from application.flow.tools import Reasoning
|
||||
from application.models.api_key_model import ApplicationPublicAccessClient
|
||||
from common.constants.authentication_type import AuthenticationType
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
|
||||
|
||||
def add_access_num(client_id=None, client_type=None):
|
||||
if client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value:
|
||||
application_public_access_client = QuerySet(ApplicationPublicAccessClient).filter(id=client_id).first()
|
||||
def add_access_num(client_id=None, client_type=None, application_id=None):
|
||||
if client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value and application_id is not None:
|
||||
application_public_access_client = (QuerySet(ApplicationPublicAccessClient).filter(client_id=client_id,
|
||||
application_id=application_id)
|
||||
.first())
|
||||
if application_public_access_client is not None:
|
||||
application_public_access_client.access_num = application_public_access_client.access_num + 1
|
||||
application_public_access_client.intraday_access_num = application_public_access_client.intraday_access_num + 1
|
||||
|
|
@ -60,17 +64,54 @@ def event_content(response,
|
|||
problem_text: str,
|
||||
padding_problem_text: str = None,
|
||||
client_id=None, client_type=None,
|
||||
is_ai_chat: bool = None):
|
||||
is_ai_chat: bool = None,
|
||||
model_setting=None):
|
||||
if model_setting is None:
|
||||
model_setting = {}
|
||||
reasoning_content_enable = model_setting.get('reasoning_content_enable', False)
|
||||
reasoning_content_start = model_setting.get('reasoning_content_start', '<think>')
|
||||
reasoning_content_end = model_setting.get('reasoning_content_end', '</think>')
|
||||
reasoning = Reasoning(reasoning_content_start,
|
||||
reasoning_content_end)
|
||||
all_text = ''
|
||||
reasoning_content = ''
|
||||
try:
|
||||
response_reasoning_content = False
|
||||
for chunk in response:
|
||||
all_text += chunk.content
|
||||
reasoning_chunk = reasoning.get_reasoning_content(chunk)
|
||||
content_chunk = reasoning_chunk.get('content')
|
||||
if 'reasoning_content' in chunk.additional_kwargs:
|
||||
response_reasoning_content = True
|
||||
reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '')
|
||||
else:
|
||||
reasoning_content_chunk = reasoning_chunk.get('reasoning_content')
|
||||
all_text += content_chunk
|
||||
if reasoning_content_chunk is None:
|
||||
reasoning_content_chunk = ''
|
||||
reasoning_content += reasoning_content_chunk
|
||||
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
|
||||
[], chunk.content,
|
||||
[], content_chunk,
|
||||
False,
|
||||
0, 0, {'node_is_end': False,
|
||||
'view_type': 'many_view',
|
||||
'node_type': 'ai-chat-node'})
|
||||
'node_type': 'ai-chat-node',
|
||||
'real_node_id': 'ai-chat-node',
|
||||
'reasoning_content': reasoning_content_chunk if reasoning_content_enable else ''})
|
||||
reasoning_chunk = reasoning.get_end_reasoning_content()
|
||||
all_text += reasoning_chunk.get('content')
|
||||
reasoning_content_chunk = ""
|
||||
if not response_reasoning_content:
|
||||
reasoning_content_chunk = reasoning_chunk.get(
|
||||
'reasoning_content')
|
||||
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
|
||||
[], reasoning_chunk.get('content'),
|
||||
False,
|
||||
0, 0, {'node_is_end': False,
|
||||
'view_type': 'many_view',
|
||||
'node_type': 'ai-chat-node',
|
||||
'real_node_id': 'ai-chat-node',
|
||||
'reasoning_content'
|
||||
: reasoning_content_chunk if reasoning_content_enable else ''})
|
||||
# 获取token
|
||||
if is_ai_chat:
|
||||
try:
|
||||
|
|
@ -83,26 +124,34 @@ def event_content(response,
|
|||
request_token = 0
|
||||
response_token = 0
|
||||
write_context(step, manage, request_token, response_token, all_text)
|
||||
asker = manage.context.get('form_data', {}).get('asker', None)
|
||||
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
|
||||
all_text, manage, step, padding_problem_text, client_id)
|
||||
all_text, manage, step, padding_problem_text, client_id,
|
||||
reasoning_content=reasoning_content if reasoning_content_enable else ''
|
||||
, asker=asker)
|
||||
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
|
||||
[], '', True,
|
||||
request_token, response_token,
|
||||
{'node_is_end': True, 'view_type': 'many_view',
|
||||
'node_type': 'ai-chat-node'})
|
||||
add_access_num(client_id, client_type)
|
||||
add_access_num(client_id, client_type, manage.context.get('application_id'))
|
||||
except Exception as e:
|
||||
logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
|
||||
all_text = '异常' + str(e)
|
||||
all_text = 'Exception:' + str(e)
|
||||
write_context(step, manage, 0, 0, all_text)
|
||||
asker = manage.context.get('form_data', {}).get('asker', None)
|
||||
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
|
||||
all_text, manage, step, padding_problem_text, client_id)
|
||||
add_access_num(client_id, client_type)
|
||||
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), all_text,
|
||||
'ai-chat-node',
|
||||
[], True, 0, 0,
|
||||
{'node_is_end': True, 'view_type': 'many_view',
|
||||
'node_type': 'ai-chat-node'})
|
||||
all_text, manage, step, padding_problem_text, client_id, reasoning_content='',
|
||||
asker=asker)
|
||||
add_access_num(client_id, client_type, manage.context.get('application_id'))
|
||||
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
|
||||
[], all_text,
|
||||
False,
|
||||
0, 0, {'node_is_end': False,
|
||||
'view_type': 'many_view',
|
||||
'node_type': 'ai-chat-node',
|
||||
'real_node_id': 'ai-chat-node',
|
||||
'reasoning_content': ''})
|
||||
|
||||
|
||||
class BaseChatStep(IChatStep):
|
||||
|
|
@ -119,17 +168,20 @@ class BaseChatStep(IChatStep):
|
|||
client_id=None, client_type=None,
|
||||
no_references_setting=None,
|
||||
model_params_setting=None,
|
||||
model_setting=None,
|
||||
**kwargs):
|
||||
chat_model = get_model_instance_by_model_user_id(model_id, user_id,
|
||||
**model_params_setting) if model_id is not None else None
|
||||
if stream:
|
||||
return self.execute_stream(message_list, chat_id, problem_text, post_response_handler, chat_model,
|
||||
paragraph_list,
|
||||
manage, padding_problem_text, client_id, client_type, no_references_setting)
|
||||
manage, padding_problem_text, client_id, client_type, no_references_setting,
|
||||
model_setting)
|
||||
else:
|
||||
return self.execute_block(message_list, chat_id, problem_text, post_response_handler, chat_model,
|
||||
paragraph_list,
|
||||
manage, padding_problem_text, client_id, client_type, no_references_setting)
|
||||
manage, padding_problem_text, client_id, client_type, no_references_setting,
|
||||
model_setting)
|
||||
|
||||
def get_details(self, manage, **kwargs):
|
||||
return {
|
||||
|
|
@ -170,7 +222,8 @@ class BaseChatStep(IChatStep):
|
|||
return iter(
|
||||
[AIMessageChunk(content=no_references_setting.get('value').replace('{question}', problem_text))]), False
|
||||
if chat_model is None:
|
||||
return iter([AIMessageChunk('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。')]), False
|
||||
return iter([AIMessageChunk(
|
||||
_('Sorry, the AI model is not configured. Please go to the application to set up the AI model first.'))]), False
|
||||
else:
|
||||
return chat_model.stream(message_list), True
|
||||
|
||||
|
|
@ -183,14 +236,15 @@ class BaseChatStep(IChatStep):
|
|||
manage: PipelineManage = None,
|
||||
padding_problem_text: str = None,
|
||||
client_id=None, client_type=None,
|
||||
no_references_setting=None):
|
||||
no_references_setting=None,
|
||||
model_setting=None):
|
||||
chat_result, is_ai_chat = self.get_stream_result(message_list, chat_model, paragraph_list,
|
||||
no_references_setting, problem_text)
|
||||
chat_record_id = uuid.uuid1()
|
||||
r = StreamingHttpResponse(
|
||||
streaming_content=event_content(chat_result, chat_id, chat_record_id, paragraph_list,
|
||||
post_response_handler, manage, self, chat_model, message_list, problem_text,
|
||||
padding_problem_text, client_id, client_type, is_ai_chat),
|
||||
padding_problem_text, client_id, client_type, is_ai_chat, model_setting),
|
||||
content_type='text/event-stream;charset=utf-8')
|
||||
|
||||
r['Cache-Control'] = 'no-cache'
|
||||
|
|
@ -204,17 +258,17 @@ class BaseChatStep(IChatStep):
|
|||
problem_text=None):
|
||||
if paragraph_list is None:
|
||||
paragraph_list = []
|
||||
|
||||
directly_return_chunk_list = [AIMessage(content=paragraph.content)
|
||||
for paragraph in paragraph_list if
|
||||
paragraph.hit_handling_method == 'directly_return']
|
||||
directly_return_chunk_list = [AIMessageChunk(content=paragraph.content)
|
||||
for paragraph in paragraph_list if (
|
||||
paragraph.hit_handling_method == 'directly_return' and paragraph.similarity >= paragraph.directly_return_similarity)]
|
||||
if directly_return_chunk_list is not None and len(directly_return_chunk_list) > 0:
|
||||
return directly_return_chunk_list[0], False
|
||||
elif len(paragraph_list) == 0 and no_references_setting.get(
|
||||
'status') == 'designated_answer':
|
||||
return AIMessage(no_references_setting.get('value').replace('{question}', problem_text)), False
|
||||
if chat_model is None:
|
||||
return AIMessage('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。'), False
|
||||
return AIMessage(
|
||||
_('Sorry, the AI model is not configured. Please go to the application to set up the AI model first.')), False
|
||||
else:
|
||||
return chat_model.invoke(message_list), True
|
||||
|
||||
|
|
@ -226,7 +280,13 @@ class BaseChatStep(IChatStep):
|
|||
paragraph_list=None,
|
||||
manage: PipelineManage = None,
|
||||
padding_problem_text: str = None,
|
||||
client_id=None, client_type=None, no_references_setting=None):
|
||||
client_id=None, client_type=None, no_references_setting=None,
|
||||
model_setting=None):
|
||||
reasoning_content_enable = model_setting.get('reasoning_content_enable', False)
|
||||
reasoning_content_start = model_setting.get('reasoning_content_start', '<think>')
|
||||
reasoning_content_end = model_setting.get('reasoning_content_end', '</think>')
|
||||
reasoning = Reasoning(reasoning_content_start,
|
||||
reasoning_content_end)
|
||||
chat_record_id = uuid.uuid1()
|
||||
# 调用模型
|
||||
try:
|
||||
|
|
@ -239,17 +299,36 @@ class BaseChatStep(IChatStep):
|
|||
request_token = 0
|
||||
response_token = 0
|
||||
write_context(self, manage, request_token, response_token, chat_result.content)
|
||||
reasoning_result = reasoning.get_reasoning_content(chat_result)
|
||||
reasoning_result_end = reasoning.get_end_reasoning_content()
|
||||
content = reasoning_result.get('content') + reasoning_result_end.get('content')
|
||||
if 'reasoning_content' in chat_result.response_metadata:
|
||||
reasoning_content = chat_result.response_metadata.get('reasoning_content', '')
|
||||
else:
|
||||
reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get(
|
||||
'reasoning_content')
|
||||
asker = manage.context.get('form_data', {}).get('asker', None)
|
||||
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
|
||||
chat_result.content, manage, self, padding_problem_text, client_id)
|
||||
add_access_num(client_id, client_type)
|
||||
content, manage, self, padding_problem_text, client_id,
|
||||
reasoning_content=reasoning_content if reasoning_content_enable else '',
|
||||
asker=asker)
|
||||
add_access_num(client_id, client_type, manage.context.get('application_id'))
|
||||
return manage.get_base_to_response().to_block_response(str(chat_id), str(chat_record_id),
|
||||
chat_result.content, True,
|
||||
request_token, response_token)
|
||||
content, True,
|
||||
request_token, response_token,
|
||||
{
|
||||
'reasoning_content': reasoning_content if reasoning_content_enable else '',
|
||||
'answer_list': [{
|
||||
'content': content,
|
||||
'reasoning_content': reasoning_content if reasoning_content_enable else ''
|
||||
}]})
|
||||
except Exception as e:
|
||||
all_text = '异常' + str(e)
|
||||
all_text = 'Exception:' + str(e)
|
||||
write_context(self, manage, 0, 0, all_text)
|
||||
asker = manage.context.get('form_data', {}).get('asker', None)
|
||||
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
|
||||
all_text, manage, self, padding_problem_text, client_id)
|
||||
add_access_num(client_id, client_type)
|
||||
all_text, manage, self, padding_problem_text, client_id, reasoning_content='',
|
||||
asker=asker)
|
||||
add_access_num(client_id, client_type, manage.context.get('application_id'))
|
||||
return manage.get_base_to_response().to_block_response(str(chat_id), str(chat_record_id), all_text, True, 0,
|
||||
0, _status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@
|
|||
from abc import abstractmethod
|
||||
from typing import Type, List
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from langchain.schema import BaseMessage
|
||||
from rest_framework import serializers
|
||||
|
||||
|
|
@ -23,26 +24,26 @@ from common.util.field_message import ErrMessage
|
|||
class IGenerateHumanMessageStep(IBaseChatPipelineStep):
|
||||
class InstanceSerializer(serializers.Serializer):
|
||||
# 问题
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char("问题"))
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("question")))
|
||||
# 段落列表
|
||||
paragraph_list = serializers.ListField(child=InstanceField(model_type=ParagraphPipelineModel, required=True),
|
||||
error_messages=ErrMessage.list("段落列表"))
|
||||
error_messages=ErrMessage.list(_("Paragraph List")))
|
||||
# 历史对答
|
||||
history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True),
|
||||
error_messages=ErrMessage.list("历史对答"))
|
||||
error_messages=ErrMessage.list(_("History Questions")))
|
||||
# 多轮对话数量
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量"))
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations")))
|
||||
# 最大携带知识库段落长度
|
||||
max_paragraph_char_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(
|
||||
"最大携带知识库段落长度"))
|
||||
_("Maximum length of the knowledge base paragraph")))
|
||||
# 模板
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
|
||||
system = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("系统提示词(角色)"))
|
||||
error_messages=ErrMessage.char(_("System prompt words (role)")))
|
||||
# 补齐问题
|
||||
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("补齐问题"))
|
||||
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Completion problem")))
|
||||
# 未查询到引用分段
|
||||
no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base("无引用分段设置"))
|
||||
no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base(_("No reference segment settings")))
|
||||
|
||||
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
|
||||
return self.InstanceSerializer
|
||||
|
|
|
|||
|
|
@ -9,12 +9,11 @@
|
|||
from abc import abstractmethod
|
||||
from typing import Type, List
|
||||
|
||||
from langchain.chat_models.base import BaseChatModel
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep
|
||||
from application.chat_pipeline.pipeline_manage import PipelineManage
|
||||
from application.chat_pipeline.step.chat_step.i_chat_step import ModelField
|
||||
from application.models import ChatRecord
|
||||
from common.field.common import InstanceField
|
||||
from common.util.field_message import ErrMessage
|
||||
|
|
@ -23,15 +22,16 @@ from common.util.field_message import ErrMessage
|
|||
class IResetProblemStep(IBaseChatPipelineStep):
|
||||
class InstanceSerializer(serializers.Serializer):
|
||||
# 问题文本
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.float("问题文本"))
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.float(_("question")))
|
||||
# 历史对答
|
||||
history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True),
|
||||
error_messages=ErrMessage.list("历史对答"))
|
||||
error_messages=ErrMessage.list(_("History Questions")))
|
||||
# 大语言模型
|
||||
model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid("模型id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid(_("Model id")))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
|
||||
error_messages=ErrMessage.char("问题补全提示词"))
|
||||
error_messages=ErrMessage.char(
|
||||
_("Question completion prompt")))
|
||||
|
||||
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
|
||||
return self.InstanceSerializer
|
||||
|
|
@ -45,8 +45,9 @@ class IResetProblemStep(IBaseChatPipelineStep):
|
|||
manage.context['problem_text'] = source_problem_text
|
||||
manage.context['padding_problem_text'] = padding_problem
|
||||
# 累加tokens
|
||||
manage.context['message_tokens'] = manage.context['message_tokens'] + self.context.get('message_tokens')
|
||||
manage.context['answer_tokens'] = manage.context['answer_tokens'] + self.context.get('answer_tokens')
|
||||
manage.context['message_tokens'] = manage.context.get('message_tokens', 0) + self.context.get('message_tokens',
|
||||
0)
|
||||
manage.context['answer_tokens'] = manage.context.get('answer_tokens', 0) + self.context.get('answer_tokens', 0)
|
||||
|
||||
@abstractmethod
|
||||
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None,
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@
|
|||
"""
|
||||
from typing import List
|
||||
|
||||
from django.utils.translation import gettext as _
|
||||
from langchain.schema import HumanMessage
|
||||
|
||||
from application.chat_pipeline.step.reset_problem_step.i_reset_problem_step import IResetProblemStep
|
||||
|
|
@ -15,8 +16,8 @@ from application.models import ChatRecord
|
|||
from common.util.split_model import flat_map
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
|
||||
prompt = (
|
||||
'()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中')
|
||||
prompt = _(
|
||||
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")
|
||||
|
||||
|
||||
class BaseResetProblemStep(IResetProblemStep):
|
||||
|
|
@ -25,6 +26,8 @@ class BaseResetProblemStep(IResetProblemStep):
|
|||
user_id=None,
|
||||
**kwargs) -> str:
|
||||
chat_model = get_model_instance_by_model_user_id(model_id, user_id) if model_id is not None else None
|
||||
if chat_model is None:
|
||||
return problem_text
|
||||
start_index = len(history_chat_record) - 3
|
||||
history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
|
||||
for index in
|
||||
|
|
@ -57,8 +60,8 @@ class BaseResetProblemStep(IResetProblemStep):
|
|||
'step_type': 'problem_padding',
|
||||
'run_time': self.context['run_time'],
|
||||
'model_id': str(manage.context['model_id']) if 'model_id' in manage.context else None,
|
||||
'message_tokens': self.context['message_tokens'],
|
||||
'answer_tokens': self.context['answer_tokens'],
|
||||
'message_tokens': self.context.get('message_tokens', 0),
|
||||
'answer_tokens': self.context.get('answer_tokens', 0),
|
||||
'cost': 0,
|
||||
'padding_problem_text': self.context.get('padding_problem_text'),
|
||||
'problem_text': self.context.get("step_args").get('problem_text'),
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ from abc import abstractmethod
|
|||
from typing import List, Type
|
||||
|
||||
from django.core import validators
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep, ParagraphPipelineModel
|
||||
|
|
@ -21,29 +22,30 @@ from common.util.field_message import ErrMessage
|
|||
class ISearchDatasetStep(IBaseChatPipelineStep):
|
||||
class InstanceSerializer(serializers.Serializer):
|
||||
# 原始问题文本
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char("问题"))
|
||||
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("question")))
|
||||
# 系统补全问题文本
|
||||
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("系统补全问题文本"))
|
||||
padding_problem_text = serializers.CharField(required=False,
|
||||
error_messages=ErrMessage.char(_("System completes question text")))
|
||||
# 需要查询的数据集id列表
|
||||
dataset_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
|
||||
error_messages=ErrMessage.list("数据集id列表"))
|
||||
error_messages=ErrMessage.list(_("Dataset id list")))
|
||||
# 需要排除的文档id
|
||||
exclude_document_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
|
||||
error_messages=ErrMessage.list("排除的文档id列表"))
|
||||
error_messages=ErrMessage.list(_("List of document ids to exclude")))
|
||||
# 需要排除向量id
|
||||
exclude_paragraph_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
|
||||
error_messages=ErrMessage.list("排除向量id列表"))
|
||||
error_messages=ErrMessage.list(_("List of exclusion vector ids")))
|
||||
# 需要查询的条数
|
||||
top_n = serializers.IntegerField(required=True,
|
||||
error_messages=ErrMessage.integer("引用分段数"))
|
||||
error_messages=ErrMessage.integer(_("Reference segment number")))
|
||||
# 相似度 0-1之间
|
||||
similarity = serializers.FloatField(required=True, max_value=1, min_value=0,
|
||||
error_messages=ErrMessage.float("引用分段数"))
|
||||
error_messages=ErrMessage.float(_("Similarity")))
|
||||
search_mode = serializers.CharField(required=True, validators=[
|
||||
validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"),
|
||||
message="类型只支持register|reset_password", code=500)
|
||||
], error_messages=ErrMessage.char("检索模式"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
message=_("The type only supports embedding|keywords|blend"), code=500)
|
||||
], error_messages=ErrMessage.char(_("Retrieval Mode")))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
def get_step_serializer(self, manage: PipelineManage) -> Type[InstanceSerializer]:
|
||||
return self.InstanceSerializer
|
||||
|
|
|
|||
|
|
@ -10,6 +10,8 @@ import os
|
|||
from typing import List, Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework.utils.formatting import lazy_format
|
||||
|
||||
from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
|
||||
from application.chat_pipeline.step.search_dataset_step.i_search_dataset_step import ISearchDatasetStep
|
||||
|
|
@ -26,18 +28,19 @@ from smartdoc.conf import PROJECT_DIR
|
|||
def get_model_by_id(_id, user_id):
|
||||
model = QuerySet(Model).filter(id=_id).first()
|
||||
if model is None:
|
||||
raise Exception("模型不存在")
|
||||
raise Exception(_("Model does not exist"))
|
||||
if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id):
|
||||
raise Exception(f"无权限使用此模型:{model.name}")
|
||||
message = lazy_format(_('No permission to use this model {model_name}'), model_name=model.name)
|
||||
raise Exception(message)
|
||||
return model
|
||||
|
||||
|
||||
def get_embedding_id(dataset_id_list):
|
||||
dataset_list = QuerySet(DataSet).filter(id__in=dataset_id_list)
|
||||
if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1:
|
||||
raise Exception("关联知识库的向量模型不一致,无法召回分段。")
|
||||
raise Exception(_("The vector model of the associated knowledge base is inconsistent and the segmentation cannot be recalled."))
|
||||
if len(dataset_list) == 0:
|
||||
raise Exception("知识库设置错误,请重新设置知识库")
|
||||
raise Exception(_("The knowledge base setting is wrong, please reset the knowledge base"))
|
||||
return dataset_list[0].embedding_mode_id
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,44 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎
|
||||
@file: common.py
|
||||
@date:2024/12/11 17:57
|
||||
@desc:
|
||||
"""
|
||||
|
||||
|
||||
class Answer:
|
||||
def __init__(self, content, view_type, runtime_node_id, chat_record_id, child_node, real_node_id,
|
||||
reasoning_content):
|
||||
self.view_type = view_type
|
||||
self.content = content
|
||||
self.reasoning_content = reasoning_content
|
||||
self.runtime_node_id = runtime_node_id
|
||||
self.chat_record_id = chat_record_id
|
||||
self.child_node = child_node
|
||||
self.real_node_id = real_node_id
|
||||
|
||||
def to_dict(self):
|
||||
return {'view_type': self.view_type, 'content': self.content, 'runtime_node_id': self.runtime_node_id,
|
||||
'chat_record_id': self.chat_record_id,
|
||||
'child_node': self.child_node,
|
||||
'reasoning_content': self.reasoning_content,
|
||||
'real_node_id': self.real_node_id}
|
||||
|
||||
|
||||
class NodeChunk:
|
||||
def __init__(self):
|
||||
self.status = 0
|
||||
self.chunk_list = []
|
||||
|
||||
def add_chunk(self, chunk):
|
||||
self.chunk_list.append(chunk)
|
||||
|
||||
def end(self, chunk=None):
|
||||
if chunk is not None:
|
||||
self.add_chunk(chunk)
|
||||
self.status = 200
|
||||
|
||||
def is_end(self):
|
||||
return self.status == 200
|
||||
|
|
@ -0,0 +1,451 @@
|
|||
{
|
||||
"nodes": [
|
||||
{
|
||||
"id": "base-node",
|
||||
"type": "base-node",
|
||||
"x": 360,
|
||||
"y": 2810,
|
||||
"properties": {
|
||||
"config": {
|
||||
|
||||
},
|
||||
"height": 825.6,
|
||||
"stepName": "Base",
|
||||
"node_data": {
|
||||
"desc": "",
|
||||
"name": "maxkbapplication",
|
||||
"prologue": "Hello, I am the MaxKB assistant. You can ask me about MaxKB usage issues.\n-What are the main functions of MaxKB?\n-What major language models does MaxKB support?\n-What document types does MaxKB support?"
|
||||
},
|
||||
"input_field_list": [
|
||||
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "start-node",
|
||||
"type": "start-node",
|
||||
"x": 430,
|
||||
"y": 3660,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "用户问题",
|
||||
"value": "question"
|
||||
}
|
||||
],
|
||||
"globalFields": [
|
||||
{
|
||||
"label": "当前时间",
|
||||
"value": "time"
|
||||
}
|
||||
]
|
||||
},
|
||||
"fields": [
|
||||
{
|
||||
"label": "用户问题",
|
||||
"value": "question"
|
||||
}
|
||||
],
|
||||
"height": 276,
|
||||
"stepName": "Start",
|
||||
"globalFields": [
|
||||
{
|
||||
"label": "当前时间",
|
||||
"value": "time"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"type": "search-dataset-node",
|
||||
"x": 840,
|
||||
"y": 3210,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "检索结果的分段列表",
|
||||
"value": "paragraph_list"
|
||||
},
|
||||
{
|
||||
"label": "满足直接回答的分段列表",
|
||||
"value": "is_hit_handling_method_list"
|
||||
},
|
||||
{
|
||||
"label": "检索结果",
|
||||
"value": "data"
|
||||
},
|
||||
{
|
||||
"label": "满足直接回答的分段内容",
|
||||
"value": "directly_return"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 794,
|
||||
"stepName": "Knowledge Search",
|
||||
"node_data": {
|
||||
"dataset_id_list": [
|
||||
|
||||
],
|
||||
"dataset_setting": {
|
||||
"top_n": 3,
|
||||
"similarity": 0.6,
|
||||
"search_mode": "embedding",
|
||||
"max_paragraph_char_number": 5000
|
||||
},
|
||||
"question_reference_address": [
|
||||
"start-node",
|
||||
"question"
|
||||
],
|
||||
"source_dataset_id_list": [
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"type": "condition-node",
|
||||
"x": 1490,
|
||||
"y": 3210,
|
||||
"properties": {
|
||||
"width": 600,
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "分支名称",
|
||||
"value": "branch_name"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 543.675,
|
||||
"stepName": "Conditional Branch",
|
||||
"node_data": {
|
||||
"branch": [
|
||||
{
|
||||
"id": "1009",
|
||||
"type": "IF",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
{
|
||||
"field": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"is_hit_handling_method_list"
|
||||
],
|
||||
"value": "1",
|
||||
"compare": "len_ge"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "4908",
|
||||
"type": "ELSE IF 1",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
{
|
||||
"field": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"paragraph_list"
|
||||
],
|
||||
"value": "1",
|
||||
"compare": "len_ge"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "161",
|
||||
"type": "ELSE",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"branch_condition_list": [
|
||||
{
|
||||
"index": 0,
|
||||
"height": 121.225,
|
||||
"id": "1009"
|
||||
},
|
||||
{
|
||||
"index": 1,
|
||||
"height": 121.225,
|
||||
"id": "4908"
|
||||
},
|
||||
{
|
||||
"index": 2,
|
||||
"height": 44,
|
||||
"id": "161"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
|
||||
"type": "reply-node",
|
||||
"x": 2170,
|
||||
"y": 2480,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 378,
|
||||
"stepName": "Specified Reply",
|
||||
"node_data": {
|
||||
"fields": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"directly_return"
|
||||
],
|
||||
"content": "",
|
||||
"reply_type": "referencing",
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
|
||||
"type": "ai-chat-node",
|
||||
"x": 2160,
|
||||
"y": 3200,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "AI 回答内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 763,
|
||||
"stepName": "AI Chat",
|
||||
"node_data": {
|
||||
"prompt": "Known information:\n{{Knowledge Search.data}}\nQuestion:\n{{Start.question}}",
|
||||
"system": "",
|
||||
"model_id": "",
|
||||
"dialogue_number": 0,
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
|
||||
"type": "ai-chat-node",
|
||||
"x": 2160,
|
||||
"y": 3970,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "AI 回答内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 763,
|
||||
"stepName": "AI Chat1",
|
||||
"node_data": {
|
||||
"prompt": "{{Start.question}}",
|
||||
"system": "",
|
||||
"model_id": "",
|
||||
"dialogue_number": 0,
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "start-node",
|
||||
"targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"startPoint": {
|
||||
"x": 590,
|
||||
"y": 3660
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 680,
|
||||
"y": 3210
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 590,
|
||||
"y": 3660
|
||||
},
|
||||
{
|
||||
"x": 700,
|
||||
"y": 3660
|
||||
},
|
||||
{
|
||||
"x": 570,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 680,
|
||||
"y": 3210
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "start-node_right",
|
||||
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left"
|
||||
},
|
||||
{
|
||||
"id": "35cb86dd-f328-429e-a973-12fd7218b696",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"startPoint": {
|
||||
"x": 1000,
|
||||
"y": 3210
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 1200,
|
||||
"y": 3210
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1000,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1110,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1090,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1200,
|
||||
"y": 3210
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right",
|
||||
"targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left"
|
||||
},
|
||||
{
|
||||
"id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3073.775
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2010,
|
||||
"y": 2480
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3073.775
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3073.775
|
||||
},
|
||||
{
|
||||
"x": 1900,
|
||||
"y": 2480
|
||||
},
|
||||
{
|
||||
"x": 2010,
|
||||
"y": 2480
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right",
|
||||
"targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left"
|
||||
},
|
||||
{
|
||||
"id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3203
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2000,
|
||||
"y": 3200
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3203
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3203
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3200
|
||||
},
|
||||
{
|
||||
"x": 2000,
|
||||
"y": 3200
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right",
|
||||
"targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left"
|
||||
},
|
||||
{
|
||||
"id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2000,
|
||||
"y": 3970
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3970
|
||||
},
|
||||
{
|
||||
"x": 2000,
|
||||
"y": 3970
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right",
|
||||
"targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,451 @@
|
|||
{
|
||||
"nodes": [
|
||||
{
|
||||
"id": "base-node",
|
||||
"type": "base-node",
|
||||
"x": 360,
|
||||
"y": 2810,
|
||||
"properties": {
|
||||
"config": {
|
||||
|
||||
},
|
||||
"height": 825.6,
|
||||
"stepName": "基本信息",
|
||||
"node_data": {
|
||||
"desc": "",
|
||||
"name": "maxkbapplication",
|
||||
"prologue": "您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?"
|
||||
},
|
||||
"input_field_list": [
|
||||
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "start-node",
|
||||
"type": "start-node",
|
||||
"x": 430,
|
||||
"y": 3660,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "用户问题",
|
||||
"value": "question"
|
||||
}
|
||||
],
|
||||
"globalFields": [
|
||||
{
|
||||
"label": "当前时间",
|
||||
"value": "time"
|
||||
}
|
||||
]
|
||||
},
|
||||
"fields": [
|
||||
{
|
||||
"label": "用户问题",
|
||||
"value": "question"
|
||||
}
|
||||
],
|
||||
"height": 276,
|
||||
"stepName": "开始",
|
||||
"globalFields": [
|
||||
{
|
||||
"label": "当前时间",
|
||||
"value": "time"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"type": "search-dataset-node",
|
||||
"x": 840,
|
||||
"y": 3210,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "检索结果的分段列表",
|
||||
"value": "paragraph_list"
|
||||
},
|
||||
{
|
||||
"label": "满足直接回答的分段列表",
|
||||
"value": "is_hit_handling_method_list"
|
||||
},
|
||||
{
|
||||
"label": "检索结果",
|
||||
"value": "data"
|
||||
},
|
||||
{
|
||||
"label": "满足直接回答的分段内容",
|
||||
"value": "directly_return"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 794,
|
||||
"stepName": "知识库检索",
|
||||
"node_data": {
|
||||
"dataset_id_list": [
|
||||
|
||||
],
|
||||
"dataset_setting": {
|
||||
"top_n": 3,
|
||||
"similarity": 0.6,
|
||||
"search_mode": "embedding",
|
||||
"max_paragraph_char_number": 5000
|
||||
},
|
||||
"question_reference_address": [
|
||||
"start-node",
|
||||
"question"
|
||||
],
|
||||
"source_dataset_id_list": [
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"type": "condition-node",
|
||||
"x": 1490,
|
||||
"y": 3210,
|
||||
"properties": {
|
||||
"width": 600,
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "分支名称",
|
||||
"value": "branch_name"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 543.675,
|
||||
"stepName": "判断器",
|
||||
"node_data": {
|
||||
"branch": [
|
||||
{
|
||||
"id": "1009",
|
||||
"type": "IF",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
{
|
||||
"field": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"is_hit_handling_method_list"
|
||||
],
|
||||
"value": "1",
|
||||
"compare": "len_ge"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "4908",
|
||||
"type": "ELSE IF 1",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
{
|
||||
"field": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"paragraph_list"
|
||||
],
|
||||
"value": "1",
|
||||
"compare": "len_ge"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "161",
|
||||
"type": "ELSE",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"branch_condition_list": [
|
||||
{
|
||||
"index": 0,
|
||||
"height": 121.225,
|
||||
"id": "1009"
|
||||
},
|
||||
{
|
||||
"index": 1,
|
||||
"height": 121.225,
|
||||
"id": "4908"
|
||||
},
|
||||
{
|
||||
"index": 2,
|
||||
"height": 44,
|
||||
"id": "161"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
|
||||
"type": "reply-node",
|
||||
"x": 2170,
|
||||
"y": 2480,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 378,
|
||||
"stepName": "指定回复",
|
||||
"node_data": {
|
||||
"fields": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"directly_return"
|
||||
],
|
||||
"content": "",
|
||||
"reply_type": "referencing",
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
|
||||
"type": "ai-chat-node",
|
||||
"x": 2160,
|
||||
"y": 3200,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "AI 回答内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 763,
|
||||
"stepName": "AI 对话",
|
||||
"node_data": {
|
||||
"prompt": "已知信息:\n{{知识库检索.data}}\n问题:\n{{开始.question}}",
|
||||
"system": "",
|
||||
"model_id": "",
|
||||
"dialogue_number": 0,
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
|
||||
"type": "ai-chat-node",
|
||||
"x": 2160,
|
||||
"y": 3970,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "AI 回答内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 763,
|
||||
"stepName": "AI 对话1",
|
||||
"node_data": {
|
||||
"prompt": "{{开始.question}}",
|
||||
"system": "",
|
||||
"model_id": "",
|
||||
"dialogue_number": 0,
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "start-node",
|
||||
"targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"startPoint": {
|
||||
"x": 590,
|
||||
"y": 3660
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 680,
|
||||
"y": 3210
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 590,
|
||||
"y": 3660
|
||||
},
|
||||
{
|
||||
"x": 700,
|
||||
"y": 3660
|
||||
},
|
||||
{
|
||||
"x": 570,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 680,
|
||||
"y": 3210
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "start-node_right",
|
||||
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left"
|
||||
},
|
||||
{
|
||||
"id": "35cb86dd-f328-429e-a973-12fd7218b696",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"startPoint": {
|
||||
"x": 1000,
|
||||
"y": 3210
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 1200,
|
||||
"y": 3210
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1000,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1110,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1090,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1200,
|
||||
"y": 3210
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right",
|
||||
"targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left"
|
||||
},
|
||||
{
|
||||
"id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3073.775
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2010,
|
||||
"y": 2480
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3073.775
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3073.775
|
||||
},
|
||||
{
|
||||
"x": 1900,
|
||||
"y": 2480
|
||||
},
|
||||
{
|
||||
"x": 2010,
|
||||
"y": 2480
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right",
|
||||
"targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left"
|
||||
},
|
||||
{
|
||||
"id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3203
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2000,
|
||||
"y": 3200
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3203
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3203
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3200
|
||||
},
|
||||
{
|
||||
"x": 2000,
|
||||
"y": 3200
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right",
|
||||
"targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left"
|
||||
},
|
||||
{
|
||||
"id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2000,
|
||||
"y": 3970
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3970
|
||||
},
|
||||
{
|
||||
"x": 2000,
|
||||
"y": 3970
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right",
|
||||
"targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,451 @@
|
|||
{
|
||||
"nodes": [
|
||||
{
|
||||
"id": "base-node",
|
||||
"type": "base-node",
|
||||
"x": 360,
|
||||
"y": 2810,
|
||||
"properties": {
|
||||
"config": {
|
||||
|
||||
},
|
||||
"height": 825.6,
|
||||
"stepName": "基本資訊",
|
||||
"node_data": {
|
||||
"desc": "",
|
||||
"name": "maxkbapplication",
|
||||
"prologue": "您好,我是MaxKB小助手,您可以向我提出MaxKB使用問題。\n- MaxKB主要功能有什麼?\n- MaxKB支持哪些大語言模型?\n- MaxKB支持哪些文檔類型?"
|
||||
},
|
||||
"input_field_list": [
|
||||
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "start-node",
|
||||
"type": "start-node",
|
||||
"x": 430,
|
||||
"y": 3660,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "用户问题",
|
||||
"value": "question"
|
||||
}
|
||||
],
|
||||
"globalFields": [
|
||||
{
|
||||
"label": "当前时间",
|
||||
"value": "time"
|
||||
}
|
||||
]
|
||||
},
|
||||
"fields": [
|
||||
{
|
||||
"label": "用户问题",
|
||||
"value": "question"
|
||||
}
|
||||
],
|
||||
"height": 276,
|
||||
"stepName": "開始",
|
||||
"globalFields": [
|
||||
{
|
||||
"label": "当前时间",
|
||||
"value": "time"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"type": "search-dataset-node",
|
||||
"x": 840,
|
||||
"y": 3210,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "检索结果的分段列表",
|
||||
"value": "paragraph_list"
|
||||
},
|
||||
{
|
||||
"label": "满足直接回答的分段列表",
|
||||
"value": "is_hit_handling_method_list"
|
||||
},
|
||||
{
|
||||
"label": "检索结果",
|
||||
"value": "data"
|
||||
},
|
||||
{
|
||||
"label": "满足直接回答的分段内容",
|
||||
"value": "directly_return"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 794,
|
||||
"stepName": "知識庫檢索",
|
||||
"node_data": {
|
||||
"dataset_id_list": [
|
||||
|
||||
],
|
||||
"dataset_setting": {
|
||||
"top_n": 3,
|
||||
"similarity": 0.6,
|
||||
"search_mode": "embedding",
|
||||
"max_paragraph_char_number": 5000
|
||||
},
|
||||
"question_reference_address": [
|
||||
"start-node",
|
||||
"question"
|
||||
],
|
||||
"source_dataset_id_list": [
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"type": "condition-node",
|
||||
"x": 1490,
|
||||
"y": 3210,
|
||||
"properties": {
|
||||
"width": 600,
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "分支名称",
|
||||
"value": "branch_name"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 543.675,
|
||||
"stepName": "判斷器",
|
||||
"node_data": {
|
||||
"branch": [
|
||||
{
|
||||
"id": "1009",
|
||||
"type": "IF",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
{
|
||||
"field": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"is_hit_handling_method_list"
|
||||
],
|
||||
"value": "1",
|
||||
"compare": "len_ge"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "4908",
|
||||
"type": "ELSE IF 1",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
{
|
||||
"field": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"paragraph_list"
|
||||
],
|
||||
"value": "1",
|
||||
"compare": "len_ge"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "161",
|
||||
"type": "ELSE",
|
||||
"condition": "and",
|
||||
"conditions": [
|
||||
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"branch_condition_list": [
|
||||
{
|
||||
"index": 0,
|
||||
"height": 121.225,
|
||||
"id": "1009"
|
||||
},
|
||||
{
|
||||
"index": 1,
|
||||
"height": 121.225,
|
||||
"id": "4908"
|
||||
},
|
||||
{
|
||||
"index": 2,
|
||||
"height": 44,
|
||||
"id": "161"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
|
||||
"type": "reply-node",
|
||||
"x": 2170,
|
||||
"y": 2480,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 378,
|
||||
"stepName": "指定回覆",
|
||||
"node_data": {
|
||||
"fields": [
|
||||
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"directly_return"
|
||||
],
|
||||
"content": "",
|
||||
"reply_type": "referencing",
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
|
||||
"type": "ai-chat-node",
|
||||
"x": 2160,
|
||||
"y": 3200,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "AI 回答内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 763,
|
||||
"stepName": "AI 對話",
|
||||
"node_data": {
|
||||
"prompt": "已知資訊:\n{{知識庫檢索.data}}\n問題:\n{{開始.question}}",
|
||||
"system": "",
|
||||
"model_id": "",
|
||||
"dialogue_number": 0,
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
|
||||
"type": "ai-chat-node",
|
||||
"x": 2160,
|
||||
"y": 3970,
|
||||
"properties": {
|
||||
"config": {
|
||||
"fields": [
|
||||
{
|
||||
"label": "AI 回答内容",
|
||||
"value": "answer"
|
||||
}
|
||||
]
|
||||
},
|
||||
"height": 763,
|
||||
"stepName": "AI 對話1",
|
||||
"node_data": {
|
||||
"prompt": "{{開始.question}}",
|
||||
"system": "",
|
||||
"model_id": "",
|
||||
"dialogue_number": 0,
|
||||
"is_result": true
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "start-node",
|
||||
"targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"startPoint": {
|
||||
"x": 590,
|
||||
"y": 3660
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 680,
|
||||
"y": 3210
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 590,
|
||||
"y": 3660
|
||||
},
|
||||
{
|
||||
"x": 700,
|
||||
"y": 3660
|
||||
},
|
||||
{
|
||||
"x": 570,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 680,
|
||||
"y": 3210
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "start-node_right",
|
||||
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left"
|
||||
},
|
||||
{
|
||||
"id": "35cb86dd-f328-429e-a973-12fd7218b696",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
|
||||
"targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"startPoint": {
|
||||
"x": 1000,
|
||||
"y": 3210
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 1200,
|
||||
"y": 3210
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1000,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1110,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1090,
|
||||
"y": 3210
|
||||
},
|
||||
{
|
||||
"x": 1200,
|
||||
"y": 3210
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right",
|
||||
"targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left"
|
||||
},
|
||||
{
|
||||
"id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3073.775
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2010,
|
||||
"y": 2480
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3073.775
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3073.775
|
||||
},
|
||||
{
|
||||
"x": 1900,
|
||||
"y": 2480
|
||||
},
|
||||
{
|
||||
"x": 2010,
|
||||
"y": 2480
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right",
|
||||
"targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left"
|
||||
},
|
||||
{
|
||||
"id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3203
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2000,
|
||||
"y": 3200
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3203
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3203
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3200
|
||||
},
|
||||
{
|
||||
"x": 2000,
|
||||
"y": 3200
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right",
|
||||
"targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left"
|
||||
},
|
||||
{
|
||||
"id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5",
|
||||
"type": "app-edge",
|
||||
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
|
||||
"targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
|
||||
"startPoint": {
|
||||
"x": 1780,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
"endPoint": {
|
||||
"x": 2000,
|
||||
"y": 3970
|
||||
},
|
||||
"properties": {
|
||||
|
||||
},
|
||||
"pointsList": [
|
||||
{
|
||||
"x": 1780,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3293.6124999999997
|
||||
},
|
||||
{
|
||||
"x": 1890,
|
||||
"y": 3970
|
||||
},
|
||||
{
|
||||
"x": 2000,
|
||||
"y": 3970
|
||||
}
|
||||
],
|
||||
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right",
|
||||
"targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -17,6 +17,7 @@ from django.db.models import QuerySet
|
|||
from rest_framework import serializers
|
||||
from rest_framework.exceptions import ValidationError, ErrorDetail
|
||||
|
||||
from application.flow.common import Answer, NodeChunk
|
||||
from application.models import ChatRecord
|
||||
from application.models.api_key_model import ApplicationPublicAccessClient
|
||||
from common.constants.authentication_type import AuthenticationType
|
||||
|
|
@ -61,7 +62,9 @@ class WorkFlowPostHandler:
|
|||
answer_tokens = sum([row.get('answer_tokens') for row in details.values() if
|
||||
'answer_tokens' in row and row.get('answer_tokens') is not None])
|
||||
answer_text_list = workflow.get_answer_text_list()
|
||||
answer_text = '\n\n'.join(answer['content'] for answer in answer_text_list)
|
||||
answer_text = '\n\n'.join(
|
||||
'\n\n'.join([a.get('content') for a in answer]) for answer in
|
||||
answer_text_list)
|
||||
if workflow.chat_record is not None:
|
||||
chat_record = workflow.chat_record
|
||||
chat_record.answer_text = answer_text
|
||||
|
|
@ -81,12 +84,15 @@ class WorkFlowPostHandler:
|
|||
answer_text_list=answer_text_list,
|
||||
run_time=time.time() - workflow.context['start_time'],
|
||||
index=0)
|
||||
self.chat_info.append_chat_record(chat_record, self.client_id)
|
||||
asker = workflow.context.get('asker', None)
|
||||
self.chat_info.append_chat_record(chat_record, self.client_id, asker)
|
||||
# 重新设置缓存
|
||||
chat_cache.set(chat_id,
|
||||
self.chat_info, timeout=60 * 30)
|
||||
if self.client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value:
|
||||
application_public_access_client = QuerySet(ApplicationPublicAccessClient).filter(id=self.client_id).first()
|
||||
application_public_access_client = (QuerySet(ApplicationPublicAccessClient)
|
||||
.filter(client_id=self.client_id,
|
||||
application_id=self.chat_info.application.id).first())
|
||||
if application_public_access_client is not None:
|
||||
application_public_access_client.access_num = application_public_access_client.access_num + 1
|
||||
application_public_access_client.intraday_access_num = application_public_access_client.intraday_access_num + 1
|
||||
|
|
@ -151,11 +157,13 @@ class INode:
|
|||
def save_context(self, details, workflow_manage):
|
||||
pass
|
||||
|
||||
def get_answer_text(self):
|
||||
def get_answer_list(self) -> List[Answer] | None:
|
||||
if self.answer_text is None:
|
||||
return None
|
||||
return {'content': self.answer_text, 'runtime_node_id': self.runtime_node_id,
|
||||
'chat_record_id': self.workflow_params['chat_record_id']}
|
||||
reasoning_content_enable = self.context.get('model_setting', {}).get('reasoning_content_enable', False)
|
||||
return [
|
||||
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {},
|
||||
self.runtime_node_id, self.context.get('reasoning_content', '') if reasoning_content_enable else '')]
|
||||
|
||||
def __init__(self, node, workflow_params, workflow_manage, up_node_id_list=None,
|
||||
get_node_params=lambda node: node.properties.get('node_data')):
|
||||
|
|
@ -174,6 +182,7 @@ class INode:
|
|||
if up_node_id_list is None:
|
||||
up_node_id_list = []
|
||||
self.up_node_id_list = up_node_id_list
|
||||
self.node_chunk = NodeChunk()
|
||||
self.runtime_node_id = sha1(uuid.NAMESPACE_DNS.bytes + bytes(str(uuid.uuid5(uuid.NAMESPACE_DNS,
|
||||
"".join([*sorted(up_node_id_list),
|
||||
node.id]))),
|
||||
|
|
@ -213,6 +222,7 @@ class INode:
|
|||
|
||||
def get_write_error_context(self, e):
|
||||
self.status = 500
|
||||
self.answer_text = str(e)
|
||||
self.err_message = str(e)
|
||||
self.context['run_time'] = time.time() - self.context['start_time']
|
||||
|
||||
|
|
|
|||
|
|
@ -18,14 +18,21 @@ from .reranker_node import *
|
|||
|
||||
from .document_extract_node import *
|
||||
from .image_understand_step_node import *
|
||||
from .image_generate_step_node import *
|
||||
|
||||
from .search_dataset_node import *
|
||||
from .speech_to_text_step_node import BaseSpeechToTextNode
|
||||
from .start_node import *
|
||||
from .text_to_speech_step_node.impl.base_text_to_speech_node import BaseTextToSpeechNode
|
||||
from .variable_assign_node import BaseVariableAssignNode
|
||||
from .mcp_node import BaseMcpNode
|
||||
|
||||
node_list = [BaseStartStepNode, BaseChatNode, BaseSearchDatasetNode, BaseQuestionNode, BaseConditionNode, BaseReplyNode,
|
||||
node_list = [BaseStartStepNode, BaseChatNode, BaseSearchDatasetNode, BaseQuestionNode,
|
||||
BaseConditionNode, BaseReplyNode,
|
||||
BaseFunctionNodeNode, BaseFunctionLibNodeNode, BaseRerankerNode, BaseApplicationNode,
|
||||
BaseDocumentExtractNode,
|
||||
BaseImageUnderstandNode, BaseFormNode]
|
||||
BaseImageUnderstandNode, BaseFormNode, BaseSpeechToTextNode, BaseTextToSpeechNode,
|
||||
BaseImageGenerateNode, BaseVariableAssignNode, BaseMcpNode]
|
||||
|
||||
|
||||
def get_node(node_type):
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@
|
|||
"""
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
|
@ -15,16 +16,26 @@ from common.util.field_message import ErrMessage
|
|||
|
||||
|
||||
class ChatNodeSerializer(serializers.Serializer):
|
||||
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char("模型id"))
|
||||
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
|
||||
system = serializers.CharField(required=False, allow_blank=True, allow_null=True,
|
||||
error_messages=ErrMessage.char("角色设定"))
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
|
||||
error_messages=ErrMessage.char(_("Role Setting")))
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
|
||||
# 多轮对话数量
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量"))
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(
|
||||
_("Number of multi-round conversations")))
|
||||
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
|
||||
is_result = serializers.BooleanField(required=False,
|
||||
error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
|
||||
model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.integer("模型参数相关设置"))
|
||||
model_params_setting = serializers.DictField(required=False,
|
||||
error_messages=ErrMessage.dict(_("Model parameter settings")))
|
||||
model_setting = serializers.DictField(required=False,
|
||||
error_messages=ErrMessage.dict('Model settings'))
|
||||
dialogue_type = serializers.CharField(required=False, allow_blank=True, allow_null=True,
|
||||
error_messages=ErrMessage.char(_("Context Type")))
|
||||
mcp_enable = serializers.BooleanField(required=False,
|
||||
error_messages=ErrMessage.boolean(_("Whether to enable MCP")))
|
||||
mcp_servers = serializers.JSONField(required=False, error_messages=ErrMessage.list(_("MCP Server")))
|
||||
|
||||
|
||||
class IChatNode(INode):
|
||||
|
|
@ -39,5 +50,9 @@ class IChatNode(INode):
|
|||
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id,
|
||||
chat_record_id,
|
||||
model_params_setting=None,
|
||||
dialogue_type=None,
|
||||
model_setting=None,
|
||||
mcp_enable=False,
|
||||
mcp_servers=None,
|
||||
**kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -6,22 +6,43 @@
|
|||
@date:2024/6/4 14:30
|
||||
@desc:
|
||||
"""
|
||||
import asyncio
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
from functools import reduce
|
||||
from types import AsyncGeneratorType
|
||||
from typing import List, Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from langchain.schema import HumanMessage, SystemMessage
|
||||
from langchain_core.messages import BaseMessage
|
||||
from langchain_core.messages import BaseMessage, AIMessage, AIMessageChunk, ToolMessage
|
||||
from langchain_mcp_adapters.client import MultiServerMCPClient
|
||||
from langgraph.prebuilt import create_react_agent
|
||||
|
||||
from application.flow.i_step_node import NodeResult, INode
|
||||
from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode
|
||||
from application.flow.tools import Reasoning
|
||||
from setting.models import Model
|
||||
from setting.models_provider import get_model_credential
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
|
||||
tool_message_template = """
|
||||
<details>
|
||||
<summary>
|
||||
<strong>Called MCP Tool: <em>%s</em></strong>
|
||||
</summary>
|
||||
|
||||
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
|
||||
```json
|
||||
%s
|
||||
```
|
||||
</details>
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
|
||||
reasoning_content: str):
|
||||
chat_model = node_variable.get('chat_model')
|
||||
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
|
||||
answer_tokens = chat_model.get_num_tokens(answer)
|
||||
|
|
@ -31,6 +52,7 @@ def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wo
|
|||
node.context['history_message'] = node_variable['history_message']
|
||||
node.context['question'] = node_variable['question']
|
||||
node.context['run_time'] = time.time() - node.context['start_time']
|
||||
node.context['reasoning_content'] = reasoning_content
|
||||
if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
|
||||
node.answer_text = answer
|
||||
|
||||
|
|
@ -45,10 +67,73 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
|
|||
"""
|
||||
response = node_variable.get('result')
|
||||
answer = ''
|
||||
reasoning_content = ''
|
||||
model_setting = node.context.get('model_setting',
|
||||
{'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
|
||||
'reasoning_content_start': '<think>'})
|
||||
reasoning = Reasoning(model_setting.get('reasoning_content_start', '<think>'),
|
||||
model_setting.get('reasoning_content_end', '</think>'))
|
||||
response_reasoning_content = False
|
||||
|
||||
for chunk in response:
|
||||
answer += chunk.content
|
||||
yield chunk.content
|
||||
_write_context(node_variable, workflow_variable, node, workflow, answer)
|
||||
reasoning_chunk = reasoning.get_reasoning_content(chunk)
|
||||
content_chunk = reasoning_chunk.get('content')
|
||||
if 'reasoning_content' in chunk.additional_kwargs:
|
||||
response_reasoning_content = True
|
||||
reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '')
|
||||
else:
|
||||
reasoning_content_chunk = reasoning_chunk.get('reasoning_content')
|
||||
answer += content_chunk
|
||||
if reasoning_content_chunk is None:
|
||||
reasoning_content_chunk = ''
|
||||
reasoning_content += reasoning_content_chunk
|
||||
yield {'content': content_chunk,
|
||||
'reasoning_content': reasoning_content_chunk if model_setting.get('reasoning_content_enable',
|
||||
False) else ''}
|
||||
|
||||
reasoning_chunk = reasoning.get_end_reasoning_content()
|
||||
answer += reasoning_chunk.get('content')
|
||||
reasoning_content_chunk = ""
|
||||
if not response_reasoning_content:
|
||||
reasoning_content_chunk = reasoning_chunk.get(
|
||||
'reasoning_content')
|
||||
yield {'content': reasoning_chunk.get('content'),
|
||||
'reasoning_content': reasoning_content_chunk if model_setting.get('reasoning_content_enable',
|
||||
False) else ''}
|
||||
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
|
||||
|
||||
|
||||
async def _yield_mcp_response(chat_model, message_list, mcp_servers):
|
||||
async with MultiServerMCPClient(json.loads(mcp_servers)) as client:
|
||||
agent = create_react_agent(chat_model, client.get_tools())
|
||||
response = agent.astream({"messages": message_list}, stream_mode='messages')
|
||||
async for chunk in response:
|
||||
if isinstance(chunk[0], ToolMessage):
|
||||
content = tool_message_template % (chunk[0].name, chunk[0].content)
|
||||
chunk[0].content = content
|
||||
yield chunk[0]
|
||||
if isinstance(chunk[0], AIMessageChunk):
|
||||
yield chunk[0]
|
||||
|
||||
|
||||
def mcp_response_generator(chat_model, message_list, mcp_servers):
|
||||
loop = asyncio.new_event_loop()
|
||||
try:
|
||||
async_gen = _yield_mcp_response(chat_model, message_list, mcp_servers)
|
||||
while True:
|
||||
try:
|
||||
chunk = loop.run_until_complete(anext_async(async_gen))
|
||||
yield chunk
|
||||
except StopAsyncIteration:
|
||||
break
|
||||
except Exception as e:
|
||||
print(f'exception: {e}')
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
|
||||
async def anext_async(agen):
|
||||
return await agen.__anext__()
|
||||
|
||||
|
||||
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
|
||||
|
|
@ -60,8 +145,18 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
|
|||
@param workflow: 工作流管理器
|
||||
"""
|
||||
response = node_variable.get('result')
|
||||
answer = response.content
|
||||
_write_context(node_variable, workflow_variable, node, workflow, answer)
|
||||
model_setting = node.context.get('model_setting',
|
||||
{'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
|
||||
'reasoning_content_start': '<think>'})
|
||||
reasoning = Reasoning(model_setting.get('reasoning_content_start'), model_setting.get('reasoning_content_end'))
|
||||
reasoning_result = reasoning.get_reasoning_content(response)
|
||||
reasoning_result_end = reasoning.get_end_reasoning_content()
|
||||
content = reasoning_result.get('content') + reasoning_result_end.get('content')
|
||||
if 'reasoning_content' in response.response_metadata:
|
||||
reasoning_content = response.response_metadata.get('reasoning_content', '')
|
||||
else:
|
||||
reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get('reasoning_content')
|
||||
_write_context(node_variable, workflow_variable, node, workflow, content, reasoning_content)
|
||||
|
||||
|
||||
def get_default_model_params_setting(model_id):
|
||||
|
|
@ -72,25 +167,65 @@ def get_default_model_params_setting(model_id):
|
|||
return model_params_setting
|
||||
|
||||
|
||||
def get_node_message(chat_record, runtime_node_id):
|
||||
node_details = chat_record.get_node_details_runtime_node_id(runtime_node_id)
|
||||
if node_details is None:
|
||||
return []
|
||||
return [HumanMessage(node_details.get('question')), AIMessage(node_details.get('answer'))]
|
||||
|
||||
|
||||
def get_workflow_message(chat_record):
|
||||
return [chat_record.get_human_message(), chat_record.get_ai_message()]
|
||||
|
||||
|
||||
def get_message(chat_record, dialogue_type, runtime_node_id):
|
||||
return get_node_message(chat_record, runtime_node_id) if dialogue_type == 'NODE' else get_workflow_message(
|
||||
chat_record)
|
||||
|
||||
|
||||
class BaseChatNode(IChatNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['answer'] = details.get('answer')
|
||||
self.context['question'] = details.get('question')
|
||||
self.answer_text = details.get('answer')
|
||||
self.context['reasoning_content'] = details.get('reasoning_content')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
|
||||
model_params_setting=None,
|
||||
dialogue_type=None,
|
||||
model_setting=None,
|
||||
mcp_enable=False,
|
||||
mcp_servers=None,
|
||||
**kwargs) -> NodeResult:
|
||||
if dialogue_type is None:
|
||||
dialogue_type = 'WORKFLOW'
|
||||
|
||||
if model_params_setting is None:
|
||||
model_params_setting = get_default_model_params_setting(model_id)
|
||||
if model_setting is None:
|
||||
model_setting = {'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
|
||||
'reasoning_content_start': '<think>'}
|
||||
self.context['model_setting'] = model_setting
|
||||
chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
|
||||
**model_params_setting)
|
||||
history_message = self.get_history_message(history_chat_record, dialogue_number)
|
||||
history_message = self.get_history_message(history_chat_record, dialogue_number, dialogue_type,
|
||||
self.runtime_node_id)
|
||||
self.context['history_message'] = history_message
|
||||
question = self.generate_prompt_question(prompt)
|
||||
self.context['question'] = question.content
|
||||
system = self.workflow_manage.generate_prompt(system)
|
||||
self.context['system'] = system
|
||||
message_list = self.generate_message_list(system, prompt, history_message)
|
||||
self.context['message_list'] = message_list
|
||||
|
||||
if mcp_enable and mcp_servers is not None and '"stdio"' not in mcp_servers:
|
||||
r = mcp_response_generator(chat_model, message_list, mcp_servers)
|
||||
return NodeResult(
|
||||
{'result': r, 'chat_model': chat_model, 'message_list': message_list,
|
||||
'history_message': history_message, 'question': question.content}, {},
|
||||
_write_context=write_context_stream)
|
||||
|
||||
if stream:
|
||||
r = chat_model.stream(message_list)
|
||||
return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list,
|
||||
|
|
@ -103,12 +238,15 @@ class BaseChatNode(IChatNode):
|
|||
_write_context=write_context)
|
||||
|
||||
@staticmethod
|
||||
def get_history_message(history_chat_record, dialogue_number):
|
||||
def get_history_message(history_chat_record, dialogue_number, dialogue_type, runtime_node_id):
|
||||
start_index = len(history_chat_record) - dialogue_number
|
||||
history_message = reduce(lambda x, y: [*x, *y], [
|
||||
[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
|
||||
get_message(history_chat_record[index], dialogue_type, runtime_node_id)
|
||||
for index in
|
||||
range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
|
||||
for message in history_message:
|
||||
if isinstance(message.content, str):
|
||||
message.content = re.sub('<form_rander>[\d\D]*?<\/form_rander>', '', message.content)
|
||||
return history_message
|
||||
|
||||
def generate_prompt_question(self, prompt):
|
||||
|
|
@ -135,12 +273,13 @@ class BaseChatNode(IChatNode):
|
|||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': self.context.get('run_time'),
|
||||
'system': self.node_params.get('system'),
|
||||
'system': self.context.get('system'),
|
||||
'history_message': [{'content': message.content, 'role': message.type} for message in
|
||||
(self.context.get('history_message') if self.context.get(
|
||||
'history_message') is not None else [])],
|
||||
'question': self.context.get('question'),
|
||||
'answer': self.context.get('answer'),
|
||||
'reasoning_content': self.context.get('reasoning_content'),
|
||||
'type': self.node.type,
|
||||
'message_tokens': self.context.get('message_tokens'),
|
||||
'answer_tokens': self.context.get('answer_tokens'),
|
||||
|
|
|
|||
|
|
@ -6,16 +6,22 @@ from rest_framework import serializers
|
|||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ApplicationNodeSerializer(serializers.Serializer):
|
||||
application_id = serializers.CharField(required=True, error_messages=ErrMessage.char("应用id"))
|
||||
question_reference_address = serializers.ListField(required=True, error_messages=ErrMessage.list("用户问题"))
|
||||
api_input_field_list = serializers.ListField(required=False, error_messages=ErrMessage.list("api输入字段"))
|
||||
user_input_field_list = serializers.ListField(required=False, error_messages=ErrMessage.uuid("用户输入字段"))
|
||||
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list("图片"))
|
||||
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list("文档"))
|
||||
child_node = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict("子节点"))
|
||||
node_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict("表单数据"))
|
||||
application_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Application ID")))
|
||||
question_reference_address = serializers.ListField(required=True,
|
||||
error_messages=ErrMessage.list(_("User Questions")))
|
||||
api_input_field_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("API Input Fields")))
|
||||
user_input_field_list = serializers.ListField(required=False,
|
||||
error_messages=ErrMessage.uuid(_("User Input Fields")))
|
||||
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
|
||||
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
|
||||
audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
|
||||
child_node = serializers.DictField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.dict(_("Child Nodes")))
|
||||
node_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict(_("Form Data")))
|
||||
|
||||
|
||||
class IApplicationNode(INode):
|
||||
|
|
@ -30,11 +36,16 @@ class IApplicationNode(INode):
|
|||
self.node_params_serializer.data.get('question_reference_address')[1:])
|
||||
kwargs = {}
|
||||
for api_input_field in self.node_params_serializer.data.get('api_input_field_list', []):
|
||||
kwargs[api_input_field['variable']] = self.workflow_manage.get_reference_field(api_input_field['value'][0],
|
||||
api_input_field['value'][1:])
|
||||
value = api_input_field.get('value', [''])[0] if api_input_field.get('value') else ''
|
||||
kwargs[api_input_field['variable']] = self.workflow_manage.get_reference_field(value,
|
||||
api_input_field['value'][
|
||||
1:]) if value != '' else ''
|
||||
|
||||
for user_input_field in self.node_params_serializer.data.get('user_input_field_list', []):
|
||||
kwargs[user_input_field['field']] = self.workflow_manage.get_reference_field(user_input_field['value'][0],
|
||||
user_input_field['value'][1:])
|
||||
value = user_input_field.get('value', [''])[0] if user_input_field.get('value') else ''
|
||||
kwargs[user_input_field['field']] = self.workflow_manage.get_reference_field(value,
|
||||
user_input_field['value'][
|
||||
1:]) if value != '' else ''
|
||||
# 判断是否包含这个属性
|
||||
app_document_list = self.node_params_serializer.data.get('document_list', [])
|
||||
if app_document_list and len(app_document_list) > 0:
|
||||
|
|
@ -43,7 +54,8 @@ class IApplicationNode(INode):
|
|||
app_document_list[1:])
|
||||
for document in app_document_list:
|
||||
if 'file_id' not in document:
|
||||
raise ValueError("参数值错误: 上传的文档中缺少file_id")
|
||||
raise ValueError(
|
||||
_("Parameter value error: The uploaded document lacks file_id, and the document upload fails"))
|
||||
app_image_list = self.node_params_serializer.data.get('image_list', [])
|
||||
if app_image_list and len(app_image_list) > 0:
|
||||
app_image_list = self.workflow_manage.get_reference_field(
|
||||
|
|
@ -51,11 +63,24 @@ class IApplicationNode(INode):
|
|||
app_image_list[1:])
|
||||
for image in app_image_list:
|
||||
if 'file_id' not in image:
|
||||
raise ValueError("参数值错误: 上传的图片中缺少file_id")
|
||||
raise ValueError(
|
||||
_("Parameter value error: The uploaded image lacks file_id, and the image upload fails"))
|
||||
|
||||
app_audio_list = self.node_params_serializer.data.get('audio_list', [])
|
||||
if app_audio_list and len(app_audio_list) > 0:
|
||||
app_audio_list = self.workflow_manage.get_reference_field(
|
||||
app_audio_list[0],
|
||||
app_audio_list[1:])
|
||||
for audio in app_audio_list:
|
||||
if 'file_id' not in audio:
|
||||
raise ValueError(
|
||||
_("Parameter value error: The uploaded audio lacks file_id, and the audio upload fails."))
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
|
||||
app_document_list=app_document_list, app_image_list=app_image_list,
|
||||
app_audio_list=app_audio_list,
|
||||
message=str(question), **kwargs)
|
||||
|
||||
def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
|
||||
app_document_list=None, app_image_list=None, child_node=None, node_data=None, **kwargs) -> NodeResult:
|
||||
app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
|
||||
**kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -1,9 +1,11 @@
|
|||
# coding=utf-8
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
from typing import Dict
|
||||
from typing import Dict, List
|
||||
|
||||
from application.flow.common import Answer
|
||||
from application.flow.i_step_node import NodeResult, INode
|
||||
from application.flow.step_node.application_node.i_application_node import IApplicationNode
|
||||
from application.models import Chat
|
||||
|
|
@ -17,14 +19,17 @@ def _is_interrupt_exec(node, node_variable: Dict, workflow_variable: Dict):
|
|||
return node_variable.get('is_interrupt_exec', False)
|
||||
|
||||
|
||||
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
|
||||
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
|
||||
reasoning_content: str):
|
||||
result = node_variable.get('result')
|
||||
node.context['child_node'] = node_variable.get('child_node')
|
||||
node.context['application_node_dict'] = node_variable.get('application_node_dict')
|
||||
node.context['node_dict'] = node_variable.get('node_dict', {})
|
||||
node.context['is_interrupt_exec'] = node_variable.get('is_interrupt_exec')
|
||||
node.context['message_tokens'] = result.get('usage', {}).get('prompt_tokens', 0)
|
||||
node.context['answer_tokens'] = result.get('usage', {}).get('completion_tokens', 0)
|
||||
node.context['answer'] = answer
|
||||
node.context['result'] = answer
|
||||
node.context['reasoning_content'] = reasoning_content
|
||||
node.context['question'] = node_variable['question']
|
||||
node.context['run_time'] = time.time() - node.context['start_time']
|
||||
if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
|
||||
|
|
@ -41,8 +46,10 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
|
|||
"""
|
||||
response = node_variable.get('result')
|
||||
answer = ''
|
||||
reasoning_content = ''
|
||||
usage = {}
|
||||
node_child_node = {}
|
||||
application_node_dict = node.context.get('application_node_dict', {})
|
||||
is_interrupt_exec = False
|
||||
for chunk in response:
|
||||
# 先把流转成字符串
|
||||
|
|
@ -52,25 +59,47 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
|
|||
runtime_node_id = response_content.get('runtime_node_id', '')
|
||||
chat_record_id = response_content.get('chat_record_id', '')
|
||||
child_node = response_content.get('child_node')
|
||||
view_type = response_content.get('view_type')
|
||||
node_type = response_content.get('node_type')
|
||||
real_node_id = response_content.get('real_node_id')
|
||||
node_is_end = response_content.get('node_is_end', False)
|
||||
_reasoning_content = response_content.get('reasoning_content', '')
|
||||
if node_type == 'form-node':
|
||||
is_interrupt_exec = True
|
||||
answer += content
|
||||
reasoning_content += _reasoning_content
|
||||
node_child_node = {'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
|
||||
'child_node': child_node}
|
||||
|
||||
if real_node_id is not None:
|
||||
application_node = application_node_dict.get(real_node_id, None)
|
||||
if application_node is None:
|
||||
|
||||
application_node_dict[real_node_id] = {'content': content,
|
||||
'runtime_node_id': runtime_node_id,
|
||||
'chat_record_id': chat_record_id,
|
||||
'child_node': child_node,
|
||||
'index': len(application_node_dict),
|
||||
'view_type': view_type,
|
||||
'reasoning_content': _reasoning_content}
|
||||
else:
|
||||
application_node['content'] += content
|
||||
application_node['reasoning_content'] += _reasoning_content
|
||||
|
||||
yield {'content': content,
|
||||
'node_type': node_type,
|
||||
'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
|
||||
'reasoning_content': _reasoning_content,
|
||||
'child_node': child_node,
|
||||
'real_node_id': real_node_id,
|
||||
'node_is_end': node_is_end}
|
||||
'node_is_end': node_is_end,
|
||||
'view_type': view_type}
|
||||
usage = response_content.get('usage', {})
|
||||
node_variable['result'] = {'usage': usage}
|
||||
node_variable['is_interrupt_exec'] = is_interrupt_exec
|
||||
node_variable['child_node'] = node_child_node
|
||||
_write_context(node_variable, workflow_variable, node, workflow, answer)
|
||||
node_variable['application_node_dict'] = application_node_dict
|
||||
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
|
||||
|
||||
|
||||
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
|
||||
|
|
@ -85,36 +114,80 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
|
|||
node_variable['result'] = {'usage': {'completion_tokens': response.get('completion_tokens'),
|
||||
'prompt_tokens': response.get('prompt_tokens')}}
|
||||
answer = response.get('content', '') or "抱歉,没有查找到相关内容,请重新描述您的问题或提供更多信息。"
|
||||
_write_context(node_variable, workflow_variable, node, workflow, answer)
|
||||
reasoning_content = response.get('reasoning_content', '')
|
||||
answer_list = response.get('answer_list', [])
|
||||
node_variable['application_node_dict'] = {answer.get('real_node_id'): {**answer, 'index': index} for answer, index
|
||||
in
|
||||
zip(answer_list, range(len(answer_list)))}
|
||||
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
|
||||
|
||||
|
||||
def reset_application_node_dict(application_node_dict, runtime_node_id, node_data):
|
||||
try:
|
||||
if application_node_dict is None:
|
||||
return
|
||||
for key in application_node_dict:
|
||||
application_node = application_node_dict[key]
|
||||
if application_node.get('runtime_node_id') == runtime_node_id:
|
||||
content: str = application_node.get('content')
|
||||
match = re.search('<form_rander>.*?</form_rander>', content)
|
||||
if match:
|
||||
form_setting_str = match.group().replace('<form_rander>', '').replace('</form_rander>', '')
|
||||
form_setting = json.loads(form_setting_str)
|
||||
form_setting['is_submit'] = True
|
||||
form_setting['form_data'] = node_data
|
||||
value = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
|
||||
res = re.sub('<form_rander>.*?</form_rander>',
|
||||
'${value}', content)
|
||||
application_node['content'] = res.replace('${value}', value)
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
|
||||
class BaseApplicationNode(IApplicationNode):
|
||||
def get_answer_text(self):
|
||||
def get_answer_list(self) -> List[Answer] | None:
|
||||
if self.answer_text is None:
|
||||
return None
|
||||
return {'content': self.answer_text, 'runtime_node_id': self.runtime_node_id,
|
||||
'chat_record_id': self.workflow_params['chat_record_id'], 'child_node': self.context.get('child_node')}
|
||||
application_node_dict = self.context.get('application_node_dict')
|
||||
if application_node_dict is None or len(application_node_dict) == 0:
|
||||
return [
|
||||
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'],
|
||||
self.context.get('child_node'), self.runtime_node_id, '')]
|
||||
else:
|
||||
return [Answer(n.get('content'), n.get('view_type'), self.runtime_node_id,
|
||||
self.workflow_params['chat_record_id'], {'runtime_node_id': n.get('runtime_node_id'),
|
||||
'chat_record_id': n.get('chat_record_id')
|
||||
, 'child_node': n.get('child_node')}, n.get('real_node_id'),
|
||||
n.get('reasoning_content', ''))
|
||||
for n in
|
||||
sorted(application_node_dict.values(), key=lambda item: item.get('index'))]
|
||||
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['answer'] = details.get('answer')
|
||||
self.context['result'] = details.get('answer')
|
||||
self.context['question'] = details.get('question')
|
||||
self.context['type'] = details.get('type')
|
||||
self.answer_text = details.get('answer')
|
||||
self.context['reasoning_content'] = details.get('reasoning_content')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
|
||||
app_document_list=None, app_image_list=None, child_node=None, node_data=None,
|
||||
app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
|
||||
**kwargs) -> NodeResult:
|
||||
from application.serializers.chat_message_serializers import ChatMessageSerializer
|
||||
# 生成嵌入应用的chat_id
|
||||
current_chat_id = string_to_uuid(chat_id + application_id)
|
||||
Chat.objects.get_or_create(id=current_chat_id, defaults={
|
||||
'application_id': application_id,
|
||||
'abstract': message
|
||||
'abstract': message[0:1024],
|
||||
'client_id': client_id,
|
||||
})
|
||||
if app_document_list is None:
|
||||
app_document_list = []
|
||||
if app_image_list is None:
|
||||
app_image_list = []
|
||||
if app_audio_list is None:
|
||||
app_audio_list = []
|
||||
runtime_node_id = None
|
||||
record_id = None
|
||||
child_node_value = None
|
||||
|
|
@ -122,6 +195,8 @@ class BaseApplicationNode(IApplicationNode):
|
|||
runtime_node_id = child_node.get('runtime_node_id')
|
||||
record_id = child_node.get('chat_record_id')
|
||||
child_node_value = child_node.get('child_node')
|
||||
application_node_dict = self.context.get('application_node_dict')
|
||||
reset_application_node_dict(application_node_dict, runtime_node_id, node_data)
|
||||
|
||||
response = ChatMessageSerializer(
|
||||
data={'chat_id': current_chat_id, 'message': message,
|
||||
|
|
@ -132,6 +207,7 @@ class BaseApplicationNode(IApplicationNode):
|
|||
'client_type': client_type,
|
||||
'document_list': app_document_list,
|
||||
'image_list': app_image_list,
|
||||
'audio_list': app_audio_list,
|
||||
'runtime_node_id': runtime_node_id,
|
||||
'chat_record_id': record_id,
|
||||
'child_node': child_node_value,
|
||||
|
|
@ -150,20 +226,25 @@ class BaseApplicationNode(IApplicationNode):
|
|||
def get_details(self, index: int, **kwargs):
|
||||
global_fields = []
|
||||
for api_input_field in self.node_params_serializer.data.get('api_input_field_list', []):
|
||||
value = api_input_field.get('value', [''])[0] if api_input_field.get('value') else ''
|
||||
global_fields.append({
|
||||
'label': api_input_field['variable'],
|
||||
'key': api_input_field['variable'],
|
||||
'value': self.workflow_manage.get_reference_field(
|
||||
api_input_field['value'][0],
|
||||
api_input_field['value'][1:])
|
||||
value,
|
||||
api_input_field['value'][1:]
|
||||
) if value != '' else ''
|
||||
})
|
||||
|
||||
for user_input_field in self.node_params_serializer.data.get('user_input_field_list', []):
|
||||
value = user_input_field.get('value', [''])[0] if user_input_field.get('value') else ''
|
||||
global_fields.append({
|
||||
'label': user_input_field['label'],
|
||||
'key': user_input_field['field'],
|
||||
'value': self.workflow_manage.get_reference_field(
|
||||
user_input_field['value'][0],
|
||||
user_input_field['value'][1:])
|
||||
value,
|
||||
user_input_field['value'][1:]
|
||||
) if value != '' else ''
|
||||
})
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
|
|
@ -172,6 +253,7 @@ class BaseApplicationNode(IApplicationNode):
|
|||
'run_time': self.context.get('run_time'),
|
||||
'question': self.context.get('question'),
|
||||
'answer': self.context.get('answer'),
|
||||
'reasoning_content': self.context.get('reasoning_content'),
|
||||
'type': self.node.type,
|
||||
'message_tokens': self.context.get('message_tokens'),
|
||||
'answer_tokens': self.context.get('answer_tokens'),
|
||||
|
|
@ -179,5 +261,7 @@ class BaseApplicationNode(IApplicationNode):
|
|||
'err_message': self.err_message,
|
||||
'global_fields': global_fields,
|
||||
'document_list': self.workflow_manage.document_list,
|
||||
'image_list': self.workflow_manage.image_list
|
||||
'image_list': self.workflow_manage.image_list,
|
||||
'audio_list': self.workflow_manage.audio_list,
|
||||
'application_node_dict': self.context.get('application_node_dict')
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,20 +9,22 @@
|
|||
|
||||
from .contain_compare import *
|
||||
from .equal_compare import *
|
||||
from .gt_compare import *
|
||||
from .ge_compare import *
|
||||
from .gt_compare import *
|
||||
from .is_not_null_compare import *
|
||||
from .is_not_true import IsNotTrueCompare
|
||||
from .is_null_compare import *
|
||||
from .is_true import IsTrueCompare
|
||||
from .le_compare import *
|
||||
from .lt_compare import *
|
||||
from .len_equal_compare import *
|
||||
from .len_ge_compare import *
|
||||
from .len_gt_compare import *
|
||||
from .len_le_compare import *
|
||||
from .len_lt_compare import *
|
||||
from .len_equal_compare import *
|
||||
from .is_not_null_compare import *
|
||||
from .is_null_compare import *
|
||||
from .lt_compare import *
|
||||
from .not_contain_compare import *
|
||||
|
||||
compare_handle_list = [GECompare(), GTCompare(), ContainCompare(), EqualCompare(), LTCompare(), LECompare(),
|
||||
LenLECompare(), LenGECompare(), LenEqualCompare(), LenGTCompare(), LenLTCompare(),
|
||||
IsNullCompare(),
|
||||
IsNotNullCompare(), NotContainCompare()]
|
||||
IsNotNullCompare(), NotContainCompare(), IsTrueCompare(), IsNotTrueCompare()]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,24 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎
|
||||
@file: is_not_true.py
|
||||
@date:2025/4/7 13:44
|
||||
@desc:
|
||||
"""
|
||||
from typing import List
|
||||
|
||||
from application.flow.step_node.condition_node.compare import Compare
|
||||
|
||||
|
||||
class IsNotTrueCompare(Compare):
|
||||
|
||||
def support(self, node_id, fields: List[str], source_value, compare, target_value):
|
||||
if compare == 'is_not_true':
|
||||
return True
|
||||
|
||||
def compare(self, source_value, compare, target_value):
|
||||
try:
|
||||
return source_value is False
|
||||
except Exception as e:
|
||||
return False
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎
|
||||
@file: IsTrue.py
|
||||
@date:2025/4/7 13:38
|
||||
@desc:
|
||||
"""
|
||||
from typing import List
|
||||
|
||||
from application.flow.step_node.condition_node.compare import Compare
|
||||
|
||||
|
||||
class IsTrueCompare(Compare):
|
||||
|
||||
def support(self, node_id, fields: List[str], source_value, compare, target_value):
|
||||
if compare == 'is_true':
|
||||
return True
|
||||
|
||||
def compare(self, source_value, compare, target_value):
|
||||
try:
|
||||
return source_value is True
|
||||
except Exception as e:
|
||||
return False
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
@date:2024/6/7 9:54
|
||||
@desc:
|
||||
"""
|
||||
import json
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode
|
||||
|
|
@ -16,15 +16,15 @@ from common.util.field_message import ErrMessage
|
|||
|
||||
|
||||
class ConditionSerializer(serializers.Serializer):
|
||||
compare = serializers.CharField(required=True, error_messages=ErrMessage.char("比较器"))
|
||||
value = serializers.CharField(required=True, error_messages=ErrMessage.char(""))
|
||||
field = serializers.ListField(required=True, error_messages=ErrMessage.char("字段"))
|
||||
compare = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Comparator")))
|
||||
value = serializers.CharField(required=True, error_messages=ErrMessage.char(_("value")))
|
||||
field = serializers.ListField(required=True, error_messages=ErrMessage.char(_("Fields")))
|
||||
|
||||
|
||||
class ConditionBranchSerializer(serializers.Serializer):
|
||||
id = serializers.CharField(required=True, error_messages=ErrMessage.char("分支id"))
|
||||
type = serializers.CharField(required=True, error_messages=ErrMessage.char("分支类型"))
|
||||
condition = serializers.CharField(required=True, error_messages=ErrMessage.char("条件or|and"))
|
||||
id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Branch id")))
|
||||
type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Branch Type")))
|
||||
condition = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Condition or|and")))
|
||||
conditions = ConditionSerializer(many=True)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -36,7 +36,15 @@ class BaseConditionNode(IConditionNode):
|
|||
return all(condition_list) if condition == 'and' else any(condition_list)
|
||||
|
||||
def assertion(self, field_list: List[str], compare: str, value):
|
||||
field_value = self.workflow_manage.get_reference_field(field_list[0], field_list[1:])
|
||||
try:
|
||||
value = self.workflow_manage.generate_prompt(value)
|
||||
except Exception as e:
|
||||
pass
|
||||
field_value = None
|
||||
try:
|
||||
field_value = self.workflow_manage.get_reference_field(field_list[0], field_list[1:])
|
||||
except Exception as e:
|
||||
pass
|
||||
for compare_handler in compare_handle_list:
|
||||
if compare_handler.support(field_list[0], field_list[1:], field_value, compare, value):
|
||||
return compare_handler.compare(field_value, compare, value)
|
||||
|
|
|
|||
|
|
@ -13,25 +13,26 @@ from rest_framework import serializers
|
|||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.exception.app_exception import AppApiException
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ReplyNodeParamsSerializer(serializers.Serializer):
|
||||
reply_type = serializers.CharField(required=True, error_messages=ErrMessage.char("回复类型"))
|
||||
fields = serializers.ListField(required=False, error_messages=ErrMessage.list("引用字段"))
|
||||
reply_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Response Type")))
|
||||
fields = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Reference Field")))
|
||||
content = serializers.CharField(required=False, allow_blank=True, allow_null=True,
|
||||
error_messages=ErrMessage.char("直接回答内容"))
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
|
||||
error_messages=ErrMessage.char(_("Direct answer content")))
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
if self.data.get('reply_type') == 'referencing':
|
||||
if 'fields' not in self.data:
|
||||
raise AppApiException(500, "引用字段不能为空")
|
||||
raise AppApiException(500, _("Reference field cannot be empty"))
|
||||
if len(self.data.get('fields')) < 2:
|
||||
raise AppApiException(500, "引用字段错误")
|
||||
raise AppApiException(500, _("Reference field error"))
|
||||
else:
|
||||
if 'content' not in self.data or self.data.get('content') is None:
|
||||
raise AppApiException(500, "内容不能为空")
|
||||
raise AppApiException(500, _("Content cannot be empty"))
|
||||
|
||||
|
||||
class IReplyNode(INode):
|
||||
|
|
|
|||
|
|
@ -15,7 +15,9 @@ from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode
|
|||
class BaseReplyNode(IReplyNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['answer'] = details.get('answer')
|
||||
self.answer_text = details.get('answer')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
|
||||
if reply_type == 'referencing':
|
||||
result = self.get_reference_content(fields)
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
|
@ -9,7 +10,7 @@ from common.util.field_message import ErrMessage
|
|||
|
||||
|
||||
class DocumentExtractNodeSerializer(serializers.Serializer):
|
||||
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list("文档"))
|
||||
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
|
||||
|
||||
|
||||
class IDocumentExtractNode(INode):
|
||||
|
|
|
|||
|
|
@ -38,6 +38,10 @@ def bytes_to_uploaded_file(file_bytes, file_name="file.txt"):
|
|||
splitter = '\n`-----------------------------------`\n'
|
||||
|
||||
class BaseDocumentExtractNode(IDocumentExtractNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['content'] = details.get('content')
|
||||
|
||||
|
||||
def execute(self, document, chat_id, **kwargs):
|
||||
get_buffer = FileBufferHandle().get_buffer
|
||||
|
||||
|
|
@ -70,7 +74,7 @@ class BaseDocumentExtractNode(IDocumentExtractNode):
|
|||
# 回到文件头
|
||||
buffer.seek(0)
|
||||
file_content = split_handle.get_content(buffer, save_image)
|
||||
content.append('## ' + doc['name'] + '\n' + file_content)
|
||||
content.append('### ' + doc['name'] + '\n' + file_content)
|
||||
break
|
||||
|
||||
return NodeResult({'content': splitter.join(content)}, {})
|
||||
|
|
|
|||
|
|
@ -12,12 +12,13 @@ from rest_framework import serializers
|
|||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class FormNodeParamsSerializer(serializers.Serializer):
|
||||
form_field_list = serializers.ListField(required=True, error_messages=ErrMessage.list("表单配置"))
|
||||
form_content_format = serializers.CharField(required=True, error_messages=ErrMessage.char('表单输出内容'))
|
||||
form_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict("表单数据"))
|
||||
form_field_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_("Form Configuration")))
|
||||
form_content_format = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Form output content')))
|
||||
form_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict(_("Form Data")))
|
||||
|
||||
|
||||
class IFormNode(INode):
|
||||
|
|
|
|||
|
|
@ -8,10 +8,11 @@
|
|||
"""
|
||||
import json
|
||||
import time
|
||||
from typing import Dict
|
||||
from typing import Dict, List
|
||||
|
||||
from langchain_core.prompts import PromptTemplate
|
||||
|
||||
from application.flow.common import Answer
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.form_node.i_form_node import IFormNode
|
||||
|
||||
|
|
@ -37,7 +38,8 @@ class BaseFormNode(IFormNode):
|
|||
self.context['start_time'] = details.get('start_time')
|
||||
self.context['form_data'] = form_data
|
||||
self.context['is_submit'] = details.get('is_submit')
|
||||
self.answer_text = details.get('result')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('result')
|
||||
if form_data is not None:
|
||||
for key in form_data:
|
||||
self.context[key] = form_data[key]
|
||||
|
|
@ -53,25 +55,29 @@ class BaseFormNode(IFormNode):
|
|||
form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id,
|
||||
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
|
||||
"is_submit": self.context.get("is_submit", False)}
|
||||
form = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
|
||||
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
|
||||
context = self.workflow_manage.get_workflow_content()
|
||||
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
|
||||
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
|
||||
value = prompt_template.format(form=form)
|
||||
value = prompt_template.format(form=form, context=context)
|
||||
return NodeResult(
|
||||
{'result': value, 'form_field_list': form_field_list, 'form_content_format': form_content_format}, {},
|
||||
_write_context=write_context)
|
||||
|
||||
def get_answer_text(self):
|
||||
def get_answer_list(self) -> List[Answer] | None:
|
||||
form_content_format = self.context.get('form_content_format')
|
||||
form_field_list = self.context.get('form_field_list')
|
||||
form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id,
|
||||
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
|
||||
'form_data': self.context.get('form_data', {}),
|
||||
"is_submit": self.context.get("is_submit", False)}
|
||||
form = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
|
||||
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
|
||||
context = self.workflow_manage.get_workflow_content()
|
||||
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
|
||||
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
|
||||
value = prompt_template.format(form=form)
|
||||
return {'content': value, 'runtime_node_id': self.runtime_node_id,
|
||||
'chat_record_id': self.workflow_params['chat_record_id']}
|
||||
value = prompt_template.format(form=form, context=context)
|
||||
return [Answer(value, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], None,
|
||||
self.runtime_node_id, '')]
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
form_content_format = self.context.get('form_content_format')
|
||||
|
|
@ -80,9 +86,11 @@ class BaseFormNode(IFormNode):
|
|||
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
|
||||
'form_data': self.context.get('form_data', {}),
|
||||
"is_submit": self.context.get("is_submit", False)}
|
||||
form = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
|
||||
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
|
||||
context = self.workflow_manage.get_workflow_content()
|
||||
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
|
||||
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
|
||||
value = prompt_template.format(form=form)
|
||||
value = prompt_template.format(form=form, context=context)
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
|
|
|
|||
|
|
@ -15,23 +15,24 @@ from application.flow.i_step_node import INode, NodeResult
|
|||
from common.field.common import ObjectField
|
||||
from common.util.field_message import ErrMessage
|
||||
from function_lib.models.function import FunctionLib
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class InputField(serializers.Serializer):
|
||||
name = serializers.CharField(required=True, error_messages=ErrMessage.char('变量名'))
|
||||
value = ObjectField(required=True, error_messages=ErrMessage.char("变量值"), model_type_list=[str, list])
|
||||
name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Variable Name')))
|
||||
value = ObjectField(required=True, error_messages=ErrMessage.char(_("Variable Value")), model_type_list=[str, list])
|
||||
|
||||
|
||||
class FunctionLibNodeParamsSerializer(serializers.Serializer):
|
||||
function_lib_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid('函数库id'))
|
||||
function_lib_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('Library ID')))
|
||||
input_field_list = InputField(required=True, many=True)
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
f_lib = QuerySet(FunctionLib).filter(id=self.data.get('function_lib_id')).first()
|
||||
if f_lib is None:
|
||||
raise Exception('函数库已被删除')
|
||||
raise Exception(_('The function has been deleted'))
|
||||
|
||||
|
||||
class IFunctionLibNode(INode):
|
||||
|
|
|
|||
|
|
@ -11,11 +11,13 @@ import time
|
|||
from typing import Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from django.utils.translation import gettext as _
|
||||
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.function_lib_node.i_function_lib_node import IFunctionLibNode
|
||||
from common.exception.app_exception import AppApiException
|
||||
from common.util.function_code import FunctionExecutor
|
||||
from common.util.rsa_util import rsa_long_decrypt
|
||||
from function_lib.models.function import FunctionLib
|
||||
from smartdoc.const import CONFIG
|
||||
|
||||
|
|
@ -29,7 +31,7 @@ def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
|
|||
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable:
|
||||
result = str(step_variable['result']) + '\n'
|
||||
yield result
|
||||
workflow.answer += result
|
||||
node.answer_text = result
|
||||
node.context['run_time'] = time.time() - node.context['start_time']
|
||||
|
||||
|
||||
|
|
@ -38,15 +40,15 @@ def get_field_value(debug_field_list, name, is_required):
|
|||
if len(result) > 0:
|
||||
return result[-1]['value']
|
||||
if is_required:
|
||||
raise AppApiException(500, f"{name}字段未设置值")
|
||||
raise AppApiException(500, _('Field: {name} No value set').format(name=name))
|
||||
return None
|
||||
|
||||
|
||||
def valid_reference_value(_type, value, name):
|
||||
if _type == 'int':
|
||||
instance_type = int
|
||||
instance_type = int | float
|
||||
elif _type == 'float':
|
||||
instance_type = float
|
||||
instance_type = float | int
|
||||
elif _type == 'dict':
|
||||
instance_type = dict
|
||||
elif _type == 'array':
|
||||
|
|
@ -54,13 +56,16 @@ def valid_reference_value(_type, value, name):
|
|||
elif _type == 'string':
|
||||
instance_type = str
|
||||
else:
|
||||
raise Exception(500, f'字段:{name}类型:{_type} 不支持的类型')
|
||||
raise Exception(_('Field: {name} Type: {_type} Value: {value} Unsupported types').format(name=name,
|
||||
_type=_type))
|
||||
if not isinstance(value, instance_type):
|
||||
raise Exception(f'字段:{name}类型:{_type}值:{value}类型错误')
|
||||
raise Exception(
|
||||
_('Field: {name} Type: {_type} Value: {value} Type error').format(name=name, _type=_type,
|
||||
value=value))
|
||||
|
||||
|
||||
def convert_value(name: str, value, _type, is_required, source, node):
|
||||
if not is_required and value is None:
|
||||
if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
|
||||
return None
|
||||
if not is_required and source == 'reference' and (value is None or len(value) == 0):
|
||||
return None
|
||||
|
|
@ -69,6 +74,10 @@ def convert_value(name: str, value, _type, is_required, source, node):
|
|||
value[0],
|
||||
value[1:])
|
||||
valid_reference_value(_type, value, name)
|
||||
if _type == 'int':
|
||||
return int(value)
|
||||
if _type == 'float':
|
||||
return float(value)
|
||||
return value
|
||||
try:
|
||||
if _type == 'int':
|
||||
|
|
@ -79,25 +88,37 @@ def convert_value(name: str, value, _type, is_required, source, node):
|
|||
v = json.loads(value)
|
||||
if isinstance(v, dict):
|
||||
return v
|
||||
raise Exception("类型错误")
|
||||
raise Exception(_('type error'))
|
||||
if _type == 'array':
|
||||
v = json.loads(value)
|
||||
if isinstance(v, list):
|
||||
return v
|
||||
raise Exception("类型错误")
|
||||
raise Exception(_('type error'))
|
||||
return value
|
||||
except Exception as e:
|
||||
raise Exception(f'字段:{name}类型:{_type}值:{value}类型错误')
|
||||
raise Exception(
|
||||
_('Field: {name} Type: {_type} Value: {value} Type error').format(name=name, _type=_type,
|
||||
value=value))
|
||||
|
||||
|
||||
def valid_function(function_lib, user_id):
|
||||
if function_lib is None:
|
||||
raise Exception(_('Function does not exist'))
|
||||
if function_lib.permission_type == 'PRIVATE' and str(function_lib.user_id) != str(user_id):
|
||||
raise Exception(_('No permission to use this function {name}').format(name=function_lib.name))
|
||||
if not function_lib.is_active:
|
||||
raise Exception(_('Function {name} is unavailable').format(name=function_lib.name))
|
||||
|
||||
|
||||
class BaseFunctionLibNodeNode(IFunctionLibNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['result'] = details.get('result')
|
||||
self.answer_text = details.get('result')
|
||||
if self.node_params.get('is_result'):
|
||||
self.answer_text = str(details.get('result'))
|
||||
|
||||
def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
|
||||
function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
|
||||
if not function_lib.is_active:
|
||||
raise Exception(f'函数:{function_lib.name} 不可用')
|
||||
valid_function(function_lib, self.flow_params_serializer.data.get('user_id'))
|
||||
params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),
|
||||
field.get('is_required'),
|
||||
field.get('source'), self)
|
||||
|
|
@ -106,8 +127,14 @@ class BaseFunctionLibNodeNode(IFunctionLibNode):
|
|||
), **field}
|
||||
for field in
|
||||
function_lib.input_field_list]}
|
||||
|
||||
self.context['params'] = params
|
||||
result = function_executor.exec_code(function_lib.code, params)
|
||||
# 合并初始化参数
|
||||
if function_lib.init_params is not None:
|
||||
all_params = json.loads(rsa_long_decrypt(function_lib.init_params)) | params
|
||||
else:
|
||||
all_params = params
|
||||
result = function_executor.exec_code(function_lib.code, all_params)
|
||||
return NodeResult({'result': result}, {}, _write_context=write_context)
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
|
|
|
|||
|
|
@ -16,32 +16,35 @@ from application.flow.i_step_node import INode, NodeResult
|
|||
from common.exception.app_exception import AppApiException
|
||||
from common.field.common import ObjectField
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework.utils.formatting import lazy_format
|
||||
|
||||
|
||||
class InputField(serializers.Serializer):
|
||||
name = serializers.CharField(required=True, error_messages=ErrMessage.char('变量名'))
|
||||
is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("是否必填"))
|
||||
type = serializers.CharField(required=True, error_messages=ErrMessage.char("类型"), validators=[
|
||||
name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Variable Name')))
|
||||
is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_("Is this field required")))
|
||||
type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("type")), validators=[
|
||||
validators.RegexValidator(regex=re.compile("^string|int|dict|array|float$"),
|
||||
message="字段只支持string|int|dict|array|float", code=500)
|
||||
message=_("The field only supports string|int|dict|array|float"), code=500)
|
||||
])
|
||||
source = serializers.CharField(required=True, error_messages=ErrMessage.char("来源"), validators=[
|
||||
source = serializers.CharField(required=True, error_messages=ErrMessage.char(_("source")), validators=[
|
||||
validators.RegexValidator(regex=re.compile("^custom|reference$"),
|
||||
message="字段只支持custom|reference", code=500)
|
||||
message=_("The field only supports custom|reference"), code=500)
|
||||
])
|
||||
value = ObjectField(required=True, error_messages=ErrMessage.char("变量值"), model_type_list=[str, list])
|
||||
value = ObjectField(required=True, error_messages=ErrMessage.char(_("Variable Value")), model_type_list=[str, list])
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
is_required = self.data.get('is_required')
|
||||
if is_required and self.data.get('value') is None:
|
||||
raise AppApiException(500, f'{self.data.get("name")}必填')
|
||||
message = lazy_format(_('{field}, this field is required.'), field=self.data.get("name"))
|
||||
raise AppApiException(500, message)
|
||||
|
||||
|
||||
class FunctionNodeParamsSerializer(serializers.Serializer):
|
||||
input_field_list = InputField(required=True, many=True)
|
||||
code = serializers.CharField(required=True, error_messages=ErrMessage.char("函数"))
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
|
||||
code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("function")))
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
|
|
|
|||
|
|
@ -27,15 +27,15 @@ def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
|
|||
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable:
|
||||
result = str(step_variable['result']) + '\n'
|
||||
yield result
|
||||
workflow.answer += result
|
||||
node.answer_text = result
|
||||
node.context['run_time'] = time.time() - node.context['start_time']
|
||||
|
||||
|
||||
def valid_reference_value(_type, value, name):
|
||||
if _type == 'int':
|
||||
instance_type = int
|
||||
instance_type = int | float
|
||||
elif _type == 'float':
|
||||
instance_type = float
|
||||
instance_type = float | int
|
||||
elif _type == 'dict':
|
||||
instance_type = dict
|
||||
elif _type == 'array':
|
||||
|
|
@ -49,13 +49,17 @@ def valid_reference_value(_type, value, name):
|
|||
|
||||
|
||||
def convert_value(name: str, value, _type, is_required, source, node):
|
||||
if not is_required and value is None:
|
||||
if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
|
||||
return None
|
||||
if source == 'reference':
|
||||
value = node.workflow_manage.get_reference_field(
|
||||
value[0],
|
||||
value[1:])
|
||||
valid_reference_value(_type, value, name)
|
||||
if _type == 'int':
|
||||
return int(value)
|
||||
if _type == 'float':
|
||||
return float(value)
|
||||
return value
|
||||
try:
|
||||
if _type == 'int':
|
||||
|
|
@ -80,7 +84,8 @@ def convert_value(name: str, value, _type, is_required, source, node):
|
|||
class BaseFunctionNodeNode(IFunctionNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['result'] = details.get('result')
|
||||
self.answer_text = details.get('result')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = str(details.get('result'))
|
||||
|
||||
def execute(self, input_field_list, code, **kwargs) -> NodeResult:
|
||||
params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .impl import *
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
# coding=utf-8
|
||||
|
||||
from typing import Type
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ImageGenerateNodeSerializer(serializers.Serializer):
|
||||
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
|
||||
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word (positive)")))
|
||||
|
||||
negative_prompt = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Prompt word (negative)")),
|
||||
allow_null=True, allow_blank=True, )
|
||||
# 多轮对话数量
|
||||
dialogue_number = serializers.IntegerField(required=False, default=0,
|
||||
error_messages=ErrMessage.integer(_("Number of multi-round conversations")))
|
||||
|
||||
dialogue_type = serializers.CharField(required=False, default='NODE',
|
||||
error_messages=ErrMessage.char(_("Conversation storage type")))
|
||||
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
|
||||
model_params_setting = serializers.JSONField(required=False, default=dict,
|
||||
error_messages=ErrMessage.json(_("Model parameter settings")))
|
||||
|
||||
|
||||
class IImageGenerateNode(INode):
|
||||
type = 'image-generate-node'
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ImageGenerateNodeSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
**kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .base_image_generate_node import BaseImageGenerateNode
|
||||
|
|
@ -0,0 +1,122 @@
|
|||
# coding=utf-8
|
||||
from functools import reduce
|
||||
from typing import List
|
||||
|
||||
import requests
|
||||
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
||||
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.image_generate_step_node.i_image_generate_node import IImageGenerateNode
|
||||
from common.util.common import bytes_to_uploaded_file
|
||||
from dataset.serializers.file_serializers import FileSerializer
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
|
||||
|
||||
class BaseImageGenerateNode(IImageGenerateNode):
    """Workflow node that generates images from a text prompt with a
    text-to-image (TTI) model, persists the images and returns the result
    as markdown image links."""

    def save_context(self, details, workflow_manage):
        # Restore the node's output when a stored chat record is replayed.
        self.context['answer'] = details.get('answer')
        self.context['question'] = details.get('question')
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')

    def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
                model_params_setting,
                chat_record_id,
                **kwargs) -> NodeResult:
        """Generate images for the rendered prompt, persist them and build the answer.

        :param model_id: id of the text-to-image model to use
        :param prompt: prompt template (may contain workflow variables)
        :param negative_prompt: negative prompt forwarded to the model
        :param dialogue_number: number of history rounds to include
        :param dialogue_type: history storage mode ('NODE' or 'WORKFLOW')
        :param history_chat_record: previous chat records of this conversation
        :param chat_id: current chat id (stored in the uploaded file meta)
        :param model_params_setting: extra keyword settings for the model
        :param chat_record_id: id of the current chat record (unused here)
        :return: NodeResult with the markdown answer and the stored image list
        """
        application = self.workflow_manage.work_flow_post_handler.chat_info.application
        tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
                                                        **model_params_setting)
        history_message = self.get_history_message(history_chat_record, dialogue_number)
        self.context['history_message'] = history_message
        question = self.generate_prompt_question(prompt)
        self.context['question'] = question
        message_list = self.generate_message_list(question, history_message)
        self.context['message_list'] = message_list
        self.context['dialogue_type'] = dialogue_type
        image_urls = tti_model.generate_image(question, negative_prompt)
        # Download each generated image and persist it through FileSerializer.
        file_urls = []
        for image_url in image_urls:
            file_name = 'generated_image.png'
            file = bytes_to_uploaded_file(requests.get(image_url).content, file_name)
            meta = {
                'debug': False if application.id else True,
                'chat_id': chat_id,
                'application_id': str(application.id) if application.id else None,
            }
            file_url = FileSerializer(data={'file': file, 'meta': meta}).upload()
            file_urls.append(file_url)
        self.context['image_list'] = [{'file_id': path.split('/')[-1], 'url': path} for path in file_urls]
        # Render the stored files as markdown images so the chat UI displays them.
        # (The original joined empty f-strings, which produced a blank answer.)
        answer = ' '.join([f'![]({path})' for path in file_urls])
        return NodeResult({'answer': answer, 'chat_model': tti_model, 'message_list': message_list,
                           'image': [{'file_id': path.split('/')[-1], 'url': path} for path in file_urls],
                           'history_message': history_message, 'question': question}, {})

    def generate_history_ai_message(self, chat_record):
        """Build the AI side of a history round.

        For NODE-scoped history the previously generated images are replayed
        as image_url content parts; otherwise the plain AI message is used.
        """
        for val in chat_record.details.values():
            if self.node.id == val['node_id'] and 'image_list' in val:
                if val['dialogue_type'] == 'WORKFLOW':
                    return chat_record.get_ai_message()
                image_list = val['image_list']
                return AIMessage(content=[
                    *[{'type': 'image_url', 'image_url': {'url': f'{file_url}'}} for file_url in image_list]
                ])
        return chat_record.get_ai_message()

    def get_history_message(self, history_chat_record, dialogue_number):
        """Return the last ``dialogue_number`` rounds as a flat human/AI message list."""
        start_index = len(history_chat_record) - dialogue_number
        history_message = reduce(lambda x, y: [*x, *y], [
            [self.generate_history_human_message(history_chat_record[index]),
             self.generate_history_ai_message(history_chat_record[index])]
            for index in
            range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
        return history_message

    def generate_history_human_message(self, chat_record):
        """Build the human side of a history round, preferring the rendered question."""
        for data in chat_record.details.values():
            if self.node.id == data['node_id'] and 'image_list' in data:
                image_list = data['image_list']
                if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
                    return HumanMessage(content=chat_record.problem_text)
                return HumanMessage(content=data['question'])
        return HumanMessage(content=chat_record.problem_text)

    def generate_prompt_question(self, prompt):
        # Render workflow variables referenced by the prompt template.
        return self.workflow_manage.generate_prompt(prompt)

    def generate_message_list(self, question: str, history_message):
        # History messages first, current question last.
        return [
            *history_message,
            question
        ]

    @staticmethod
    def reset_message_list(message_list: List[BaseMessage], answer_text):
        """Convert a message list into role/content dicts and append the final answer."""
        result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for
                  message
                  in
                  message_list]
        result.append({'role': 'ai', 'content': answer_text})
        return result

    def get_details(self, index: int, **kwargs):
        """Serialize this node's execution details for storage and inspection."""
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'history_message': [{'content': message.content, 'role': message.type} for message in
                                (self.context.get('history_message') if self.context.get(
                                    'history_message') is not None else [])],
            'question': self.context.get('question'),
            'answer': self.context.get('answer'),
            'type': self.node.type,
            'message_tokens': self.context.get('message_tokens'),
            'answer_tokens': self.context.get('answer_tokens'),
            'status': self.status,
            'err_message': self.err_message,
            'image_list': self.context.get('image_list'),
            'dialogue_type': self.context.get('dialogue_type')
        }
|
||||
|
|
@ -6,21 +6,25 @@ from rest_framework import serializers
|
|||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ImageUnderstandNodeSerializer(serializers.Serializer):
|
||||
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char("模型id"))
|
||||
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
|
||||
system = serializers.CharField(required=False, allow_blank=True, allow_null=True,
|
||||
error_messages=ErrMessage.char("角色设定"))
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
|
||||
error_messages=ErrMessage.char(_("Role Setting")))
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
|
||||
# 多轮对话数量
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量"))
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations")))
|
||||
|
||||
dialogue_type = serializers.CharField(required=True, error_messages=ErrMessage.char("对话存储类型"))
|
||||
dialogue_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Conversation storage type")))
|
||||
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
|
||||
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list("图片"))
|
||||
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
|
||||
|
||||
model_params_setting = serializers.JSONField(required=False, default=dict,
|
||||
error_messages=ErrMessage.json(_("Model parameter settings")))
|
||||
|
||||
|
||||
class IImageUnderstandNode(INode):
|
||||
|
|
@ -35,6 +39,7 @@ class IImageUnderstandNode(INode):
|
|||
return self.execute(image=res, **self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
image,
|
||||
**kwargs) -> NodeResult:
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ from application.flow.i_step_node import NodeResult, INode
|
|||
from application.flow.step_node.image_understand_step_node.i_image_understand_node import IImageUnderstandNode
|
||||
from dataset.models import File
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
from imghdr import what
|
||||
|
||||
|
||||
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
|
||||
|
|
@ -59,25 +60,28 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
|
|||
|
||||
def file_id_to_base64(file_id: str):
|
||||
file = QuerySet(File).filter(id=file_id).first()
|
||||
base64_image = base64.b64encode(file.get_byte()).decode("utf-8")
|
||||
return base64_image
|
||||
file_bytes = file.get_byte()
|
||||
base64_image = base64.b64encode(file_bytes).decode("utf-8")
|
||||
return [base64_image, what(None, file_bytes.tobytes())]
|
||||
|
||||
|
||||
class BaseImageUnderstandNode(IImageUnderstandNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['answer'] = details.get('answer')
|
||||
self.context['question'] = details.get('question')
|
||||
self.answer_text = details.get('answer')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
image,
|
||||
**kwargs) -> NodeResult:
|
||||
# 处理不正确的参数
|
||||
if image is None or not isinstance(image, list):
|
||||
image = []
|
||||
|
||||
image_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'))
|
||||
print(model_params_setting)
|
||||
image_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), **model_params_setting)
|
||||
# 执行详情中的历史消息不需要图片内容
|
||||
history_message = self.get_history_message_for_details(history_chat_record, dialogue_number)
|
||||
self.context['history_message'] = history_message
|
||||
|
|
@ -151,7 +155,7 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
|
|||
return HumanMessage(
|
||||
content=[
|
||||
{'type': 'text', 'text': data['question']},
|
||||
*[{'type': 'image_url', 'image_url': {'url': f'data:image/jpeg;base64,{base64_image}'}} for
|
||||
*[{'type': 'image_url', 'image_url': {'url': f'data:image/{base64_image[1]};base64,{base64_image[0]}'}} for
|
||||
base64_image in image_base64_list]
|
||||
])
|
||||
return HumanMessage(content=chat_record.problem_text)
|
||||
|
|
@ -166,8 +170,10 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
|
|||
for img in image:
|
||||
file_id = img['file_id']
|
||||
file = QuerySet(File).filter(id=file_id).first()
|
||||
base64_image = base64.b64encode(file.get_byte()).decode("utf-8")
|
||||
images.append({'type': 'image_url', 'image_url': {'url': f'data:image/jpeg;base64,{base64_image}'}})
|
||||
image_bytes = file.get_byte()
|
||||
base64_image = base64.b64encode(image_bytes).decode("utf-8")
|
||||
image_format = what(None, image_bytes.tobytes())
|
||||
images.append({'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}})
|
||||
messages = [HumanMessage(
|
||||
content=[
|
||||
{'type': 'text', 'text': self.workflow_manage.generate_prompt(prompt)},
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .impl import *
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
# coding=utf-8
|
||||
|
||||
from typing import Type
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class McpNodeSerializer(serializers.Serializer):
    """Parameter serializer for the MCP workflow node."""

    # JSON document describing every configured MCP server. Uses the JSON
    # error-message variant for consistency with the other JSONField
    # declarations in this module (was ErrMessage.char).
    mcp_servers = serializers.JSONField(required=True,
                                        error_messages=ErrMessage.json(_("Mcp servers")))

    # Name of the server session the tool call is routed to.
    mcp_server = serializers.CharField(required=True,
                                       error_messages=ErrMessage.char(_("Mcp server")))

    # Name of the tool to invoke on the selected server.
    mcp_tool = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Mcp tool")))

    # Arguments passed to the tool; string values may reference workflow variables.
    tool_params = serializers.DictField(required=True,
                                        error_messages=ErrMessage.char(_("Tool parameters")))
|
||||
|
||||
|
||||
class IMcpNode(INode):
    """Interface for the MCP workflow node.

    Declares the node type, the parameter serializer and the ``execute``
    contract that concrete implementations must fulfil.
    """
    type = 'mcp-node'

    def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
        # Node parameters are validated by McpNodeSerializer.
        return McpNodeSerializer

    def _run(self):
        # Forward validated node parameters plus flow parameters to execute().
        node_params = self.node_params_serializer.data
        flow_params = self.flow_params_serializer.data
        return self.execute(**node_params, **flow_params)

    def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
        # Implemented by the concrete node (BaseMcpNode).
        pass
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .base_mcp_node import BaseMcpNode
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
# coding=utf-8
|
||||
import asyncio
|
||||
import json
|
||||
from typing import List
|
||||
|
||||
from langchain_mcp_adapters.client import MultiServerMCPClient
|
||||
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.mcp_node.i_mcp_node import IMcpNode
|
||||
|
||||
|
||||
class BaseMcpNode(IMcpNode):
    """Workflow node that invokes a tool on a configured MCP server."""

    def save_context(self, details, workflow_manage):
        # Restore the node's output when a stored chat record is replayed.
        self.context['result'] = details.get('result')
        self.context['tool_params'] = details.get('tool_params')
        self.context['mcp_tool'] = details.get('mcp_tool')
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('result')

    def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
        """Call ``mcp_tool`` on ``mcp_server`` with variable-substituted parameters.

        :param mcp_servers: JSON string describing all configured MCP servers
        :param mcp_server: name of the server session to use
        :param mcp_tool: name of the tool to call
        :param tool_params: tool arguments; string values may reference workflow variables
        :return: NodeResult containing the text contents returned by the tool
        """
        servers = json.loads(mcp_servers)
        # Deep-copy the parameters so variable substitution cannot mutate the node config.
        params = json.loads(json.dumps(tool_params))
        params = self.handle_variables(params)

        async def call_tool(server_config, session_name, tool_name, arguments):
            async with MultiServerMCPClient(server_config) as client:
                return await client.sessions[session_name].call_tool(tool_name, arguments)

        res = asyncio.run(call_tool(servers, mcp_server, mcp_tool, params))
        return NodeResult(
            {'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})

    def handle_variables(self, tool_params):
        """Recursively substitute workflow variables inside the tool parameters.

        Mutates and returns ``tool_params``. Fixes: the original indexed
        ``v[0]`` without checking for an empty list, which raised IndexError.
        """
        for k, v in tool_params.items():
            if isinstance(v, str):
                tool_params[k] = self.workflow_manage.generate_prompt(v)
            elif isinstance(v, dict):
                self.handle_variables(v)
            elif isinstance(v, list) and len(v) > 0 and isinstance(v[0], str):
                # A list of strings is treated as a reference path [node_id, field, ...].
                tool_params[k] = self.get_reference_content(v)
        return tool_params

    def get_reference_content(self, fields: List[str]):
        # Resolve a [node_id, field, ...] reference and stringify the value.
        return str(self.workflow_manage.get_reference_field(
            fields[0],
            fields[1:]))

    def get_details(self, index: int, **kwargs):
        """Serialize this node's execution details for storage and inspection."""
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'status': self.status,
            'err_message': self.err_message,
            'type': self.node.type,
            'mcp_tool': self.context.get('mcp_tool'),
            'tool_params': self.context.get('tool_params'),
            'result': self.context.get('result'),
        }
|
||||
|
|
@ -12,18 +12,19 @@ from rest_framework import serializers
|
|||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class QuestionNodeSerializer(serializers.Serializer):
|
||||
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char("模型id"))
|
||||
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
|
||||
system = serializers.CharField(required=False, allow_blank=True, allow_null=True,
|
||||
error_messages=ErrMessage.char("角色设定"))
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
|
||||
error_messages=ErrMessage.char(_("Role Setting")))
|
||||
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
|
||||
# 多轮对话数量
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量"))
|
||||
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations")))
|
||||
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
|
||||
model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.integer("模型参数相关设置"))
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.integer(_("Model parameter settings")))
|
||||
|
||||
|
||||
class IQuestionNode(INode):
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@
|
|||
@date:2024/6/4 14:30
|
||||
@desc:
|
||||
"""
|
||||
import re
|
||||
import time
|
||||
from functools import reduce
|
||||
from typing import List, Dict
|
||||
|
|
@ -79,7 +80,8 @@ class BaseQuestionNode(IQuestionNode):
|
|||
self.context['answer'] = details.get('answer')
|
||||
self.context['message_tokens'] = details.get('message_tokens')
|
||||
self.context['answer_tokens'] = details.get('answer_tokens')
|
||||
self.answer_text = details.get('answer')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
|
||||
model_params_setting=None,
|
||||
|
|
@ -92,6 +94,8 @@ class BaseQuestionNode(IQuestionNode):
|
|||
self.context['history_message'] = history_message
|
||||
question = self.generate_prompt_question(prompt)
|
||||
self.context['question'] = question.content
|
||||
system = self.workflow_manage.generate_prompt(system)
|
||||
self.context['system'] = system
|
||||
message_list = self.generate_message_list(system, prompt, history_message)
|
||||
self.context['message_list'] = message_list
|
||||
if stream:
|
||||
|
|
@ -112,6 +116,9 @@ class BaseQuestionNode(IQuestionNode):
|
|||
[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
|
||||
for index in
|
||||
range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
|
||||
for message in history_message:
|
||||
if isinstance(message.content, str):
|
||||
message.content = re.sub('<form_rander>[\d\D]*?<\/form_rander>', '', message.content)
|
||||
return history_message
|
||||
|
||||
def generate_prompt_question(self, prompt):
|
||||
|
|
@ -138,7 +145,7 @@ class BaseQuestionNode(IQuestionNode):
|
|||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': self.context.get('run_time'),
|
||||
'system': self.node_params.get('system'),
|
||||
'system': self.context.get('system'),
|
||||
'history_message': [{'content': message.content, 'role': message.type} for message in
|
||||
(self.context.get('history_message') if self.context.get(
|
||||
'history_message') is not None else [])],
|
||||
|
|
|
|||
|
|
@ -12,17 +12,18 @@ from rest_framework import serializers
|
|||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class RerankerSettingSerializer(serializers.Serializer):
|
||||
# 需要查询的条数
|
||||
top_n = serializers.IntegerField(required=True,
|
||||
error_messages=ErrMessage.integer("引用分段数"))
|
||||
error_messages=ErrMessage.integer(_("Reference segment number")))
|
||||
# 相似度 0-1之间
|
||||
similarity = serializers.FloatField(required=True, max_value=2, min_value=0,
|
||||
error_messages=ErrMessage.float("引用分段数"))
|
||||
error_messages=ErrMessage.float(_("Reference segment number")))
|
||||
max_paragraph_char_number = serializers.IntegerField(required=True,
|
||||
error_messages=ErrMessage.float("最大引用分段字数"))
|
||||
error_messages=ErrMessage.float(_("Maximum number of words in a quoted segment")))
|
||||
|
||||
|
||||
class RerankerStepNodeSerializer(serializers.Serializer):
|
||||
|
|
|
|||
|
|
@ -23,9 +23,14 @@ def merge_reranker_list(reranker_list, result=None):
|
|||
merge_reranker_list(document, result)
|
||||
elif isinstance(document, dict):
|
||||
content = document.get('title', '') + document.get('content', '')
|
||||
result.append(str(document) if len(content) == 0 else content)
|
||||
title = document.get("title")
|
||||
dataset_name = document.get("dataset_name")
|
||||
document_name = document.get('document_name')
|
||||
result.append(
|
||||
Document(page_content=str(document) if len(content) == 0 else content,
|
||||
metadata={'title': title, 'dataset_name': dataset_name, 'document_name': document_name}))
|
||||
else:
|
||||
result.append(str(document))
|
||||
result.append(Document(page_content=str(document), metadata={}))
|
||||
return result
|
||||
|
||||
|
||||
|
|
@ -43,6 +48,21 @@ def filter_result(document_list: List[Document], max_paragraph_char_number, top_
|
|||
return result
|
||||
|
||||
|
||||
def reset_result_list(result_list: List[Document], document_list: List[Document]):
    """Re-attach the original documents' metadata to the reranked results.

    Each reranked document is matched (by page_content) against the original
    document list; on a match the original metadata is merged with the
    reranker's relevance_score. Matched originals are consumed so duplicate
    contents pair up one-to-one. Unmatched results pass through unchanged.
    """
    remaining = document_list.copy()
    merged = []
    for ranked in result_list:
        match = next((doc for doc in remaining if doc.page_content == ranked.page_content), None)
        if match is None:
            merged.append(ranked)
        else:
            remaining.remove(match)
            merged.append(Document(page_content=match.page_content,
                                   metadata={**match.metadata,
                                             'relevance_score': ranked.metadata.get('relevance_score')}))
    return merged
|
||||
|
||||
|
||||
class BaseRerankerNode(IRerankerNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['document_list'] = details.get('document_list', [])
|
||||
|
|
@ -55,16 +75,18 @@ class BaseRerankerNode(IRerankerNode):
|
|||
**kwargs) -> NodeResult:
|
||||
documents = merge_reranker_list(reranker_list)
|
||||
top_n = reranker_setting.get('top_n', 3)
|
||||
self.context['document_list'] = documents
|
||||
self.context['document_list'] = [{'page_content': document.page_content, 'metadata': document.metadata} for
|
||||
document in documents]
|
||||
self.context['question'] = question
|
||||
reranker_model = get_model_instance_by_model_user_id(reranker_model_id,
|
||||
self.flow_params_serializer.data.get('user_id'),
|
||||
top_n=top_n)
|
||||
result = reranker_model.compress_documents(
|
||||
[Document(page_content=document) for document in documents if document is not None and len(document) > 0],
|
||||
documents,
|
||||
question)
|
||||
similarity = reranker_setting.get('similarity', 0.6)
|
||||
max_paragraph_char_number = reranker_setting.get('max_paragraph_char_number', 5000)
|
||||
result = reset_result_list(result, documents)
|
||||
r = filter_result(result, max_paragraph_char_number, top_n, similarity)
|
||||
return NodeResult({'result_list': r, 'result': ''.join([item.get('page_content') for item in r])}, {})
|
||||
|
||||
|
|
|
|||
|
|
@ -15,30 +15,31 @@ from rest_framework import serializers
|
|||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.common import flat_map
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class DatasetSettingSerializer(serializers.Serializer):
|
||||
# 需要查询的条数
|
||||
top_n = serializers.IntegerField(required=True,
|
||||
error_messages=ErrMessage.integer("引用分段数"))
|
||||
error_messages=ErrMessage.integer(_("Reference segment number")))
|
||||
# 相似度 0-1之间
|
||||
similarity = serializers.FloatField(required=True, max_value=2, min_value=0,
|
||||
error_messages=ErrMessage.float("引用分段数"))
|
||||
error_messages=ErrMessage.float(_('similarity')))
|
||||
search_mode = serializers.CharField(required=True, validators=[
|
||||
validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"),
|
||||
message="类型只支持register|reset_password", code=500)
|
||||
], error_messages=ErrMessage.char("检索模式"))
|
||||
message=_("The type only supports embedding|keywords|blend"), code=500)
|
||||
], error_messages=ErrMessage.char(_("Retrieval Mode")))
|
||||
max_paragraph_char_number = serializers.IntegerField(required=True,
|
||||
error_messages=ErrMessage.float("最大引用分段字数"))
|
||||
error_messages=ErrMessage.float(_("Maximum number of words in a quoted segment")))
|
||||
|
||||
|
||||
class SearchDatasetStepNodeSerializer(serializers.Serializer):
|
||||
# 需要查询的数据集id列表
|
||||
dataset_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
|
||||
error_messages=ErrMessage.list("数据集id列表"))
|
||||
error_messages=ErrMessage.list(_("Dataset id list")))
|
||||
dataset_setting = DatasetSettingSerializer(required=True)
|
||||
|
||||
question_reference_address = serializers.ListField(required=True, )
|
||||
question_reference_address = serializers.ListField(required=True)
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
|
|
@ -65,7 +66,7 @@ class ISearchDatasetStepNode(INode):
|
|||
if self.flow_params_serializer.data.get('re_chat', False):
|
||||
history_chat_record = self.flow_params_serializer.data.get('history_chat_record', [])
|
||||
paragraph_id_list = [p.get('id') for p in flat_map(
|
||||
[get_paragraph_list(chat_record, self.node.id) for chat_record in history_chat_record if
|
||||
[get_paragraph_list(chat_record, self.runtime_node_id) for chat_record in history_chat_record if
|
||||
chat_record.problem_text == question])]
|
||||
exclude_paragraph_id_list = list(set(paragraph_id_list))
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ import os
|
|||
from typing import List, Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
|
||||
from django.db import connection
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.search_dataset_node.i_search_dataset_node import ISearchDatasetStepNode
|
||||
from common.config.embedding_config import VectorStore
|
||||
|
|
@ -77,6 +77,8 @@ class BaseSearchDatasetNode(ISearchDatasetStepNode):
|
|||
embedding_list = vector.query(question, embedding_value, dataset_id_list, exclude_document_id_list,
|
||||
exclude_paragraph_id_list, True, dataset_setting.get('top_n'),
|
||||
dataset_setting.get('similarity'), SearchMode(dataset_setting.get('search_mode')))
|
||||
# 手动关闭数据库连接
|
||||
connection.close()
|
||||
if embedding_list is None:
|
||||
return get_none_result(question)
|
||||
paragraph_list = self.list_paragraph(embedding_list, vector)
|
||||
|
|
@ -86,7 +88,7 @@ class BaseSearchDatasetNode(ISearchDatasetStepNode):
|
|||
'is_hit_handling_method_list': [row for row in result if row.get('is_hit_handling_method')],
|
||||
'data': '\n'.join(
|
||||
[f"{reset_title(paragraph.get('title', ''))}{paragraph.get('content')}" for paragraph in
|
||||
paragraph_list])[0:dataset_setting.get('max_paragraph_char_number', 5000)],
|
||||
result])[0:dataset_setting.get('max_paragraph_char_number', 5000)],
|
||||
'directly_return': '\n'.join(
|
||||
[paragraph.get('content') for paragraph in
|
||||
result if
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .impl import *
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
# coding=utf-8
|
||||
|
||||
from typing import Type
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class SpeechToTextNodeSerializer(serializers.Serializer):
    """Parameter serializer for the speech-to-text workflow node."""

    # Identifier of the speech-to-text model to run.
    stt_model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))

    # Whether the transcription should be emitted as the node's answer.
    is_result = serializers.BooleanField(required=False,
                                         error_messages=ErrMessage.boolean(_('Whether to return content')))

    # Reference path to the uploaded audio files; must not be empty.
    audio_list = serializers.ListField(required=True,
                                       error_messages=ErrMessage.list(_("The audio file cannot be empty")))
|
||||
|
||||
|
||||
class ISpeechToTextNode(INode):
    """Interface for the speech-to-text workflow node.

    Resolves the referenced audio files, validates them and dispatches to the
    concrete ``execute`` implementation.
    """
    type = 'speech-to-text-node'

    def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
        # Node parameters are validated by SpeechToTextNodeSerializer.
        return SpeechToTextNodeSerializer

    def _run(self):
        # audio_list is a reference path: [node_id, field, ...].
        audio_ref = self.node_params_serializer.data.get('audio_list')
        audio_files = self.workflow_manage.get_reference_field(audio_ref[0], audio_ref[1:])
        # Every resolved entry must carry a file_id, otherwise the upload failed.
        for item in audio_files:
            if 'file_id' not in item:
                raise ValueError(_("Parameter value error: The uploaded audio lacks file_id, and the audio upload fails"))

        return self.execute(audio=audio_files, **self.node_params_serializer.data, **self.flow_params_serializer.data)

    def execute(self, stt_model_id, chat_id,
                audio,
                **kwargs) -> NodeResult:
        # Implemented by the concrete node (BaseSpeechToTextNode).
        pass
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .base_speech_to_text_node import BaseSpeechToTextNode
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
# coding=utf-8
|
||||
import os
|
||||
import tempfile
|
||||
import time
|
||||
import io
|
||||
from typing import List, Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from pydub import AudioSegment
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from application.flow.i_step_node import NodeResult, INode
|
||||
from application.flow.step_node.speech_to_text_step_node.i_speech_to_text_node import ISpeechToTextNode
|
||||
from common.util.common import split_and_transcribe, any_to_mp3
|
||||
from dataset.models import File
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
|
||||
class BaseSpeechToTextNode(ISpeechToTextNode):
    """Workflow node that transcribes uploaded audio files with an STT model."""

    def save_context(self, details, workflow_manage):
        # Restore the node's output when a stored chat record is replayed.
        self.context['answer'] = details.get('answer')
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')

    def execute(self, stt_model_id, chat_id, audio, **kwargs) -> NodeResult:
        """Transcribe every referenced audio file and join the transcriptions.

        :param stt_model_id: id of the speech-to-text model
        :param chat_id: current chat id (unused here)
        :param audio: list of dicts, each carrying the 'file_id' of an uploaded file
        :return: NodeResult whose 'answer'/'result' is the joined transcription text
        """
        stt_model = get_model_instance_by_model_user_id(stt_model_id, self.flow_params_serializer.data.get('user_id'))
        audio_items = audio
        self.context['audio_list'] = audio

        def transcribe_one(audio_item, model):
            # Fetch the stored file and convert it to mp3 before transcription,
            # using the original file-name suffix to pick the source format.
            file = QuerySet(File).filter(id=audio_item['file_id']).first()
            source_suffix = file.file_name.split('.')[-1]
            with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{source_suffix}') as source_tmp:
                source_tmp.write(file.get_byte().tobytes())
                source_path = source_tmp.name
            with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as mp3_tmp:
                mp3_path = mp3_tmp.name
                any_to_mp3(source_path, mp3_path)
            try:
                transcription = split_and_transcribe(mp3_path, model)
                return {file.file_name: transcription}
            finally:
                # Always clean up both temporary files.
                os.remove(source_path)
                os.remove(mp3_path)

        def transcribe_all(items, model):
            # Transcribe up to five files concurrently.
            with ThreadPoolExecutor(max_workers=5) as executor:
                return list(executor.map(lambda item: transcribe_one(item, model), items))

        result = transcribe_all(audio_items, stt_model)
        content = []
        result_content = []
        for item in result:
            for key, value in item.items():
                content.append(f'### {key}\n{value}')
                result_content.append(value)
        return NodeResult({'answer': '\n'.join(result_content), 'result': '\n'.join(result_content),
                           'content': content}, {})

    def get_details(self, index: int, **kwargs):
        """Serialize this node's execution details for storage and inspection."""
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'answer': self.context.get('answer'),
            'content': self.context.get('content'),
            'type': self.node.type,
            'status': self.status,
            'err_message': self.err_message,
            'audio_list': self.context.get('audio_list'),
        }
|
||||
|
|
@ -39,10 +39,14 @@ class BaseStartStepNode(IStarNode):
|
|||
self.context['run_time'] = details.get('run_time')
|
||||
self.context['document'] = details.get('document_list')
|
||||
self.context['image'] = details.get('image_list')
|
||||
self.context['audio'] = details.get('audio_list')
|
||||
self.context['other'] = details.get('other_list')
|
||||
self.status = details.get('status')
|
||||
self.err_message = details.get('err_message')
|
||||
for key, value in workflow_variable.items():
|
||||
workflow_manage.context[key] = value
|
||||
for item in details.get('global_fields', []):
|
||||
workflow_manage.context[item.get('key')] = item.get('value')
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
pass
|
||||
|
|
@ -57,7 +61,9 @@ class BaseStartStepNode(IStarNode):
|
|||
node_variable = {
|
||||
'question': question,
|
||||
'image': self.workflow_manage.image_list,
|
||||
'document': self.workflow_manage.document_list
|
||||
'document': self.workflow_manage.document_list,
|
||||
'audio': self.workflow_manage.audio_list,
|
||||
'other': self.workflow_manage.other_list,
|
||||
}
|
||||
return NodeResult(node_variable, workflow_variable)
|
||||
|
||||
|
|
@ -80,5 +86,7 @@ class BaseStartStepNode(IStarNode):
|
|||
'err_message': self.err_message,
|
||||
'image_list': self.context.get('image'),
|
||||
'document_list': self.context.get('document'),
|
||||
'audio_list': self.context.get('audio'),
|
||||
'other_list': self.context.get('other'),
|
||||
'global_fields': global_fields
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .impl import *
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
# coding=utf-8
|
||||
|
||||
from typing import Type
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class TextToSpeechNodeSerializer(serializers.Serializer):
|
||||
tts_model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
|
||||
|
||||
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
|
||||
|
||||
content_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_("Text content")))
|
||||
model_params_setting = serializers.DictField(required=False,
|
||||
error_messages=ErrMessage.integer(_("Model parameter settings")))
|
||||
|
||||
|
||||
class ITextToSpeechNode(INode):
|
||||
type = 'text-to-speech-node'
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return TextToSpeechNodeSerializer
|
||||
|
||||
def _run(self):
|
||||
content = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('content_list')[0],
|
||||
self.node_params_serializer.data.get('content_list')[1:])
|
||||
return self.execute(content=content, **self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, tts_model_id, chat_id,
|
||||
content, model_params_setting=None,
|
||||
**kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .base_text_to_speech_node import BaseTextToSpeechNode
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
# coding=utf-8
|
||||
import io
|
||||
import mimetypes
|
||||
|
||||
from django.core.files.uploadedfile import InMemoryUploadedFile
|
||||
|
||||
from application.flow.i_step_node import NodeResult, INode
|
||||
from application.flow.step_node.image_understand_step_node.i_image_understand_node import IImageUnderstandNode
|
||||
from application.flow.step_node.text_to_speech_step_node.i_text_to_speech_node import ITextToSpeechNode
|
||||
from dataset.models import File
|
||||
from dataset.serializers.file_serializers import FileSerializer
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
|
||||
|
||||
def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"):
|
||||
content_type, _ = mimetypes.guess_type(file_name)
|
||||
if content_type is None:
|
||||
# 如果未能识别,设置为默认的二进制文件类型
|
||||
content_type = "application/octet-stream"
|
||||
# 创建一个内存中的字节流对象
|
||||
file_stream = io.BytesIO(file_bytes)
|
||||
|
||||
# 获取文件大小
|
||||
file_size = len(file_bytes)
|
||||
|
||||
uploaded_file = InMemoryUploadedFile(
|
||||
file=file_stream,
|
||||
field_name=None,
|
||||
name=file_name,
|
||||
content_type=content_type,
|
||||
size=file_size,
|
||||
charset=None,
|
||||
)
|
||||
return uploaded_file
|
||||
|
||||
|
||||
class BaseTextToSpeechNode(ITextToSpeechNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['answer'] = details.get('answer')
|
||||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, tts_model_id, chat_id,
|
||||
content, model_params_setting=None,
|
||||
**kwargs) -> NodeResult:
|
||||
self.context['content'] = content
|
||||
model = get_model_instance_by_model_user_id(tts_model_id, self.flow_params_serializer.data.get('user_id'),
|
||||
**model_params_setting)
|
||||
audio_byte = model.text_to_speech(content)
|
||||
# 需要把这个音频文件存储到数据库中
|
||||
file_name = 'generated_audio.mp3'
|
||||
file = bytes_to_uploaded_file(audio_byte, file_name)
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
meta = {
|
||||
'debug': False if application.id else True,
|
||||
'chat_id': chat_id,
|
||||
'application_id': str(application.id) if application.id else None,
|
||||
}
|
||||
file_url = FileSerializer(data={'file': file, 'meta': meta}).upload()
|
||||
# 拼接一个audio标签的src属性
|
||||
audio_label = f'<audio src="{file_url}" controls style = "width: 300px; height: 43px"></audio>'
|
||||
file_id = file_url.split('/')[-1]
|
||||
audio_list = [{'file_id': file_id, 'file_name': file_name, 'url': file_url}]
|
||||
return NodeResult({'answer': audio_label, 'result': audio_list}, {})
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': self.context.get('run_time'),
|
||||
'type': self.node.type,
|
||||
'status': self.status,
|
||||
'content': self.context.get('content'),
|
||||
'err_message': self.err_message,
|
||||
'answer': self.context.get('answer'),
|
||||
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .impl import *
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
# coding=utf-8
|
||||
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.util.field_message import ErrMessage
|
||||
|
||||
|
||||
class VariableAssignNodeParamsSerializer(serializers.Serializer):
|
||||
variable_list = serializers.ListField(required=True,
|
||||
error_messages=ErrMessage.list(_("Reference Field")))
|
||||
|
||||
|
||||
class IVariableAssignNode(INode):
|
||||
type = 'variable-assign-node'
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return VariableAssignNodeParamsSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, variable_list, stream, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: maxkb
|
||||
@Author:虎
|
||||
@file: __init__.py
|
||||
@date:2024/6/11 17:49
|
||||
@desc:
|
||||
"""
|
||||
from .base_variable_assign_node import *
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
# coding=utf-8
|
||||
import json
|
||||
from typing import List
|
||||
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.variable_assign_node.i_variable_assign_node import IVariableAssignNode
|
||||
|
||||
|
||||
class BaseVariableAssignNode(IVariableAssignNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['variable_list'] = details.get('variable_list')
|
||||
self.context['result_list'] = details.get('result_list')
|
||||
|
||||
def execute(self, variable_list, stream, **kwargs) -> NodeResult:
|
||||
#
|
||||
result_list = []
|
||||
for variable in variable_list:
|
||||
if 'fields' not in variable:
|
||||
continue
|
||||
if 'global' == variable['fields'][0]:
|
||||
result = {
|
||||
'name': variable['name'],
|
||||
'input_value': self.get_reference_content(variable['fields']),
|
||||
}
|
||||
if variable['source'] == 'custom':
|
||||
if variable['type'] == 'json':
|
||||
if isinstance(variable['value'], dict) or isinstance(variable['value'], list):
|
||||
val = variable['value']
|
||||
else:
|
||||
val = json.loads(variable['value'])
|
||||
self.workflow_manage.context[variable['fields'][1]] = val
|
||||
result['output_value'] = variable['value'] = val
|
||||
elif variable['type'] == 'string':
|
||||
# 变量解析 例如:{{global.xxx}}
|
||||
val = self.workflow_manage.generate_prompt(variable['value'])
|
||||
self.workflow_manage.context[variable['fields'][1]] = val
|
||||
result['output_value'] = val
|
||||
else:
|
||||
val = variable['value']
|
||||
self.workflow_manage.context[variable['fields'][1]] = val
|
||||
result['output_value'] = val
|
||||
else:
|
||||
reference = self.get_reference_content(variable['reference'])
|
||||
self.workflow_manage.context[variable['fields'][1]] = reference
|
||||
result['output_value'] = reference
|
||||
result_list.append(result)
|
||||
|
||||
return NodeResult({'variable_list': variable_list, 'result_list': result_list}, {})
|
||||
|
||||
def get_reference_content(self, fields: List[str]):
|
||||
return str(self.workflow_manage.get_reference_field(
|
||||
fields[0],
|
||||
fields[1:]))
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': self.context.get('run_time'),
|
||||
'type': self.node.type,
|
||||
'variable_list': self.context.get('variable_list'),
|
||||
'result_list': self.context.get('result_list'),
|
||||
'status': self.status,
|
||||
'err_message': self.err_message
|
||||
}
|
||||
|
|
@ -16,6 +16,92 @@ from application.flow.i_step_node import WorkFlowPostHandler
|
|||
from common.response import result
|
||||
|
||||
|
||||
class Reasoning:
|
||||
def __init__(self, reasoning_content_start, reasoning_content_end):
|
||||
self.content = ""
|
||||
self.reasoning_content = ""
|
||||
self.all_content = ""
|
||||
self.reasoning_content_start_tag = reasoning_content_start
|
||||
self.reasoning_content_end_tag = reasoning_content_end
|
||||
self.reasoning_content_start_tag_len = len(
|
||||
reasoning_content_start) if reasoning_content_start is not None else 0
|
||||
self.reasoning_content_end_tag_len = len(reasoning_content_end) if reasoning_content_end is not None else 0
|
||||
self.reasoning_content_end_tag_prefix = reasoning_content_end[
|
||||
0] if self.reasoning_content_end_tag_len > 0 else ''
|
||||
self.reasoning_content_is_start = False
|
||||
self.reasoning_content_is_end = False
|
||||
self.reasoning_content_chunk = ""
|
||||
|
||||
def get_end_reasoning_content(self):
|
||||
if not self.reasoning_content_is_start and not self.reasoning_content_is_end:
|
||||
r = {'content': self.all_content, 'reasoning_content': ''}
|
||||
self.reasoning_content_chunk = ""
|
||||
return r
|
||||
if self.reasoning_content_is_start and not self.reasoning_content_is_end:
|
||||
r = {'content': '', 'reasoning_content': self.reasoning_content_chunk}
|
||||
self.reasoning_content_chunk = ""
|
||||
return r
|
||||
return {'content': '', 'reasoning_content': ''}
|
||||
|
||||
def get_reasoning_content(self, chunk):
|
||||
# 如果没有开始思考过程标签那么就全是结果
|
||||
if self.reasoning_content_start_tag is None or len(self.reasoning_content_start_tag) == 0:
|
||||
self.content += chunk.content
|
||||
return {'content': chunk.content, 'reasoning_content': ''}
|
||||
# 如果没有结束思考过程标签那么就全部是思考过程
|
||||
if self.reasoning_content_end_tag is None or len(self.reasoning_content_end_tag) == 0:
|
||||
return {'content': '', 'reasoning_content': chunk.content}
|
||||
self.all_content += chunk.content
|
||||
if not self.reasoning_content_is_start and len(self.all_content) >= self.reasoning_content_start_tag_len:
|
||||
if self.all_content.startswith(self.reasoning_content_start_tag):
|
||||
self.reasoning_content_is_start = True
|
||||
self.reasoning_content_chunk = self.all_content[self.reasoning_content_start_tag_len:]
|
||||
else:
|
||||
if not self.reasoning_content_is_end:
|
||||
self.reasoning_content_is_end = True
|
||||
self.content += self.all_content
|
||||
return {'content': self.all_content, 'reasoning_content': ''}
|
||||
else:
|
||||
if self.reasoning_content_is_start:
|
||||
self.reasoning_content_chunk += chunk.content
|
||||
reasoning_content_end_tag_prefix_index = self.reasoning_content_chunk.find(
|
||||
self.reasoning_content_end_tag_prefix)
|
||||
if self.reasoning_content_is_end:
|
||||
self.content += chunk.content
|
||||
return {'content': chunk.content, 'reasoning_content': ''}
|
||||
# 是否包含结束
|
||||
if reasoning_content_end_tag_prefix_index > -1:
|
||||
if len(self.reasoning_content_chunk) - reasoning_content_end_tag_prefix_index >= self.reasoning_content_end_tag_len:
|
||||
reasoning_content_end_tag_index = self.reasoning_content_chunk.find(self.reasoning_content_end_tag)
|
||||
if reasoning_content_end_tag_index > -1:
|
||||
reasoning_content_chunk = self.reasoning_content_chunk[0:reasoning_content_end_tag_index]
|
||||
content_chunk = self.reasoning_content_chunk[
|
||||
reasoning_content_end_tag_index + self.reasoning_content_end_tag_len:]
|
||||
self.reasoning_content += reasoning_content_chunk
|
||||
self.content += content_chunk
|
||||
self.reasoning_content_chunk = ""
|
||||
self.reasoning_content_is_end = True
|
||||
return {'content': content_chunk, 'reasoning_content': reasoning_content_chunk}
|
||||
else:
|
||||
reasoning_content_chunk = self.reasoning_content_chunk[0:reasoning_content_end_tag_prefix_index + 1]
|
||||
self.reasoning_content_chunk = self.reasoning_content_chunk.replace(reasoning_content_chunk, '')
|
||||
self.reasoning_content += reasoning_content_chunk
|
||||
return {'content': '', 'reasoning_content': reasoning_content_chunk}
|
||||
else:
|
||||
return {'content': '', 'reasoning_content': ''}
|
||||
|
||||
else:
|
||||
if self.reasoning_content_is_end:
|
||||
self.content += chunk.content
|
||||
return {'content': chunk.content, 'reasoning_content': ''}
|
||||
else:
|
||||
# aaa
|
||||
result = {'content': '', 'reasoning_content': self.reasoning_content_chunk}
|
||||
self.reasoning_content += self.reasoning_content_chunk
|
||||
self.reasoning_content_chunk = ""
|
||||
return result
|
||||
|
||||
|
||||
def event_content(chat_id, chat_record_id, response, workflow,
|
||||
write_context,
|
||||
post_handler: WorkFlowPostHandler):
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@
|
|||
@date:2024/1/9 17:40
|
||||
@desc:
|
||||
"""
|
||||
import concurrent
|
||||
import json
|
||||
import threading
|
||||
import traceback
|
||||
|
|
@ -13,7 +14,11 @@ from concurrent.futures import ThreadPoolExecutor
|
|||
from functools import reduce
|
||||
from typing import List, Dict
|
||||
|
||||
from django.db import close_old_connections
|
||||
from django.db.models import QuerySet
|
||||
from django.utils import translation
|
||||
from django.utils.translation import get_language
|
||||
from django.utils.translation import gettext as _
|
||||
from langchain_core.prompts import PromptTemplate
|
||||
from rest_framework import status
|
||||
from rest_framework.exceptions import ErrorDetail, ValidationError
|
||||
|
|
@ -28,7 +33,7 @@ from function_lib.models.function import FunctionLib
|
|||
from setting.models import Model
|
||||
from setting.models_provider import get_model_credential
|
||||
|
||||
executor = ThreadPoolExecutor(max_workers=50)
|
||||
executor = ThreadPoolExecutor(max_workers=200)
|
||||
|
||||
|
||||
class Edge:
|
||||
|
|
@ -53,7 +58,7 @@ class Node:
|
|||
|
||||
|
||||
end_nodes = ['ai-chat-node', 'reply-node', 'function-node', 'function-lib-node', 'application-node',
|
||||
'image-understand-node']
|
||||
'image-understand-node', 'speech-to-text-node', 'text-to-speech-node', 'image-generate-node']
|
||||
|
||||
|
||||
class Flow:
|
||||
|
|
@ -99,12 +104,14 @@ class Flow:
|
|||
edge_list = [edge for edge in self.edges if edge.sourceAnchorId == source_anchor_id]
|
||||
if len(edge_list) == 0:
|
||||
raise AppApiException(500,
|
||||
f'{node.properties.get("stepName")} 节点的{branch.get("type")}分支需要连接')
|
||||
_('The branch {branch} of the {node} node needs to be connected').format(
|
||||
node=node.properties.get("stepName"), branch=branch.get("type")))
|
||||
|
||||
else:
|
||||
edge_list = [edge for edge in self.edges if edge.sourceNodeId == node.id]
|
||||
if len(edge_list) == 0 and not end_nodes.__contains__(node.type):
|
||||
raise AppApiException(500, f'{node.properties.get("stepName")} 节点不能当做结束节点')
|
||||
raise AppApiException(500, _("{node} Nodes cannot be considered as end nodes").format(
|
||||
node=node.properties.get("stepName")))
|
||||
|
||||
def get_next_nodes(self, node: Node):
|
||||
edge_list = [edge for edge in self.edges if edge.sourceNodeId == node.id]
|
||||
|
|
@ -113,7 +120,7 @@ class Flow:
|
|||
[])
|
||||
if len(node_list) == 0 and not end_nodes.__contains__(node.type):
|
||||
raise AppApiException(500,
|
||||
f'不存在的下一个节点')
|
||||
_("The next node that does not exist"))
|
||||
return node_list
|
||||
|
||||
def is_valid_work_flow(self, up_node=None):
|
||||
|
|
@ -127,16 +134,17 @@ class Flow:
|
|||
def is_valid_start_node(self):
|
||||
start_node_list = [node for node in self.nodes if node.id == 'start-node']
|
||||
if len(start_node_list) == 0:
|
||||
raise AppApiException(500, '开始节点必填')
|
||||
raise AppApiException(500, _('The starting node is required'))
|
||||
if len(start_node_list) > 1:
|
||||
raise AppApiException(500, '开始节点只能有一个')
|
||||
raise AppApiException(500, _('There can only be one starting node'))
|
||||
|
||||
def is_valid_model_params(self):
|
||||
node_list = [node for node in self.nodes if (node.type == 'ai-chat-node' or node.type == 'question-node')]
|
||||
for node in node_list:
|
||||
model = QuerySet(Model).filter(id=node.properties.get('node_data', {}).get('model_id')).first()
|
||||
if model is None:
|
||||
raise ValidationError(ErrorDetail(f'节点{node.properties.get("stepName")} 模型不存在'))
|
||||
raise ValidationError(ErrorDetail(
|
||||
_('The node {node} model does not exist').format(node=node.properties.get("stepName"))))
|
||||
credential = get_model_credential(model.provider, model.model_type, model.model_name)
|
||||
model_params_setting = node.properties.get('node_data', {}).get('model_params_setting')
|
||||
model_params_setting_form = credential.get_model_params_setting_form(
|
||||
|
|
@ -145,22 +153,25 @@ class Flow:
|
|||
model_params_setting = model_params_setting_form.get_default_form_data()
|
||||
node.properties.get('node_data', {})['model_params_setting'] = model_params_setting
|
||||
if node.properties.get('status', 200) != 200:
|
||||
raise ValidationError(ErrorDetail(f'节点{node.properties.get("stepName")} 不可用'))
|
||||
raise ValidationError(
|
||||
ErrorDetail(_("Node {node} is unavailable").format(node.properties.get("stepName"))))
|
||||
node_list = [node for node in self.nodes if (node.type == 'function-lib-node')]
|
||||
for node in node_list:
|
||||
function_lib_id = node.properties.get('node_data', {}).get('function_lib_id')
|
||||
if function_lib_id is None:
|
||||
raise ValidationError(ErrorDetail(f'节点{node.properties.get("stepName")} 函数库id不能为空'))
|
||||
raise ValidationError(ErrorDetail(
|
||||
_('The library ID of node {node} cannot be empty').format(node=node.properties.get("stepName"))))
|
||||
f_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
|
||||
if f_lib is None:
|
||||
raise ValidationError(ErrorDetail(f'节点{node.properties.get("stepName")} 函数库不可用'))
|
||||
raise ValidationError(ErrorDetail(_("The function library for node {node} is not available").format(
|
||||
node=node.properties.get("stepName"))))
|
||||
|
||||
def is_valid_base_node(self):
|
||||
base_node_list = [node for node in self.nodes if node.id == 'base-node']
|
||||
if len(base_node_list) == 0:
|
||||
raise AppApiException(500, '基本信息节点必填')
|
||||
raise AppApiException(500, _('Basic information node is required'))
|
||||
if len(base_node_list) > 1:
|
||||
raise AppApiException(500, '基本信息节点只能有一个')
|
||||
raise AppApiException(500, _('There can only be one basic information node'))
|
||||
|
||||
|
||||
class NodeResultFuture:
|
||||
|
|
@ -222,27 +233,12 @@ class NodeChunkManage:
|
|||
return None
|
||||
|
||||
|
||||
class NodeChunk:
|
||||
def __init__(self):
|
||||
self.status = 0
|
||||
self.chunk_list = []
|
||||
|
||||
def add_chunk(self, chunk):
|
||||
self.chunk_list.append(chunk)
|
||||
|
||||
def end(self, chunk=None):
|
||||
if chunk is not None:
|
||||
self.add_chunk(chunk)
|
||||
self.status = 200
|
||||
|
||||
def is_end(self):
|
||||
return self.status == 200
|
||||
|
||||
|
||||
class WorkflowManage:
|
||||
def __init__(self, flow: Flow, params, work_flow_post_handler: WorkFlowPostHandler,
|
||||
base_to_response: BaseToResponse = SystemToResponse(), form_data=None, image_list=None,
|
||||
document_list=None,
|
||||
audio_list=None,
|
||||
other_list=None,
|
||||
start_node_id=None,
|
||||
start_node_data=None, chat_record=None, child_node=None):
|
||||
if form_data is None:
|
||||
|
|
@ -251,14 +247,19 @@ class WorkflowManage:
|
|||
image_list = []
|
||||
if document_list is None:
|
||||
document_list = []
|
||||
if audio_list is None:
|
||||
audio_list = []
|
||||
if other_list is None:
|
||||
other_list = []
|
||||
self.start_node_id = start_node_id
|
||||
self.start_node = None
|
||||
self.form_data = form_data
|
||||
self.image_list = image_list
|
||||
self.document_list = document_list
|
||||
self.audio_list = audio_list
|
||||
self.other_list = other_list
|
||||
self.params = params
|
||||
self.flow = flow
|
||||
self.lock = threading.Lock()
|
||||
self.context = {}
|
||||
self.node_chunk_manage = NodeChunkManage(self)
|
||||
self.work_flow_post_handler = work_flow_post_handler
|
||||
|
|
@ -266,16 +267,42 @@ class WorkflowManage:
|
|||
self.current_result = None
|
||||
self.answer = ""
|
||||
self.answer_list = ['']
|
||||
self.status = 0
|
||||
self.status = 200
|
||||
self.base_to_response = base_to_response
|
||||
self.chat_record = chat_record
|
||||
self.await_future_map = {}
|
||||
self.child_node = child_node
|
||||
self.future_list = []
|
||||
self.lock = threading.Lock()
|
||||
self.field_list = []
|
||||
self.global_field_list = []
|
||||
self.init_fields()
|
||||
if start_node_id is not None:
|
||||
self.load_node(chat_record, start_node_id, start_node_data)
|
||||
else:
|
||||
self.node_context = []
|
||||
|
||||
def init_fields(self):
|
||||
field_list = []
|
||||
global_field_list = []
|
||||
for node in self.flow.nodes:
|
||||
properties = node.properties
|
||||
node_name = properties.get('stepName')
|
||||
node_id = node.id
|
||||
node_config = properties.get('config')
|
||||
if node_config is not None:
|
||||
fields = node_config.get('fields')
|
||||
if fields is not None:
|
||||
for field in fields:
|
||||
field_list.append({**field, 'node_id': node_id, 'node_name': node_name})
|
||||
global_fields = node_config.get('globalFields')
|
||||
if global_fields is not None:
|
||||
for global_field in global_fields:
|
||||
global_field_list.append({**global_field, 'node_id': node_id, 'node_name': node_name})
|
||||
field_list.sort(key=lambda f: len(f.get('node_name')), reverse=True)
|
||||
global_field_list.sort(key=lambda f: len(f.get('node_name')), reverse=True)
|
||||
self.field_list = field_list
|
||||
self.global_field_list = global_field_list
|
||||
|
||||
def append_answer(self, content):
|
||||
self.answer += content
|
||||
self.answer_list[-1] += content
|
||||
|
|
@ -302,6 +329,9 @@ class WorkflowManage:
|
|||
get_node_params=get_node_params)
|
||||
self.start_node.valid_args(
|
||||
{**self.start_node.node_params, 'form_data': start_node_data}, self.start_node.workflow_params)
|
||||
if self.start_node.type == 'application-node':
|
||||
application_node_dict = node_details.get('application_node_dict', {})
|
||||
self.start_node.context['application_node_dict'] = application_node_dict
|
||||
self.node_context.append(self.start_node)
|
||||
continue
|
||||
|
||||
|
|
@ -309,46 +339,68 @@ class WorkflowManage:
|
|||
node = self.get_node_cls_by_id(node_id, node_details.get('up_node_id_list'))
|
||||
node.valid_args(node.node_params, node.workflow_params)
|
||||
node.save_context(node_details, self)
|
||||
node.node_chunk.end()
|
||||
self.node_context.append(node)
|
||||
|
||||
def run(self):
|
||||
close_old_connections()
|
||||
language = get_language()
|
||||
if self.params.get('stream'):
|
||||
return self.run_stream(self.start_node, None)
|
||||
return self.run_block()
|
||||
return self.run_stream(self.start_node, None, language)
|
||||
return self.run_block(language)
|
||||
|
||||
def run_block(self):
|
||||
def run_block(self, language='zh'):
|
||||
"""
|
||||
非流式响应
|
||||
@return: 结果
|
||||
"""
|
||||
result = self.run_chain_async(None, None)
|
||||
result.result()
|
||||
self.run_chain_async(None, None, language)
|
||||
while self.is_run():
|
||||
pass
|
||||
details = self.get_runtime_details()
|
||||
message_tokens = sum([row.get('message_tokens') for row in details.values() if
|
||||
'message_tokens' in row and row.get('message_tokens') is not None])
|
||||
answer_tokens = sum([row.get('answer_tokens') for row in details.values() if
|
||||
'answer_tokens' in row and row.get('answer_tokens') is not None])
|
||||
answer_text_list = self.get_answer_text_list()
|
||||
answer_text = '\n\n'.join(answer['content'] for answer in answer_text_list)
|
||||
answer_text = '\n\n'.join(
|
||||
'\n\n'.join([a.get('content') for a in answer]) for answer in
|
||||
answer_text_list)
|
||||
answer_list = reduce(lambda pre, _n: [*pre, *_n], answer_text_list, [])
|
||||
self.work_flow_post_handler.handler(self.params['chat_id'], self.params['chat_record_id'],
|
||||
answer_text,
|
||||
self)
|
||||
return self.base_to_response.to_block_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'], answer_text, True
|
||||
, message_tokens, answer_tokens,
|
||||
_status=status.HTTP_200_OK if self.status == 200 else status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
_status=status.HTTP_200_OK if self.status == 200 else status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
other_params={'answer_list': answer_list})
|
||||
|
||||
def run_stream(self, current_node, node_result_future):
|
||||
def run_stream(self, current_node, node_result_future, language='zh'):
|
||||
"""
|
||||
流式响应
|
||||
@return:
|
||||
"""
|
||||
result = self.run_chain_async(current_node, node_result_future)
|
||||
return tools.to_stream_response_simple(self.await_result(result))
|
||||
self.run_chain_async(current_node, node_result_future, language)
|
||||
return tools.to_stream_response_simple(self.await_result())
|
||||
|
||||
def await_result(self, result):
|
||||
def is_run(self, timeout=0.5):
|
||||
future_list_len = len(self.future_list)
|
||||
try:
|
||||
while await_result(result):
|
||||
r = concurrent.futures.wait(self.future_list, timeout)
|
||||
if len(r.not_done) > 0:
|
||||
return True
|
||||
else:
|
||||
if future_list_len == len(self.future_list):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
except Exception as e:
|
||||
return True
|
||||
|
||||
def await_result(self):
|
||||
try:
|
||||
while self.is_run():
|
||||
while True:
|
||||
chunk = self.node_chunk_manage.pop()
|
||||
if chunk is not None:
|
||||
|
|
@ -361,6 +413,8 @@ class WorkflowManage:
|
|||
break
|
||||
yield chunk
|
||||
finally:
|
||||
while self.is_run():
|
||||
pass
|
||||
details = self.get_runtime_details()
|
||||
message_tokens = sum([row.get('message_tokens') for row in details.values() if
|
||||
'message_tokens' in row and row.get('message_tokens') is not None])
|
||||
|
|
@ -375,44 +429,45 @@ class WorkflowManage:
|
|||
[],
|
||||
'', True, message_tokens, answer_tokens, {})
|
||||
|
||||
def run_chain_async(self, current_node, node_result_future):
|
||||
future = executor.submit(self.run_chain, current_node, node_result_future)
|
||||
return future
|
||||
def run_chain_async(self, current_node, node_result_future, language='zh'):
|
||||
future = executor.submit(self.run_chain_manage, current_node, node_result_future, language)
|
||||
self.future_list.append(future)
|
||||
|
||||
def set_await_map(self, node_run_list):
|
||||
sorted_node_run_list = sorted(node_run_list, key=lambda n: n.get('node').node.y)
|
||||
for index in range(len(sorted_node_run_list)):
|
||||
self.await_future_map[sorted_node_run_list[index].get('node').runtime_node_id] = [
|
||||
sorted_node_run_list[i].get('future')
|
||||
for i in range(index)]
|
||||
|
||||
def run_chain(self, current_node, node_result_future=None):
|
||||
def run_chain_manage(self, current_node, node_result_future, language='zh'):
|
||||
translation.activate(language)
|
||||
if current_node is None:
|
||||
start_node = self.get_start_node()
|
||||
current_node = get_node(start_node.type)(start_node, self.params, self)
|
||||
self.node_chunk_manage.add_node_chunk(current_node.node_chunk)
|
||||
# 添加节点
|
||||
self.append_node(current_node)
|
||||
result = self.run_chain(current_node, node_result_future)
|
||||
if result is None:
|
||||
return
|
||||
node_list = self.get_next_node_list(current_node, result)
|
||||
if len(node_list) == 1:
|
||||
self.run_chain_manage(node_list[0], None, language)
|
||||
elif len(node_list) > 1:
|
||||
sorted_node_run_list = sorted(node_list, key=lambda n: n.node.y)
|
||||
# 获取到可执行的子节点
|
||||
result_list = [{'node': node, 'future': executor.submit(self.run_chain_manage, node, None, language)} for
|
||||
node in
|
||||
sorted_node_run_list]
|
||||
for r in result_list:
|
||||
self.future_list.append(r.get('future'))
|
||||
|
||||
def run_chain(self, current_node, node_result_future=None):
|
||||
if node_result_future is None:
|
||||
node_result_future = self.run_node_future(current_node)
|
||||
try:
|
||||
is_stream = self.params.get('stream', True)
|
||||
# 处理节点响应
|
||||
await_future_list = self.await_future_map.get(current_node.runtime_node_id, None)
|
||||
if await_future_list is not None:
|
||||
[f.result() for f in await_future_list]
|
||||
result = self.hand_event_node_result(current_node,
|
||||
node_result_future) if is_stream else self.hand_node_result(
|
||||
current_node, node_result_future)
|
||||
with self.lock:
|
||||
if current_node.status == 500:
|
||||
return
|
||||
node_list = self.get_next_node_list(current_node, result)
|
||||
# 获取到可执行的子节点
|
||||
result_list = [{'node': node, 'future': self.run_chain_async(node, None)} for node in node_list]
|
||||
self.set_await_map(result_list)
|
||||
[r.get('future').result() for r in result_list]
|
||||
if self.status == 0:
|
||||
self.status = 200
|
||||
return result
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
def hand_node_result(self, current_node, node_result_future):
|
||||
try:
|
||||
|
|
@ -421,16 +476,14 @@ class WorkflowManage:
|
|||
if result is not None:
|
||||
# 阻塞获取结果
|
||||
list(result)
|
||||
# 添加节点
|
||||
self.node_context.append(current_node)
|
||||
return current_result
|
||||
except Exception as e:
|
||||
# 添加节点
|
||||
self.node_context.append(current_node)
|
||||
traceback.print_exc()
|
||||
self.status = 500
|
||||
current_node.get_write_error_context(e)
|
||||
self.answer += str(e)
|
||||
finally:
|
||||
current_node.node_chunk.end()
|
||||
|
||||
def append_node(self, current_node):
|
||||
for index in range(len(self.node_context)):
|
||||
|
|
@ -441,75 +494,81 @@ class WorkflowManage:
|
|||
self.node_context.append(current_node)
|
||||
|
||||
def hand_event_node_result(self, current_node, node_result_future):
|
||||
node_chunk = NodeChunk()
|
||||
runtime_node_id = current_node.runtime_node_id
|
||||
real_node_id = current_node.runtime_node_id
|
||||
child_node = {}
|
||||
view_type = current_node.view_type
|
||||
try:
|
||||
current_result = node_result_future.result()
|
||||
result = current_result.write_context(current_node, self)
|
||||
if result is not None:
|
||||
if self.is_result(current_node, current_result):
|
||||
self.node_chunk_manage.add_node_chunk(node_chunk)
|
||||
for r in result:
|
||||
reasoning_content = ''
|
||||
content = r
|
||||
child_node = {}
|
||||
node_is_end = False
|
||||
view_type = current_node.view_type
|
||||
if isinstance(r, dict):
|
||||
content = r.get('content')
|
||||
child_node = {'runtime_node_id': r.get('runtime_node_id'),
|
||||
'chat_record_id': r.get('chat_record_id')
|
||||
, 'child_node': r.get('child_node')}
|
||||
real_node_id = r.get('real_node_id')
|
||||
node_is_end = r.get('node_is_end')
|
||||
if r.__contains__('real_node_id'):
|
||||
real_node_id = r.get('real_node_id')
|
||||
if r.__contains__('node_is_end'):
|
||||
node_is_end = r.get('node_is_end')
|
||||
view_type = r.get('view_type')
|
||||
reasoning_content = r.get('reasoning_content')
|
||||
chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'],
|
||||
current_node.id,
|
||||
current_node.up_node_id_list,
|
||||
content, False, 0, 0,
|
||||
{'node_type': current_node.type,
|
||||
'runtime_node_id': current_node.runtime_node_id,
|
||||
'view_type': current_node.view_type,
|
||||
'runtime_node_id': runtime_node_id,
|
||||
'view_type': view_type,
|
||||
'child_node': child_node,
|
||||
'node_is_end': node_is_end,
|
||||
'real_node_id': real_node_id})
|
||||
node_chunk.add_chunk(chunk)
|
||||
chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'],
|
||||
current_node.id,
|
||||
current_node.up_node_id_list,
|
||||
'', False, 0, 0, {'node_is_end': True,
|
||||
'runtime_node_id': current_node.runtime_node_id,
|
||||
'node_type': current_node.type,
|
||||
'view_type': current_node.view_type,
|
||||
'child_node': child_node,
|
||||
'real_node_id': real_node_id})
|
||||
node_chunk.end(chunk)
|
||||
'real_node_id': real_node_id,
|
||||
'reasoning_content': reasoning_content})
|
||||
current_node.node_chunk.add_chunk(chunk)
|
||||
chunk = (self.base_to_response
|
||||
.to_stream_chunk_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'],
|
||||
current_node.id,
|
||||
current_node.up_node_id_list,
|
||||
'', False, 0, 0, {'node_is_end': True,
|
||||
'runtime_node_id': runtime_node_id,
|
||||
'node_type': current_node.type,
|
||||
'view_type': view_type,
|
||||
'child_node': child_node,
|
||||
'real_node_id': real_node_id,
|
||||
'reasoning_content': ''}))
|
||||
current_node.node_chunk.add_chunk(chunk)
|
||||
else:
|
||||
list(result)
|
||||
# 添加节点
|
||||
self.append_node(current_node)
|
||||
return current_result
|
||||
except Exception as e:
|
||||
# 添加节点
|
||||
self.append_node(current_node)
|
||||
traceback.print_exc()
|
||||
self.answer += str(e)
|
||||
chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'],
|
||||
current_node.id,
|
||||
current_node.up_node_id_list,
|
||||
str(e), False, 0, 0,
|
||||
'Exception:' + str(e), False, 0, 0,
|
||||
{'node_is_end': True,
|
||||
'runtime_node_id': current_node.runtime_node_id,
|
||||
'node_type': current_node.type,
|
||||
'view_type': current_node.view_type,
|
||||
'child_node': {},
|
||||
'real_node_id': real_node_id})
|
||||
if not self.node_chunk_manage.contains(node_chunk):
|
||||
self.node_chunk_manage.add_node_chunk(node_chunk)
|
||||
node_chunk.end(chunk)
|
||||
current_node.node_chunk.add_chunk(chunk)
|
||||
current_node.get_write_error_context(e)
|
||||
self.status = 500
|
||||
return None
|
||||
finally:
|
||||
current_node.node_chunk.end()
|
||||
|
||||
def run_node_async(self, node):
|
||||
future = executor.submit(self.run_node, node)
|
||||
|
|
@ -575,35 +634,28 @@ class WorkflowManage:
|
|||
|
||||
def get_answer_text_list(self):
|
||||
result = []
|
||||
next_node_id_list = []
|
||||
if self.start_node is not None:
|
||||
next_node_id_list = [edge.targetNodeId for edge in self.flow.edges if
|
||||
edge.sourceNodeId == self.start_node.id]
|
||||
for index in range(len(self.node_context)):
|
||||
node = self.node_context[index]
|
||||
up_node = None
|
||||
if index > 0:
|
||||
up_node = self.node_context[index - 1]
|
||||
answer_text = node.get_answer_text()
|
||||
if answer_text is not None:
|
||||
if up_node is None or node.view_type == 'single_view' or (
|
||||
node.view_type == 'many_view' and up_node.view_type == 'single_view'):
|
||||
result.append(node.get_answer_text())
|
||||
elif self.chat_record is not None and next_node_id_list.__contains__(
|
||||
node.id) and up_node is not None and not next_node_id_list.__contains__(
|
||||
up_node.id):
|
||||
result.append(node.get_answer_text())
|
||||
answer_list = reduce(lambda x, y: [*x, *y],
|
||||
[n.get_answer_list() for n in self.node_context if n.get_answer_list() is not None],
|
||||
[])
|
||||
up_node = None
|
||||
for index in range(len(answer_list)):
|
||||
current_answer = answer_list[index]
|
||||
if len(current_answer.content) > 0:
|
||||
if up_node is None or current_answer.view_type == 'single_view' or (
|
||||
current_answer.view_type == 'many_view' and up_node.view_type == 'single_view'):
|
||||
result.append([current_answer])
|
||||
else:
|
||||
if len(result) > 0:
|
||||
exec_index = len(result) - 1
|
||||
content = result[exec_index]['content']
|
||||
result[exec_index]['content'] += answer_text['content'] if len(
|
||||
content) == 0 else ('\n\n' + answer_text['content'])
|
||||
if isinstance(result[exec_index], list):
|
||||
result[exec_index].append(current_answer)
|
||||
else:
|
||||
answer_text = node.get_answer_text()
|
||||
result.insert(0, answer_text)
|
||||
|
||||
return result
|
||||
result.insert(0, [current_answer])
|
||||
up_node = current_answer
|
||||
if len(result) == 0:
|
||||
# 如果没有响应 就响应一个空数据
|
||||
return [[]]
|
||||
return [[item.to_dict() for item in r] for r in result]
|
||||
|
||||
def get_next_node(self):
|
||||
"""
|
||||
|
|
@ -627,6 +679,8 @@ class WorkflowManage:
|
|||
|
||||
@staticmethod
|
||||
def dependent_node(up_node_id, node):
|
||||
if not node.node_chunk.is_end():
|
||||
return False
|
||||
if node.id == up_node_id:
|
||||
if node.type == 'form-node':
|
||||
if node.context.get('form_data', None) is not None:
|
||||
|
|
@ -663,14 +717,33 @@ class WorkflowManage:
|
|||
for edge in self.flow.edges:
|
||||
if (edge.sourceNodeId == current_node.id and
|
||||
f"{edge.sourceNodeId}_{current_node_result.node_variable.get('branch_id')}_right" == edge.sourceAnchorId):
|
||||
if self.dependent_node_been_executed(edge.targetNodeId):
|
||||
next_node = [node for node in self.flow.nodes if node.id == edge.targetNodeId]
|
||||
if len(next_node) == 0:
|
||||
continue
|
||||
if next_node[0].properties.get('condition', "AND") == 'AND':
|
||||
if self.dependent_node_been_executed(edge.targetNodeId):
|
||||
node_list.append(
|
||||
self.get_node_cls_by_id(edge.targetNodeId,
|
||||
[*current_node.up_node_id_list, current_node.node.id]))
|
||||
else:
|
||||
node_list.append(
|
||||
self.get_node_cls_by_id(edge.targetNodeId, self.get_up_node_id_list(edge.targetNodeId)))
|
||||
self.get_node_cls_by_id(edge.targetNodeId,
|
||||
[*current_node.up_node_id_list, current_node.node.id]))
|
||||
else:
|
||||
for edge in self.flow.edges:
|
||||
if edge.sourceNodeId == current_node.id and self.dependent_node_been_executed(edge.targetNodeId):
|
||||
node_list.append(
|
||||
self.get_node_cls_by_id(edge.targetNodeId, self.get_up_node_id_list(edge.targetNodeId)))
|
||||
if edge.sourceNodeId == current_node.id:
|
||||
next_node = [node for node in self.flow.nodes if node.id == edge.targetNodeId]
|
||||
if len(next_node) == 0:
|
||||
continue
|
||||
if next_node[0].properties.get('condition', "AND") == 'AND':
|
||||
if self.dependent_node_been_executed(edge.targetNodeId):
|
||||
node_list.append(
|
||||
self.get_node_cls_by_id(edge.targetNodeId,
|
||||
[*current_node.up_node_id_list, current_node.node.id]))
|
||||
else:
|
||||
node_list.append(
|
||||
self.get_node_cls_by_id(edge.targetNodeId,
|
||||
[*current_node.up_node_id_list, current_node.node.id]))
|
||||
return node_list
|
||||
|
||||
def get_reference_field(self, node_id: str, fields: List[str]):
|
||||
|
|
@ -684,33 +757,36 @@ class WorkflowManage:
|
|||
else:
|
||||
return self.get_node_by_id(node_id).get_reference_field(fields)
|
||||
|
||||
def get_workflow_content(self):
|
||||
context = {
|
||||
'global': self.context,
|
||||
}
|
||||
|
||||
for node in self.node_context:
|
||||
context[node.id] = node.context
|
||||
return context
|
||||
|
||||
def reset_prompt(self, prompt: str):
|
||||
placeholder = "{}"
|
||||
for field in self.field_list:
|
||||
globeLabel = f"{field.get('node_name')}.{field.get('value')}"
|
||||
globeValue = f"context.get('{field.get('node_id')}',{placeholder}).get('{field.get('value', '')}','')"
|
||||
prompt = prompt.replace(globeLabel, globeValue)
|
||||
for field in self.global_field_list:
|
||||
globeLabel = f"全局变量.{field.get('value')}"
|
||||
globeLabelNew = f"global.{field.get('value')}"
|
||||
globeValue = f"context.get('global').get('{field.get('value', '')}','')"
|
||||
prompt = prompt.replace(globeLabel, globeValue).replace(globeLabelNew, globeValue)
|
||||
return prompt
|
||||
|
||||
def generate_prompt(self, prompt: str):
|
||||
"""
|
||||
格式化生成提示词
|
||||
@param prompt: 提示词信息
|
||||
@return: 格式化后的提示词
|
||||
"""
|
||||
context = {
|
||||
'global': self.context,
|
||||
}
|
||||
|
||||
for node in self.node_context:
|
||||
properties = node.node.properties
|
||||
node_config = properties.get('config')
|
||||
if node_config is not None:
|
||||
fields = node_config.get('fields')
|
||||
if fields is not None:
|
||||
for field in fields:
|
||||
globeLabel = f"{properties.get('stepName')}.{field.get('value')}"
|
||||
globeValue = f"context['{node.id}'].{field.get('value')}"
|
||||
prompt = prompt.replace(globeLabel, globeValue)
|
||||
global_fields = node_config.get('globalFields')
|
||||
if global_fields is not None:
|
||||
for field in global_fields:
|
||||
globeLabel = f"全局变量.{field.get('value')}"
|
||||
globeValue = f"context['global'].{field.get('value')}"
|
||||
prompt = prompt.replace(globeLabel, globeValue)
|
||||
context[node.id] = node.context
|
||||
context = self.get_workflow_content()
|
||||
prompt = self.reset_prompt(prompt)
|
||||
prompt_template = PromptTemplate.from_template(prompt, template_format='jinja2')
|
||||
value = prompt_template.format(context=context)
|
||||
return value
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
# Generated by Django 4.2.13 on 2024-07-15 15:52
|
||||
|
||||
import application.models.application
|
||||
from django.db import migrations, models
|
||||
|
||||
import common.encoder.encoder
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('application', '0009_application_type_application_work_flow_and_more'),
|
||||
]
|
||||
|
|
@ -14,6 +14,6 @@ class Migration(migrations.Migration):
|
|||
migrations.AlterField(
|
||||
model_name='chatrecord',
|
||||
name='details',
|
||||
field=models.JSONField(default=dict, encoder=application.models.application.DateEncoder, verbose_name='对话详情'),
|
||||
field=models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='对话详情'),
|
||||
),
|
||||
]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,34 @@
|
|||
# Generated by Django 4.2.15 on 2024-12-27 18:42
|
||||
|
||||
from django.db import migrations, models
|
||||
import uuid
|
||||
|
||||
run_sql = """
|
||||
UPDATE application_public_access_client
|
||||
SET client_id="id"
|
||||
"""
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('application', '0020_application_record_update_time'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='applicationpublicaccessclient',
|
||||
name='client_id',
|
||||
field=models.UUIDField(default=uuid.uuid1, verbose_name='公共访问链接客户端id'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='applicationpublicaccessclient',
|
||||
name='id',
|
||||
field=models.UUIDField(default=uuid.uuid1, editable=False, primary_key=True, serialize=False,
|
||||
verbose_name='主键id'),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name='applicationpublicaccessclient',
|
||||
index=models.Index(fields=['client_id'], name='application_client__4de9af_idx'),
|
||||
),
|
||||
migrations.RunSQL(run_sql)
|
||||
]
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Generated by Django 4.2.15 on 2025-01-03 14:07
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('application', '0021_applicationpublicaccessclient_client_id_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='application',
|
||||
name='tts_autoplay',
|
||||
field=models.BooleanField(default=False, verbose_name='自动播放'),
|
||||
),
|
||||
]
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Generated by Django 4.2.15 on 2025-01-06 10:37
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('application', '0022_application_tts_autoplay'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='application',
|
||||
name='stt_autosend',
|
||||
field=models.BooleanField(default=False, verbose_name='自动发送'),
|
||||
),
|
||||
]
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
# Generated by Django 4.2.15 on 2025-01-20 06:59
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('application', '0023_application_stt_autosend'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='applicationaccesstoken',
|
||||
name='language',
|
||||
field=models.CharField(default=None, max_length=10, null=True, verbose_name='语言')
|
||||
),
|
||||
]
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Generated by Django 4.2.18 on 2025-01-22 09:53
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('application', '0024_applicationaccesstoken_language'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='application',
|
||||
name='prologue',
|
||||
field=models.CharField(default='', max_length=40960, verbose_name='开场白'),
|
||||
),
|
||||
]
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
# Generated by Django 4.2.18 on 2025-03-18 06:05
|
||||
|
||||
import application.models.application
|
||||
import common.encoder.encoder
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('application', '0025_alter_application_prologue'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='chat',
|
||||
name='asker',
|
||||
field=models.JSONField(default=application.models.application.default_asker, encoder=common.encoder.encoder.SystemEncoder, verbose_name='访问者'),
|
||||
),
|
||||
]
|
||||
|
|
@ -13,9 +13,14 @@ from django.db import models
|
|||
|
||||
from application.models import Application
|
||||
from common.mixins.app_model_mixin import AppModelMixin
|
||||
from smartdoc.const import CONFIG
|
||||
from users.models import User
|
||||
|
||||
|
||||
def get_language():
|
||||
return CONFIG.get_language_code()
|
||||
|
||||
|
||||
class ApplicationApiKey(AppModelMixin):
|
||||
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
|
||||
secret_key = models.CharField(max_length=1024, verbose_name="秘钥", unique=True)
|
||||
|
|
@ -45,15 +50,21 @@ class ApplicationAccessToken(AppModelMixin):
|
|||
, default=list)
|
||||
show_source = models.BooleanField(default=False, verbose_name="是否显示知识来源")
|
||||
|
||||
language = models.CharField(max_length=10, verbose_name="语言", default=None, null=True)
|
||||
|
||||
class Meta:
|
||||
db_table = "application_access_token"
|
||||
|
||||
|
||||
class ApplicationPublicAccessClient(AppModelMixin):
|
||||
id = models.UUIDField(max_length=128, primary_key=True, verbose_name="公共访问链接客户端id")
|
||||
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
|
||||
client_id = models.UUIDField(max_length=128, default=uuid.uuid1, verbose_name="公共访问链接客户端id")
|
||||
application = models.ForeignKey(Application, on_delete=models.CASCADE, verbose_name="应用id")
|
||||
access_num = models.IntegerField(default=0, verbose_name="访问总次数次数")
|
||||
intraday_access_num = models.IntegerField(default=0, verbose_name="当日访问次数")
|
||||
|
||||
class Meta:
|
||||
db_table = "application_public_access_client"
|
||||
indexes = [
|
||||
models.Index(fields=['client_id']),
|
||||
]
|
||||
|
|
|
|||
|
|
@ -6,14 +6,13 @@
|
|||
@date:2023/9/25 14:24
|
||||
@desc:
|
||||
"""
|
||||
import datetime
|
||||
import json
|
||||
import uuid
|
||||
|
||||
from django.contrib.postgres.fields import ArrayField
|
||||
from django.db import models
|
||||
from langchain.schema import HumanMessage, AIMessage
|
||||
|
||||
from django.utils.translation import gettext as _
|
||||
from common.encoder.encoder import SystemEncoder
|
||||
from common.mixins.app_model_mixin import AppModelMixin
|
||||
from dataset.models.data_set import DataSet
|
||||
from setting.models.model_management import Model
|
||||
|
|
@ -35,14 +34,20 @@ def get_dataset_setting_dict():
|
|||
|
||||
|
||||
def get_model_setting_dict():
|
||||
return {'prompt': Application.get_default_model_prompt(), 'no_references_prompt': '{question}'}
|
||||
return {
|
||||
'prompt': Application.get_default_model_prompt(),
|
||||
'no_references_prompt': '{question}',
|
||||
'reasoning_content_start': '<think>',
|
||||
'reasoning_content_end': '</think>',
|
||||
'reasoning_content_enable': False,
|
||||
}
|
||||
|
||||
|
||||
class Application(AppModelMixin):
|
||||
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
|
||||
name = models.CharField(max_length=128, verbose_name="应用名称")
|
||||
desc = models.CharField(max_length=512, verbose_name="引用描述", default="")
|
||||
prologue = models.CharField(max_length=4096, verbose_name="开场白", default="")
|
||||
prologue = models.CharField(max_length=40960, verbose_name="开场白", default="")
|
||||
dialogue_number = models.IntegerField(default=0, verbose_name="会话数量")
|
||||
user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
|
||||
model = models.ForeignKey(Model, on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True)
|
||||
|
|
@ -65,6 +70,8 @@ class Application(AppModelMixin):
|
|||
tts_model_enable = models.BooleanField(verbose_name="语音合成模型是否启用", default=False)
|
||||
stt_model_enable = models.BooleanField(verbose_name="语音识别模型是否启用", default=False)
|
||||
tts_type = models.CharField(verbose_name="语音播放类型", max_length=20, default="BROWSER")
|
||||
tts_autoplay = models.BooleanField(verbose_name="自动播放", default=False)
|
||||
stt_autosend = models.BooleanField(verbose_name="自动发送", default=False)
|
||||
clean_time = models.IntegerField(verbose_name="清理时间", default=180)
|
||||
file_upload_enable = models.BooleanField(verbose_name="文件上传是否启用", default=False)
|
||||
file_upload_setting = models.JSONField(verbose_name="文件上传相关设置", default=dict)
|
||||
|
|
@ -108,10 +115,15 @@ class ApplicationDatasetMapping(AppModelMixin):
|
|||
db_table = "application_dataset_mapping"
|
||||
|
||||
|
||||
def default_asker():
|
||||
return {'user_name': '游客'}
|
||||
|
||||
|
||||
class Chat(AppModelMixin):
|
||||
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
|
||||
application = models.ForeignKey(Application, on_delete=models.CASCADE)
|
||||
abstract = models.CharField(max_length=1024, verbose_name="摘要")
|
||||
asker = models.JSONField(verbose_name="访问者", default=default_asker, encoder=SystemEncoder)
|
||||
client_id = models.UUIDField(verbose_name="客户端id", default=None, null=True)
|
||||
is_deleted = models.BooleanField(verbose_name="", default=False)
|
||||
|
||||
|
|
@ -126,16 +138,6 @@ class VoteChoices(models.TextChoices):
|
|||
TRAMPLE = 1, '反对'
|
||||
|
||||
|
||||
class DateEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, uuid.UUID):
|
||||
return str(obj)
|
||||
if isinstance(obj, datetime.datetime):
|
||||
return obj.strftime("%Y-%m-%d %H:%M:%S")
|
||||
else:
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
class ChatRecord(AppModelMixin):
|
||||
"""
|
||||
对话日志 详情
|
||||
|
|
@ -152,7 +154,7 @@ class ChatRecord(AppModelMixin):
|
|||
message_tokens = models.IntegerField(verbose_name="请求token数量", default=0)
|
||||
answer_tokens = models.IntegerField(verbose_name="响应token数量", default=0)
|
||||
const = models.IntegerField(verbose_name="总费用", default=0)
|
||||
details = models.JSONField(verbose_name="对话详情", default=dict, encoder=DateEncoder)
|
||||
details = models.JSONField(verbose_name="对话详情", default=dict, encoder=SystemEncoder)
|
||||
improve_paragraph_id_list = ArrayField(verbose_name="改进标注列表",
|
||||
base_field=models.UUIDField(max_length=128, blank=True)
|
||||
, default=list)
|
||||
|
|
@ -165,7 +167,14 @@ class ChatRecord(AppModelMixin):
|
|||
return HumanMessage(content=self.problem_text)
|
||||
|
||||
def get_ai_message(self):
|
||||
return AIMessage(content=self.answer_text)
|
||||
answer_text = self.answer_text
|
||||
if answer_text is None or len(str(answer_text).strip()) == 0:
|
||||
answer_text = _(
|
||||
'Sorry, no relevant content was found. Please re-describe your problem or provide more information. ')
|
||||
return AIMessage(content=answer_text)
|
||||
|
||||
def get_node_details_runtime_node_id(self, runtime_node_id):
|
||||
return self.details.get(runtime_node_id, None)
|
||||
|
||||
class Meta:
|
||||
db_table = "application_chat_record"
|
||||
|
|
|
|||
|
|
@ -6,23 +6,28 @@
|
|||
@date:2023/11/7 10:02
|
||||
@desc:
|
||||
"""
|
||||
import asyncio
|
||||
import datetime
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import pickle
|
||||
import re
|
||||
import uuid
|
||||
from functools import reduce
|
||||
from typing import Dict, List
|
||||
|
||||
from django.contrib.postgres.fields import ArrayField
|
||||
from django.core import cache, validators
|
||||
from django.core import signing
|
||||
from django.db import transaction, models
|
||||
from django.db.models import QuerySet, Q
|
||||
from django.db.models import QuerySet
|
||||
from django.db.models.expressions import RawSQL
|
||||
from django.http import HttpResponse
|
||||
from django.template import Template, Context
|
||||
from rest_framework import serializers
|
||||
from langchain_mcp_adapters.client import MultiServerMCPClient
|
||||
from mcp.client.sse import sse_client
|
||||
from rest_framework import serializers, status
|
||||
from rest_framework.utils.formatting import lazy_format
|
||||
|
||||
from application.flow.workflow_manage import Flow
|
||||
from application.models import Application, ApplicationDatasetMapping, ApplicationTypeChoices, WorkFlowVersion
|
||||
|
|
@ -33,35 +38,46 @@ from common.config.embedding_config import VectorStore
|
|||
from common.constants.authentication_type import AuthenticationType
|
||||
from common.db.search import get_dynamics_model, native_search, native_page_search
|
||||
from common.db.sql_execute import select_list
|
||||
from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed
|
||||
from common.field.common import UploadedImageField
|
||||
from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed, ChatException
|
||||
from common.field.common import UploadedImageField, UploadedFileField
|
||||
from common.models.db_model_manage import DBModelManage
|
||||
from common.util.common import valid_license, password_encrypt
|
||||
from common.response import result
|
||||
from common.util.common import valid_license, password_encrypt, restricted_loads
|
||||
from common.util.field_message import ErrMessage
|
||||
from common.util.file_util import get_file_content
|
||||
from dataset.models import DataSet, Document, Image
|
||||
from dataset.serializers.common_serializers import list_paragraph, get_embedding_model_by_dataset_id_list
|
||||
from embedding.models import SearchMode
|
||||
from function_lib.serializers.function_lib_serializer import FunctionLibSerializer
|
||||
from setting.models import AuthOperate
|
||||
from function_lib.models.function import FunctionLib, PermissionType, FunctionType
|
||||
from function_lib.serializers.function_lib_serializer import FunctionLibSerializer, FunctionLibModelSerializer
|
||||
from setting.models import AuthOperate, TeamMemberPermission
|
||||
from setting.models.model_management import Model
|
||||
from setting.models_provider import get_model_credential
|
||||
from setting.models_provider.tools import get_model_instance_by_model_user_id
|
||||
from setting.serializers.provider_serializers import ModelSerializer
|
||||
from smartdoc.conf import PROJECT_DIR
|
||||
from users.models import User
|
||||
from django.utils.translation import gettext_lazy as _, get_language, to_locale
|
||||
|
||||
chat_cache = cache.caches['chat_cache']
|
||||
|
||||
|
||||
class MKInstance:
|
||||
|
||||
def __init__(self, application: dict, function_lib_list: List[dict], version: str):
|
||||
self.application = application
|
||||
self.function_lib_list = function_lib_list
|
||||
self.version = version
|
||||
|
||||
|
||||
class ModelDatasetAssociation(serializers.Serializer):
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("模型id"))
|
||||
error_messages=ErrMessage.char(_("Model id")))
|
||||
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True,
|
||||
error_messages=ErrMessage.uuid(
|
||||
"知识库id")),
|
||||
error_messages=ErrMessage.list("知识库列表"))
|
||||
_("Knowledge base id"))),
|
||||
error_messages=ErrMessage.list(_("Knowledge Base List")))
|
||||
|
||||
def is_valid(self, *, raise_exception=True):
|
||||
super().is_valid(raise_exception=True)
|
||||
|
|
@ -69,13 +85,13 @@ class ModelDatasetAssociation(serializers.Serializer):
|
|||
user_id = self.data.get('user_id')
|
||||
if model_id is not None and len(model_id) > 0:
|
||||
if not QuerySet(Model).filter(id=model_id).exists():
|
||||
raise AppApiException(500, f'模型不存在【{model_id}】')
|
||||
raise AppApiException(500, f'{_("Model does not exist")}【{model_id}】')
|
||||
dataset_id_list = list(set(self.data.get('dataset_id_list')))
|
||||
exist_dataset_id_list = [str(dataset.id) for dataset in
|
||||
QuerySet(DataSet).filter(id__in=dataset_id_list, user_id=user_id)]
|
||||
for dataset_id in dataset_id_list:
|
||||
if not exist_dataset_id_list.__contains__(dataset_id):
|
||||
raise AppApiException(500, f'知识库id不存在【{dataset_id}】')
|
||||
raise AppApiException(500, f'{_("The knowledge base id does not exist")}【{dataset_id}】')
|
||||
|
||||
|
||||
class ApplicationSerializerModel(serializers.ModelSerializer):
|
||||
|
|
@ -92,8 +108,8 @@ class NoReferencesChoices(models.TextChoices):
|
|||
|
||||
class NoReferencesSetting(serializers.Serializer):
|
||||
status = serializers.ChoiceField(required=True, choices=NoReferencesChoices.choices,
|
||||
error_messages=ErrMessage.char("无引用状态"))
|
||||
value = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
|
||||
error_messages=ErrMessage.char(_("No reference status")))
|
||||
value = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
|
||||
|
||||
|
||||
def valid_model_params_setting(model_id, model_params_setting):
|
||||
|
|
@ -105,45 +121,64 @@ def valid_model_params_setting(model_id, model_params_setting):
|
|||
|
||||
|
||||
class DatasetSettingSerializer(serializers.Serializer):
|
||||
top_n = serializers.FloatField(required=True, max_value=100, min_value=1,
|
||||
error_messages=ErrMessage.float("引用分段数"))
|
||||
top_n = serializers.FloatField(required=True, max_value=10000, min_value=1,
|
||||
error_messages=ErrMessage.float(_("Reference segment number")))
|
||||
similarity = serializers.FloatField(required=True, max_value=1, min_value=0,
|
||||
error_messages=ErrMessage.float("相识度"))
|
||||
error_messages=ErrMessage.float(_("Acquaintance")))
|
||||
max_paragraph_char_number = serializers.IntegerField(required=True, min_value=500, max_value=100000,
|
||||
error_messages=ErrMessage.integer("最多引用字符数"))
|
||||
error_messages=ErrMessage.integer(
|
||||
_("Maximum number of quoted characters")))
|
||||
search_mode = serializers.CharField(required=True, validators=[
|
||||
validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"),
|
||||
message="类型只支持register|reset_password", code=500)
|
||||
], error_messages=ErrMessage.char("检索模式"))
|
||||
message=_("The type only supports embedding|keywords|blend"), code=500)
|
||||
], error_messages=ErrMessage.char(_("Retrieval Mode")))
|
||||
|
||||
no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base("未引用分段设置"))
|
||||
no_references_setting = NoReferencesSetting(required=True,
|
||||
error_messages=ErrMessage.base(_("Segment settings not referenced")))
|
||||
|
||||
|
||||
class ModelSettingSerializer(serializers.Serializer):
|
||||
prompt = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
|
||||
error_messages=ErrMessage.char("提示词"))
|
||||
error_messages=ErrMessage.char(_("Prompt word")))
|
||||
system = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
|
||||
error_messages=ErrMessage.char("角色提示词"))
|
||||
error_messages=ErrMessage.char(_("Role prompts")))
|
||||
no_references_prompt = serializers.CharField(required=True, max_length=102400, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("无引用分段提示词"))
|
||||
error_messages=ErrMessage.char(_("No citation segmentation prompt")))
|
||||
reasoning_content_enable = serializers.BooleanField(required=False,
|
||||
error_messages=ErrMessage.char(_("Thinking process switch")))
|
||||
reasoning_content_start = serializers.CharField(required=False, allow_null=True, default="<think>",
|
||||
allow_blank=True, max_length=256,
|
||||
trim_whitespace=False,
|
||||
error_messages=ErrMessage.char(
|
||||
_("The thinking process begins to mark")))
|
||||
reasoning_content_end = serializers.CharField(required=False, allow_null=True, allow_blank=True, default="</think>",
|
||||
max_length=256,
|
||||
trim_whitespace=False,
|
||||
error_messages=ErrMessage.char(_("End of thinking process marker")))
|
||||
|
||||
|
||||
class ApplicationWorkflowSerializer(serializers.Serializer):
|
||||
name = serializers.CharField(required=True, max_length=64, min_length=1, error_messages=ErrMessage.char("应用名称"))
|
||||
name = serializers.CharField(required=True, max_length=64, min_length=1,
|
||||
error_messages=ErrMessage.char(_("Application Name")))
|
||||
desc = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
max_length=256, min_length=1,
|
||||
error_messages=ErrMessage.char("应用描述"))
|
||||
work_flow = serializers.DictField(required=False, error_messages=ErrMessage.dict("工作流对象"))
|
||||
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
|
||||
error_messages=ErrMessage.char("开场白"))
|
||||
error_messages=ErrMessage.char(_("Application Description")))
|
||||
work_flow = serializers.DictField(required=False, error_messages=ErrMessage.dict(_("Workflow Objects")))
|
||||
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
|
||||
error_messages=ErrMessage.char(_("Opening remarks")))
|
||||
|
||||
@staticmethod
|
||||
def to_application_model(user_id: str, application: Dict):
|
||||
language = get_language()
|
||||
if application.get('work_flow') is not None:
|
||||
default_workflow = application.get('work_flow')
|
||||
else:
|
||||
default_workflow_json = get_file_content(
|
||||
os.path.join(PROJECT_DIR, "apps", "application", 'flow', 'default_workflow.json'))
|
||||
workflow_file_path = os.path.join(PROJECT_DIR, "apps", "application", 'flow',
|
||||
f'default_workflow_{to_locale(language)}.json')
|
||||
if not os.path.exists(workflow_file_path):
|
||||
workflow_file_path = os.path.join(PROJECT_DIR, "apps", "application", 'flow',
|
||||
f'default_workflow_zh.json')
|
||||
default_workflow_json = get_file_content(workflow_file_path)
|
||||
default_workflow = json.loads(default_workflow_json)
|
||||
for node in default_workflow.get('nodes'):
|
||||
if node.get('id') == 'base-node':
|
||||
|
|
@ -181,36 +216,41 @@ def get_base_node_work_flow(work_flow):
|
|||
|
||||
|
||||
class ApplicationSerializer(serializers.Serializer):
|
||||
name = serializers.CharField(required=True, max_length=64, min_length=1, error_messages=ErrMessage.char("应用名称"))
|
||||
name = serializers.CharField(required=True, max_length=64, min_length=1,
|
||||
error_messages=ErrMessage.char(_("application name")))
|
||||
desc = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
max_length=256, min_length=1,
|
||||
error_messages=ErrMessage.char("应用描述"))
|
||||
error_messages=ErrMessage.char(_("application describe")))
|
||||
model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("模型"))
|
||||
error_messages=ErrMessage.char(_("Model")))
|
||||
dialogue_number = serializers.IntegerField(required=True,
|
||||
min_value=0,
|
||||
max_value=1024,
|
||||
error_messages=ErrMessage.integer("历史聊天记录"))
|
||||
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
|
||||
error_messages=ErrMessage.char("开场白"))
|
||||
error_messages=ErrMessage.integer(_("Historical chat records")))
|
||||
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
|
||||
error_messages=ErrMessage.char(_("Opening remarks")))
|
||||
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
|
||||
allow_null=True, error_messages=ErrMessage.list("关联知识库"))
|
||||
allow_null=True,
|
||||
error_messages=ErrMessage.list(_("Related Knowledge Base")))
|
||||
# 数据集相关设置
|
||||
dataset_setting = DatasetSettingSerializer(required=True)
|
||||
# 模型相关设置
|
||||
model_setting = ModelSettingSerializer(required=True)
|
||||
# 问题补全
|
||||
problem_optimization = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("问题补全"))
|
||||
problem_optimization = serializers.BooleanField(required=True,
|
||||
error_messages=ErrMessage.boolean(_("Question completion")))
|
||||
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
|
||||
error_messages=ErrMessage.char("问题补全提示词"))
|
||||
error_messages=ErrMessage.char(_("Question completion prompt")))
|
||||
# 应用类型
|
||||
type = serializers.CharField(required=True, error_messages=ErrMessage.char("应用类型"),
|
||||
type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Application Type")),
|
||||
validators=[
|
||||
validators.RegexValidator(regex=re.compile("^SIMPLE|WORK_FLOW$"),
|
||||
message="应用类型只支持SIMPLE|WORK_FLOW", code=500)
|
||||
message=_(
|
||||
"Application type only supports SIMPLE|WORK_FLOW"),
|
||||
code=500)
|
||||
]
|
||||
)
|
||||
model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.dict('模型参数'))
|
||||
model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.dict(_('Model parameters')))
|
||||
|
||||
def is_valid(self, *, user_id=None, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
|
|
@ -218,9 +258,9 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'dataset_id_list': self.data.get('dataset_id_list')}).is_valid()
|
||||
|
||||
class Embed(serializers.Serializer):
|
||||
host = serializers.CharField(required=True, error_messages=ErrMessage.char("主机"))
|
||||
protocol = serializers.CharField(required=True, error_messages=ErrMessage.char("协议"))
|
||||
token = serializers.CharField(required=True, error_messages=ErrMessage.char("token"))
|
||||
host = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Host")))
|
||||
protocol = serializers.CharField(required=True, error_messages=ErrMessage.char(_("protocol")))
|
||||
token = serializers.CharField(required=True, error_messages=ErrMessage.char(_("token")))
|
||||
|
||||
def get_embed(self, with_valid=True, params=None):
|
||||
if params is None:
|
||||
|
|
@ -241,6 +281,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
# 获取接入的query参数
|
||||
query = self.get_query_api_input(application_access_token.application, params)
|
||||
float_location = {"x": {"type": "right", "value": 0}, "y": {"type": "bottom", "value": 30}}
|
||||
header_font_color = "rgb(100, 106, 115)"
|
||||
application_setting_model = DBModelManage.get_model('application_setting')
|
||||
if application_setting_model is not None and X_PACK_LICENSE_IS_VALID:
|
||||
application_setting = QuerySet(application_setting_model).filter(
|
||||
|
|
@ -252,6 +293,10 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
show_guide = 'true' if application_setting.show_guide else 'false'
|
||||
if application_setting.float_location is not None:
|
||||
float_location = application_setting.float_location
|
||||
if application_setting.custom_theme is not None and len(
|
||||
application_setting.custom_theme.get('header_font_color', 'rgb(100, 106, 115)')) > 0:
|
||||
header_font_color = application_setting.custom_theme.get('header_font_color',
|
||||
'rgb(100, 106, 115)')
|
||||
|
||||
is_auth = 'true' if application_access_token is not None and application_access_token.is_active else 'false'
|
||||
t = Template(content)
|
||||
|
|
@ -269,7 +314,9 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'x_type': float_location.get('x', {}).get('type', 'right'),
|
||||
'x_value': float_location.get('x', {}).get('value', 0),
|
||||
'y_type': float_location.get('y', {}).get('type', 'bottom'),
|
||||
'y_value': float_location.get('y', {}).get('value', 30)}))
|
||||
'y_value': float_location.get('y', {}).get('value', 30),
|
||||
'max_kb_id': str(uuid.uuid1()).replace('-', ''),
|
||||
'header_font_color': header_font_color}))
|
||||
response = HttpResponse(s, status=200, headers={'Content-Type': 'text/javascript'})
|
||||
return response
|
||||
|
||||
|
|
@ -287,26 +334,31 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
for field in input_field_list:
|
||||
if field['assignment_method'] == 'api_input' and field['variable'] in params:
|
||||
query += f"&{field['variable']}={params[field['variable']]}"
|
||||
|
||||
if 'asker' in params:
|
||||
query += f"&asker={params.get('asker')}"
|
||||
return query
|
||||
|
||||
class AccessTokenSerializer(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.boolean("应用id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.boolean(_("Application ID")))
|
||||
|
||||
class AccessTokenEditSerializer(serializers.Serializer):
|
||||
access_token_reset = serializers.BooleanField(required=False,
|
||||
error_messages=ErrMessage.boolean("重置Token"))
|
||||
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("是否开启"))
|
||||
error_messages=ErrMessage.boolean(_("Reset Token")))
|
||||
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Is it enabled")))
|
||||
access_num = serializers.IntegerField(required=False, max_value=10000,
|
||||
min_value=0,
|
||||
error_messages=ErrMessage.integer("访问次数"))
|
||||
white_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("是否开启白名单"))
|
||||
error_messages=ErrMessage.integer(_("Number of visits")))
|
||||
white_active = serializers.BooleanField(required=False,
|
||||
error_messages=ErrMessage.boolean(_("Whether to enable whitelist")))
|
||||
white_list = serializers.ListSerializer(required=False, child=serializers.CharField(required=True,
|
||||
error_messages=ErrMessage.char(
|
||||
"白名单")),
|
||||
error_messages=ErrMessage.list("白名单列表")),
|
||||
_("Whitelist"))),
|
||||
error_messages=ErrMessage.list(_("Whitelist"))),
|
||||
show_source = serializers.BooleanField(required=False,
|
||||
error_messages=ErrMessage.boolean("是否显示知识来源"))
|
||||
error_messages=ErrMessage.boolean(
|
||||
_("Whether to display knowledge sources")))
|
||||
language = serializers.CharField(required=False, allow_blank=True, allow_null=True,
|
||||
error_messages=ErrMessage.char(_("language")))
|
||||
|
||||
def edit(self, instance: Dict, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -329,6 +381,10 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
application_access_token.white_list = instance.get('white_list')
|
||||
if 'show_source' in instance and instance.get('show_source') is not None:
|
||||
application_access_token.show_source = instance.get('show_source')
|
||||
if 'language' in instance and instance.get('language') is not None:
|
||||
application_access_token.language = instance.get('language')
|
||||
if 'language' not in instance or instance.get('language') is None:
|
||||
application_access_token.language = None
|
||||
application_access_token.save()
|
||||
application_setting_model = DBModelManage.get_model('application_setting')
|
||||
xpack_cache = DBModelManage.get_model('xpack_cache')
|
||||
|
|
@ -366,13 +422,14 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'access_num': application_access_token.access_num,
|
||||
'white_active': application_access_token.white_active,
|
||||
'white_list': application_access_token.white_list,
|
||||
'show_source': application_access_token.show_source
|
||||
'show_source': application_access_token.show_source,
|
||||
'language': application_access_token.language
|
||||
}
|
||||
|
||||
class Authentication(serializers.Serializer):
|
||||
access_token = serializers.CharField(required=True, error_messages=ErrMessage.char("access_token"))
|
||||
access_token = serializers.CharField(required=True, error_messages=ErrMessage.char(_("access_token")))
|
||||
authentication_value = serializers.JSONField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.char("认证信息"))
|
||||
error_messages=ErrMessage.char(_("Certification Information")))
|
||||
|
||||
def auth(self, request, with_valid=True):
|
||||
token = request.META.get('HTTP_AUTHORIZATION')
|
||||
|
|
@ -409,7 +466,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'authentication': authentication})
|
||||
return token
|
||||
else:
|
||||
raise NotFound404(404, "无效的access_token")
|
||||
raise NotFound404(404, _("Invalid access_token"))
|
||||
|
||||
def auth_authentication_value(self, authentication_value, application_id):
|
||||
application_setting_model = DBModelManage.get_model('application_setting')
|
||||
|
|
@ -420,7 +477,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
if application_setting.authentication and authentication_value is not None:
|
||||
if authentication_value.get('type') == 'password':
|
||||
if not self.auth_password(authentication_value, application_setting.authentication_value):
|
||||
raise AppApiException(1005, "密码错误")
|
||||
raise AppApiException(1005, _("Wrong password"))
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -429,38 +486,40 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
|
||||
class Edit(serializers.Serializer):
|
||||
name = serializers.CharField(required=False, max_length=64, min_length=1,
|
||||
error_messages=ErrMessage.char("应用名称"))
|
||||
error_messages=ErrMessage.char(_("Application Name")))
|
||||
desc = serializers.CharField(required=False, max_length=256, min_length=1, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("应用描述"))
|
||||
error_messages=ErrMessage.char(_("Application Description")))
|
||||
model_id = serializers.CharField(required=False, allow_blank=True, allow_null=True,
|
||||
error_messages=ErrMessage.char("模型"))
|
||||
error_messages=ErrMessage.char(_("Model")))
|
||||
dialogue_number = serializers.IntegerField(required=False,
|
||||
min_value=0,
|
||||
max_value=1024,
|
||||
error_messages=ErrMessage.integer("历史聊天记录"))
|
||||
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
|
||||
error_messages=ErrMessage.char("开场白"))
|
||||
error_messages=ErrMessage.integer(_("Historical chat records")))
|
||||
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
|
||||
error_messages=ErrMessage.char(_("Opening remarks")))
|
||||
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
|
||||
error_messages=ErrMessage.list("关联知识库")
|
||||
error_messages=ErrMessage.list(_("Related Knowledge Base"))
|
||||
)
|
||||
# 数据集相关设置
|
||||
dataset_setting = DatasetSettingSerializer(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.json("数据集设置"))
|
||||
error_messages=ErrMessage.json(_("Dataset settings")))
|
||||
# 模型相关设置
|
||||
model_setting = ModelSettingSerializer(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.json("模型设置"))
|
||||
error_messages=ErrMessage.json(_("Model setup")))
|
||||
# 问题补全
|
||||
problem_optimization = serializers.BooleanField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.boolean("问题补全"))
|
||||
icon = serializers.CharField(required=False, allow_null=True, error_messages=ErrMessage.char("icon图标"))
|
||||
error_messages=ErrMessage.boolean(_("Question completion")))
|
||||
icon = serializers.CharField(required=False, allow_null=True, error_messages=ErrMessage.char(_("Icon")))
|
||||
|
||||
model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.dict('模型参数'))
|
||||
model_params_setting = serializers.DictField(required=False,
|
||||
error_messages=ErrMessage.dict(_('Model parameters')))
|
||||
|
||||
class Create(serializers.Serializer):
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
@valid_license(model=Application, count=5,
|
||||
message='社区版最多支持 5 个应用,如需拥有更多应用,请联系我们(https://fit2cloud.com/)。')
|
||||
message=_(
|
||||
'The community version supports up to 5 applications. If you need more applications, please contact us (https://fit2cloud.com/).'))
|
||||
@transaction.atomic
|
||||
def insert(self, application: Dict):
|
||||
application_type = application.get('type')
|
||||
|
|
@ -526,22 +585,22 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
return ApplicationDatasetMapping(id=uuid.uuid1(), application_id=application_id, dataset_id=dataset_id)
|
||||
|
||||
class HitTest(serializers.Serializer):
|
||||
id = serializers.CharField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.uuid("用户id"))
|
||||
query_text = serializers.CharField(required=True, error_messages=ErrMessage.char("查询文本"))
|
||||
top_number = serializers.IntegerField(required=True, max_value=100, min_value=1,
|
||||
error_messages=ErrMessage.integer("topN"))
|
||||
id = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
query_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Query text")))
|
||||
top_number = serializers.IntegerField(required=True, max_value=10000, min_value=1,
|
||||
error_messages=ErrMessage.integer(_("topN")))
|
||||
similarity = serializers.FloatField(required=True, max_value=2, min_value=0,
|
||||
error_messages=ErrMessage.float("相关度"))
|
||||
error_messages=ErrMessage.float(_("Relevance")))
|
||||
search_mode = serializers.CharField(required=True, validators=[
|
||||
validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"),
|
||||
message="类型只支持register|reset_password", code=500)
|
||||
], error_messages=ErrMessage.char("检索模式"))
|
||||
message=_("The type only supports embedding|keywords|blend"), code=500)
|
||||
], error_messages=ErrMessage.char(_("Retrieval Mode")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
if not QuerySet(Application).filter(id=self.data.get('id')).exists():
|
||||
raise AppApiException(500, '不存在的应用id')
|
||||
raise AppApiException(500, _('Application id does not exist'))
|
||||
|
||||
def hit_test(self):
|
||||
self.is_valid()
|
||||
|
|
@ -568,12 +627,12 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'comprehensive_score': hit_dict.get(p.get('id')).get('comprehensive_score')} for p in p_list]
|
||||
|
||||
class Query(serializers.Serializer):
|
||||
name = serializers.CharField(required=False, error_messages=ErrMessage.char("应用名称"))
|
||||
name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Application Name")))
|
||||
|
||||
desc = serializers.CharField(required=False, error_messages=ErrMessage.char("应用描述"))
|
||||
desc = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Application Description")))
|
||||
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
select_user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.uuid("选择用户id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
select_user_id = serializers.UUIDField(required=False, error_messages=ErrMessage.uuid(_("Select User ID")))
|
||||
|
||||
def get_query_set(self):
|
||||
user_id = self.data.get("user_id")
|
||||
|
|
@ -601,8 +660,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
query_set_dict['team_member_permission_custom_sql'] = QuerySet(model=get_dynamics_model(
|
||||
{'user_id': models.CharField(),
|
||||
'team_member_permission.auth_target_type': models.CharField(),
|
||||
'team_member_permission.operate': ArrayField(verbose_name="权限操作列表",
|
||||
base_field=models.CharField(max_length=256,
|
||||
'team_member_permission.operate': ArrayField(base_field=models.CharField(max_length=256,
|
||||
blank=True,
|
||||
choices=AuthOperate.choices,
|
||||
default=AuthOperate.USE)
|
||||
|
|
@ -642,16 +700,16 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
fields = ['id', 'name', 'desc', 'prologue', 'dialogue_number', 'icon', 'type']
|
||||
|
||||
class IconOperate(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
image = UploadedImageField(required=True, error_messages=ErrMessage.image("图片"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
image = UploadedImageField(required=True, error_messages=ErrMessage.image(_("picture")))
|
||||
|
||||
def edit(self, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
application = QuerySet(Application).filter(id=self.data.get('application_id')).first()
|
||||
if application is None:
|
||||
raise AppApiException(500, '不存在的应用id')
|
||||
raise AppApiException(500, _('Application id does not exist'))
|
||||
image_id = uuid.uuid1()
|
||||
image = Image(id=image_id, image=self.data.get('image').read(), image_name=self.data.get('image').name)
|
||||
image.save()
|
||||
|
|
@ -662,14 +720,88 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
get_application_access_token(application_access_token.access_token, False)
|
||||
return {**ApplicationSerializer.Query.reset_application(ApplicationSerializerModel(application).data)}
|
||||
|
||||
class Import(serializers.Serializer):
|
||||
file = UploadedFileField(required=True, error_messages=ErrMessage.image(_("file")))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
@valid_license(model=Application, count=5,
|
||||
message=_(
|
||||
'The community version supports up to 5 applications. If you need more applications, please contact us (https://fit2cloud.com/).'))
|
||||
@transaction.atomic
|
||||
def import_(self, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid()
|
||||
user_id = self.data.get('user_id')
|
||||
mk_instance_bytes = self.data.get('file').read()
|
||||
try:
|
||||
mk_instance = restricted_loads(mk_instance_bytes)
|
||||
except Exception as e:
|
||||
raise AppApiException(1001, _("Unsupported file format"))
|
||||
application = mk_instance.application
|
||||
function_lib_list = mk_instance.function_lib_list
|
||||
if len(function_lib_list) > 0:
|
||||
function_lib_id_list = [function_lib.get('id') for function_lib in function_lib_list]
|
||||
exits_function_lib_id_list = [str(function_lib.id) for function_lib in
|
||||
QuerySet(FunctionLib).filter(id__in=function_lib_id_list)]
|
||||
# 获取到需要插入的函数
|
||||
function_lib_list = [function_lib for function_lib in function_lib_list if
|
||||
not exits_function_lib_id_list.__contains__(function_lib.get('id'))]
|
||||
application_model = self.to_application(application, user_id)
|
||||
function_lib_model_list = [self.to_function_lib(f, user_id) for f in function_lib_list]
|
||||
application_model.save()
|
||||
# 插入认证信息
|
||||
ApplicationAccessToken(application_id=application_model.id,
|
||||
access_token=hashlib.md5(str(uuid.uuid1()).encode()).hexdigest()[8:24]).save()
|
||||
QuerySet(FunctionLib).bulk_create(function_lib_model_list) if len(function_lib_model_list) > 0 else None
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def to_application(application, user_id):
|
||||
work_flow = application.get('work_flow')
|
||||
for node in work_flow.get('nodes', []):
|
||||
if node.get('type') == 'search-dataset-node':
|
||||
node.get('properties', {}).get('node_data', {})['dataset_id_list'] = []
|
||||
return Application(id=uuid.uuid1(), user_id=user_id, name=application.get('name'),
|
||||
desc=application.get('desc'),
|
||||
prologue=application.get('prologue'), dialogue_number=application.get('dialogue_number'),
|
||||
dataset_setting=application.get('dataset_setting'),
|
||||
model_setting=application.get('model_setting'),
|
||||
model_params_setting=application.get('model_params_setting'),
|
||||
tts_model_params_setting=application.get('tts_model_params_setting'),
|
||||
problem_optimization=application.get('problem_optimization'),
|
||||
icon="/ui/favicon.ico",
|
||||
work_flow=work_flow,
|
||||
type=application.get('type'),
|
||||
problem_optimization_prompt=application.get('problem_optimization_prompt'),
|
||||
tts_model_enable=application.get('tts_model_enable'),
|
||||
stt_model_enable=application.get('stt_model_enable'),
|
||||
tts_type=application.get('tts_type'),
|
||||
clean_time=application.get('clean_time'),
|
||||
file_upload_enable=application.get('file_upload_enable'),
|
||||
file_upload_setting=application.get('file_upload_setting'),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def to_function_lib(function_lib, user_id):
|
||||
"""
|
||||
|
||||
@param user_id: 用户id
|
||||
@param function_lib: 函数库
|
||||
@return:
|
||||
"""
|
||||
return FunctionLib(id=function_lib.get('id'), user_id=user_id, name=function_lib.get('name'),
|
||||
code=function_lib.get('code'), input_field_list=function_lib.get('input_field_list'),
|
||||
is_active=function_lib.get('is_active'),
|
||||
permission_type=PermissionType.PRIVATE)
|
||||
|
||||
class Operate(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
if not QuerySet(Application).filter(id=self.data.get('application_id')).exists():
|
||||
raise AppApiException(500, '不存在的应用id')
|
||||
raise AppApiException(500, _('Application id does not exist'))
|
||||
|
||||
def list_model(self, model_type=None, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -685,8 +817,10 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
application = QuerySet(Application).filter(id=self.data.get("application_id")).first()
|
||||
return FunctionLibSerializer.Query(data={'user_id': application.user_id, 'is_active': True}).list(
|
||||
with_valid=True)
|
||||
return FunctionLibSerializer.Query(
|
||||
data={'user_id': application.user_id, 'is_active': True,
|
||||
'function_type': FunctionType.PUBLIC}
|
||||
).list(with_valid=True)
|
||||
|
||||
def get_function_lib(self, function_lib_id, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -708,6 +842,31 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
QuerySet(Application).filter(id=self.data.get('application_id')).delete()
|
||||
return True
|
||||
|
||||
def export(self, with_valid=True):
|
||||
try:
|
||||
if with_valid:
|
||||
self.is_valid()
|
||||
application_id = self.data.get('application_id')
|
||||
application = QuerySet(Application).filter(id=application_id).first()
|
||||
function_lib_id_list = [node.get('properties', {}).get('node_data', {}).get('function_lib_id') for node
|
||||
in
|
||||
application.work_flow.get('nodes', []) if
|
||||
node.get('type') == 'function-lib-node']
|
||||
function_lib_list = []
|
||||
if len(function_lib_id_list) > 0:
|
||||
function_lib_list = QuerySet(FunctionLib).filter(id__in=function_lib_id_list)
|
||||
application_dict = ApplicationSerializerModel(application).data
|
||||
|
||||
mk_instance = MKInstance(application_dict,
|
||||
[FunctionLibModelSerializer(function_lib).data for function_lib in
|
||||
function_lib_list], 'v1')
|
||||
application_pickle = pickle.dumps(mk_instance)
|
||||
response = HttpResponse(content_type='text/plain', content=application_pickle)
|
||||
response['Content-Disposition'] = f'attachment; filename="{application.name}.mk"'
|
||||
return response
|
||||
except Exception as e:
|
||||
return result.error(str(e), response_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
@transaction.atomic
|
||||
def publish(self, instance, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -717,7 +876,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
application = QuerySet(Application).filter(id=self.data.get("application_id")).first()
|
||||
work_flow = instance.get('work_flow')
|
||||
if work_flow is None:
|
||||
raise AppApiException(500, "work_flow是必填字段")
|
||||
raise AppApiException(500, _("work_flow is a required field"))
|
||||
Flow.new_instance(work_flow).is_valid()
|
||||
base_node = get_base_node_work_flow(work_flow)
|
||||
if base_node is not None:
|
||||
|
|
@ -779,7 +938,9 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
dataset_id_list = node_data.get('dataset_id_list', [])
|
||||
for dataset_id in dataset_id_list:
|
||||
if not user_dataset_id_list.__contains__(dataset_id):
|
||||
raise AppApiException(500, f"未知的知识库id${dataset_id},无法关联")
|
||||
message = lazy_format(_('Unknown knowledge base id {dataset_id}, unable to associate'),
|
||||
dataset_id=dataset_id)
|
||||
raise AppApiException(500, message)
|
||||
|
||||
source_dataset_id_list = node_data.get('source_dataset_id_list', [])
|
||||
source_dataset_id_list = [source_dataset_id for source_dataset_id in source_dataset_id_list if
|
||||
|
|
@ -797,8 +958,14 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
application = QuerySet(Application).get(id=application_id)
|
||||
application_access_token = QuerySet(ApplicationAccessToken).filter(application_id=application.id).first()
|
||||
if application_access_token is None:
|
||||
raise AppUnauthorizedFailed(500, "非法用户")
|
||||
raise AppUnauthorizedFailed(500, _("Illegal User"))
|
||||
application_setting_model = DBModelManage.get_model('application_setting')
|
||||
if application.type == ApplicationTypeChoices.WORK_FLOW:
|
||||
work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=application.id).order_by(
|
||||
'-create_time')[0:1].first()
|
||||
if work_flow_version is not None:
|
||||
application.work_flow = work_flow_version.work_flow
|
||||
|
||||
xpack_cache = DBModelManage.get_model('xpack_cache')
|
||||
X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', False)
|
||||
application_setting_dict = {}
|
||||
|
|
@ -823,6 +990,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'draggable': application_setting.draggable,
|
||||
'show_guide': application_setting.show_guide,
|
||||
'avatar': application_setting.avatar,
|
||||
'show_avatar': application_setting.show_avatar,
|
||||
'float_icon': application_setting.float_icon,
|
||||
'authentication': application_setting.authentication,
|
||||
'authentication_type': application_setting.authentication_value.get(
|
||||
|
|
@ -831,6 +999,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'disclaimer_value': application_setting.disclaimer_value,
|
||||
'custom_theme': application_setting.custom_theme,
|
||||
'user_avatar': application_setting.user_avatar,
|
||||
'show_user_avatar': application_setting.show_user_avatar,
|
||||
'float_location': application_setting.float_location}
|
||||
return ApplicationSerializer.Query.reset_application(
|
||||
{**ApplicationSerializer.ApplicationModel(application).data,
|
||||
|
|
@ -839,10 +1008,14 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
'stt_model_enable': application.stt_model_enable,
|
||||
'tts_model_enable': application.tts_model_enable,
|
||||
'tts_type': application.tts_type,
|
||||
'tts_autoplay': application.tts_autoplay,
|
||||
'stt_autosend': application.stt_autosend,
|
||||
'file_upload_enable': application.file_upload_enable,
|
||||
'file_upload_setting': application.file_upload_setting,
|
||||
'work_flow': application.work_flow,
|
||||
'work_flow': {'nodes': [node for node in ((application.work_flow or {}).get('nodes', []) or []) if
|
||||
node.get('id') == 'base-node']},
|
||||
'show_source': application_access_token.show_source,
|
||||
'language': application_access_token.language,
|
||||
**application_setting_dict})
|
||||
|
||||
@transaction.atomic
|
||||
|
|
@ -860,27 +1033,30 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
model = QuerySet(Model).filter(
|
||||
id=instance.get('model_id')).first()
|
||||
if model is None:
|
||||
raise AppApiException(500, "模型不存在")
|
||||
raise AppApiException(500, _("Model does not exist"))
|
||||
if not model.is_permission(application.user_id):
|
||||
raise AppApiException(500, f"沒有权限使用该模型:{model.name}")
|
||||
message = lazy_format(_('No permission to use this model:{model_name}'), model_name=model.name)
|
||||
raise AppApiException(500, message)
|
||||
if instance.get('stt_model_id') is None or len(instance.get('stt_model_id')) == 0:
|
||||
application.stt_model_id = None
|
||||
else:
|
||||
model = QuerySet(Model).filter(
|
||||
id=instance.get('stt_model_id')).first()
|
||||
if model is None:
|
||||
raise AppApiException(500, "模型不存在")
|
||||
raise AppApiException(500, _("Model does not exist"))
|
||||
if not model.is_permission(application.user_id):
|
||||
raise AppApiException(500, f"沒有权限使用该模型:{model.name}")
|
||||
message = lazy_format(_('No permission to use this model:{model_name}'), model_name=model.name)
|
||||
raise AppApiException(500, message)
|
||||
if instance.get('tts_model_id') is None or len(instance.get('tts_model_id')) == 0:
|
||||
application.tts_model_id = None
|
||||
else:
|
||||
model = QuerySet(Model).filter(
|
||||
id=instance.get('tts_model_id')).first()
|
||||
if model is None:
|
||||
raise AppApiException(500, "模型不存在")
|
||||
raise AppApiException(500, _("Model does not exist"))
|
||||
if not model.is_permission(application.user_id):
|
||||
raise AppApiException(500, f"沒有权限使用该模型:{model.name}")
|
||||
message = lazy_format(_('No permission to use this model:{model_name}'), model_name=model.name)
|
||||
raise AppApiException(500, message)
|
||||
if 'work_flow' in instance:
|
||||
# 当前用户可修改关联的知识库列表
|
||||
application_dataset_id_list = [str(dataset_dict.get('id')) for dataset_dict in
|
||||
|
|
@ -892,12 +1068,13 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
update_keys = ['name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'prologue', 'status',
|
||||
'dataset_setting', 'model_setting', 'problem_optimization', 'dialogue_number',
|
||||
'stt_model_id', 'tts_model_id', 'tts_model_enable', 'stt_model_enable', 'tts_type',
|
||||
'file_upload_enable', 'file_upload_setting',
|
||||
'tts_autoplay', 'stt_autosend', 'file_upload_enable', 'file_upload_setting',
|
||||
'api_key_is_active', 'icon', 'work_flow', 'model_params_setting', 'tts_model_params_setting',
|
||||
'problem_optimization_prompt', 'clean_time']
|
||||
for update_key in update_keys:
|
||||
if update_key in instance and instance.get(update_key) is not None:
|
||||
application.__setattr__(update_key, instance.get(update_key))
|
||||
print(application.name)
|
||||
application.save()
|
||||
|
||||
if 'dataset_id_list' in instance:
|
||||
|
|
@ -907,13 +1084,16 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
self.list_dataset(with_valid=False)]
|
||||
for dataset_id in dataset_id_list:
|
||||
if not application_dataset_id_list.__contains__(dataset_id):
|
||||
raise AppApiException(500, f"未知的知识库id${dataset_id},无法关联")
|
||||
message = lazy_format(_('Unknown knowledge base id {dataset_id}, unable to associate'),
|
||||
dataset_id=dataset_id)
|
||||
raise AppApiException(500, message)
|
||||
|
||||
self.save_application_mapping(application_dataset_id_list, dataset_id_list, application_id)
|
||||
if application.type == ApplicationTypeChoices.SIMPLE:
|
||||
chat_cache.clear_by_application_id(application_id)
|
||||
application_access_token = QuerySet(ApplicationAccessToken).filter(application_id=application_id).first()
|
||||
# 更新缓存数据
|
||||
print(application.name)
|
||||
get_application_access_token(application_access_token.access_token, False)
|
||||
return self.one(with_valid=False)
|
||||
|
||||
|
|
@ -956,12 +1136,18 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
instance['tts_model_enable'] = node_data['tts_model_enable']
|
||||
if 'tts_type' in node_data:
|
||||
instance['tts_type'] = node_data['tts_type']
|
||||
if 'tts_autoplay' in node_data:
|
||||
instance['tts_autoplay'] = node_data['tts_autoplay']
|
||||
if 'stt_autosend' in node_data:
|
||||
instance['stt_autosend'] = node_data['stt_autosend']
|
||||
if 'tts_model_params_setting' in node_data:
|
||||
instance['tts_model_params_setting'] = node_data['tts_model_params_setting']
|
||||
if 'file_upload_enable' in node_data:
|
||||
instance['file_upload_enable'] = node_data['file_upload_enable']
|
||||
if 'file_upload_setting' in node_data:
|
||||
instance['file_upload_setting'] = node_data['file_upload_setting']
|
||||
if 'name' in node_data:
|
||||
instance['name'] = node_data['name']
|
||||
break
|
||||
|
||||
def speech_to_text(self, file, with_valid=True):
|
||||
|
|
@ -1001,28 +1187,70 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
self.is_valid(raise_exception=True)
|
||||
user_id = self.data.get('user_id')
|
||||
application_id = self.data.get('application_id')
|
||||
application = Application.objects.filter(user_id=user_id).exclude(id=application_id)
|
||||
application = QuerySet(Application).get(id=application_id)
|
||||
|
||||
application_user_id = user_id if user_id == str(application.user_id) else None
|
||||
|
||||
if application_user_id is not None:
|
||||
all_applications = Application.objects.filter(user_id=application_user_id).exclude(id=application_id)
|
||||
else:
|
||||
all_applications = Application.objects.none()
|
||||
|
||||
# 获取团队共享的应用
|
||||
shared_applications = Application.objects.filter(
|
||||
id__in=TeamMemberPermission.objects.filter(
|
||||
auth_target_type='APPLICATION',
|
||||
operate__contains=RawSQL("ARRAY['USE']", []),
|
||||
member_id__team_id=application.user_id,
|
||||
member_id__user_id=user_id
|
||||
).values('target')
|
||||
)
|
||||
all_applications = all_applications.union(shared_applications)
|
||||
|
||||
# 把应用的type为WORK_FLOW的应用放到最上面 然后再按名称排序
|
||||
serialized_data = ApplicationSerializerModel(application, many=True).data
|
||||
serialized_data = ApplicationSerializerModel(all_applications, many=True).data
|
||||
application = sorted(serialized_data, key=lambda x: (x['type'] != 'WORK_FLOW', x['name']))
|
||||
return list(application)
|
||||
|
||||
def get_application(self, app_id, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
if with_valid:
|
||||
self.is_valid()
|
||||
embed_application = QuerySet(Application).filter(id=app_id).first()
|
||||
if embed_application is None:
|
||||
raise AppApiException(500, _('Application does not exist'))
|
||||
if embed_application.type == ApplicationTypeChoices.WORK_FLOW:
|
||||
work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=embed_application.id).order_by(
|
||||
'-create_time')[0:1].first()
|
||||
if work_flow_version is not None:
|
||||
embed_application.work_flow = work_flow_version.work_flow
|
||||
dataset_list = self.list_dataset(with_valid=False)
|
||||
mapping_dataset_id_list = [adm.dataset_id for adm in
|
||||
QuerySet(ApplicationDatasetMapping).filter(application_id=app_id)]
|
||||
dataset_id_list = [d.get('id') for d in
|
||||
list(filter(lambda row: mapping_dataset_id_list.__contains__(row.get('id')),
|
||||
dataset_list))]
|
||||
self.update_search_node(embed_application.work_flow, [str(dataset.get('id')) for dataset in dataset_list])
|
||||
return {**ApplicationSerializer.Query.reset_application(ApplicationSerializerModel(embed_application).data),
|
||||
'dataset_id_list': dataset_id_list}
|
||||
|
||||
class ApplicationKeySerializerModel(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = ApplicationApiKey
|
||||
fields = "__all__"
|
||||
|
||||
class ApplicationKeySerializer(serializers.Serializer):
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
application_id = self.data.get("application_id")
|
||||
application = QuerySet(Application).filter(id=application_id).first()
|
||||
if application is None:
|
||||
raise AppApiException(1001, "应用不存在")
|
||||
raise AppApiException(1001, _("Application does not exist"))
|
||||
|
||||
def generate(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -1044,21 +1272,22 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
QuerySet(ApplicationApiKey).filter(application_id=application_id)]
|
||||
|
||||
class Edit(serializers.Serializer):
|
||||
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("是否可用"))
|
||||
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Availability")))
|
||||
|
||||
allow_cross_domain = serializers.BooleanField(required=False,
|
||||
error_messages=ErrMessage.boolean("是否允许跨域"))
|
||||
error_messages=ErrMessage.boolean(
|
||||
_("Is cross-domain allowed")))
|
||||
|
||||
cross_domain_list = serializers.ListSerializer(required=False,
|
||||
child=serializers.CharField(required=True,
|
||||
error_messages=ErrMessage.char(
|
||||
"跨域列表")),
|
||||
error_messages=ErrMessage.char("跨域地址"))
|
||||
_("Cross-domain address"))),
|
||||
error_messages=ErrMessage.char(_("Cross-domain list")))
|
||||
|
||||
class Operate(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
|
||||
api_key_id = serializers.CharField(required=True, error_messages=ErrMessage.char("ApiKeyid"))
|
||||
api_key_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("ApiKeyid")))
|
||||
|
||||
def delete(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -1079,7 +1308,7 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
application_api_key = QuerySet(ApplicationApiKey).filter(id=api_key_id,
|
||||
application_id=application_id).first()
|
||||
if application_api_key is None:
|
||||
raise AppApiException(500, '不存在')
|
||||
raise AppApiException(500, _('APIKey does not exist'))
|
||||
if 'is_active' in instance and instance.get('is_active') is not None:
|
||||
application_api_key.is_active = instance.get('is_active')
|
||||
if 'allow_cross_domain' in instance and instance.get('allow_cross_domain') is not None:
|
||||
|
|
@ -1089,3 +1318,29 @@ class ApplicationSerializer(serializers.Serializer):
|
|||
application_api_key.save()
|
||||
# 写入缓存
|
||||
get_application_api_key(application_api_key.secret_key, False)
|
||||
|
||||
class McpServers(serializers.Serializer):
|
||||
mcp_servers = serializers.JSONField(required=True)
|
||||
|
||||
def get_mcp_servers(self, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
if '"stdio"' in self.data.get('mcp_servers'):
|
||||
raise AppApiException(500, _('stdio is not supported'))
|
||||
servers = json.loads(self.data.get('mcp_servers'))
|
||||
|
||||
async def get_mcp_tools(servers):
|
||||
async with MultiServerMCPClient(servers) as client:
|
||||
return client.get_tools()
|
||||
|
||||
tools = []
|
||||
for server in servers:
|
||||
tools += [
|
||||
{
|
||||
'server': server,
|
||||
'name': tool.name,
|
||||
'description': tool.description,
|
||||
'args_schema': tool.args_schema,
|
||||
}
|
||||
for tool in asyncio.run(get_mcp_tools({server: servers[server]}))]
|
||||
return tools
|
||||
|
|
|
|||
|
|
@ -19,12 +19,13 @@ from common.db.search import native_search, get_dynamics_model
|
|||
from common.util.field_message import ErrMessage
|
||||
from common.util.file_util import get_file_content
|
||||
from smartdoc.conf import PROJECT_DIR
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ApplicationStatisticsSerializer(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("应用id"))
|
||||
start_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date("开始时间"))
|
||||
end_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date("结束时间"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_("Application ID")))
|
||||
start_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("Start time")))
|
||||
end_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("End time")))
|
||||
|
||||
def get_end_time(self):
|
||||
return datetime.datetime.combine(
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@
|
|||
from typing import Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.models import WorkFlowVersion
|
||||
|
|
@ -26,14 +27,14 @@ class ApplicationVersionModelSerializer(serializers.ModelSerializer):
|
|||
|
||||
class ApplicationVersionEditSerializer(serializers.Serializer):
|
||||
name = serializers.CharField(required=False, max_length=128, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("版本名称"))
|
||||
error_messages=ErrMessage.char(_("Version Name")))
|
||||
|
||||
|
||||
class ApplicationVersionSerializer(serializers.Serializer):
|
||||
class Query(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("应用id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_("Application ID")))
|
||||
name = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("摘要"))
|
||||
error_messages=ErrMessage.char(_("summary")))
|
||||
|
||||
def get_query_set(self):
|
||||
query_set = QuerySet(WorkFlowVersion).filter(application_id=self.data.get('application_id'))
|
||||
|
|
@ -55,8 +56,9 @@ class ApplicationVersionSerializer(serializers.Serializer):
|
|||
post_records_handler=lambda v: ApplicationVersionModelSerializer(v).data)
|
||||
|
||||
class Operate(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("应用id"))
|
||||
work_flow_version_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("工作流版本id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_("Application ID")))
|
||||
work_flow_version_id = serializers.UUIDField(required=True,
|
||||
error_messages=ErrMessage.uuid(_("Workflow version id")))
|
||||
|
||||
def one(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -66,7 +68,7 @@ class ApplicationVersionSerializer(serializers.Serializer):
|
|||
if work_flow_version is not None:
|
||||
return ApplicationVersionModelSerializer(work_flow_version).data
|
||||
else:
|
||||
raise AppApiException(500, '不存在的工作流版本')
|
||||
raise AppApiException(500, _('Workflow version does not exist'))
|
||||
|
||||
def edit(self, instance: Dict, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -81,4 +83,4 @@ class ApplicationVersionSerializer(serializers.Serializer):
|
|||
work_flow_version.save()
|
||||
return ApplicationVersionModelSerializer(work_flow_version).data
|
||||
else:
|
||||
raise AppApiException(500, '不存在的工作流版本')
|
||||
raise AppApiException(500, _('Workflow version does not exist'))
|
||||
|
|
|
|||
|
|
@ -6,8 +6,8 @@
|
|||
@date:2023/11/14 13:51
|
||||
@desc:
|
||||
"""
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import List, Dict
|
||||
from uuid import UUID
|
||||
|
||||
|
|
@ -22,6 +22,7 @@ from application.chat_pipeline.step.generate_human_message_step.impl.base_genera
|
|||
BaseGenerateHumanMessageStep
|
||||
from application.chat_pipeline.step.reset_problem_step.impl.base_reset_problem_step import BaseResetProblemStep
|
||||
from application.chat_pipeline.step.search_dataset_step.impl.base_search_dataset_step import BaseSearchDatasetStep
|
||||
from application.flow.common import Answer
|
||||
from application.flow.i_step_node import WorkFlowPostHandler
|
||||
from application.flow.workflow_manage import WorkflowManage, Flow
|
||||
from application.models import ChatRecord, Chat, Application, ApplicationDatasetMapping, ApplicationTypeChoices, \
|
||||
|
|
@ -37,6 +38,7 @@ from common.util.split_model import flat_map
|
|||
from dataset.models import Paragraph, Document
|
||||
from setting.models import Model, Status
|
||||
from setting.models_provider import get_model_credential
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
chat_cache = caches['chat_cache']
|
||||
|
||||
|
|
@ -93,7 +95,8 @@ class ChatInfo:
|
|||
'chat_id': self.chat_id,
|
||||
'dialogue_number': self.application.dialogue_number,
|
||||
'problem_optimization_prompt': self.application.problem_optimization_prompt if self.application.problem_optimization_prompt is not None and len(
|
||||
self.application.problem_optimization_prompt) > 0 else '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
|
||||
self.application.problem_optimization_prompt) > 0 else _(
|
||||
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag"),
|
||||
'prompt': model_setting.get(
|
||||
'prompt') if 'prompt' in model_setting and len(model_setting.get(
|
||||
'prompt')) > 0 else Application.get_default_model_prompt(),
|
||||
|
|
@ -102,22 +105,26 @@ class ChatInfo:
|
|||
'model_id': model_id,
|
||||
'problem_optimization': self.application.problem_optimization,
|
||||
'stream': True,
|
||||
'model_setting': model_setting,
|
||||
'model_params_setting': model_params_setting if self.application.model_params_setting is None or len(
|
||||
self.application.model_params_setting.keys()) == 0 else self.application.model_params_setting,
|
||||
'search_mode': self.application.dataset_setting.get(
|
||||
'search_mode') if 'search_mode' in self.application.dataset_setting else 'embedding',
|
||||
'no_references_setting': self.get_no_references_setting(self.application.dataset_setting, model_setting),
|
||||
'user_id': self.application.user_id
|
||||
'user_id': self.application.user_id,
|
||||
'application_id': self.application.id
|
||||
}
|
||||
|
||||
def to_pipeline_manage_params(self, problem_text: str, post_response_handler: PostResponseHandler,
|
||||
exclude_paragraph_id_list, client_id: str, client_type, stream=True):
|
||||
exclude_paragraph_id_list, client_id: str, client_type, stream=True, form_data=None):
|
||||
if form_data is None:
|
||||
form_data = {}
|
||||
params = self.to_base_pipeline_manage_params()
|
||||
return {**params, 'problem_text': problem_text, 'post_response_handler': post_response_handler,
|
||||
'exclude_paragraph_id_list': exclude_paragraph_id_list, 'stream': stream, 'client_id': client_id,
|
||||
'client_type': client_type}
|
||||
'client_type': client_type, 'form_data': form_data}
|
||||
|
||||
def append_chat_record(self, chat_record: ChatRecord, client_id=None):
|
||||
def append_chat_record(self, chat_record: ChatRecord, client_id=None, asker=None):
|
||||
chat_record.problem_text = chat_record.problem_text[0:10240] if chat_record.problem_text is not None else ""
|
||||
chat_record.answer_text = chat_record.answer_text[0:40960] if chat_record.problem_text is not None else ""
|
||||
is_save = True
|
||||
|
|
@ -132,8 +139,17 @@ class ChatInfo:
|
|||
if self.application.id is not None:
|
||||
# 插入数据库
|
||||
if not QuerySet(Chat).filter(id=self.chat_id).exists():
|
||||
asker_dict = {'user_name': '游客'}
|
||||
if asker is not None:
|
||||
if isinstance(asker, str):
|
||||
asker_dict = {
|
||||
'user_name': asker
|
||||
}
|
||||
elif isinstance(asker, dict):
|
||||
asker_dict = asker
|
||||
|
||||
Chat(id=self.chat_id, application_id=self.application.id, abstract=chat_record.problem_text[0:1024],
|
||||
client_id=client_id, update_time=datetime.now()).save()
|
||||
client_id=client_id, asker=asker_dict, update_time=datetime.now()).save()
|
||||
else:
|
||||
Chat.objects.filter(id=self.chat_id).update(update_time=datetime.now())
|
||||
# 插入会话记录
|
||||
|
|
@ -154,6 +170,8 @@ def get_post_handler(chat_info: ChatInfo):
|
|||
padding_problem_text: str = None,
|
||||
client_id=None,
|
||||
**kwargs):
|
||||
answer_list = [[Answer(answer_text, 'ai-chat-node', 'ai-chat-node', 'ai-chat-node', {}, 'ai-chat-node',
|
||||
kwargs.get('reasoning_content', '')).to_dict()]]
|
||||
chat_record = ChatRecord(id=chat_record_id,
|
||||
chat_id=chat_id,
|
||||
problem_text=problem_text,
|
||||
|
|
@ -161,10 +179,11 @@ def get_post_handler(chat_info: ChatInfo):
|
|||
details=manage.get_details(),
|
||||
message_tokens=manage.context['message_tokens'],
|
||||
answer_tokens=manage.context['answer_tokens'],
|
||||
answer_text_list=[answer_text],
|
||||
answer_text_list=answer_list,
|
||||
run_time=manage.context['run_time'],
|
||||
index=len(chat_info.chat_record_list) + 1)
|
||||
chat_info.append_chat_record(chat_record, client_id)
|
||||
asker = kwargs.get("asker", None)
|
||||
chat_info.append_chat_record(chat_record, client_id, asker=asker)
|
||||
# 重新设置缓存
|
||||
chat_cache.set(chat_id,
|
||||
chat_info, timeout=60 * 30)
|
||||
|
|
@ -173,33 +192,42 @@ def get_post_handler(chat_info: ChatInfo):
|
|||
|
||||
|
||||
class OpenAIMessage(serializers.Serializer):
|
||||
content = serializers.CharField(required=True, error_messages=ErrMessage.char('内容'))
|
||||
role = serializers.CharField(required=True, error_messages=ErrMessage.char('角色'))
|
||||
content = serializers.CharField(required=True, error_messages=ErrMessage.char(_('content')))
|
||||
role = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Role')))
|
||||
|
||||
|
||||
class OpenAIInstanceSerializer(serializers.Serializer):
|
||||
messages = serializers.ListField(child=OpenAIMessage())
|
||||
chat_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char("对话id"))
|
||||
re_chat = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("重新生成"))
|
||||
stream = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("流式输出"))
|
||||
chat_id = serializers.UUIDField(required=False, error_messages=ErrMessage.char(_("Conversation ID")))
|
||||
re_chat = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Regenerate")))
|
||||
stream = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_("Streaming Output")))
|
||||
|
||||
|
||||
class OpenAIChatSerializer(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端id"))
|
||||
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端类型"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id")))
|
||||
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type")))
|
||||
|
||||
@staticmethod
|
||||
def get_message(instance):
|
||||
return instance.get('messages')[-1].get('content')
|
||||
|
||||
@staticmethod
|
||||
def generate_chat(chat_id, application_id, message, client_id):
|
||||
def generate_chat(chat_id, application_id, message, client_id, asker=None):
|
||||
if chat_id is None:
|
||||
chat_id = str(uuid.uuid1())
|
||||
chat = QuerySet(Chat).filter(id=chat_id).first()
|
||||
if chat is None:
|
||||
Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id).save()
|
||||
asker_dict = {'user_name': '游客'}
|
||||
if asker is not None:
|
||||
if isinstance(asker, str):
|
||||
asker_dict = {
|
||||
'user_name': asker
|
||||
}
|
||||
elif isinstance(asker, dict):
|
||||
asker_dict = asker
|
||||
Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id,
|
||||
asker=asker_dict).save()
|
||||
return chat_id
|
||||
|
||||
def chat(self, instance: Dict, with_valid=True):
|
||||
|
|
@ -213,39 +241,53 @@ class OpenAIChatSerializer(serializers.Serializer):
|
|||
application_id = self.data.get('application_id')
|
||||
client_id = self.data.get('client_id')
|
||||
client_type = self.data.get('client_type')
|
||||
chat_id = self.generate_chat(chat_id, application_id, message, client_id)
|
||||
chat_id = self.generate_chat(chat_id, application_id, message, client_id,
|
||||
asker=instance.get('form_data', {}).get("asker"))
|
||||
return ChatMessageSerializer(
|
||||
data={'chat_id': chat_id, 'message': message,
|
||||
're_chat': re_chat,
|
||||
'stream': stream,
|
||||
'application_id': application_id,
|
||||
'client_id': client_id,
|
||||
'client_type': client_type, 'form_data': instance.get('form_data', {})}).chat(
|
||||
base_to_response=OpenaiToResponse())
|
||||
data={
|
||||
'chat_id': chat_id, 'message': message,
|
||||
're_chat': re_chat,
|
||||
'stream': stream,
|
||||
'application_id': application_id,
|
||||
'client_id': client_id,
|
||||
'client_type': client_type,
|
||||
'form_data': instance.get('form_data', {}),
|
||||
'image_list': instance.get('image_list', []),
|
||||
'document_list': instance.get('document_list', []),
|
||||
'audio_list': instance.get('audio_list', []),
|
||||
'other_list': instance.get('other_list', []),
|
||||
}
|
||||
).chat(base_to_response=OpenaiToResponse())
|
||||
|
||||
|
||||
class ChatMessageSerializer(serializers.Serializer):
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
message = serializers.CharField(required=True, error_messages=ErrMessage.char("用户问题"))
|
||||
stream = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否流式回答"))
|
||||
re_chat = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否重新回答"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
message = serializers.CharField(required=True, error_messages=ErrMessage.char(_("User Questions")))
|
||||
stream = serializers.BooleanField(required=True,
|
||||
error_messages=ErrMessage.char(_("Is the answer in streaming mode")))
|
||||
re_chat = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_("Do you want to reply again")))
|
||||
chat_record_id = serializers.UUIDField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.uuid("对话记录id"))
|
||||
error_messages=ErrMessage.uuid(_("Conversation record id")))
|
||||
|
||||
node_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("节点id"))
|
||||
error_messages=ErrMessage.char(_("Node id")))
|
||||
|
||||
runtime_node_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("运行时节点id"))
|
||||
error_messages=ErrMessage.char(_("Runtime node id")))
|
||||
|
||||
node_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.char("节点参数"))
|
||||
application_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端id"))
|
||||
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char("客户端类型"))
|
||||
form_data = serializers.DictField(required=False, error_messages=ErrMessage.char("全局变量"))
|
||||
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list("图片"))
|
||||
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list("文档"))
|
||||
child_node = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict("子节点"))
|
||||
node_data = serializers.DictField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.char(_("Node parameters")))
|
||||
application_id = serializers.UUIDField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id")))
|
||||
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type")))
|
||||
form_data = serializers.DictField(required=False, error_messages=ErrMessage.char(_("Global variables")))
|
||||
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
|
||||
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
|
||||
audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
|
||||
other_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Other")))
|
||||
child_node = serializers.DictField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.dict(_("Child Nodes")))
|
||||
|
||||
def is_valid_application_workflow(self, *, raise_exception=False):
|
||||
self.is_valid_intraday_access_num()
|
||||
|
|
@ -253,13 +295,15 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
def is_valid_chat_id(self, chat_info: ChatInfo):
|
||||
if self.data.get('application_id') is not None and self.data.get('application_id') != str(
|
||||
chat_info.application.id):
|
||||
raise ChatException(500, "会话不存在")
|
||||
raise ChatException(500, _("Conversation does not exist"))
|
||||
|
||||
def is_valid_intraday_access_num(self):
|
||||
if self.data.get('client_type') == AuthenticationType.APPLICATION_ACCESS_TOKEN.value:
|
||||
access_client = QuerySet(ApplicationPublicAccessClient).filter(id=self.data.get('client_id')).first()
|
||||
access_client = QuerySet(ApplicationPublicAccessClient).filter(client_id=self.data.get('client_id'),
|
||||
application_id=self.data.get(
|
||||
'application_id')).first()
|
||||
if access_client is None:
|
||||
access_client = ApplicationPublicAccessClient(id=self.data.get('client_id'),
|
||||
access_client = ApplicationPublicAccessClient(client_id=self.data.get('client_id'),
|
||||
application_id=self.data.get('application_id'),
|
||||
access_num=0,
|
||||
intraday_access_num=0)
|
||||
|
|
@ -268,7 +312,7 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
application_access_token = QuerySet(ApplicationAccessToken).filter(
|
||||
application_id=self.data.get('application_id')).first()
|
||||
if application_access_token.access_num <= access_client.intraday_access_num:
|
||||
raise AppChatNumOutOfBoundsFailed(1002, "访问次数超过今日访问量")
|
||||
raise AppChatNumOutOfBoundsFailed(1002, _("The number of visits exceeds today's visits"))
|
||||
|
||||
def is_valid_application_simple(self, *, chat_info: ChatInfo, raise_exception=False):
|
||||
self.is_valid_intraday_access_num()
|
||||
|
|
@ -279,9 +323,9 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
if model is None:
|
||||
return chat_info
|
||||
if model.status == Status.ERROR:
|
||||
raise ChatException(500, "当前模型不可用")
|
||||
raise ChatException(500, _("The current model is not available"))
|
||||
if model.status == Status.DOWNLOAD:
|
||||
raise ChatException(500, "模型正在下载中,请稍后再发起对话")
|
||||
raise ChatException(500, _("The model is downloading, please try again later"))
|
||||
return chat_info
|
||||
|
||||
def chat_simple(self, chat_info: ChatInfo, base_to_response):
|
||||
|
|
@ -290,6 +334,7 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
stream = self.data.get('stream')
|
||||
client_id = self.data.get('client_id')
|
||||
client_type = self.data.get('client_type')
|
||||
form_data = self.data.get("form_data")
|
||||
pipeline_manage_builder = PipelineManage.builder()
|
||||
# 如果开启了问题优化,则添加上问题优化步骤
|
||||
if chat_info.application.problem_optimization:
|
||||
|
|
@ -311,7 +356,7 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
exclude_paragraph_id_list = list(set(paragraph_id_list))
|
||||
# 构建运行参数
|
||||
params = chat_info.to_pipeline_manage_params(message, get_post_handler(chat_info), exclude_paragraph_id_list,
|
||||
client_id, client_type, stream)
|
||||
client_id, client_type, stream, form_data)
|
||||
# 运行流水线作业
|
||||
pipeline_message.run(params)
|
||||
return pipeline_message.context['chat_result']
|
||||
|
|
@ -325,7 +370,7 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
return chat_record_list[-1]
|
||||
chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_info.chat_id).first()
|
||||
if chat_record is None:
|
||||
raise ChatException(500, "对话纪要不存在")
|
||||
raise ChatException(500, _("Conversation record does not exist"))
|
||||
chat_record = QuerySet(ChatRecord).filter(id=chat_record_id).first()
|
||||
return chat_record
|
||||
|
||||
|
|
@ -338,13 +383,17 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
form_data = self.data.get('form_data')
|
||||
image_list = self.data.get('image_list')
|
||||
document_list = self.data.get('document_list')
|
||||
audio_list = self.data.get('audio_list')
|
||||
other_list = self.data.get('other_list')
|
||||
user_id = chat_info.application.user_id
|
||||
chat_record_id = self.data.get('chat_record_id')
|
||||
chat_record = None
|
||||
history_chat_record = chat_info.chat_record_list
|
||||
if chat_record_id is not None:
|
||||
chat_record = self.get_chat_record(chat_info, chat_record_id)
|
||||
history_chat_record = [r for r in chat_info.chat_record_list if str(r.id) != chat_record_id]
|
||||
work_flow_manage = WorkflowManage(Flow.new_instance(chat_info.work_flow_version.work_flow),
|
||||
{'history_chat_record': chat_info.chat_record_list, 'question': message,
|
||||
{'history_chat_record': history_chat_record, 'question': message,
|
||||
'chat_id': chat_info.chat_id, 'chat_record_id': str(
|
||||
uuid.uuid1()) if chat_record is None else chat_record.id,
|
||||
'stream': stream,
|
||||
|
|
@ -352,7 +401,7 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
'client_id': client_id,
|
||||
'client_type': client_type,
|
||||
'user_id': user_id}, WorkFlowPostHandler(chat_info, client_id, client_type),
|
||||
base_to_response, form_data, image_list, document_list,
|
||||
base_to_response, form_data, image_list, document_list, audio_list, other_list,
|
||||
self.data.get('runtime_node_id'),
|
||||
self.data.get('node_data'), chat_record, self.data.get('child_node'))
|
||||
r = work_flow_manage.run()
|
||||
|
|
@ -382,10 +431,10 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
def re_open_chat(self, chat_id: str):
|
||||
chat = QuerySet(Chat).filter(id=chat_id).first()
|
||||
if chat is None:
|
||||
raise ChatException(500, "会话不存在")
|
||||
raise ChatException(500, _("Conversation does not exist"))
|
||||
application = QuerySet(Application).filter(id=chat.application_id).first()
|
||||
if application is None:
|
||||
raise ChatException(500, "应用不存在")
|
||||
raise ChatException(500, _("Application does not exist"))
|
||||
if application.type == ApplicationTypeChoices.SIMPLE:
|
||||
return self.re_open_chat_simple(chat_id, application)
|
||||
else:
|
||||
|
|
@ -415,7 +464,7 @@ class ChatMessageSerializer(serializers.Serializer):
|
|||
work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=application.id).order_by(
|
||||
'-create_time')[0:1].first()
|
||||
if work_flow_version is None:
|
||||
raise ChatException(500, "应用未发布,请发布后再使用")
|
||||
raise ChatException(500, _("The application has not been published. Please use it after publishing."))
|
||||
|
||||
chat_info = ChatInfo(chat_id, [], [], application, work_flow_version)
|
||||
chat_record_list = list(QuerySet(ChatRecord).filter(chat_id=chat_id).order_by('-create_time')[0:5])
|
||||
|
|
|
|||
|
|
@ -13,15 +13,17 @@ import uuid
|
|||
from functools import reduce
|
||||
from io import BytesIO
|
||||
from typing import Dict
|
||||
|
||||
import pytz
|
||||
import openpyxl
|
||||
from django.core import validators
|
||||
from django.core.cache import caches
|
||||
from django.db import transaction, models
|
||||
from django.db.models import QuerySet, Q
|
||||
from django.http import StreamingHttpResponse
|
||||
from django.utils.translation import gettext_lazy as _, gettext
|
||||
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
|
||||
from rest_framework import serializers
|
||||
from rest_framework.utils.formatting import lazy_format
|
||||
|
||||
from application.flow.workflow_manage import Flow
|
||||
from application.models import Chat, Application, ApplicationDatasetMapping, VoteChoices, ChatRecord, WorkFlowVersion, \
|
||||
|
|
@ -44,13 +46,14 @@ from embedding.task import embedding_by_paragraph, embedding_by_paragraph_list
|
|||
from setting.models import Model
|
||||
from setting.models_provider import get_model_credential
|
||||
from smartdoc.conf import PROJECT_DIR
|
||||
from smartdoc.settings import TIME_ZONE
|
||||
|
||||
chat_cache = caches['chat_cache']
|
||||
|
||||
|
||||
class WorkFlowSerializers(serializers.Serializer):
|
||||
nodes = serializers.ListSerializer(child=serializers.DictField(), error_messages=ErrMessage.uuid("节点"))
|
||||
edges = serializers.ListSerializer(child=serializers.DictField(), error_messages=ErrMessage.uuid("连线"))
|
||||
nodes = serializers.ListSerializer(child=serializers.DictField(), error_messages=ErrMessage.uuid(_("node")))
|
||||
edges = serializers.ListSerializer(child=serializers.DictField(), error_messages=ErrMessage.uuid(_("Connection")))
|
||||
|
||||
|
||||
def valid_model_params_setting(model_id, model_params_setting):
|
||||
|
|
@ -64,10 +67,14 @@ def valid_model_params_setting(model_id, model_params_setting):
|
|||
credential.get_model_params_setting_form(model.model_name).valid_form(model_params_setting)
|
||||
|
||||
|
||||
class ReAbstractInstanceSerializers(serializers.Serializer):
|
||||
abstract = serializers.CharField(required=True, error_messages=ErrMessage.char(_("abstract")))
|
||||
|
||||
|
||||
class ChatSerializers(serializers.Serializer):
|
||||
class Operate(serializers.Serializer):
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
|
||||
def logic_delete(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -76,6 +83,15 @@ class ChatSerializers(serializers.Serializer):
|
|||
is_deleted=True)
|
||||
return True
|
||||
|
||||
def re_abstract(self, instance, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
ReAbstractInstanceSerializers(data=instance).is_valid(raise_exception=True)
|
||||
|
||||
QuerySet(Chat).filter(id=self.data.get('chat_id'), application_id=self.data.get('application_id')).update(
|
||||
abstract=instance.get('abstract'))
|
||||
return True
|
||||
|
||||
def delete(self, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
|
|
@ -83,8 +99,8 @@ class ChatSerializers(serializers.Serializer):
|
|||
return True
|
||||
|
||||
class ClientChatHistory(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
client_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("客户端id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
client_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Client id")))
|
||||
|
||||
def page(self, current_page: int, page_size: int, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -96,18 +112,18 @@ class ChatSerializers(serializers.Serializer):
|
|||
return page_search(current_page, page_size, queryset, lambda row: ChatSerializerModel(row).data)
|
||||
|
||||
class Query(serializers.Serializer):
|
||||
abstract = serializers.CharField(required=False, error_messages=ErrMessage.char("摘要"))
|
||||
start_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date("开始时间"))
|
||||
end_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date("结束时间"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
abstract = serializers.CharField(required=False, error_messages=ErrMessage.char(_("summary")))
|
||||
start_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("Start time")))
|
||||
end_time = serializers.DateField(format='%Y-%m-%d', error_messages=ErrMessage.date(_("End time")))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
min_star = serializers.IntegerField(required=False, min_value=0,
|
||||
error_messages=ErrMessage.integer("最小点赞数"))
|
||||
error_messages=ErrMessage.integer(_("Minimum number of likes")))
|
||||
min_trample = serializers.IntegerField(required=False, min_value=0,
|
||||
error_messages=ErrMessage.integer("最小点踩数"))
|
||||
comparer = serializers.CharField(required=False, error_messages=ErrMessage.char("比较器"), validators=[
|
||||
error_messages=ErrMessage.integer(_("Minimum number of clicks")))
|
||||
comparer = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Comparator")), validators=[
|
||||
validators.RegexValidator(regex=re.compile("^and|or$"),
|
||||
message="只支持and|or", code=500)
|
||||
message=_("Only supports and|or"), code=500)
|
||||
])
|
||||
|
||||
def get_end_time(self):
|
||||
|
|
@ -158,7 +174,14 @@ class ChatSerializers(serializers.Serializer):
|
|||
condition = base_condition & min_trample_query
|
||||
else:
|
||||
condition = base_condition
|
||||
return query_set.filter(condition).order_by("-application_chat.update_time")
|
||||
inner_queryset = QuerySet(Chat).filter(application_id=self.data.get("application_id"))
|
||||
if 'abstract' in self.data and self.data.get('abstract') is not None:
|
||||
inner_queryset = inner_queryset.filter(abstract__icontains=self.data.get('abstract'))
|
||||
|
||||
return {
|
||||
'inner_queryset': inner_queryset,
|
||||
'default_queryset': query_set.filter(condition).order_by("-application_chat.update_time")
|
||||
}
|
||||
|
||||
def list(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -176,9 +199,8 @@ class ChatSerializers(serializers.Serializer):
|
|||
@staticmethod
|
||||
def to_row(row: Dict):
|
||||
details = row.get('details')
|
||||
padding_problem_text = details.get('problem_padding').get(
|
||||
'padding_problem_text') if 'problem_padding' in details and 'padding_problem_text' in details.get(
|
||||
'problem_padding') else ""
|
||||
padding_problem_text = ' '.join(node.get("answer", "") for key, node in details.items() if
|
||||
node.get("type") == 'question-node')
|
||||
search_dataset_node_list = [(key, node) for key, node in details.items() if
|
||||
node.get("type") == 'search-dataset-node' or node.get(
|
||||
"step_type") == 'search_step']
|
||||
|
|
@ -201,8 +223,9 @@ class ChatSerializers(serializers.Serializer):
|
|||
"\n".join([
|
||||
f"{improve_paragraph_list[index].get('title')}\n{improve_paragraph_list[index].get('content')}"
|
||||
for index in range(len(improve_paragraph_list))]),
|
||||
row.get('asker').get('user_name'),
|
||||
row.get('message_tokens') + row.get('answer_tokens'), row.get('run_time'),
|
||||
str(row.get('create_time').strftime('%Y-%m-%d %H:%M:%S')
|
||||
str(row.get('create_time').astimezone(pytz.timezone(TIME_ZONE)).strftime('%Y-%m-%d %H:%M:%S')
|
||||
)]
|
||||
|
||||
def export(self, data, with_valid=True):
|
||||
|
|
@ -222,9 +245,14 @@ class ChatSerializers(serializers.Serializer):
|
|||
worksheet = workbook.active
|
||||
worksheet.title = 'Sheet1'
|
||||
|
||||
headers = ['会话ID', '摘要', '用户问题', '优化后问题', '回答', '用户反馈', '引用分段数',
|
||||
'分段标题+内容',
|
||||
'标注', '消耗tokens', '耗时(s)', '提问时间']
|
||||
headers = [gettext('Conversation ID'), gettext('summary'), gettext('User Questions'),
|
||||
gettext('Problem after optimization'),
|
||||
gettext('answer'), gettext('User feedback'),
|
||||
gettext('Reference segment number'),
|
||||
gettext('Section title + content'),
|
||||
gettext('Annotation'), gettext('USER'), gettext('Consuming tokens'),
|
||||
gettext('Time consumed (s)'),
|
||||
gettext('Question Time')]
|
||||
for col_idx, header in enumerate(headers, 1):
|
||||
cell = worksheet.cell(row=1, column=col_idx)
|
||||
cell.value = header
|
||||
|
|
@ -237,6 +265,10 @@ class ChatSerializers(serializers.Serializer):
|
|||
cell = worksheet.cell(row=row_idx, column=col_idx)
|
||||
if isinstance(value, str):
|
||||
value = re.sub(ILLEGAL_CHARACTERS_RE, '', value)
|
||||
if isinstance(value, datetime.datetime):
|
||||
eastern = pytz.timezone(TIME_ZONE)
|
||||
c = datetime.timezone(eastern._utcoffset)
|
||||
value = value.astimezone(c)
|
||||
cell.value = value
|
||||
|
||||
output = BytesIO()
|
||||
|
|
@ -259,16 +291,16 @@ class ChatSerializers(serializers.Serializer):
|
|||
with_table_name=False)
|
||||
|
||||
class OpenChat(serializers.Serializer):
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
user_id = self.data.get('user_id')
|
||||
application_id = self.data.get('application_id')
|
||||
if not QuerySet(Application).filter(id=application_id, user_id=user_id).exists():
|
||||
raise AppApiException(500, '应用不存在')
|
||||
raise AppApiException(500, gettext('Application does not exist'))
|
||||
|
||||
def open(self):
|
||||
self.is_valid(raise_exception=True)
|
||||
|
|
@ -286,7 +318,9 @@ class ChatSerializers(serializers.Serializer):
|
|||
work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=application_id).order_by(
|
||||
'-create_time')[0:1].first()
|
||||
if work_flow_version is None:
|
||||
raise AppApiException(500, "应用未发布,请发布后再使用")
|
||||
raise AppApiException(500,
|
||||
gettext(
|
||||
"The application has not been published. Please use it after publishing."))
|
||||
chat_cache.set(chat_id,
|
||||
ChatInfo(chat_id, [],
|
||||
[],
|
||||
|
|
@ -309,8 +343,8 @@ class ChatSerializers(serializers.Serializer):
|
|||
return chat_id
|
||||
|
||||
class OpenWorkFlowChat(serializers.Serializer):
|
||||
work_flow = WorkFlowSerializers(error_messages=ErrMessage.uuid("工作流"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
work_flow = WorkFlowSerializers(error_messages=ErrMessage.uuid(_("Workflow")))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
def open(self):
|
||||
self.is_valid(raise_exception=True)
|
||||
|
|
@ -332,26 +366,29 @@ class ChatSerializers(serializers.Serializer):
|
|||
return chat_id
|
||||
|
||||
class OpenTempChat(serializers.Serializer):
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
|
||||
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
|
||||
|
||||
id = serializers.UUIDField(required=False, allow_null=True,
|
||||
error_messages=ErrMessage.uuid("应用id"))
|
||||
error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.uuid("模型id"))
|
||||
error_messages=ErrMessage.uuid(_("Model id")))
|
||||
|
||||
multiple_rounds_dialogue = serializers.BooleanField(required=True,
|
||||
error_messages=ErrMessage.boolean("多轮会话"))
|
||||
error_messages=ErrMessage.boolean(
|
||||
_("Multi-round conversation")))
|
||||
|
||||
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
|
||||
error_messages=ErrMessage.list("关联数据集"))
|
||||
error_messages=ErrMessage.list(_("Related Datasets")))
|
||||
# 数据集相关设置
|
||||
dataset_setting = DatasetSettingSerializer(required=True)
|
||||
# 模型相关设置
|
||||
model_setting = ModelSettingSerializer(required=True)
|
||||
# 问题补全
|
||||
problem_optimization = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("问题补全"))
|
||||
problem_optimization = serializers.BooleanField(required=True,
|
||||
error_messages=ErrMessage.boolean(_("Question completion")))
|
||||
# 模型相关设置
|
||||
model_params_setting = serializers.JSONField(required=False, error_messages=ErrMessage.dict("模型参数相关设置"))
|
||||
model_params_setting = serializers.JSONField(required=False,
|
||||
error_messages=ErrMessage.dict(_("Model parameter settings")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
|
|
@ -365,7 +402,7 @@ class ChatSerializers(serializers.Serializer):
|
|||
if 'id' in self.data and self.data.get('id') is not None:
|
||||
application = QuerySet(Application).filter(id=self.data.get('id')).first()
|
||||
if application is None:
|
||||
raise AppApiException(500, "应用不存在")
|
||||
raise AppApiException(500, gettext("Application does not exist"))
|
||||
return application.user_id
|
||||
return self.data.get('user_id')
|
||||
|
||||
|
|
@ -408,18 +445,19 @@ class ChatSerializerModel(serializers.ModelSerializer):
|
|||
|
||||
class ChatRecordSerializer(serializers.Serializer):
|
||||
class Operate(serializers.Serializer):
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("应用id"))
|
||||
chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
application_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Application ID")))
|
||||
chat_record_id = serializers.UUIDField(required=True,
|
||||
error_messages=ErrMessage.uuid(_("Conversation record id")))
|
||||
|
||||
def is_valid(self, *, current_role=None, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
application_access_token = QuerySet(ApplicationAccessToken).filter(
|
||||
application_id=self.data.get('application_id')).first()
|
||||
if application_access_token is None:
|
||||
raise AppApiException(500, '不存在的应用认证信息')
|
||||
raise AppApiException(500, gettext('Application authentication information does not exist'))
|
||||
if not application_access_token.show_source and current_role == RoleConstants.APPLICATION_ACCESS_TOKEN.value:
|
||||
raise AppApiException(500, '未开启显示知识来源')
|
||||
raise AppApiException(500, gettext('Displaying knowledge sources is not enabled'))
|
||||
|
||||
def get_chat_record(self):
|
||||
chat_record_id = self.data.get('chat_record_id')
|
||||
|
|
@ -437,13 +475,13 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
self.is_valid(current_role=current_role, raise_exception=True)
|
||||
chat_record = self.get_chat_record()
|
||||
if chat_record is None:
|
||||
raise AppApiException(500, "对话不存在")
|
||||
raise AppApiException(500, gettext("Conversation does not exist"))
|
||||
return ChatRecordSerializer.Query.reset_chat_record(chat_record)
|
||||
|
||||
class Query(serializers.Serializer):
|
||||
application_id = serializers.UUIDField(required=True)
|
||||
chat_id = serializers.UUIDField(required=True)
|
||||
order_asc = serializers.BooleanField(required=False)
|
||||
order_asc = serializers.BooleanField(required=False, allow_null=True)
|
||||
|
||||
def list(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -499,23 +537,27 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
return page
|
||||
|
||||
class Vote(serializers.Serializer):
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
|
||||
chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id"))
|
||||
chat_record_id = serializers.UUIDField(required=True,
|
||||
error_messages=ErrMessage.uuid(_("Conversation record id")))
|
||||
|
||||
vote_status = serializers.ChoiceField(choices=VoteChoices.choices, error_messages=ErrMessage.uuid("投标状态"))
|
||||
vote_status = serializers.ChoiceField(choices=VoteChoices.choices,
|
||||
error_messages=ErrMessage.uuid(_("Bidding Status")))
|
||||
|
||||
@transaction.atomic
|
||||
def vote(self, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
if not try_lock(self.data.get('chat_record_id')):
|
||||
raise AppApiException(500, "正在对当前会话纪要进行投票中,请勿重复发送请求")
|
||||
raise AppApiException(500,
|
||||
gettext(
|
||||
"Voting on the current session minutes, please do not send repeated requests"))
|
||||
try:
|
||||
chat_record_details_model = QuerySet(ChatRecord).get(id=self.data.get('chat_record_id'),
|
||||
chat_id=self.data.get('chat_id'))
|
||||
if chat_record_details_model is None:
|
||||
raise AppApiException(500, "不存在的对话 chat_record_id")
|
||||
raise AppApiException(500, gettext("Non-existent conversation chat_record_id"))
|
||||
vote_status = self.data.get("vote_status")
|
||||
if chat_record_details_model.vote_status == VoteChoices.UN_VOTE:
|
||||
if vote_status == VoteChoices.STAR:
|
||||
|
|
@ -532,18 +574,18 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
chat_record_details_model.vote_status = VoteChoices.UN_VOTE
|
||||
chat_record_details_model.save()
|
||||
else:
|
||||
raise AppApiException(500, "已经投票过,请先取消后再进行投票")
|
||||
raise AppApiException(500, gettext("Already voted, please cancel first and then vote again"))
|
||||
finally:
|
||||
un_lock(self.data.get('chat_record_id'))
|
||||
return True
|
||||
|
||||
class ImproveSerializer(serializers.Serializer):
|
||||
title = serializers.CharField(required=False, max_length=256, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("段落标题"))
|
||||
content = serializers.CharField(required=True, error_messages=ErrMessage.char("段落内容"))
|
||||
error_messages=ErrMessage.char(_("Section title")))
|
||||
content = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Paragraph content")))
|
||||
|
||||
problem_text = serializers.CharField(required=False, max_length=256, allow_null=True, allow_blank=True,
|
||||
error_messages=ErrMessage.char("问题"))
|
||||
error_messages=ErrMessage.char(_("question")))
|
||||
|
||||
class ParagraphModel(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
|
|
@ -551,9 +593,10 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
fields = "__all__"
|
||||
|
||||
class ChatRecordImprove(serializers.Serializer):
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
|
||||
chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id"))
|
||||
chat_record_id = serializers.UUIDField(required=True,
|
||||
error_messages=ErrMessage.uuid(_("Conversation record id")))
|
||||
|
||||
def get(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -562,7 +605,7 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
chat_id = self.data.get('chat_id')
|
||||
chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_id).first()
|
||||
if chat_record is None:
|
||||
raise AppApiException(500, '不存在的对话记录')
|
||||
raise AppApiException(500, gettext('Conversation record does not exist'))
|
||||
if chat_record.improve_paragraph_id_list is None or len(chat_record.improve_paragraph_id_list) == 0:
|
||||
return []
|
||||
|
||||
|
|
@ -576,19 +619,20 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
return [ChatRecordSerializer.ParagraphModel(p).data for p in paragraph_model_list]
|
||||
|
||||
class Improve(serializers.Serializer):
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
|
||||
chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id"))
|
||||
chat_record_id = serializers.UUIDField(required=True,
|
||||
error_messages=ErrMessage.uuid(_("Conversation record id")))
|
||||
|
||||
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
|
||||
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Knowledge base id")))
|
||||
|
||||
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
|
||||
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Document id")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
if not QuerySet(Document).filter(id=self.data.get('document_id'),
|
||||
dataset_id=self.data.get('dataset_id')).exists():
|
||||
raise AppApiException(500, "文档id不正确")
|
||||
raise AppApiException(500, gettext("The document id is incorrect"))
|
||||
|
||||
@staticmethod
|
||||
def post_embedding_paragraph(chat_record, paragraph_id, dataset_id):
|
||||
|
|
@ -607,7 +651,7 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
chat_id = self.data.get('chat_id')
|
||||
chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_id).first()
|
||||
if chat_record is None:
|
||||
raise AppApiException(500, '不存在的对话记录')
|
||||
raise AppApiException(500, gettext('Conversation record does not exist'))
|
||||
|
||||
document_id = self.data.get("document_id")
|
||||
dataset_id = self.data.get("dataset_id")
|
||||
|
|
@ -634,15 +678,16 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
return ChatRecordSerializerModel(chat_record).data, paragraph.id, dataset_id
|
||||
|
||||
class Operate(serializers.Serializer):
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话id"))
|
||||
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
|
||||
|
||||
chat_record_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("对话记录id"))
|
||||
chat_record_id = serializers.UUIDField(required=True,
|
||||
error_messages=ErrMessage.uuid(_("Conversation record id")))
|
||||
|
||||
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
|
||||
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Knowledge base id")))
|
||||
|
||||
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
|
||||
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Document id")))
|
||||
|
||||
paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("段落id"))
|
||||
paragraph_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Paragraph id")))
|
||||
|
||||
def delete(self, with_valid=True):
|
||||
if with_valid:
|
||||
|
|
@ -655,9 +700,12 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
paragraph_id = self.data.get('paragraph_id')
|
||||
chat_record = QuerySet(ChatRecord).filter(id=chat_record_id, chat_id=chat_id).first()
|
||||
if chat_record is None:
|
||||
raise AppApiException(500, '不存在的对话记录')
|
||||
raise AppApiException(500, gettext('Conversation record does not exist'))
|
||||
if not chat_record.improve_paragraph_id_list.__contains__(uuid.UUID(paragraph_id)):
|
||||
raise AppApiException(500, f'段落id错误,当前对话记录不存在【{paragraph_id}】段落id')
|
||||
message = lazy_format(
|
||||
_('The paragraph id is wrong. The current conversation record does not exist. [{paragraph_id}] paragraph id'),
|
||||
paragraph_id=paragraph_id)
|
||||
raise AppApiException(500, message)
|
||||
chat_record.improve_paragraph_id_list = [row for row in chat_record.improve_paragraph_id_list if
|
||||
str(row) != paragraph_id]
|
||||
chat_record.save()
|
||||
|
|
@ -667,15 +715,15 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
return o.delete()
|
||||
|
||||
class PostImprove(serializers.Serializer):
|
||||
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("知识库id"))
|
||||
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
|
||||
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Knowledge base id")))
|
||||
document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Document id")))
|
||||
chat_ids = serializers.ListSerializer(child=serializers.UUIDField(), required=True,
|
||||
error_messages=ErrMessage.list("对话id"))
|
||||
error_messages=ErrMessage.list(_("Conversation ID")))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
if not Document.objects.filter(id=self.data['document_id'], dataset_id=self.data['dataset_id']).exists():
|
||||
raise AppApiException(500, "文档id不正确")
|
||||
raise AppApiException(500, gettext("The document id is incorrect"))
|
||||
|
||||
@staticmethod
|
||||
def post_embedding_paragraph(paragraph_ids, dataset_id):
|
||||
|
|
@ -694,7 +742,7 @@ class ChatRecordSerializer(serializers.Serializer):
|
|||
# 获取所有聊天记录
|
||||
chat_record_list = list(ChatRecord.objects.filter(chat_id__in=chat_ids))
|
||||
if len(chat_record_list) < len(chat_ids):
|
||||
raise AppApiException(500, "存在不存在的对话记录")
|
||||
raise AppApiException(500, gettext("Conversation records that do not exist"))
|
||||
|
||||
# 批量创建段落和问题映射
|
||||
paragraphs = []
|
||||
|
|
|
|||
|
|
@ -10,7 +10,8 @@ SELECT
|
|||
application_chat_record_temp."index" as "index",
|
||||
application_chat_record_temp.improve_paragraph_list as improve_paragraph_list,
|
||||
application_chat_record_temp.vote_status as vote_status,
|
||||
application_chat_record_temp.create_time as create_time
|
||||
application_chat_record_temp.create_time as create_time,
|
||||
to_json(application_chat.asker) as asker
|
||||
FROM
|
||||
application_chat application_chat
|
||||
LEFT JOIN (
|
||||
|
|
@ -22,6 +23,8 @@ FROM
|
|||
chat_id
|
||||
FROM
|
||||
application_chat_record
|
||||
WHERE chat_id IN (
|
||||
SELECT id FROM application_chat ${inner_queryset})
|
||||
GROUP BY
|
||||
application_chat_record.chat_id
|
||||
) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id
|
||||
|
|
@ -34,4 +37,5 @@ FROM
|
|||
END as improve_paragraph_list
|
||||
FROM
|
||||
application_chat_record application_chat_record
|
||||
) application_chat_record_temp ON application_chat_record_temp.chat_id = application_chat."id"
|
||||
) application_chat_record_temp ON application_chat_record_temp.chat_id = application_chat."id"
|
||||
${default_queryset}
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
SELECT
|
||||
*
|
||||
*,to_json(asker) as asker
|
||||
FROM
|
||||
application_chat application_chat
|
||||
LEFT JOIN (
|
||||
|
|
@ -11,6 +11,9 @@ FROM
|
|||
chat_id
|
||||
FROM
|
||||
application_chat_record
|
||||
WHERE chat_id IN (
|
||||
SELECT id FROM application_chat ${inner_queryset})
|
||||
GROUP BY
|
||||
application_chat_record.chat_id
|
||||
) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id
|
||||
) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id
|
||||
${default_queryset}
|
||||
|
|
@ -9,6 +9,7 @@
|
|||
from drf_yasg import openapi
|
||||
|
||||
from common.mixins.api_mixin import ApiMixin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ApplicationApi(ApiMixin):
|
||||
|
|
@ -20,7 +21,7 @@ class ApplicationApi(ApiMixin):
|
|||
in_=openapi.IN_FORM,
|
||||
type=openapi.TYPE_FILE,
|
||||
required=True,
|
||||
description='上传文件')
|
||||
description=_('Upload files'))
|
||||
]
|
||||
|
||||
class Authentication(ApiMixin):
|
||||
|
|
@ -30,12 +31,22 @@ class ApplicationApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['access_token', ],
|
||||
properties={
|
||||
'access_token': openapi.Schema(type=openapi.TYPE_STRING, title="应用认证token",
|
||||
description="应用认证token"),
|
||||
'access_token': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Application authentication token"),
|
||||
description=_("Application authentication token")),
|
||||
|
||||
}
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_response_body_api():
|
||||
return openapi.Schema(
|
||||
type=openapi.TYPE_STRING,
|
||||
title=_("Application authentication token"),
|
||||
description=_("Application authentication token"),
|
||||
default="token"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_response_body_api():
|
||||
return openapi.Schema(
|
||||
|
|
@ -43,27 +54,35 @@ class ApplicationApi(ApiMixin):
|
|||
required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'user_id', 'status', 'create_time',
|
||||
'update_time'],
|
||||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"),
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
|
||||
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
|
||||
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
|
||||
description="多轮对话次数"),
|
||||
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_("Primary key id")),
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
|
||||
description=_("Application Name")),
|
||||
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
|
||||
description=_("Application Description")),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"), description=_("Model id")),
|
||||
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
|
||||
title=_("Number of multi-round conversations"),
|
||||
description=_("Number of multi-round conversations")),
|
||||
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
|
||||
description=_("Opening remarks")),
|
||||
'example': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING),
|
||||
title="示例列表", description="示例列表"),
|
||||
'user_id': openapi.Schema(type=openapi.TYPE_STRING, title="所属用户", description="所属用户"),
|
||||
title=_("Example List"), description=_("Example List")),
|
||||
'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Affiliation user"),
|
||||
description=_("Affiliation user")),
|
||||
|
||||
'status': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否发布", description='是否发布'),
|
||||
'status': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is publish"), description=_('Is publish')),
|
||||
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description='创建时间'),
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"),
|
||||
description=_('Creation time')),
|
||||
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description='修改时间'),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"),
|
||||
description=_('Modification time')),
|
||||
|
||||
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING),
|
||||
title="关联知识库Id列表",
|
||||
description="关联知识库Id列表(查询详情的时候返回)")
|
||||
title=_("List of associated knowledge base IDs"),
|
||||
description=_(
|
||||
"List of associated knowledge base IDs (returned when querying details)"))
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -74,11 +93,11 @@ class ApplicationApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='model_type', in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=False,
|
||||
description='模型类型'),
|
||||
description=_('Model Type')),
|
||||
]
|
||||
|
||||
class ApiKey(ApiMixin):
|
||||
|
|
@ -88,7 +107,7 @@ class ApplicationApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id')
|
||||
description=_('Application ID'))
|
||||
|
||||
]
|
||||
|
||||
|
|
@ -99,12 +118,12 @@ class ApplicationApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='api_key_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用api_key id')
|
||||
description=_('Application api_key id'))
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -113,11 +132,33 @@ class ApplicationApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=[],
|
||||
properties={
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否激活",
|
||||
description="是否激活"),
|
||||
'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否允许跨域",
|
||||
description="是否允许跨域"),
|
||||
'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title='跨域列表',
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
|
||||
description=_("Is activation")),
|
||||
'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN,
|
||||
title=_("Is cross-domain allowed"),
|
||||
description=_("Is cross-domain allowed")),
|
||||
'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Cross-domain list'),
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING))
|
||||
}
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_response_body_api():
|
||||
return openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
|
||||
description=_("Primary key id")),
|
||||
'secret_key': openapi.Schema(type=openapi.TYPE_STRING, title=_("Secret key"),
|
||||
description=_("Secret key")),
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
|
||||
description=_("Is activation")),
|
||||
'application_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"),
|
||||
description=_("Application ID")),
|
||||
'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN,
|
||||
title=_("Is cross-domain allowed"),
|
||||
description=_("Is cross-domain allowed")),
|
||||
'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Cross-domain list'),
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING))
|
||||
}
|
||||
)
|
||||
|
|
@ -129,7 +170,7 @@ class ApplicationApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id')
|
||||
description=_('Application ID'))
|
||||
|
||||
]
|
||||
|
||||
|
|
@ -139,18 +180,55 @@ class ApplicationApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=[],
|
||||
properties={
|
||||
'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="重置Token",
|
||||
description="重置Token"),
|
||||
'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Reset Token"),
|
||||
description=_("Reset Token")),
|
||||
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否激活", description="是否激活"),
|
||||
'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="访问次数", description="访问次数"),
|
||||
'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启白名单",
|
||||
description="是否开启白名单"),
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
|
||||
description=_("Is activation")),
|
||||
'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of visits"),
|
||||
description=_("Number of visits")),
|
||||
'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Whether to enable whitelist"),
|
||||
description=_("Whether to enable whitelist")),
|
||||
'white_list': openapi.Schema(type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING), title="白名单列表",
|
||||
description="白名单列表"),
|
||||
'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否显示知识来源",
|
||||
description="是否显示知识来源"),
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING), title=_("Whitelist"),
|
||||
description=_("Whitelist")),
|
||||
'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN,
|
||||
title=_("Whether to display knowledge sources"),
|
||||
description=_("Whether to display knowledge sources")),
|
||||
'language': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("language"),
|
||||
description=_("language"))
|
||||
}
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_response_body_api():
|
||||
return openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=[],
|
||||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
|
||||
description=_("Primary key id")),
|
||||
'access_token': openapi.Schema(type=openapi.TYPE_STRING, title=_("Access Token"),
|
||||
description=_("Access Token")),
|
||||
'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Reset Token"),
|
||||
description=_("Reset Token")),
|
||||
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
|
||||
description=_("Is activation")),
|
||||
'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of visits"),
|
||||
description=_("Number of visits")),
|
||||
'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Whether to enable whitelist"),
|
||||
description=_("Whether to enable whitelist")),
|
||||
'white_list': openapi.Schema(type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING), title=_("Whitelist"),
|
||||
description=_("Whitelist")),
|
||||
'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN,
|
||||
title=_("Whether to display knowledge sources"),
|
||||
description=_("Whether to display knowledge sources")),
|
||||
'language': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("language"),
|
||||
description=_("language"))
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -161,37 +239,46 @@ class ApplicationApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=[],
|
||||
properties={
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
|
||||
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
|
||||
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
|
||||
description="多轮对话次数"),
|
||||
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
|
||||
description=_("Application Name")),
|
||||
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
|
||||
description=_("Application Description")),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"),
|
||||
description=_("Model id")),
|
||||
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
|
||||
title=_("Number of multi-round conversations"),
|
||||
description=_("Number of multi-round conversations")),
|
||||
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
|
||||
description=_("Opening remarks")),
|
||||
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING),
|
||||
title="关联知识库Id列表", description="关联知识库Id列表"),
|
||||
title=_("List of associated knowledge base IDs"),
|
||||
description=_("List of associated knowledge base IDs")),
|
||||
'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
|
||||
'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
|
||||
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化",
|
||||
description="是否开启问题优化", default=True),
|
||||
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"),
|
||||
description=_("Whether to enable problem optimization"),
|
||||
default=True),
|
||||
'icon': openapi.Schema(type=openapi.TYPE_STRING, title="icon",
|
||||
description="icon", default="/ui/favicon.ico"),
|
||||
'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型",
|
||||
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"),
|
||||
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"),
|
||||
description=_("Application Type SIMPLE | WORK_FLOW")),
|
||||
'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
|
||||
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词',
|
||||
description="问题优化提示词",
|
||||
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中"),
|
||||
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID",
|
||||
description="文字转语音模型ID"),
|
||||
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id",
|
||||
description="语音转文字模型id"),
|
||||
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启",
|
||||
description="语音转文字是否开启"),
|
||||
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启",
|
||||
description="语音转文字是否开启"),
|
||||
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型",
|
||||
description="文字转语音类型")
|
||||
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_('Question optimization tips'),
|
||||
description=_("Question optimization tips"),
|
||||
default=_(
|
||||
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")),
|
||||
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"),
|
||||
description=_("Text-to-speech model ID")),
|
||||
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"),
|
||||
description=_("Speech-to-text model id")),
|
||||
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"),
|
||||
description=_("Is speech-to-text enabled")),
|
||||
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"),
|
||||
description=_("Is text-to-speech enabled")),
|
||||
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"),
|
||||
description=_("Text-to-speech type"))
|
||||
|
||||
}
|
||||
)
|
||||
|
|
@ -204,11 +291,11 @@ class ApplicationApi(ApiMixin):
|
|||
required=[''],
|
||||
properties={
|
||||
'nodes': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_OBJECT),
|
||||
title="节点列表", description="节点列表",
|
||||
title=_("Node List"), description=_("Node List"),
|
||||
default=[]),
|
||||
'edges': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_OBJECT),
|
||||
title='连线列表', description="连线列表",
|
||||
default={}),
|
||||
title=_('Connection List'), description=_("Connection List"),
|
||||
default=[]),
|
||||
|
||||
}
|
||||
)
|
||||
|
|
@ -220,24 +307,31 @@ class ApplicationApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=[''],
|
||||
properties={
|
||||
'top_n': openapi.Schema(type=openapi.TYPE_NUMBER, title="引用分段数", description="引用分段数",
|
||||
'top_n': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Reference segment number"),
|
||||
description=_("Reference segment number"),
|
||||
default=5),
|
||||
'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title='相似度', description="相似度",
|
||||
'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('Similarity'),
|
||||
description=_("Similarity"),
|
||||
default=0.6),
|
||||
'max_paragraph_char_number': openapi.Schema(type=openapi.TYPE_NUMBER, title='最多引用字符数',
|
||||
description="最多引用字符数", default=3000),
|
||||
'search_mode': openapi.Schema(type=openapi.TYPE_STRING, title='检索模式',
|
||||
'max_paragraph_char_number': openapi.Schema(type=openapi.TYPE_NUMBER,
|
||||
title=_('Maximum number of quoted characters'),
|
||||
description=_("Maximum number of quoted characters"),
|
||||
default=3000),
|
||||
'search_mode': openapi.Schema(type=openapi.TYPE_STRING, title=_('Retrieval Mode'),
|
||||
description="embedding|keywords|blend", default='embedding'),
|
||||
'no_references_setting': openapi.Schema(type=openapi.TYPE_OBJECT, title='检索模式',
|
||||
'no_references_setting': openapi.Schema(type=openapi.TYPE_OBJECT,
|
||||
title=_('No reference segment settings'),
|
||||
required=['status', 'value'],
|
||||
properties={
|
||||
'status': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title="状态",
|
||||
description="ai作答:ai_questioning,指定回答:designated_answer",
|
||||
title=_("state"),
|
||||
description=_(
|
||||
"ai_questioning|designated_answer"),
|
||||
default='ai_questioning'),
|
||||
'value': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title="值",
|
||||
description="ai作答:就是题词,指定回答:就是指定回答内容",
|
||||
title=_("value"),
|
||||
description=_(
|
||||
"ai_questioning: is the title, designated_answer: is the designated answer content"),
|
||||
default='{question}'),
|
||||
}),
|
||||
}
|
||||
|
|
@ -250,23 +344,26 @@ class ApplicationApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['prompt'],
|
||||
properties={
|
||||
'prompt': openapi.Schema(type=openapi.TYPE_STRING, title="提示词", description="提示词",
|
||||
default=('已知信息:'
|
||||
'\n{data}'
|
||||
'\n回答要求:'
|
||||
'\n- 如果你不知道答案或者没有从获取答案,请回答“没有在知识库中查找到相关信息,建议咨询相关技术支持或参考官方文档进行操作”。'
|
||||
'\n- 避免提及你是从<data></data>中获得的知识。'
|
||||
'\n- 请保持答案与<data></data>中描述的一致。'
|
||||
'\n- 请使用markdown 语法优化答案的格式。'
|
||||
'\n- <data></data>中的图片链接、链接地址和脚本语言请完整返回。'
|
||||
'\n- 请使用与问题相同的语言来回答。'
|
||||
'\n问题:'
|
||||
'\n{question}')),
|
||||
'prompt': openapi.Schema(type=openapi.TYPE_STRING, title=_("Prompt word"),
|
||||
description=_("Prompt word"),
|
||||
default=_(("Known information:\n"
|
||||
"{data}\n"
|
||||
"Answer requirements:\n"
|
||||
"- If you don't know the answer or don't get the answer, please answer \"No relevant information found in the knowledge base, it is recommended to consult relevant technical support or refer to official documents for operation\".\n"
|
||||
"- Avoid mentioning that you got the knowledge from <data></data>.\n"
|
||||
"- Please keep the answer consistent with the description in <data></data>.\n"
|
||||
"- Please use markdown syntax to optimize the format of the answer.\n"
|
||||
"- Please return the image link, link address and script language in <data></data> completely.\n"
|
||||
"- Please answer in the same language as the question.\n"
|
||||
"Question:\n"
|
||||
"{question}"))),
|
||||
|
||||
'system': openapi.Schema(type=openapi.TYPE_STRING, title="系统提示词(角色)",
|
||||
description="系统提示词(角色)"),
|
||||
'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING, title="无引用分段提示词",
|
||||
default="{question}", description="无引用分段提示词")
|
||||
'system': openapi.Schema(type=openapi.TYPE_STRING, title=_("System prompt words (role)"),
|
||||
description=_("System prompt words (role)")),
|
||||
'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("No citation segmentation prompt"),
|
||||
default="{question}",
|
||||
description=_("No citation segmentation prompt"))
|
||||
|
||||
}
|
||||
)
|
||||
|
|
@ -288,36 +385,96 @@ class ApplicationApi(ApiMixin):
|
|||
return openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=['name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
|
||||
'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type'],
|
||||
'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type',
|
||||
'work_flow'],
|
||||
properties={
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
|
||||
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
|
||||
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
|
||||
description="多轮对话次数"),
|
||||
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
|
||||
description=_("Application Name")),
|
||||
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
|
||||
description=_("Application Description")),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"),
|
||||
description=_("Model id")),
|
||||
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
|
||||
title=_("Number of multi-round conversations"),
|
||||
description=_("Number of multi-round conversations")),
|
||||
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
|
||||
description=_("Opening remarks")),
|
||||
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING),
|
||||
title="关联知识库Id列表", description="关联知识库Id列表"),
|
||||
title=_("List of associated knowledge base IDs"),
|
||||
description=_("List of associated knowledge base IDs")),
|
||||
'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
|
||||
'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
|
||||
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化",
|
||||
description="是否开启问题优化", default=True),
|
||||
'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型",
|
||||
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"),
|
||||
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词',
|
||||
description="问题优化提示词",
|
||||
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中"),
|
||||
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID",
|
||||
description="文字转语音模型ID"),
|
||||
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id",
|
||||
description="语音转文字模型id"),
|
||||
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启",
|
||||
description="语音转文字是否开启"),
|
||||
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启",
|
||||
description="语音转文字是否开启"),
|
||||
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型",
|
||||
description="文字转语音类型")
|
||||
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"),
|
||||
description=_("Problem Optimization"), default=True),
|
||||
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"),
|
||||
description=_("Application Type SIMPLE | WORK_FLOW")),
|
||||
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_('Question optimization tips'),
|
||||
description=_("Question optimization tips"),
|
||||
default=_(
|
||||
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")),
|
||||
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"),
|
||||
description=_("Text-to-speech model ID")),
|
||||
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"),
|
||||
description=_("Speech-to-text model id")),
|
||||
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"),
|
||||
description=_("Is speech-to-text enabled")),
|
||||
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"),
|
||||
description=_("Is text-to-speech enabled")),
|
||||
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"),
|
||||
description=_("Text-to-speech type")),
|
||||
'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
|
||||
}
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_response_body_api():
|
||||
return openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
|
||||
'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type',
|
||||
'work_flow'],
|
||||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
|
||||
description=_("Primary key id")),
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
|
||||
description=_("Application Name")),
|
||||
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
|
||||
description=_("Application Description")),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"),
|
||||
description=_("Model id")),
|
||||
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
|
||||
title=_("Number of multi-round conversations"),
|
||||
description=_("Number of multi-round conversations")),
|
||||
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
|
||||
description=_("Opening remarks")),
|
||||
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING),
|
||||
title=_("List of associated knowledge base IDs"),
|
||||
description=_("List of associated knowledge base IDs")),
|
||||
'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
|
||||
'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
|
||||
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"),
|
||||
description=_("Problem Optimization"), default=True),
|
||||
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"),
|
||||
description=_("Application Type SIMPLE | WORK_FLOW")),
|
||||
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_('Question optimization tips'),
|
||||
description=_("Question optimization tips"),
|
||||
default=_(
|
||||
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")),
|
||||
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"),
|
||||
description=_("Text-to-speech model ID")),
|
||||
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"),
|
||||
description=_("Speech-to-text model id")),
|
||||
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"),
|
||||
description=_("Is speech-to-text enabled")),
|
||||
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"),
|
||||
description=_("Is text-to-speech enabled")),
|
||||
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"),
|
||||
description=_("Text-to-speech type")),
|
||||
'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -328,12 +485,33 @@ class ApplicationApi(ApiMixin):
|
|||
in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=False,
|
||||
description='应用名称'),
|
||||
description=_('Application Name')),
|
||||
openapi.Parameter(name='desc',
|
||||
in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=False,
|
||||
description='应用描述')
|
||||
description=_('Application Description'))
|
||||
]
|
||||
|
||||
class Export(ApiMixin):
|
||||
@staticmethod
|
||||
def get_request_params_api():
|
||||
return [openapi.Parameter(name='application_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description=_('Application ID')),
|
||||
|
||||
]
|
||||
|
||||
class Import(ApiMixin):
|
||||
@staticmethod
|
||||
def get_request_params_api():
|
||||
return [openapi.Parameter(name='file',
|
||||
in_=openapi.IN_FORM,
|
||||
type=openapi.TYPE_FILE,
|
||||
required=True,
|
||||
description=_('Upload image files'))
|
||||
]
|
||||
|
||||
class Operate(ApiMixin):
|
||||
|
|
@ -343,6 +521,28 @@ class ApplicationApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
|
||||
]
|
||||
|
||||
class TextToSpeech(ApiMixin):
|
||||
@staticmethod
|
||||
def get_request_params_api():
|
||||
return [openapi.Parameter(name='application_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description=_('Application ID')),
|
||||
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def get_request_body_api():
|
||||
return openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=[],
|
||||
properties={
|
||||
'text': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text"),
|
||||
description=_("Text")),
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@
|
|||
from drf_yasg import openapi
|
||||
|
||||
from common.mixins.api_mixin import ApiMixin
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
class ApplicationStatisticsApi(ApiMixin):
|
||||
@staticmethod
|
||||
|
|
@ -18,17 +18,17 @@ class ApplicationStatisticsApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='start_time',
|
||||
in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='开始时间'),
|
||||
description=_('Start time')),
|
||||
openapi.Parameter(name='end_time',
|
||||
in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='结束时间'),
|
||||
description=_('End time')),
|
||||
]
|
||||
|
||||
class ChatRecordAggregate(ApiMixin):
|
||||
|
|
@ -38,21 +38,21 @@ class ApplicationStatisticsApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['star_num', 'trample_num', 'tokens_num', 'chat_record_count'],
|
||||
properties={
|
||||
'star_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="点赞数量",
|
||||
description="点赞数量"),
|
||||
'star_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of Likes"),
|
||||
description=_("Number of Likes")),
|
||||
|
||||
'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="点踩数量", description="点踩数量"),
|
||||
'tokens_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="token使用数量",
|
||||
description="token使用数量"),
|
||||
'chat_record_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="对话次数",
|
||||
description="对话次数"),
|
||||
'customer_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="客户数量",
|
||||
description="客户数量"),
|
||||
'customer_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="客户新增数量",
|
||||
description="客户新增数量"),
|
||||
'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of thumbs-downs"), description=_("Number of thumbs-downs")),
|
||||
'tokens_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of tokens used"),
|
||||
description=_("Number of tokens used")),
|
||||
'chat_record_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of conversations"),
|
||||
description=_("Number of conversations")),
|
||||
'customer_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of customers"),
|
||||
description=_("Number of customers")),
|
||||
'customer_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of new customers"),
|
||||
description=_("Number of new customers")),
|
||||
'day': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title="日期",
|
||||
description="日期,只有查询趋势的时候才有该字段"),
|
||||
title=_("time"),
|
||||
description=_("Time, this field is only available when querying trends")),
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -63,11 +63,11 @@ class ApplicationStatisticsApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['added_count'],
|
||||
properties={
|
||||
'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="新增数量", description="新增数量"),
|
||||
'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("New quantity"), description=_("New quantity")),
|
||||
|
||||
'day': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title="时间",
|
||||
description="时间"),
|
||||
title=_("time"),
|
||||
description=_("time")),
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -78,9 +78,9 @@ class ApplicationStatisticsApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['added_count'],
|
||||
properties={
|
||||
'today_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="今日新增数量",
|
||||
description="今日新增数量"),
|
||||
'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title="新增数量", description="新增数量"),
|
||||
'today_added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Today's new quantity"),
|
||||
description=_("Today's new quantity")),
|
||||
'added_count': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("New quantity"), description=_("New quantity")),
|
||||
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@
|
|||
from drf_yasg import openapi
|
||||
|
||||
from common.mixins.api_mixin import ApiMixin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ApplicationVersionApi(ApiMixin):
|
||||
|
|
@ -18,13 +19,16 @@ class ApplicationVersionApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['id', 'name', 'work_flow', 'create_time', 'update_time'],
|
||||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_NUMBER, title="主键id",
|
||||
description="主键id"),
|
||||
'name': openapi.Schema(type=openapi.TYPE_NUMBER, title="版本名称",
|
||||
description="版本名称"),
|
||||
'work_flow': openapi.Schema(type=openapi.TYPE_STRING, title="工作流数据", description='工作流数据'),
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description='创建时间'),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description='修改时间')
|
||||
'id': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Primary key id"),
|
||||
description=_("Primary key id")),
|
||||
'name': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Version Name"),
|
||||
description=_("Version Name")),
|
||||
'work_flow': openapi.Schema(type=openapi.TYPE_STRING, title=_("Workflow data"),
|
||||
description=_('Workflow data')),
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"),
|
||||
description=_('Creation time')),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"),
|
||||
description=_('Modification time'))
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -35,12 +39,12 @@ class ApplicationVersionApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='name',
|
||||
in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=False,
|
||||
description='版本名称')]
|
||||
description=_('Version Name'))]
|
||||
|
||||
class Operate(ApiMixin):
|
||||
@staticmethod
|
||||
|
|
@ -49,12 +53,12 @@ class ApplicationVersionApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='work_flow_version_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用版本id'), ]
|
||||
description=_('Application version id')), ]
|
||||
|
||||
class Edit(ApiMixin):
|
||||
@staticmethod
|
||||
|
|
@ -63,7 +67,7 @@ class ApplicationVersionApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=[],
|
||||
properties={
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title="版本名称",
|
||||
description="版本名称")
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Version Name"),
|
||||
description=_("Version Name"))
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ from drf_yasg import openapi
|
|||
|
||||
from application.swagger_api.application_api import ApplicationApi
|
||||
from common.mixins.api_mixin import ApiMixin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ChatClientHistoryApi(ApiMixin):
|
||||
|
|
@ -19,31 +20,143 @@ class ChatClientHistoryApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id')
|
||||
description=_('Application ID'))
|
||||
]
|
||||
|
||||
class Operate(ApiMixin):
|
||||
@staticmethod
|
||||
def get_request_params_api():
|
||||
return [openapi.Parameter(name='application_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='chat_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description=_('Conversation ID')),
|
||||
]
|
||||
|
||||
class ReAbstract(ApiMixin):
|
||||
@staticmethod
|
||||
def get_request_body_api():
|
||||
return openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=['abstract'],
|
||||
properties={
|
||||
'abstract': openapi.Schema(type=openapi.TYPE_STRING, title=_("abstract"),
|
||||
description=_("abstract"))
|
||||
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class OpenAIChatApi(ApiMixin):
|
||||
@staticmethod
|
||||
def get_response_body_api():
|
||||
return openapi.Responses(responses={
|
||||
200: openapi.Response(description=_('response parameters'),
|
||||
schema=openapi.Schema(type=openapi.TYPE_OBJECT,
|
||||
required=['id',
|
||||
'choices'],
|
||||
properties={
|
||||
'id': openapi.Schema(
|
||||
type=openapi.TYPE_STRING,
|
||||
title=_(
|
||||
"Conversation ID")),
|
||||
'choices': openapi.Schema(
|
||||
type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=[
|
||||
'message'],
|
||||
properties={
|
||||
'finish_reason': openapi.Schema(
|
||||
type=openapi.TYPE_STRING, ),
|
||||
'index': openapi.Schema(
|
||||
type=openapi.TYPE_INTEGER),
|
||||
'answer_list': openapi.Schema(
|
||||
type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=[
|
||||
'content'],
|
||||
properties={
|
||||
'content': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
'view_type': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
'runtime_node_id': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
'chat_record_id': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
'reasoning_content': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
}
|
||||
)),
|
||||
'message': openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=[
|
||||
'content'],
|
||||
properties={
|
||||
'content': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
'role': openapi.Schema(
|
||||
type=openapi.TYPE_STRING)
|
||||
|
||||
}),
|
||||
|
||||
}
|
||||
)),
|
||||
'created': openapi.Schema(
|
||||
type=openapi.TYPE_INTEGER),
|
||||
'model': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
'object': openapi.Schema(
|
||||
type=openapi.TYPE_STRING),
|
||||
'usage': openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
required=[
|
||||
'completion_tokens',
|
||||
'prompt_tokens',
|
||||
'total_tokens'],
|
||||
properties={
|
||||
'completion_tokens': openapi.Schema(
|
||||
type=openapi.TYPE_INTEGER),
|
||||
'prompt_tokens': openapi.Schema(
|
||||
type=openapi.TYPE_INTEGER),
|
||||
'total_tokens': openapi.Schema(
|
||||
type=openapi.TYPE_INTEGER)
|
||||
})
|
||||
|
||||
}))})
|
||||
|
||||
@staticmethod
|
||||
def get_request_body_api():
|
||||
return openapi.Schema(type=openapi.TYPE_OBJECT,
|
||||
required=['message'],
|
||||
properties={
|
||||
'messages': openapi.Schema(type=openapi.TYPE_ARRAY, title="问题", description="问题",
|
||||
'messages': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("problem"),
|
||||
description=_("problem"),
|
||||
items=openapi.Schema(type=openapi.TYPE_OBJECT,
|
||||
required=['role', 'content'],
|
||||
properties={
|
||||
'content': openapi.Schema(
|
||||
type=openapi.TYPE_STRING,
|
||||
title="问题内容", default=''),
|
||||
title=_("Question content"),
|
||||
default=''),
|
||||
'role': openapi.Schema(
|
||||
type=openapi.TYPE_STRING,
|
||||
title='角色', default="user")
|
||||
title=_('role'),
|
||||
default="user")
|
||||
}
|
||||
)),
|
||||
'chat_id': openapi.Schema(type=openapi.TYPE_STRING, title="对话id"),
|
||||
're_chat': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="重新生成", default=False),
|
||||
'stream': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="流式输出", default=True)
|
||||
'chat_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Conversation ID")),
|
||||
're_chat': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("regenerate"),
|
||||
default=False),
|
||||
'stream': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Stream Output"),
|
||||
default=True)
|
||||
|
||||
})
|
||||
|
||||
|
|
@ -55,9 +168,68 @@ class ChatApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['message'],
|
||||
properties={
|
||||
'message': openapi.Schema(type=openapi.TYPE_STRING, title="问题", description="问题"),
|
||||
're_chat': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="重新生成", default=False),
|
||||
'stream': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="重新生成", default=True)
|
||||
'message': openapi.Schema(type=openapi.TYPE_STRING, title=_("problem"), description=_("problem")),
|
||||
're_chat': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("regenerate"), default=False),
|
||||
'stream': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is it streaming output"), default=True),
|
||||
|
||||
'form_data': openapi.Schema(type=openapi.TYPE_OBJECT, title=_("Form data"),
|
||||
description=_("Form data"),
|
||||
default={}),
|
||||
'image_list': openapi.Schema(
|
||||
type=openapi.TYPE_ARRAY,
|
||||
title=_("Image list"),
|
||||
description=_("Image list"),
|
||||
items=openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
properties={
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Image name")),
|
||||
'url': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Image URL")),
|
||||
'file_id': openapi.Schema(type=openapi.TYPE_STRING),
|
||||
}
|
||||
),
|
||||
default=[]
|
||||
),
|
||||
'document_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Document list"),
|
||||
description=_("Document list"),
|
||||
items=openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
properties={
|
||||
# 定义对象的具体属性
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Document name")),
|
||||
'url': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Document URL")),
|
||||
'file_id': openapi.Schema(type=openapi.TYPE_STRING),
|
||||
}
|
||||
),
|
||||
default=[]),
|
||||
'audio_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Audio list"),
|
||||
description=_("Audio list"),
|
||||
items=openapi.Schema(
|
||||
type=openapi.TYPE_OBJECT,
|
||||
properties={
|
||||
'name': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Audio name")),
|
||||
'url': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Audio URL")),
|
||||
'file_id': openapi.Schema(type=openapi.TYPE_STRING),
|
||||
}
|
||||
),
|
||||
default=[]),
|
||||
'runtime_node_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Runtime node id"),
|
||||
description=_("Runtime node id"),
|
||||
default=""),
|
||||
'node_data': openapi.Schema(type=openapi.TYPE_OBJECT, title=_("Node data"),
|
||||
description=_("Node data"),
|
||||
default={}),
|
||||
'chat_record_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Conversation record id"),
|
||||
description=_("Conversation record id"),
|
||||
default=""),
|
||||
'child_node': openapi.Schema(type=openapi.TYPE_STRING, title=_("Child node"),
|
||||
description=_("Child node"),
|
||||
default={}),
|
||||
|
||||
}
|
||||
)
|
||||
|
|
@ -71,26 +243,26 @@ class ChatApi(ApiMixin):
|
|||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
|
||||
description="id", default="xx"),
|
||||
'application_id': openapi.Schema(type=openapi.TYPE_STRING, title="应用id",
|
||||
description="应用id", default='应用id'),
|
||||
'abstract': openapi.Schema(type=openapi.TYPE_STRING, title="摘要",
|
||||
description="摘要", default='摘要'),
|
||||
'chat_id': openapi.Schema(type=openapi.TYPE_STRING, title="对话id",
|
||||
description="对话id", default="对话id"),
|
||||
'chat_record_count': openapi.Schema(type=openapi.TYPE_STRING, title="对话提问数量",
|
||||
description="对话提问数量",
|
||||
default="对话提问数量"),
|
||||
'mark_sum': openapi.Schema(type=openapi.TYPE_STRING, title="标记数量",
|
||||
description="标记数量", default=1),
|
||||
'star_num': openapi.Schema(type=openapi.TYPE_STRING, title="点赞数量",
|
||||
description="点赞数量", default=1),
|
||||
'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title="点踩数量",
|
||||
description="点踩数量", default=1),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
|
||||
description="修改时间",
|
||||
'application_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"),
|
||||
description=_("Application ID"), default=_('Application ID')),
|
||||
'abstract': openapi.Schema(type=openapi.TYPE_STRING, title=_("abstract"),
|
||||
description=_("abstract"), default=_('abstract')),
|
||||
'chat_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Conversation ID"),
|
||||
description=_("Conversation ID"), default=_("Conversation ID")),
|
||||
'chat_record_count': openapi.Schema(type=openapi.TYPE_STRING, title=_("Number of dialogue questions"),
|
||||
description=_("Number of dialogue questions"),
|
||||
default=0),
|
||||
'mark_sum': openapi.Schema(type=openapi.TYPE_STRING, title=_("Number of tags"),
|
||||
description=_("Number of tags"), default=1),
|
||||
'star_num': openapi.Schema(type=openapi.TYPE_STRING, title=_("Number of likes"),
|
||||
description=_("Number of likes"), default=1),
|
||||
'trample_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of clicks"),
|
||||
description=_("Number of clicks"), default=1),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Change time"),
|
||||
description=_("Change time"),
|
||||
default="1970-01-01 00:00:00"),
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
|
||||
description="创建时间",
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"),
|
||||
description=_("Creation time"),
|
||||
default="1970-01-01 00:00:00"
|
||||
)
|
||||
}
|
||||
|
|
@ -103,7 +275,7 @@ class ChatApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
|
||||
]
|
||||
|
||||
|
|
@ -126,41 +298,64 @@ class ChatApi(ApiMixin):
|
|||
required=['model_id', 'multiple_rounds_dialogue', 'dataset_setting', 'model_setting',
|
||||
'problem_optimization'],
|
||||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title="应用id",
|
||||
description="应用id,修改的时候传,创建的时候不传"),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"),
|
||||
description=_(
|
||||
"Application ID, pass when modifying, do not pass when creating")),
|
||||
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model ID"),
|
||||
description=_("Model ID")),
|
||||
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING),
|
||||
title="关联知识库Id列表", description="关联知识库Id列表"),
|
||||
'multiple_rounds_dialogue': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮会话",
|
||||
description="是否开启多轮会话"),
|
||||
title=_("List of associated knowledge base IDs"),
|
||||
description=_("List of associated knowledge base IDs")),
|
||||
'multiple_rounds_dialogue': openapi.Schema(type=openapi.TYPE_BOOLEAN,
|
||||
title=_("Do you want to initiate multiple sessions"),
|
||||
description=_(
|
||||
"Do you want to initiate multiple sessions")),
|
||||
'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
|
||||
'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
|
||||
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化",
|
||||
description="是否开启问题优化", default=True)
|
||||
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem optimization"),
|
||||
description=_("Do you want to enable problem optimization"),
|
||||
default=True)
|
||||
}
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_response_body_api():
|
||||
return openapi.Schema(
|
||||
type=openapi.TYPE_STRING,
|
||||
title=_("Conversation ID"),
|
||||
description=_("Conversation ID"),
|
||||
default="chat_id"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_request_params_api():
|
||||
return [openapi.Parameter(name='application_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='history_day',
|
||||
in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_NUMBER,
|
||||
required=True,
|
||||
description='历史天数'),
|
||||
description=_('Historical days')),
|
||||
openapi.Parameter(name='abstract', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False,
|
||||
description="摘要"),
|
||||
description=_("abstract")),
|
||||
openapi.Parameter(name='min_star', in_=openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False,
|
||||
description="最小点赞数"),
|
||||
description=_("Minimum number of likes")),
|
||||
openapi.Parameter(name='min_trample', in_=openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False,
|
||||
description="最小点踩数"),
|
||||
description=_("Minimum number of clicks")),
|
||||
openapi.Parameter(name='comparer', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False,
|
||||
description="or|and 比较器")
|
||||
description=_("or|and comparator")),
|
||||
openapi.Parameter(name='start_time', in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description=_('start time')),
|
||||
openapi.Parameter(name='end_time', in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description=_('End time')),
|
||||
]
|
||||
|
||||
|
||||
|
|
@ -171,12 +366,17 @@ class ChatRecordApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='chat_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='对话id'),
|
||||
description=_('Conversation ID')),
|
||||
openapi.Parameter(name='order_asc',
|
||||
in_=openapi.IN_QUERY,
|
||||
type=openapi.TYPE_BOOLEAN,
|
||||
required=False,
|
||||
description=_('Is it ascending order')),
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -189,34 +389,39 @@ class ChatRecordApi(ApiMixin):
|
|||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
|
||||
description="id", default="xx"),
|
||||
'chat': openapi.Schema(type=openapi.TYPE_STRING, title="会话日志id",
|
||||
description="会话日志id", default='会话日志id'),
|
||||
'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title="投票状态",
|
||||
description="投票状态", default="投票状态"),
|
||||
'dataset': openapi.Schema(type=openapi.TYPE_STRING, title="数据集id", description="数据集id",
|
||||
default="数据集id"),
|
||||
'paragraph': openapi.Schema(type=openapi.TYPE_STRING, title="段落id",
|
||||
description="段落id", default=1),
|
||||
'source_id': openapi.Schema(type=openapi.TYPE_STRING, title="资源id",
|
||||
description="资源id", default=1),
|
||||
'source_type': openapi.Schema(type=openapi.TYPE_STRING, title="资源类型",
|
||||
description="资源类型", default='xxx'),
|
||||
'message_tokens': openapi.Schema(type=openapi.TYPE_INTEGER, title="问题消耗token数量",
|
||||
description="问题消耗token数量", default=0),
|
||||
'answer_tokens': openapi.Schema(type=openapi.TYPE_INTEGER, title="答案消耗token数量",
|
||||
description="答案消耗token数量", default=0),
|
||||
'improve_paragraph_id_list': openapi.Schema(type=openapi.TYPE_STRING, title="改进标注列表",
|
||||
description="改进标注列表",
|
||||
'chat': openapi.Schema(type=openapi.TYPE_STRING, title=_("Session log id"),
|
||||
description=_("Conversation log id"), default=_('Conversation log id')),
|
||||
'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title=_("Voting Status"),
|
||||
description=_("Voting Status"), default=_("Voting Status")),
|
||||
'dataset': openapi.Schema(type=openapi.TYPE_STRING, title=_("Dataset id"), description=_("Dataset id"),
|
||||
default=_("Dataset id")),
|
||||
'paragraph': openapi.Schema(type=openapi.TYPE_STRING, title=_("Paragraph id"),
|
||||
description=_("Paragraph id"), default=1),
|
||||
'source_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Resource ID"),
|
||||
description=_("Resource ID"), default=1),
|
||||
'source_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Resource Type"),
|
||||
description=_("Resource Type"), default='xxx'),
|
||||
'message_tokens': openapi.Schema(type=openapi.TYPE_INTEGER,
|
||||
title=_("Number of tokens consumed by the question"),
|
||||
description=_("Number of tokens consumed by the question"), default=0),
|
||||
'answer_tokens': openapi.Schema(type=openapi.TYPE_INTEGER,
|
||||
title=_("The number of tokens consumed by the answer"),
|
||||
description=_("The number of tokens consumed by the answer"),
|
||||
default=0),
|
||||
'improve_paragraph_id_list': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Improved annotation list"),
|
||||
description=_("Improved annotation list"),
|
||||
default=[]),
|
||||
'index': openapi.Schema(type=openapi.TYPE_STRING, title="对应会话 对应下标",
|
||||
description="对应会话id对应下标",
|
||||
default="对应会话id对应下标"
|
||||
'index': openapi.Schema(type=openapi.TYPE_STRING,
|
||||
title=_("Corresponding session Corresponding subscript"),
|
||||
description=_("Corresponding session id corresponding subscript"),
|
||||
default=0
|
||||
),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
|
||||
description="修改时间",
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"),
|
||||
description=_("Modification time"),
|
||||
default="1970-01-01 00:00:00"),
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
|
||||
description="创建时间",
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"),
|
||||
description=_("Creation time"),
|
||||
default="1970-01-01 00:00:00"
|
||||
)
|
||||
}
|
||||
|
|
@ -230,27 +435,27 @@ class ImproveApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='chat_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='会话id'),
|
||||
description=_('Conversation ID')),
|
||||
openapi.Parameter(name='chat_record_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='会话记录id'),
|
||||
description=_('Conversation record id')),
|
||||
openapi.Parameter(name='dataset_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='知识库id'),
|
||||
description=_('Knowledge base id')),
|
||||
openapi.Parameter(name='document_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='文档id'),
|
||||
description=_('Document id')),
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -259,10 +464,10 @@ class ImproveApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['content'],
|
||||
properties={
|
||||
'title': openapi.Schema(type=openapi.TYPE_STRING, title="段落标题",
|
||||
description="段落标题"),
|
||||
'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容",
|
||||
description="段落内容")
|
||||
'title': openapi.Schema(type=openapi.TYPE_STRING, title=_("Section title"),
|
||||
description=_("Section title")),
|
||||
'content': openapi.Schema(type=openapi.TYPE_STRING, title=_("Paragraph content"),
|
||||
description=_("Paragraph content"))
|
||||
|
||||
}
|
||||
)
|
||||
|
|
@ -273,12 +478,12 @@ class ImproveApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['dataset_id', 'document_id', 'chat_ids'],
|
||||
properties={
|
||||
'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id",
|
||||
description="知识库id"),
|
||||
'document_id': openapi.Schema(type=openapi.TYPE_STRING, title="文档id",
|
||||
description="文档id"),
|
||||
'chat_ids': openapi.Schema(type=openapi.TYPE_ARRAY, title="会话id列表",
|
||||
description="会话id列表",
|
||||
'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Knowledge base id"),
|
||||
description=_("Knowledge base id")),
|
||||
'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Document id"),
|
||||
description=_("Document id")),
|
||||
'chat_ids': openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Conversation id list"),
|
||||
description=_("Conversation id list"),
|
||||
items=openapi.Schema(type=openapi.TYPE_STRING))
|
||||
|
||||
}
|
||||
|
|
@ -290,12 +495,12 @@ class ImproveApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='dataset_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='知识库id'),
|
||||
description=_('Knowledge base id')),
|
||||
|
||||
]
|
||||
|
||||
|
|
@ -307,17 +512,17 @@ class VoteApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='chat_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='会话id'),
|
||||
description=_('Conversation ID')),
|
||||
openapi.Parameter(name='chat_record_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='会话记录id')
|
||||
description=_('Conversation record id'))
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -326,8 +531,8 @@ class VoteApi(ApiMixin):
|
|||
type=openapi.TYPE_OBJECT,
|
||||
required=['vote_status'],
|
||||
properties={
|
||||
'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title="投票状态",
|
||||
description="-1:取消投票|0:赞同|1:反对"),
|
||||
'vote_status': openapi.Schema(type=openapi.TYPE_STRING, title=_("Voting Status"),
|
||||
description=_("-1: Cancel vote | 0: Agree | 1: Oppose")),
|
||||
|
||||
}
|
||||
)
|
||||
|
|
@ -340,17 +545,17 @@ class ChatRecordImproveApi(ApiMixin):
|
|||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='应用id'),
|
||||
description=_('Application ID')),
|
||||
openapi.Parameter(name='chat_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='会话id'),
|
||||
description=_('Conversation ID')),
|
||||
openapi.Parameter(name='chat_record_id',
|
||||
in_=openapi.IN_PATH,
|
||||
type=openapi.TYPE_STRING,
|
||||
required=True,
|
||||
description='会话记录id')
|
||||
description=_('Conversation record id'))
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -363,27 +568,28 @@ class ChatRecordImproveApi(ApiMixin):
|
|||
properties={
|
||||
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
|
||||
description="id", default="xx"),
|
||||
'content': openapi.Schema(type=openapi.TYPE_STRING, title="段落内容",
|
||||
description="段落内容", default='段落内容'),
|
||||
'title': openapi.Schema(type=openapi.TYPE_STRING, title="标题",
|
||||
description="标题", default="xxx的描述"),
|
||||
'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="命中数量", description="命中数量",
|
||||
'content': openapi.Schema(type=openapi.TYPE_STRING, title=_("Paragraph content"),
|
||||
description=_("Paragraph content"), default=_('Paragraph content')),
|
||||
'title': openapi.Schema(type=openapi.TYPE_STRING, title=_("title"),
|
||||
description=_("title"), default=_("Description of xxx")),
|
||||
'hit_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_("Number of hits"),
|
||||
description=_("Number of hits"),
|
||||
default=1),
|
||||
'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点赞数量",
|
||||
description="点赞数量", default=1),
|
||||
'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title="点踩数量",
|
||||
description="点踩数", default=1),
|
||||
'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库id",
|
||||
description="知识库id", default='xxx'),
|
||||
'document_id': openapi.Schema(type=openapi.TYPE_STRING, title="文档id",
|
||||
description="文档id", default='xxx'),
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用",
|
||||
description="是否可用", default=True),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间",
|
||||
description="修改时间",
|
||||
'star_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_("Number of Likes"),
|
||||
description=_("Number of Likes"), default=1),
|
||||
'trample_num': openapi.Schema(type=openapi.TYPE_INTEGER, title=_("Number of thumbs-downs"),
|
||||
description=_("Number of thumbs-downs"), default=1),
|
||||
'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Knowledge base id"),
|
||||
description=_("Knowledge base id"), default='xxx'),
|
||||
'document_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Document id"),
|
||||
description=_("Document id"), default='xxx'),
|
||||
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Availability"),
|
||||
description=_("Availability"), default=True),
|
||||
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Modification time"),
|
||||
description=_("Modification time"),
|
||||
default="1970-01-01 00:00:00"),
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间",
|
||||
description="创建时间",
|
||||
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Creation time"),
|
||||
description=_("Creation time"),
|
||||
default="1970-01-01 00:00:00"
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
(function() {
|
||||
const guideHtml=`
|
||||
<div class="maxkb-mask">
|
||||
<div class="maxkb-content"></div>
|
||||
|
|
@ -23,27 +24,27 @@ const chatButtonHtml=
|
|||
<img style="height:100%;width:100%;" src="{{float_icon}}">
|
||||
</div>`
|
||||
|
||||
|
||||
|
||||
|
||||
const getChatContainerHtml=(protocol,host,token,query)=>{
|
||||
return `<div id="maxkb-chat-container">
|
||||
<iframe id="maxkb-chat" allow="microphone" src=${protocol}://${host}/ui/chat/${token}?mode=embed${query}></iframe>
|
||||
<div class="maxkb-operate"><div class="maxkb-closeviewport maxkb-viewportnone"><svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 20 20" fill="none">
|
||||
<path d="M7.507 11.6645C7.73712 11.6645 7.94545 11.7578 8.09625 11.9086C8.24706 12.0594 8.34033 12.2677 8.34033 12.4978V16.7976C8.34033 17.0277 8.15378 17.2143 7.92366 17.2143H7.09033C6.86021 17.2143 6.67366 17.0277 6.67366 16.7976V14.5812L3.41075 17.843C3.24803 18.0057 2.98421 18.0057 2.82149 17.843L2.23224 17.2537C2.06952 17.091 2.06952 16.8272 2.23224 16.6645L5.56668 13.3311H3.19634C2.96622 13.3311 2.77967 13.1446 2.77967 12.9145V12.0811C2.77967 11.851 2.96622 11.6645 3.19634 11.6645H7.507ZM16.5991 2.1572C16.7619 1.99448 17.0257 1.99448 17.1884 2.1572L17.7777 2.74645C17.9404 2.90917 17.9404 3.17299 17.7777 3.33571L14.4432 6.66904H16.8136C17.0437 6.66904 17.2302 6.85559 17.2302 7.08571V7.91904C17.2302 8.14916 17.0437 8.33571 16.8136 8.33571H12.5029C12.2728 8.33571 12.0644 8.24243 11.9136 8.09163C11.7628 7.94082 11.6696 7.73249 11.6696 7.50237V3.20257C11.6696 2.97245 11.8561 2.7859 12.0862 2.7859H12.9196C13.1497 2.7859 13.3362 2.97245 13.3362 3.20257V5.419L16.5991 2.1572Z" fill="#646A73"/>
|
||||
<path d="M7.507 11.6645C7.73712 11.6645 7.94545 11.7578 8.09625 11.9086C8.24706 12.0594 8.34033 12.2677 8.34033 12.4978V16.7976C8.34033 17.0277 8.15378 17.2143 7.92366 17.2143H7.09033C6.86021 17.2143 6.67366 17.0277 6.67366 16.7976V14.5812L3.41075 17.843C3.24803 18.0057 2.98421 18.0057 2.82149 17.843L2.23224 17.2537C2.06952 17.091 2.06952 16.8272 2.23224 16.6645L5.56668 13.3311H3.19634C2.96622 13.3311 2.77967 13.1446 2.77967 12.9145V12.0811C2.77967 11.851 2.96622 11.6645 3.19634 11.6645H7.507ZM16.5991 2.1572C16.7619 1.99448 17.0257 1.99448 17.1884 2.1572L17.7777 2.74645C17.9404 2.90917 17.9404 3.17299 17.7777 3.33571L14.4432 6.66904H16.8136C17.0437 6.66904 17.2302 6.85559 17.2302 7.08571V7.91904C17.2302 8.14916 17.0437 8.33571 16.8136 8.33571H12.5029C12.2728 8.33571 12.0644 8.24243 11.9136 8.09163C11.7628 7.94082 11.6696 7.73249 11.6696 7.50237V3.20257C11.6696 2.97245 11.8561 2.7859 12.0862 2.7859H12.9196C13.1497 2.7859 13.3362 2.97245 13.3362 3.20257V5.419L16.5991 2.1572Z" fill="{{header_font_color}}"/>
|
||||
</svg></div>
|
||||
<div class="maxkb-openviewport">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 20 20" fill="none">
|
||||
<path d="M7.15209 11.5968C7.31481 11.4341 7.57862 11.4341 7.74134 11.5968L8.3306 12.186C8.49332 12.3487 8.49332 12.6126 8.3306 12.7753L4.99615 16.1086H7.3665C7.59662 16.1086 7.78316 16.2952 7.78316 16.5253V17.3586C7.78316 17.5887 7.59662 17.7753 7.3665 17.7753H3.05584C2.82572 17.7753 2.61738 17.682 2.46658 17.5312C2.31578 17.3804 2.2225 17.1721 2.2225 16.9419V12.6421C2.2225 12.412 2.40905 12.2255 2.63917 12.2255H3.4725C3.70262 12.2255 3.88917 12.412 3.88917 12.6421V14.8586L7.15209 11.5968ZM16.937 2.22217C17.1671 2.22217 17.3754 2.31544 17.5262 2.46625C17.677 2.61705 17.7703 2.82538 17.7703 3.0555V7.35531C17.7703 7.58543 17.5837 7.77198 17.3536 7.77198H16.5203C16.2902 7.77198 16.1036 7.58543 16.1036 7.35531V5.13888L12.8407 8.40068C12.678 8.5634 12.4142 8.5634 12.2515 8.40068L11.6622 7.81142C11.4995 7.64871 11.4995 7.38489 11.6622 7.22217L14.9966 3.88883H12.6263C12.3962 3.88883 12.2096 3.70229 12.2096 3.47217V2.63883C12.2096 2.40872 12.3962 2.22217 12.6263 2.22217H16.937Z" fill="#646A73"/>
|
||||
<path d="M7.15209 11.5968C7.31481 11.4341 7.57862 11.4341 7.74134 11.5968L8.3306 12.186C8.49332 12.3487 8.49332 12.6126 8.3306 12.7753L4.99615 16.1086H7.3665C7.59662 16.1086 7.78316 16.2952 7.78316 16.5253V17.3586C7.78316 17.5887 7.59662 17.7753 7.3665 17.7753H3.05584C2.82572 17.7753 2.61738 17.682 2.46658 17.5312C2.31578 17.3804 2.2225 17.1721 2.2225 16.9419V12.6421C2.2225 12.412 2.40905 12.2255 2.63917 12.2255H3.4725C3.70262 12.2255 3.88917 12.412 3.88917 12.6421V14.8586L7.15209 11.5968ZM16.937 2.22217C17.1671 2.22217 17.3754 2.31544 17.5262 2.46625C17.677 2.61705 17.7703 2.82538 17.7703 3.0555V7.35531C17.7703 7.58543 17.5837 7.77198 17.3536 7.77198H16.5203C16.2902 7.77198 16.1036 7.58543 16.1036 7.35531V5.13888L12.8407 8.40068C12.678 8.5634 12.4142 8.5634 12.2515 8.40068L11.6622 7.81142C11.4995 7.64871 11.4995 7.38489 11.6622 7.22217L14.9966 3.88883H12.6263C12.3962 3.88883 12.2096 3.70229 12.2096 3.47217V2.63883C12.2096 2.40872 12.3962 2.22217 12.6263 2.22217H16.937Z" fill="{{header_font_color}}"/>
|
||||
</svg></div>
|
||||
<div class="maxkb-chat-close"><svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 20 20" fill="none">
|
||||
<path d="M9.95317 8.73169L15.5511 3.13376C15.7138 2.97104 15.9776 2.97104 16.1403 3.13376L16.7296 3.72301C16.8923 3.88573 16.8923 4.14955 16.7296 4.31227L11.1317 9.9102L16.7296 15.5081C16.8923 15.6708 16.8923 15.9347 16.7296 16.0974L16.1403 16.6866C15.9776 16.8494 15.7138 16.8494 15.5511 16.6866L9.95317 11.0887L4.35524 16.6866C4.19252 16.8494 3.9287 16.8494 3.76598 16.6866L3.17673 16.0974C3.01401 15.9347 3.01401 15.6708 3.17673 15.5081L8.77465 9.9102L3.17673 4.31227C3.01401 4.14955 3.01401 3.88573 3.17673 3.72301L3.76598 3.13376C3.9287 2.97104 4.19252 2.97104 4.35524 3.13376L9.95317 8.73169Z" fill="#646A73"/>
|
||||
<path d="M9.95317 8.73169L15.5511 3.13376C15.7138 2.97104 15.9776 2.97104 16.1403 3.13376L16.7296 3.72301C16.8923 3.88573 16.8923 4.14955 16.7296 4.31227L11.1317 9.9102L16.7296 15.5081C16.8923 15.6708 16.8923 15.9347 16.7296 16.0974L16.1403 16.6866C15.9776 16.8494 15.7138 16.8494 15.5511 16.6866L9.95317 11.0887L4.35524 16.6866C4.19252 16.8494 3.9287 16.8494 3.76598 16.6866L3.17673 16.0974C3.01401 15.9347 3.01401 15.6708 3.17673 15.5081L8.77465 9.9102L3.17673 4.31227C3.01401 4.14955 3.01401 3.88573 3.17673 3.72301L3.76598 3.13376C3.9287 2.97104 4.19252 2.97104 4.35524 3.13376L9.95317 8.73169Z" fill="{{header_font_color}}"/>
|
||||
</svg>
|
||||
</div></div>
|
||||
`
|
||||
}
|
||||
/**
|
||||
* 初始化引导
|
||||
* @param {*} root
|
||||
* @param {*} root
|
||||
*/
|
||||
const initGuide=(root)=>{
|
||||
root.insertAdjacentHTML("beforeend",guideHtml)
|
||||
|
|
@ -67,6 +68,20 @@ const initChat=(root)=>{
|
|||
const chat_button_img=root.querySelector('.maxkb-chat-button > img')
|
||||
// 对话框元素
|
||||
const chat_container=root.querySelector('#maxkb-chat-container')
|
||||
// 引导层
|
||||
const mask_content = root.querySelector('.maxkb-mask > .maxkb-content')
|
||||
const mask_tips = root.querySelector('.maxkb-tips')
|
||||
chat_button_img.onload=(event)=>{
|
||||
if(mask_content){
|
||||
mask_content.style.width = chat_button_img.width + 'px'
|
||||
mask_content.style.height = chat_button_img.height + 'px'
|
||||
if('{{x_type}}'=='left'){
|
||||
mask_tips.style.marginLeft = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px'
|
||||
}else{
|
||||
mask_tips.style.marginRight = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const viewport=root.querySelector('.maxkb-openviewport')
|
||||
const closeviewport=root.querySelector('.maxkb-closeviewport')
|
||||
|
|
@ -90,17 +105,16 @@ const initChat=(root)=>{
|
|||
}
|
||||
const drag=(e)=>{
|
||||
if (['touchmove','touchstart'].includes(e.type)) {
|
||||
chat_button.style.top=(e.touches[0].clientY-25)+'px'
|
||||
chat_button.style.left=(e.touches[0].clientX-25)+'px'
|
||||
chat_button.style.top=(e.touches[0].clientY-chat_button_img.naturalHeight/2)+'px'
|
||||
chat_button.style.left=(e.touches[0].clientX-chat_button_img.naturalWidth/2)+'px'
|
||||
} else {
|
||||
chat_button.style.top=(e.y-25)+'px'
|
||||
chat_button.style.left=(e.x-25)+'px'
|
||||
chat_button.style.top=(e.y-chat_button_img.naturalHeight/2)+'px'
|
||||
chat_button.style.left=(e.x-chat_button_img.naturalWidth/2)+'px'
|
||||
}
|
||||
chat_button.style.width =chat_button_img.naturalWidth+'px'
|
||||
chat_button.style.height =chat_button_img.naturalHeight+'px'
|
||||
}
|
||||
if({{is_draggable}}){
|
||||
console.dir(chat_button_img)
|
||||
chat_button.addEventListener("drag",drag)
|
||||
chat_button.addEventListener("dragover",(e)=>{
|
||||
e.preventDefault()
|
||||
|
|
@ -118,8 +132,9 @@ const initChat=(root)=>{
|
|||
function initMaxkb(){
|
||||
const maxkb=document.createElement('div')
|
||||
const root=document.createElement('div')
|
||||
root.id="maxkb"
|
||||
initMaxkbStyle(maxkb)
|
||||
const maxkbId = 'maxkb-'+'{{max_kb_id}}'
|
||||
root.id=maxkbId
|
||||
initMaxkbStyle(maxkb, maxkbId)
|
||||
maxkb.appendChild(root)
|
||||
document.body.appendChild(maxkb)
|
||||
const maxkbMaskTip=localStorage.getItem('maxkbMaskTip')
|
||||
|
|
@ -129,9 +144,9 @@ function initMaxkb(){
|
|||
initChat(root)
|
||||
}
|
||||
|
||||
|
||||
|
||||
// 初始化全局样式
|
||||
function initMaxkbStyle(root){
|
||||
function initMaxkbStyle(root, maxkbId){
|
||||
style=document.createElement('style')
|
||||
style.type='text/css'
|
||||
style.innerText= `
|
||||
|
|
@ -155,7 +170,7 @@ function initMaxkbStyle(root){
|
|||
|
||||
#maxkb .maxkb-mask {
|
||||
position: fixed;
|
||||
z-index: 999;
|
||||
z-index: 10001;
|
||||
background-color: transparent;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
|
|
@ -242,8 +257,6 @@ function initMaxkbStyle(root){
|
|||
{{x_type}}: {{x_value}}px;
|
||||
{{y_type}}: {{y_value}}px;
|
||||
cursor: pointer;
|
||||
max-height:500px;
|
||||
max-width:500px;
|
||||
z-index:10000;
|
||||
}
|
||||
#maxkb #maxkb-chat-container{
|
||||
|
|
@ -261,6 +274,7 @@ function initMaxkbStyle(root){
|
|||
position: absolute;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
line-height: 18px;
|
||||
}
|
||||
#maxkb #maxkb-chat-container .maxkb-operate .maxkb-chat-close{
|
||||
margin-left:15px;
|
||||
|
|
@ -294,6 +308,7 @@ function initMaxkbStyle(root){
|
|||
height: 600px;
|
||||
}
|
||||
}`
|
||||
.replaceAll('#maxkb ',`#${maxkbId} `)
|
||||
root.appendChild(style)
|
||||
}
|
||||
|
||||
|
|
@ -306,4 +321,5 @@ function embedChatbot() {
|
|||
initMaxkb()
|
||||
} else console.error('invalid parameter')
|
||||
}
|
||||
window.onload = embedChatbot
|
||||
window.addEventListener('load',embedChatbot)
|
||||
})();
|
||||
|
|
@ -5,11 +5,14 @@ from . import views
|
|||
app_name = "application"
|
||||
urlpatterns = [
|
||||
path('application', views.Application.as_view(), name="application"),
|
||||
path('application/import', views.Application.Import.as_view()),
|
||||
path('application/profile', views.Application.Profile.as_view(), name='application/profile'),
|
||||
path('application/embed', views.Application.Embed.as_view()),
|
||||
path('application/authentication', views.Application.Authentication.as_view()),
|
||||
path('application/mcp_servers', views.Application.McpServers.as_view()),
|
||||
path('application/<str:application_id>/publish', views.Application.Publish.as_view()),
|
||||
path('application/<str:application_id>/edit_icon', views.Application.EditIcon.as_view()),
|
||||
path('application/<str:application_id>/export', views.Application.Export.as_view()),
|
||||
path('application/<str:application_id>/statistics/customer_count',
|
||||
views.ApplicationStatistics.CustomerCount.as_view()),
|
||||
path('application/<str:application_id>/statistics/customer_count_trend',
|
||||
|
|
@ -23,6 +26,8 @@ urlpatterns = [
|
|||
path('application/<str:application_id>/function_lib/<str:function_lib_id>',
|
||||
views.Application.FunctionLib.Operate.as_view()),
|
||||
path('application/<str:application_id>/application', views.Application.Application.as_view()),
|
||||
path('application/<str:application_id>/application/<str:app_id>',
|
||||
views.Application.Application.Operate.as_view()),
|
||||
path('application/<str:application_id>/model_params_form/<str:model_id>',
|
||||
views.Application.ModelParamsForm.as_view()),
|
||||
path('application/<str:application_id>/hit_test', views.Application.HitTest.as_view()),
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue