diff --git a/.env b/.env index 21aa0d99..85f578ac 100644 --- a/.env +++ b/.env @@ -98,3 +98,5 @@ CLICKHOUSE_DATABASE=staging_ariadne MAXMIND_LICENSE_KEY=none PAYOUTS_BUDGET=100 + +FLAME_ANVIL_URL=none \ No newline at end of file diff --git a/.sqlx/query-00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d.json b/.sqlx/query-00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d.json new file mode 100644 index 00000000..3e046f32 --- /dev/null +++ b/.sqlx/query-00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "version_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "loaders", + "type_info": "VarcharArray" + }, + { + "ordinal": 2, + "name": "project_types", + "type_info": "VarcharArray" + }, + { + "ordinal": 3, + "name": "games", + "type_info": "VarcharArray" + }, + { + "ordinal": 4, + "name": "loader_fields", + "type_info": "Int4Array" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + null, + null, + null, + null + ] + }, + "hash": "00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d" +} diff --git a/.sqlx/query-603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77.json b/.sqlx/query-04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433.json similarity index 71% rename from .sqlx/query-603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77.json rename to .sqlx/query-04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433.json index fbc6462a..6c62c2b6 100644 --- a/.sqlx/query-603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77.json +++ b/.sqlx/query-04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values\n WHERE enum_id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ", + "query": "\n SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values\n WHERE enum_id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ false ] }, - "hash": "603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77" + "hash": "04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433" } diff --git a/.sqlx/query-070174adf972b808aca7519168719e6c7b762bfbcc09d8ab2624b00113f71e77.json b/.sqlx/query-070174adf972b808aca7519168719e6c7b762bfbcc09d8ab2624b00113f71e77.json new file mode 100644 index 00000000..e9d49592 --- /dev/null +++ 
b/.sqlx/query-070174adf972b808aca7519168719e6c7b762bfbcc09d8ab2624b00113f71e77.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT v.id version_id, v.mod_id mod_id\n FROM versions v\n INNER JOIN version_fields vf ON vf.field_id = 3 AND v.id = vf.version_id\n INNER JOIN loader_field_enum_values lfev ON vf.enum_value = lfev.id AND (cardinality($2::varchar[]) = 0 OR lfev.value = ANY($2::varchar[]))\n INNER JOIN loaders_versions lv ON lv.version_id = v.id\n INNER JOIN loaders l on lv.loader_id = l.id AND (cardinality($3::varchar[]) = 0 OR l.loader = ANY($3::varchar[]))\n WHERE v.mod_id = ANY($1) AND (cardinality($4::varchar[]) = 0 OR v.version_type = ANY($4))\n ORDER BY v.date_published ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "version_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "mod_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8Array", + "VarcharArray", + "VarcharArray", + "VarcharArray" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "070174adf972b808aca7519168719e6c7b762bfbcc09d8ab2624b00113f71e77" +} diff --git a/.sqlx/query-0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8.json b/.sqlx/query-0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8.json new file mode 100644 index 00000000..33e8733d --- /dev/null +++ b/.sqlx/query-0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "mod_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "loaders", + "type_info": "VarcharArray" + }, + { + "ordinal": 2, + "name": "project_types", + "type_info": "VarcharArray" + }, + { + "ordinal": 3, + "name": "games", + "type_info": "VarcharArray" + }, + { + "ordinal": 4, + "name": "loader_fields", + "type_info": "Int4Array" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + null, + null, + null, + null + ] + }, + "hash": "0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8" +} diff --git a/.sqlx/query-10279b5a8383ba8e286f1bfb9a486e3f8b362c46cfc2647c90a83a10e5329569.json b/.sqlx/query-10279b5a8383ba8e286f1bfb9a486e3f8b362c46cfc2647c90a83a10e5329569.json deleted file mode 100644 index 14b87d63..00000000 --- a/.sqlx/query-10279b5a8383ba8e286f1bfb9a486e3f8b362c46cfc2647c90a83a10e5329569.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE threads\n SET show_in_mod_inbox = FALSE\n WHERE id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] 
- }, - "hash": "10279b5a8383ba8e286f1bfb9a486e3f8b362c46cfc2647c90a83a10e5329569" -} diff --git a/.sqlx/query-2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75.json b/.sqlx/query-10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51.json similarity index 71% rename from .sqlx/query-2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75.json rename to .sqlx/query-10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51.json index 12c65eea..4caa1739 100644 --- a/.sqlx/query-2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75.json +++ b/.sqlx/query-10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT version_id, field_id, int_value, enum_value, string_value\n FROM version_fields\n WHERE version_id = ANY($1)\n ", + "query": "\n SELECT version_id, field_id, int_value, enum_value, string_value\n FROM version_fields\n WHERE version_id = ANY($1)\n ", "describe": { "columns": [ { @@ -42,5 +42,5 @@ true ] }, - "hash": "2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75" + "hash": "10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51" } diff --git a/.sqlx/query-93c0fdb2bdc9c57602671d50108957654ede51e944944d4af59fe1ba1f6a336e.json b/.sqlx/query-21c44c435bf9a6c138d40cd40d70ccecfd09d877e84f3fbe5cd190dd69d3b7e1.json similarity index 52% rename from .sqlx/query-93c0fdb2bdc9c57602671d50108957654ede51e944944d4af59fe1ba1f6a336e.json rename to .sqlx/query-21c44c435bf9a6c138d40cd40d70ccecfd09d877e84f3fbe5cd190dd69d3b7e1.json index f5bb3982..a706ac67 100644 --- a/.sqlx/query-93c0fdb2bdc9c57602671d50108957654ede51e944944d4af59fe1ba1f6a336e.json +++ b/.sqlx/query-21c44c435bf9a6c138d40cd40d70ccecfd09d877e84f3fbe5cd190dd69d3b7e1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT t.id, t.thread_type, t.mod_id, t.report_id, t.show_in_mod_inbox,\n ARRAY_AGG(DISTINCT tm.user_id) filter (where tm.user_id is not null) members,\n JSONB_AGG(DISTINCT jsonb_build_object('id', tmsg.id, 'author_id', tmsg.author_id, 'thread_id', tmsg.thread_id, 'body', tmsg.body, 'created', tmsg.created)) filter (where tmsg.id is not null) messages\n FROM threads t\n LEFT OUTER JOIN threads_messages tmsg ON tmsg.thread_id = t.id\n LEFT OUTER JOIN threads_members tm ON tm.thread_id = t.id\n WHERE t.id = ANY($1)\n GROUP BY t.id\n ", + "query": "\n SELECT t.id, t.thread_type, t.mod_id, t.report_id,\n ARRAY_AGG(DISTINCT tm.user_id) filter (where tm.user_id is not null) members,\n JSONB_AGG(DISTINCT jsonb_build_object('id', tmsg.id, 'author_id', tmsg.author_id, 'thread_id', tmsg.thread_id, 'body', tmsg.body, 'created', tmsg.created, 'hide_identity', tmsg.hide_identity)) filter (where tmsg.id is not null) messages\n FROM threads t\n LEFT OUTER JOIN threads_messages tmsg ON tmsg.thread_id = t.id\n LEFT OUTER JOIN threads_members tm ON tm.thread_id = t.id\n WHERE t.id = ANY($1)\n GROUP BY t.id\n ", "describe": { "columns": [ { @@ -25,16 +25,11 @@ }, { "ordinal": 4, - "name": "show_in_mod_inbox", - "type_info": "Bool" - }, - { - "ordinal": 5, "name": "members", "type_info": "Int8Array" }, { - "ordinal": 6, + "ordinal": 5, "name": "messages", "type_info": "Jsonb" } @@ -49,10 +44,9 @@ false, true, true, - false, null, null ] }, - "hash": "93c0fdb2bdc9c57602671d50108957654ede51e944944d4af59fe1ba1f6a336e" + "hash": "21c44c435bf9a6c138d40cd40d70ccecfd09d877e84f3fbe5cd190dd69d3b7e1" } diff --git a/.sqlx/query-4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849.json 
b/.sqlx/query-28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163.json similarity index 71% rename from .sqlx/query-4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849.json rename to .sqlx/query-28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163.json index b9780b84..a901da94 100644 --- a/.sqlx/query-4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849.json +++ b/.sqlx/query-28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color\n FROM organizations o\n WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)\n GROUP BY o.id;\n ", + "query": "\n SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color\n FROM organizations o\n WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)\n GROUP BY o.id;\n ", "describe": { "columns": [ { @@ -55,5 +55,5 @@ true ] }, - "hash": "4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849" + "hash": "28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163" } diff --git a/.sqlx/query-3151ef71738a1f0d097aa14967d7b9eb1f24d4de1f81b80c4bd186427edc1399.json b/.sqlx/query-3151ef71738a1f0d097aa14967d7b9eb1f24d4de1f81b80c4bd186427edc1399.json new file mode 100644 index 00000000..057d8602 --- /dev/null +++ b/.sqlx/query-3151ef71738a1f0d097aa14967d7b9eb1f24d4de1f81b80c4bd186427edc1399.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT mel.id, mel.flame_project_id, mel.status status\n FROM moderation_external_licenses mel\n WHERE mel.flame_project_id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "flame_project_id", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "status", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int4Array" + ] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "3151ef71738a1f0d097aa14967d7b9eb1f24d4de1f81b80c4bd186427edc1399" +} diff --git a/.sqlx/query-8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301.json b/.sqlx/query-32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6.json similarity index 72% rename from .sqlx/query-8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301.json rename to .sqlx/query-32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6.json index 5489c2b5..5fc3bd90 100644 --- a/.sqlx/query-8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301.json +++ b/.sqlx/query-32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1)\n ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC;\n ", + "query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1);\n ", "describe": { 
"columns": [ { @@ -90,5 +90,5 @@ true ] }, - "hash": "8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301" + "hash": "32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6" } diff --git a/.sqlx/query-6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538.json b/.sqlx/query-34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff.json similarity index 63% rename from .sqlx/query-6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538.json rename to .sqlx/query-34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff.json index 1b275596..c1c37d27 100644 --- a/.sqlx/query-6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538.json +++ b/.sqlx/query-34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash\n FROM hashes\n WHERE file_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash\n FROM hashes\n WHERE file_id = ANY($1)\n ", "describe": { "columns": [ { @@ -30,5 +30,5 @@ null ] }, - "hash": "6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538" + "hash": "34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff" } diff --git a/.sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json b/.sqlx/query-3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4.json similarity index 61% rename from .sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json rename to .sqlx/query-3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4.json index 6d206d58..b1f9dab6 100644 --- a/.sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json +++ b/.sqlx/query-3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,\n JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes\n FROM files f\n INNER JOIN versions v on v.id = f.version_id\n INNER JOIN hashes h on h.file_id = f.id\n WHERE h.algorithm = $1 AND h.hash = ANY($2)\n GROUP BY f.id, v.mod_id, v.date_published\n ORDER BY v.date_published\n ", + "query": "\n SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,\n JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes\n FROM files f\n INNER JOIN versions v on v.id = f.version_id\n INNER JOIN hashes h on h.file_id = f.id\n WHERE h.algorithm = $1 AND h.hash = ANY($2)\n GROUP BY f.id, v.mod_id, v.date_published\n ORDER BY v.date_published\n ", "describe": { "columns": [ { @@ -67,5 +67,5 @@ null ] }, - "hash": "0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816" + "hash": "3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4" } diff --git a/.sqlx/query-3c875a8a1c03432f258040c436e19dbab6e78bd1789dc70f445578c779c7b995.json b/.sqlx/query-3c875a8a1c03432f258040c436e19dbab6e78bd1789dc70f445578c779c7b995.json new file mode 100644 index 00000000..5d919103 --- /dev/null +++ b/.sqlx/query-3c875a8a1c03432f258040c436e19dbab6e78bd1789dc70f445578c779c7b995.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT mel.id, mel.flame_project_id, mel.status status\n FROM 
moderation_external_licenses mel\n WHERE mel.flame_project_id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "flame_project_id", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "status", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int4Array" + ] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "3c875a8a1c03432f258040c436e19dbab6e78bd1789dc70f445578c779c7b995" +} diff --git a/.sqlx/query-8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47.json b/.sqlx/query-4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc.json similarity index 58% rename from .sqlx/query-8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47.json rename to .sqlx/query-4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc.json index 082ad636..81134180 100644 --- a/.sqlx/query-8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47.json +++ b/.sqlx/query-4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation\n FROM mods_links ml\n INNER JOIN mods m ON ml.joining_mod_id = m.id \n INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", + "query": "\n SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation\n FROM mods_links ml\n INNER JOIN mods m ON ml.joining_mod_id = m.id\n INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ false ] }, - "hash": "8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47" + "hash": "4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc" } diff --git a/.sqlx/query-99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0.json b/.sqlx/query-43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af.json similarity index 69% rename from .sqlx/query-99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0.json rename to .sqlx/query-43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af.json index 5d70c257..c66e8601 100644 --- a/.sqlx/query-99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0.json +++ b/.sqlx/query-43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1) \n ORDER BY enum_id, ordering, created ASC\n ", + "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ true ] }, - "hash": "99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0" + "hash": "43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af" } diff --git a/.sqlx/query-49813a96f007216072d69468aae705d73d5b85dcdd64a22060009b12d947ed5a.json b/.sqlx/query-49813a96f007216072d69468aae705d73d5b85dcdd64a22060009b12d947ed5a.json deleted file mode 100644 index 32840287..00000000 --- a/.sqlx/query-49813a96f007216072d69468aae705d73d5b85dcdd64a22060009b12d947ed5a.json +++ 
/dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE threads\n SET show_in_mod_inbox = $1\n WHERE id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Bool", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "49813a96f007216072d69468aae705d73d5b85dcdd64a22060009b12d947ed5a" -} diff --git a/.sqlx/query-4cb9fe3dbb2cbfe30a49487f896fb7890f726af2ff11da53f450a88c3dc5fc64.json b/.sqlx/query-4cb9fe3dbb2cbfe30a49487f896fb7890f726af2ff11da53f450a88c3dc5fc64.json new file mode 100644 index 00000000..0397073b --- /dev/null +++ b/.sqlx/query-4cb9fe3dbb2cbfe30a49487f896fb7890f726af2ff11da53f450a88c3dc5fc64.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT encode(mef.sha1, 'escape') sha1, mel.status status\n FROM moderation_external_files mef\n INNER JOIN moderation_external_licenses mel ON mef.external_license_id = mel.id\n WHERE mef.sha1 = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sha1", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "ByteaArray" + ] + }, + "nullable": [ + null, + false + ] + }, + "hash": "4cb9fe3dbb2cbfe30a49487f896fb7890f726af2ff11da53f450a88c3dc5fc64" +} diff --git a/.sqlx/query-f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0.json b/.sqlx/query-4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4.json similarity index 66% rename from .sqlx/query-f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0.json rename to .sqlx/query-4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4.json index c1b79a18..5c5d3861 100644 --- a/.sqlx/query-f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0.json +++ b/.sqlx/query-4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT c.id id, c.name name, c.description description,\n c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,\n c.updated updated, c.status status,\n ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods\n FROM collections c\n LEFT JOIN collections_mods cm ON cm.collection_id = c.id\n WHERE c.id = ANY($1)\n GROUP BY c.id;\n ", + "query": "\n SELECT c.id id, c.name name, c.description description,\n c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,\n c.updated updated, c.status status,\n ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods\n FROM collections c\n LEFT JOIN collections_mods cm ON cm.collection_id = c.id\n WHERE c.id = ANY($1)\n GROUP BY c.id;\n ", "describe": { "columns": [ { @@ -72,5 +72,5 @@ null ] }, - "hash": "f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0" + "hash": "4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4" } diff --git a/.sqlx/query-520b6b75e79245e9ec19dbe5c30f041d8081eb317a21b122c0d61d7b13f58072.json b/.sqlx/query-520b6b75e79245e9ec19dbe5c30f041d8081eb317a21b122c0d61d7b13f58072.json new file mode 100644 index 00000000..893e3ac9 --- /dev/null +++ b/.sqlx/query-520b6b75e79245e9ec19dbe5c30f041d8081eb317a21b122c0d61d7b13f58072.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(SELECT 1 FROM notifications WHERE id = ANY($1))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + null + ] + }, + "hash": 
"520b6b75e79245e9ec19dbe5c30f041d8081eb317a21b122c0d61d7b13f58072" +} diff --git a/.sqlx/query-b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64.json b/.sqlx/query-623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3.json similarity index 63% rename from .sqlx/query-b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64.json rename to .sqlx/query-623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3.json index 9c8ffbaf..6ad1c4b9 100644 --- a/.sqlx/query-b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64.json +++ b/.sqlx/query-623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type\n FROM dependencies d\n WHERE dependent_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type\n FROM dependencies d\n WHERE dependent_id = ANY($1)\n ", "describe": { "columns": [ { @@ -42,5 +42,5 @@ false ] }, - "hash": "b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64" + "hash": "623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3" } diff --git a/.sqlx/query-5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a.json b/.sqlx/query-64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643.json similarity index 78% rename from .sqlx/query-5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a.json rename to .sqlx/query-64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643.json index e1d35b11..e24329c3 100644 --- a/.sqlx/query-5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a.json +++ b/.sqlx/query-64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id\n FROM uploaded_images\n WHERE id = ANY($1)\n GROUP BY id;\n ", + "query": "\n SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id\n FROM uploaded_images\n WHERE id = ANY($1)\n GROUP BY id;\n ", "describe": { "columns": [ { @@ -72,5 +72,5 @@ true ] }, - "hash": "5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a" + "hash": "64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643" } diff --git a/.sqlx/query-6e4ff5010b19890e26867611a243a308fb32f7439a18c83d1e16d3e537a43e7d.json b/.sqlx/query-6e4ff5010b19890e26867611a243a308fb32f7439a18c83d1e16d3e537a43e7d.json new file mode 100644 index 00000000..feafe67d --- /dev/null +++ b/.sqlx/query-6e4ff5010b19890e26867611a243a308fb32f7439a18c83d1e16d3e537a43e7d.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT encode(mef.sha1, 'escape') sha1, mel.status status\n FROM moderation_external_files mef\n INNER JOIN moderation_external_licenses mel ON mef.external_license_id = mel.id\n WHERE mef.sha1 = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sha1", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "ByteaArray" + ] + }, + "nullable": [ + null, + false + ] + }, + "hash": 
"6e4ff5010b19890e26867611a243a308fb32f7439a18c83d1e16d3e537a43e7d" +} diff --git a/.sqlx/query-21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64.json b/.sqlx/query-6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842.json similarity index 52% rename from .sqlx/query-21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64.json rename to .sqlx/query-6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842.json index aff58048..c7ccefa7 100644 --- a/.sqlx/query-21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64.json +++ b/.sqlx/query-6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id\n FROM pats\n WHERE user_id = $1\n ORDER BY created DESC\n ", + "query": "\n SELECT id\n FROM pats\n WHERE user_id = $1\n ORDER BY created DESC\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64" + "hash": "6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842" } diff --git a/.sqlx/query-c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514.json b/.sqlx/query-74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84.json similarity index 79% rename from .sqlx/query-c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514.json rename to .sqlx/query-74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84.json index b02376be..5c868155 100644 --- a/.sqlx/query-c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514.json +++ b/.sqlx/query-74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform,\n city, country, ip, user_agent\n FROM sessions\n WHERE id = ANY($1) OR session = ANY($2)\n ORDER BY created DESC\n ", + "query": "\n SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform,\n city, country, ip, user_agent\n FROM sessions\n WHERE id = ANY($1) OR session = ANY($2)\n ORDER BY created DESC\n ", "describe": { "columns": [ { @@ -91,5 +91,5 @@ false ] }, - "hash": "c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514" + "hash": "74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84" } diff --git a/.sqlx/query-7ab21e7613dd88e97cf602e76bff62170c13ceef8104a4ce4cb2d101f8ce4f48.json b/.sqlx/query-7ab21e7613dd88e97cf602e76bff62170c13ceef8104a4ce4cb2d101f8ce4f48.json deleted file mode 100644 index 5fb8d0de..00000000 --- a/.sqlx/query-7ab21e7613dd88e97cf602e76bff62170c13ceef8104a4ce4cb2d101f8ce4f48.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE users\n SET balance = balance + $1\n WHERE id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Numeric", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "7ab21e7613dd88e97cf602e76bff62170c13ceef8104a4ce4cb2d101f8ce4f48" -} diff --git a/.sqlx/query-1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec.json b/.sqlx/query-7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab.json similarity index 59% rename from .sqlx/query-1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec.json rename to .sqlx/query-7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab.json index fdfa60d6..b02c6c74 100644 --- a/.sqlx/query-1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec.json 
+++ b/.sqlx/query-7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id, v.id as id, date_published\n FROM mods m\n INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3)\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", + "query": "\n SELECT DISTINCT mod_id, v.id as id, date_published\n FROM mods m\n INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3)\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", "describe": { "columns": [ { @@ -32,5 +32,5 @@ false ] }, - "hash": "1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec" + "hash": "7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab" } diff --git a/.sqlx/query-ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c.json b/.sqlx/query-7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a.json similarity index 68% rename from .sqlx/query-ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c.json rename to .sqlx/query-7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a.json index 6f4550b9..da471b1d 100644 --- a/.sqlx/query-ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c.json +++ b/.sqlx/query-7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value\n FROM versions v\n INNER JOIN version_fields vf ON v.id = vf.version_id\n WHERE v.id = ANY($1)\n ", + "query": "\n SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value\n FROM versions v\n INNER JOIN version_fields vf ON v.id = vf.version_id\n WHERE v.id = ANY($1)\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ true ] }, - "hash": "ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c" + "hash": "7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a" } diff --git a/.sqlx/query-5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478.json b/.sqlx/query-887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c.json similarity index 75% rename from .sqlx/query-5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478.json rename to .sqlx/query-887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c.json index c869cb7b..abe6d421 100644 --- a/.sqlx/query-5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478.json +++ b/.sqlx/query-887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional\n FROM loader_fields lf\n WHERE id = ANY($1) \n ", + "query": "\n SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional\n FROM loader_fields lf\n WHERE id = ANY($1)\n ", "describe": { "columns": [ { @@ -54,5 +54,5 @@ false ] }, - "hash": "5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478" + "hash": "887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c" } diff --git a/.sqlx/query-8f5e2a570cf35b2d158182bac37fd40bcec277bbdeddaece5efaa88600048a70.json b/.sqlx/query-8f5e2a570cf35b2d158182bac37fd40bcec277bbdeddaece5efaa88600048a70.json deleted file mode 100644 index 30713236..00000000 --- a/.sqlx/query-8f5e2a570cf35b2d158182bac37fd40bcec277bbdeddaece5efaa88600048a70.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE threads\n SET show_in_mod_inbox = 
FALSE\n WHERE id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "8f5e2a570cf35b2d158182bac37fd40bcec277bbdeddaece5efaa88600048a70" -} diff --git a/.sqlx/query-2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d.json b/.sqlx/query-92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b.json similarity index 68% rename from .sqlx/query-2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d.json rename to .sqlx/query-92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b.json index 5d9e7c19..dc6e4a41 100644 --- a/.sqlx/query-2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d.json +++ b/.sqlx/query-92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,\n m.icon_url icon_url, m.description description, m.published published,\n m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status,\n m.license_url license_url,\n m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body,\n m.webhook_sent, m.color,\n t.id thread_id, m.monetization_status monetization_status,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories\n FROM mods m \n INNER JOIN threads t ON t.mod_id = m.id\n LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id\n LEFT JOIN categories c ON mc.joining_category_id = c.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n GROUP BY t.id, m.id;\n ", + "query": "\n SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,\n m.icon_url icon_url, m.description description, m.published published,\n m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status,\n m.license_url license_url,\n m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body,\n m.webhook_sent, m.color,\n t.id thread_id, m.monetization_status monetization_status,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories\n FROM mods m\n INNER JOIN threads t ON t.mod_id = m.id\n LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id\n LEFT JOIN categories c ON mc.joining_category_id = c.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n GROUP BY t.id, m.id;\n ", "describe": { "columns": [ { @@ -169,5 +169,5 @@ null ] }, - "hash": "2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d" + "hash": "92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b" } diff --git a/.sqlx/query-95e17b2512494ffcbfe6278b87aa273edc5729633aeaa87f6239667d2f861e68.json b/.sqlx/query-95e17b2512494ffcbfe6278b87aa273edc5729633aeaa87f6239667d2f861e68.json new file mode 100644 index 00000000..063c2e0e --- /dev/null +++ b/.sqlx/query-95e17b2512494ffcbfe6278b87aa273edc5729633aeaa87f6239667d2f861e68.json @@ -0,0 +1,14 @@ +{ + "db_name": 
"PostgreSQL", + "query": "\n UPDATE mods\n SET status = 'rejected'\n WHERE id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "95e17b2512494ffcbfe6278b87aa273edc5729633aeaa87f6239667d2f861e68" +} diff --git a/.sqlx/query-9c8f3f9503b5bb52e05bbc8a8eee7f640ab7d6b04a59ec111ce8b23e886911de.json b/.sqlx/query-9c8f3f9503b5bb52e05bbc8a8eee7f640ab7d6b04a59ec111ce8b23e886911de.json deleted file mode 100644 index 77c8db51..00000000 --- a/.sqlx/query-9c8f3f9503b5bb52e05bbc8a8eee7f640ab7d6b04a59ec111ce8b23e886911de.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM dependencies WHERE dependent_id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "9c8f3f9503b5bb52e05bbc8a8eee7f640ab7d6b04a59ec111ce8b23e886911de" -} diff --git a/.sqlx/query-9d46594c3dda50dc84defee87fa98210989dd59b06941a5e71b6661f059c9692.json b/.sqlx/query-9d46594c3dda50dc84defee87fa98210989dd59b06941a5e71b6661f059c9692.json new file mode 100644 index 00000000..089981a7 --- /dev/null +++ b/.sqlx/query-9d46594c3dda50dc84defee87fa98210989dd59b06941a5e71b6661f059c9692.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO threads_messages (\n id, author_id, body, thread_id, hide_identity\n )\n VALUES (\n $1, $2, $3, $4, $5\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Jsonb", + "Int8", + "Bool" + ] + }, + "nullable": [] + }, + "hash": "9d46594c3dda50dc84defee87fa98210989dd59b06941a5e71b6661f059c9692" +} diff --git a/.sqlx/query-e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d.json b/.sqlx/query-a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517.json similarity index 74% rename from .sqlx/query-e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d.json rename to .sqlx/query-a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517.json index 384c572e..165e3c68 100644 --- a/.sqlx/query-e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d.json +++ b/.sqlx/query-a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, name, access_token, scopes, user_id, created, expires, last_used\n FROM pats\n WHERE id = ANY($1) OR access_token = ANY($2)\n ORDER BY created DESC\n ", + "query": "\n SELECT id, name, access_token, scopes, user_id, created, expires, last_used\n FROM pats\n WHERE id = ANY($1) OR access_token = ANY($2)\n ORDER BY created DESC\n ", "describe": { "columns": [ { @@ -61,5 +61,5 @@ true ] }, - "hash": "e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d" + "hash": "a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517" } diff --git a/.sqlx/query-a40e4075ba1bff5b6fde104ed1557ad8d4a75d7d90d481decd222f31685c4981.json b/.sqlx/query-a40e4075ba1bff5b6fde104ed1557ad8d4a75d7d90d481decd222f31685c4981.json new file mode 100644 index 00000000..dd7086e8 --- /dev/null +++ b/.sqlx/query-a40e4075ba1bff5b6fde104ed1557ad8d4a75d7d90d481decd222f31685c4981.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM dependencies WHERE dependent_id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a40e4075ba1bff5b6fde104ed1557ad8d4a75d7d90d481decd222f31685c4981" +} diff --git 
a/.sqlx/query-5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0.json b/.sqlx/query-a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785.json similarity index 81% rename from .sqlx/query-5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0.json rename to .sqlx/query-a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785.json index 2932ef87..fca3ad56 100644 --- a/.sqlx/query-5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0.json +++ b/.sqlx/query-a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, name, email,\n avatar_url, username, bio,\n created, role, badges,\n balance,\n github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id,\n email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email,\n venmo_handle\n FROM users\n WHERE id = ANY($1) OR LOWER(username) = ANY($2)\n ", + "query": "\n SELECT id, name, email,\n avatar_url, username, bio,\n created, role, badges,\n balance,\n github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id,\n email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email,\n venmo_handle\n FROM users\n WHERE id = ANY($1) OR LOWER(username) = ANY($2)\n ", "describe": { "columns": [ { @@ -151,5 +151,5 @@ true ] }, - "hash": "5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0" + "hash": "a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785" } diff --git a/.sqlx/query-c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345.json b/.sqlx/query-a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b.json similarity index 74% rename from .sqlx/query-c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345.json rename to .sqlx/query-a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b.json index dd76374d..1b838c4a 100644 --- a/.sqlx/query-c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345.json +++ b/.sqlx/query-a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,\n accepted, payouts_split, \n ordering, user_id\n FROM team_members\n WHERE team_id = ANY($1)\n ORDER BY team_id, ordering;\n ", + "query": "\n SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,\n accepted, payouts_split,\n ordering, user_id\n FROM team_members\n WHERE team_id = ANY($1)\n ORDER BY team_id, ordering;\n ", "describe": { "columns": [ { @@ -72,5 +72,5 @@ false ] }, - "hash": "c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345" + "hash": "a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b" } diff --git a/.sqlx/query-b768d9db6c785d6a701324ea746794d33e94121403163a774b6ef775640fd3d3.json b/.sqlx/query-a8bfce13de871daf0bb1cf73b4c5ded611ff58d94461404182942210492e8010.json similarity index 76% rename from .sqlx/query-b768d9db6c785d6a701324ea746794d33e94121403163a774b6ef775640fd3d3.json rename to .sqlx/query-a8bfce13de871daf0bb1cf73b4c5ded611ff58d94461404182942210492e8010.json index 7d789042..f762fb0e 100644 --- a/.sqlx/query-b768d9db6c785d6a701324ea746794d33e94121403163a774b6ef775640fd3d3.json +++ b/.sqlx/query-a8bfce13de871daf0bb1cf73b4c5ded611ff58d94461404182942210492e8010.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT m.id id, tm.user_id user_id, tm.payouts_split 
payouts_split\n FROM mods m\n INNER JOIN team_members tm on m.team_id = tm.team_id AND tm.accepted = TRUE\n WHERE m.id = ANY($1) AND m.monetization_status = $2\n ", + "query": "\n SELECT m.id id, tm.user_id user_id, tm.payouts_split payouts_split\n FROM mods m\n INNER JOIN team_members tm on m.team_id = tm.team_id AND tm.accepted = TRUE\n WHERE m.id = ANY($1) AND m.monetization_status = $2 AND m.status = ANY($3)\n ", "describe": { "columns": [ { @@ -22,7 +22,8 @@ "parameters": { "Left": [ "Int8Array", - "Text" + "Text", + "TextArray" ] }, "nullable": [ @@ -31,5 +32,5 @@ false ] }, - "hash": "b768d9db6c785d6a701324ea746794d33e94121403163a774b6ef775640fd3d3" + "hash": "a8bfce13de871daf0bb1cf73b4c5ded611ff58d94461404182942210492e8010" } diff --git a/.sqlx/query-b0c29c51bd3ae5b93d487471a98ee9bbb43a4df468ba781852b137dd315b9608.json b/.sqlx/query-b0c29c51bd3ae5b93d487471a98ee9bbb43a4df468ba781852b137dd315b9608.json deleted file mode 100644 index a06786be..00000000 --- a/.sqlx/query-b0c29c51bd3ae5b93d487471a98ee9bbb43a4df468ba781852b137dd315b9608.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO threads_messages (\n id, author_id, body, thread_id\n )\n VALUES (\n $1, $2, $3, $4\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Jsonb", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "b0c29c51bd3ae5b93d487471a98ee9bbb43a4df468ba781852b137dd315b9608" -} diff --git a/.sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json b/.sqlx/query-b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198.json similarity index 75% rename from .sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json rename to .sqlx/query-b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198.json index 20c4ed62..31772e96 100644 --- a/.sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json +++ b/.sqlx/query-b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type\n FROM files f\n WHERE f.version_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type\n FROM files f\n WHERE f.version_id = ANY($1)\n ", "describe": { "columns": [ { @@ -54,5 +54,5 @@ true ] }, - "hash": "e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823" + "hash": "b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198" } diff --git a/.sqlx/query-03cd8926d18aa8c11934fdc0da32ccbbbccf2527c523336f230c0e344c471a0f.json b/.sqlx/query-b82d35429e009e515ae1e0332142b3bd0bec55f38807eded9130b932929f2ebe.json similarity index 81% rename from .sqlx/query-03cd8926d18aa8c11934fdc0da32ccbbbccf2527c523336f230c0e344c471a0f.json rename to .sqlx/query-b82d35429e009e515ae1e0332142b3bd0bec55f38807eded9130b932929f2ebe.json index 6671362a..d78e5d15 100644 --- a/.sqlx/query-03cd8926d18aa8c11934fdc0da32ccbbbccf2527c523336f230c0e344c471a0f.json +++ b/.sqlx/query-b82d35429e009e515ae1e0332142b3bd0bec55f38807eded9130b932929f2ebe.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT m.id id, tm.user_id user_id, tm.payouts_split payouts_split\n FROM mods m\n INNER JOIN organizations o ON m.organization_id = o.id\n INNER JOIN team_members tm on o.team_id = tm.team_id AND tm.accepted = TRUE\n WHERE m.id = ANY($1) AND m.monetization_status = $2 AND 
m.organization_id IS NOT NULL\n ", + "query": "\n SELECT m.id id, tm.user_id user_id, tm.payouts_split payouts_split\n FROM mods m\n INNER JOIN organizations o ON m.organization_id = o.id\n INNER JOIN team_members tm on o.team_id = tm.team_id AND tm.accepted = TRUE\n WHERE m.id = ANY($1) AND m.monetization_status = $2 AND m.status = ANY($3) AND m.organization_id IS NOT NULL\n ", "describe": { "columns": [ { @@ -22,7 +22,8 @@ "parameters": { "Left": [ "Int8Array", - "Text" + "Text", + "TextArray" ] }, "nullable": [ @@ -31,5 +32,5 @@ false ] }, - "hash": "03cd8926d18aa8c11934fdc0da32ccbbbccf2527c523336f230c0e344c471a0f" + "hash": "b82d35429e009e515ae1e0332142b3bd0bec55f38807eded9130b932929f2ebe" } diff --git a/.sqlx/query-b993ec7579f06603a2a308dccd1ea1fbffd94286db48bc0e36a30f4f6a9d39af.json b/.sqlx/query-b993ec7579f06603a2a308dccd1ea1fbffd94286db48bc0e36a30f4f6a9d39af.json deleted file mode 100644 index 0db3e537..00000000 --- a/.sqlx/query-b993ec7579f06603a2a308dccd1ea1fbffd94286db48bc0e36a30f4f6a9d39af.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE versions\n SET downloads = downloads + 1\n WHERE id = ANY($1)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "b993ec7579f06603a2a308dccd1ea1fbffd94286db48bc0e36a30f4f6a9d39af" -} diff --git a/.sqlx/query-bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50.json b/.sqlx/query-c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c.json similarity index 70% rename from .sqlx/query-bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50.json rename to .sqlx/query-c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c.json index 01b0c698..f7b9866a 100644 --- a/.sqlx/query-bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50.json +++ b/.sqlx/query-c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id\n FROM loader_fields lf\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id\n WHERE lfl.loader_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id\n FROM loader_fields lf\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id\n WHERE lfl.loader_id = ANY($1)\n ", "describe": { "columns": [ { @@ -60,5 +60,5 @@ false ] }, - "hash": "bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50" + "hash": "c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c" } diff --git a/.sqlx/query-c2924fff035e92f7bd2279517310ba391ced72b38be97d462cdfe60048e947db.json b/.sqlx/query-c2924fff035e92f7bd2279517310ba391ced72b38be97d462cdfe60048e947db.json new file mode 100644 index 00000000..fba958d5 --- /dev/null +++ b/.sqlx/query-c2924fff035e92f7bd2279517310ba391ced72b38be97d462cdfe60048e947db.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE files\n SET metadata = $1\n WHERE id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "c2924fff035e92f7bd2279517310ba391ced72b38be97d462cdfe60048e947db" +} diff --git a/.sqlx/query-320d73cd900a6e00f0e74b7a8c34a7658d16034b01a35558cb42fa9c16185eb5.json b/.sqlx/query-caa4f261950f027cd34e2099e5489c02de214299004ea182f5eae93396e1d313.json similarity index 
70% rename from .sqlx/query-320d73cd900a6e00f0e74b7a8c34a7658d16034b01a35558cb42fa9c16185eb5.json rename to .sqlx/query-caa4f261950f027cd34e2099e5489c02de214299004ea182f5eae93396e1d313.json index 6284d141..0fc2034d 100644 --- a/.sqlx/query-320d73cd900a6e00f0e74b7a8c34a7658d16034b01a35558cb42fa9c16185eb5.json +++ b/.sqlx/query-caa4f261950f027cd34e2099e5489c02de214299004ea182f5eae93396e1d313.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT tm.id, tm.author_id, tm.thread_id, tm.body, tm.created\n FROM threads_messages tm\n WHERE tm.id = ANY($1)\n ", + "query": "\n SELECT tm.id, tm.author_id, tm.thread_id, tm.body, tm.created, tm.hide_identity\n FROM threads_messages tm\n WHERE tm.id = ANY($1)\n ", "describe": { "columns": [ { @@ -27,6 +27,11 @@ "ordinal": 4, "name": "created", "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "hide_identity", + "type_info": "Bool" } ], "parameters": { @@ -39,8 +44,9 @@ true, false, false, + false, false ] }, - "hash": "320d73cd900a6e00f0e74b7a8c34a7658d16034b01a35558cb42fa9c16185eb5" + "hash": "caa4f261950f027cd34e2099e5489c02de214299004ea182f5eae93396e1d313" } diff --git a/.sqlx/query-cc1f2f568a0ba1d285a95fd9b6e3b118a0eaa26e2851bcc3f1920ae0140b48ae.json b/.sqlx/query-cc1f2f568a0ba1d285a95fd9b6e3b118a0eaa26e2851bcc3f1920ae0140b48ae.json new file mode 100644 index 00000000..953a6002 --- /dev/null +++ b/.sqlx/query-cc1f2f568a0ba1d285a95fd9b6e3b118a0eaa26e2851bcc3f1920ae0140b48ae.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n f.metadata, v.id version_id\n FROM versions v\n INNER JOIN files f ON f.version_id = v.id\n WHERE v.mod_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "metadata", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "version_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true, + false + ] + }, + "hash": "cc1f2f568a0ba1d285a95fd9b6e3b118a0eaa26e2851bcc3f1920ae0140b48ae" +} diff --git a/.sqlx/query-ccf57f9c1026927afc940a20ebad9fb58ded7171b21e91973d1f13c91eab9b37.json b/.sqlx/query-ccf57f9c1026927afc940a20ebad9fb58ded7171b21e91973d1f13c91eab9b37.json new file mode 100644 index 00000000..8b28b3d9 --- /dev/null +++ b/.sqlx/query-ccf57f9c1026927afc940a20ebad9fb58ded7171b21e91973d1f13c91eab9b37.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE files\n SET metadata = $1\n WHERE id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "ccf57f9c1026927afc940a20ebad9fb58ded7171b21e91973d1f13c91eab9b37" +} diff --git a/.sqlx/query-d08c9ef6a8829ce1d23d66f27c58f4b9b64f4ce985e60ded871d1f31eb0c818b.json b/.sqlx/query-d08c9ef6a8829ce1d23d66f27c58f4b9b64f4ce985e60ded871d1f31eb0c818b.json deleted file mode 100644 index 7eab9304..00000000 --- a/.sqlx/query-d08c9ef6a8829ce1d23d66f27c58f4b9b64f4ce985e60ded871d1f31eb0c818b.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE mods\n SET downloads = downloads + 1\n WHERE id = ANY($1)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "d08c9ef6a8829ce1d23d66f27c58f4b9b64f4ce985e60ded871d1f31eb0c818b" -} diff --git a/.sqlx/query-2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95.json b/.sqlx/query-d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51.json similarity index 69% rename from 
.sqlx/query-2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95.json rename to .sqlx/query-d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51.json index 9b62665f..7141f46a 100644 --- a/.sqlx/query-2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95.json +++ b/.sqlx/query-d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1) \n ORDER BY enum_id, ordering, created DESC\n ", + "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1)\n ORDER BY enum_id, ordering, created ASC\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ true ] }, - "hash": "2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95" + "hash": "d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51" } diff --git a/.sqlx/query-e1c24a57013cbc64f463d3a49cb68989eced49b475c0bbab90b21908ae0e77b4.json b/.sqlx/query-e1c24a57013cbc64f463d3a49cb68989eced49b475c0bbab90b21908ae0e77b4.json new file mode 100644 index 00000000..dc23d4e2 --- /dev/null +++ b/.sqlx/query-e1c24a57013cbc64f463d3a49cb68989eced49b475c0bbab90b21908ae0e77b4.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE users u\n SET balance = u.balance + v.amount\n FROM unnest($1::BIGINT[], $2::NUMERIC[]) AS v(id, amount)\n WHERE u.id = v.id\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array", + "NumericArray" + ] + }, + "nullable": [] + }, + "hash": "e1c24a57013cbc64f463d3a49cb68989eced49b475c0bbab90b21908ae0e77b4" +} diff --git a/.sqlx/query-e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681.json b/.sqlx/query-e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681.json deleted file mode 100644 index 793918ef..00000000 --- a/.sqlx/query-e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "mod_id", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "loaders", - "type_info": "VarcharArray" - }, - { - "ordinal": 2, - "name": "project_types", - "type_info": "VarcharArray" - }, - { - "ordinal": 3, - "name": "games", - "type_info": "VarcharArray" - }, - { - "ordinal": 4, - "name": "loader_fields", - "type_info": "Int4Array" - } - ], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [ - false, - null, - null, - null, - null - ] - }, - "hash": 
"e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681" -} diff --git a/.sqlx/query-e37ecb6dc1509d390bb6f68ba25899d19f693554d8969bbf8f8ee14a78adf0f9.json b/.sqlx/query-e37ecb6dc1509d390bb6f68ba25899d19f693554d8969bbf8f8ee14a78adf0f9.json deleted file mode 100644 index 798f248b..00000000 --- a/.sqlx/query-e37ecb6dc1509d390bb6f68ba25899d19f693554d8969bbf8f8ee14a78adf0f9.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE threads\n SET show_in_mod_inbox = $1\n WHERE id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Bool", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "e37ecb6dc1509d390bb6f68ba25899d19f693554d8969bbf8f8ee14a78adf0f9" -} diff --git a/.sqlx/query-e9d863c1793939d5ae7137d810f23d06460c28a9058b251448e3786c436f80cd.json b/.sqlx/query-e9d863c1793939d5ae7137d810f23d06460c28a9058b251448e3786c436f80cd.json deleted file mode 100644 index d07589c7..00000000 --- a/.sqlx/query-e9d863c1793939d5ae7137d810f23d06460c28a9058b251448e3786c436f80cd.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT id\n FROM threads\n WHERE show_in_mod_inbox = TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "e9d863c1793939d5ae7137d810f23d06460c28a9058b251448e3786c436f80cd" -} diff --git a/.sqlx/query-f297b517bc3bbd8628c0c222c0e3daf8f4efbe628ee2e8ddbbb4b9734cc9c915.json b/.sqlx/query-f297b517bc3bbd8628c0c222c0e3daf8f4efbe628ee2e8ddbbb4b9734cc9c915.json new file mode 100644 index 00000000..dc923578 --- /dev/null +++ b/.sqlx/query-f297b517bc3bbd8628c0c222c0e3daf8f4efbe628ee2e8ddbbb4b9734cc9c915.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO moderation_external_files (sha1, external_license_id)\n SELECT * FROM UNNEST ($1::bytea[], $2::bigint[])\n ON CONFLICT (sha1) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "f297b517bc3bbd8628c0c222c0e3daf8f4efbe628ee2e8ddbbb4b9734cc9c915" +} diff --git a/.sqlx/query-f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2.json b/.sqlx/query-f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2.json deleted file mode 100644 index 2863c6bf..00000000 --- a/.sqlx/query-f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "version_id", - "type_info": "Int8" - }, - { - 
"ordinal": 1, - "name": "loaders", - "type_info": "VarcharArray" - }, - { - "ordinal": 2, - "name": "project_types", - "type_info": "VarcharArray" - }, - { - "ordinal": 3, - "name": "games", - "type_info": "VarcharArray" - }, - { - "ordinal": 4, - "name": "loader_fields", - "type_info": "Int4Array" - } - ], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [ - false, - null, - null, - null, - null - ] - }, - "hash": "f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2" -} diff --git a/.sqlx/query-7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b.json b/.sqlx/query-f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84.json similarity index 70% rename from .sqlx/query-7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b.json rename to .sqlx/query-f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84.json index fdb571de..d47e6fda 100644 --- a/.sqlx/query-7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b.json +++ b/.sqlx/query-f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering\n FROM mods_gallery mg\n INNER JOIN mods m ON mg.mod_id = m.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", + "query": "\n SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering\n FROM mods_gallery mg\n INNER JOIN mods m ON mg.mod_id = m.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", "describe": { "columns": [ { @@ -55,5 +55,5 @@ false ] }, - "hash": "7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b" + "hash": "f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84" } diff --git a/Cargo.lock b/Cargo.lock index 46936899..1ebca561 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,31 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "actix" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cba56612922b907719d4a01cf11c8d5b458e7d3dba946d0435f20f58d6795ed2" -dependencies = [ - "actix-macros", - "actix-rt", - "actix_derive", - "bitflags 2.4.1", - "bytes", - "crossbeam-channel", - "futures-core", - "futures-sink", - "futures-task", - "futures-util", - "log", - "once_cell", - "parking_lot", - "pin-project-lite", - "smallvec", - "tokio", - "tokio-util", -] - [[package]] name = "actix-codec" version = "0.5.1" @@ -309,17 +284,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "actix_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c7db3d5a9718568e4cf4a537cfd7070e6e6ff7481510d0237fb529ac850f6d3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.48", -] - [[package]] name = "addr2line" version = "0.21.0" @@ -690,9 +654,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", ] @@ -1021,15 +985,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam-channel" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "crossbeam-deque" version = "0.8.5" @@ -1633,6 +1588,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "2.0.0" @@ -1797,6 +1758,26 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +[[package]] +name = "governor" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if", + "dashmap", + "futures", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "quanta", + "rand", + "smallvec", + "spinning_top", +] + [[package]] name = "h2" version = "0.3.23" @@ -2222,6 +2203,27 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +[[package]] +name = "jemalloc-sys" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d3b9f3f5c9b31aa0f5ed3260385ac205db665baa41d49bb8338008ae94ede45" +dependencies = [ + "cc", + "fs_extra", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43ae63fcfc45e99ab3d1b29a46782ad679e98436c3169d15a167a1108a724b69" +dependencies = [ + "jemalloc-sys", + "libc", +] + [[package]] name = "jobserver" version = "0.1.27" @@ -2277,7 +2279,6 @@ dependencies = [ name = "labrinth" version = "2.7.0" dependencies = [ - "actix", "actix-cors", "actix-files", "actix-http", @@ -2303,18 +2304,22 @@ dependencies = [ "flate2", "futures", "futures-timer", + 
"futures-util", + "governor", "hex", "hmac 0.11.0", "hyper", "hyper-tls", "image", "itertools 0.12.0", + "jemallocator", "json-patch", "lazy_static", "lettre", "log", "maxminddb", "meilisearch-sdk", + "murmur2", "rand", "rand_chacha", "redis", @@ -2667,6 +2672,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "murmur2" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb585ade2549a017db2e35978b77c319214fa4b37cede841e27954dd6e8f3ca8" + [[package]] name = "native-tls" version = "0.2.11" @@ -2696,6 +2707,12 @@ dependencies = [ "libc", ] +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + [[package]] name = "nom" version = "7.1.3" @@ -2706,6 +2723,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + [[package]] name = "num-bigint-dig" version = "0.8.4" @@ -3097,6 +3120,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + [[package]] name = "powerfmt" version = "0.2.0" @@ -3217,6 +3246,21 @@ dependencies = [ "bytemuck", ] +[[package]] +name = "quanta" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + [[package]] name = "quick-error" version = "2.0.1" @@ -3304,6 +3348,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "raw-cpuid" +version = "11.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" +dependencies = [ + "bitflags 2.4.1", +] + [[package]] name = "rayon" version = "1.8.0" @@ -4169,6 +4222,15 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.3" diff --git a/Cargo.toml b/Cargo.toml index 90189225..092a15e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,6 @@ name = "labrinth" path = "src/main.rs" [dependencies] -actix = "0.13.1" actix-web = "4.4.1" actix-rt = "2.9.0" actix-multipart = "0.6.1" @@ -19,12 +18,14 @@ actix-cors = "0.7.0" actix-ws = "0.2.5" actix-files = "0.6.5" actix-web-prom = "0.7.0" +governor = "0.6.3" tokio = { version = "1.35.1", features = ["sync"] } tokio-stream = "0.1.14" futures = "0.3.30" futures-timer = "3.0.2" +futures-util = "0.3.30" async-trait = "0.1.70" dashmap = "5.4.0" lazy_static = "1.4.0" @@ -51,6 +52,7 @@ sha1 = { version = "0.6.1", features = ["std"] } sha2 = "0.9.9" hmac = "0.11.0" argon2 = { version = "0.5.0", features = ["std"] } +murmur2 = "0.1.0" bitflags = "2.4.0" hex = "0.4.3" zxcvbn = "2.2.2" @@ -108,6 +110,8 @@ lettre = "0.11.3" derive-new = "0.6.0" rust_iso3166 = "0.1.11" +jemallocator = {version = "0.3.2", optional = true} + [dev-dependencies] 
actix-http = "3.4.0" json-patch = "*" @@ -116,3 +120,6 @@ opt-level = 0 # Minimal optimization, speeds up compilation lto = false # Disables Link Time Optimization incremental = true # Enables incremental compilation codegen-units = 16 # Higher number can improve compile times but reduce runtime performance + +[features] +jemalloc = ["jemallocator"] \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index d972e613..6a8aab79 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY docker_utils/dummy.rs . # Change temporarely the path of the code RUN sed -i 's|src/main.rs|dummy.rs|' Cargo.toml # Build only deps -RUN cargo build --release +RUN cargo build --release --features jemalloc # Now return the file back to normal RUN sed -i 's|dummy.rs|src/main.rs|' Cargo.toml @@ -17,7 +17,7 @@ RUN sed -i 's|dummy.rs|src/main.rs|' Cargo.toml COPY . . # Build our code ARG SQLX_OFFLINE=true -RUN cargo build --release +RUN cargo build --release --features jemalloc # Final Stage FROM ubuntu:latest diff --git a/migrations/20240131224610_moderation_packs.sql b/migrations/20240131224610_moderation_packs.sql new file mode 100644 index 00000000..49040ec5 --- /dev/null +++ b/migrations/20240131224610_moderation_packs.sql @@ -0,0 +1,19 @@ +CREATE TABLE moderation_external_licenses ( + id bigint PRIMARY KEY, + title text not null, + status text not null, + link text null, + exceptions text null, + proof text null, + flame_project_id integer null +); + +CREATE TABLE moderation_external_files ( + sha1 bytea PRIMARY KEY, + external_license_id bigint references moderation_external_licenses not null +); + +ALTER TABLE files ADD COLUMN metadata jsonb NULL; + +INSERT INTO users (id, username, name, email, avatar_url, bio, role, badges, balance) +VALUES (0, 'AutoMod', 'AutoMod', 'support@modrinth.com', 'https://cdn.modrinth.com/user/2REoufqX/6aabaf2d1fca2935662eca4ce451cd9775054c22.png', 'An automated account performing moderation utilities for Modrinth.', 'moderator', 0, 0) \ No newline at end of file diff --git a/migrations/20240221215354_moderation_pack_fixes.sql b/migrations/20240221215354_moderation_pack_fixes.sql new file mode 100644 index 00000000..67eff677 --- /dev/null +++ b/migrations/20240221215354_moderation_pack_fixes.sql @@ -0,0 +1,2 @@ +ALTER TABLE moderation_external_files ALTER COLUMN sha1 SET NOT NULL; +ALTER TABLE moderation_external_licenses ALTER COLUMN title DROP NOT NULL; diff --git a/migrations/20240319195753_threads-updates.sql b/migrations/20240319195753_threads-updates.sql new file mode 100644 index 00000000..4681958b --- /dev/null +++ b/migrations/20240319195753_threads-updates.sql @@ -0,0 +1,9 @@ +ALTER TABLE threads DROP COLUMN show_in_mod_inbox; + +ALTER TABLE threads_messages ADD COLUMN hide_identity BOOLEAN default false NOT NULL; + +UPDATE threads_messages +SET hide_identity = TRUE +FROM users +WHERE threads_messages.author_id = users.id +AND users.role IN ('moderator', 'admin'); \ No newline at end of file diff --git a/src/auth/checks.rs b/src/auth/checks.rs index fdf173f8..fe3a7878 100644 --- a/src/auth/checks.rs +++ b/src/auth/checks.rs @@ -6,7 +6,6 @@ use crate::database::redis::RedisPool; use crate::database::{models, Project, Version}; use crate::models::users::User; use crate::routes::ApiError; -use actix_web::web; use itertools::Itertools; use sqlx::PgPool; @@ -32,9 +31,10 @@ where pub async fn is_visible_project( project_data: &Project, user_option: &Option, - pool: &web::Data, + pool: &PgPool, + hide_unlisted: bool, ) -> Result { - 
filter_visible_project_ids(vec![project_data], user_option, pool) + filter_visible_project_ids(vec![project_data], user_option, pool, hide_unlisted) .await .map(|x| !x.is_empty()) } @@ -42,7 +42,7 @@ pub async fn is_visible_project( pub async fn is_team_member_project( project_data: &Project, user_option: &Option, - pool: &web::Data, + pool: &PgPool, ) -> Result { filter_enlisted_projects_ids(vec![project_data], user_option, pool) .await @@ -52,12 +52,14 @@ pub async fn is_team_member_project( pub async fn filter_visible_projects( mut projects: Vec, user_option: &Option, - pool: &web::Data, + pool: &PgPool, + hide_unlisted: bool, ) -> Result, ApiError> { let filtered_project_ids = filter_visible_project_ids( projects.iter().map(|x| &x.inner).collect_vec(), user_option, pool, + hide_unlisted, ) .await .unwrap(); @@ -73,18 +75,22 @@ pub async fn filter_visible_projects( pub async fn filter_visible_project_ids( projects: Vec<&Project>, user_option: &Option, - pool: &web::Data, + pool: &PgPool, + hide_unlisted: bool, ) -> Result, ApiError> { let mut return_projects = Vec::new(); let mut check_projects = Vec::new(); // Return projects that are not hidden or we are a mod of for project in projects { - if !project.status.is_hidden() - || user_option - .as_ref() - .map(|x| x.role.is_mod()) - .unwrap_or(false) + if (if hide_unlisted { + project.status.is_searchable() + } else { + !project.status.is_hidden() + }) || user_option + .as_ref() + .map(|x| x.role.is_mod()) + .unwrap_or(false) { return_projects.push(project.id); } else if user_option.is_some() { @@ -107,7 +113,7 @@ pub async fn filter_visible_project_ids( pub async fn filter_enlisted_projects_ids( projects: Vec<&Project>, user_option: &Option, - pool: &web::Data, + pool: &PgPool, ) -> Result, ApiError> { let mut return_projects = vec![]; @@ -135,7 +141,7 @@ pub async fn filter_enlisted_projects_ids( .collect::>(), user_id as database::models::ids::UserId, ) - .fetch_many(&***pool) + .fetch_many(pool) .try_for_each(|e| { if let Some(row) = e.right() { for x in projects.iter() { @@ -156,7 +162,7 @@ pub async fn filter_enlisted_projects_ids( pub async fn is_visible_version( version_data: &Version, user_option: &Option, - pool: &web::Data, + pool: &PgPool, redis: &RedisPool, ) -> Result { filter_visible_version_ids(vec![version_data], user_option, pool, redis) @@ -167,7 +173,7 @@ pub async fn is_visible_version( pub async fn is_team_member_version( version_data: &Version, user_option: &Option, - pool: &web::Data, + pool: &PgPool, redis: &RedisPool, ) -> Result { filter_enlisted_version_ids(vec![version_data], user_option, pool, redis) @@ -178,7 +184,7 @@ pub async fn is_team_member_version( pub async fn filter_visible_versions( mut versions: Vec, user_option: &Option, - pool: &web::Data, + pool: &PgPool, redis: &RedisPool, ) -> Result, ApiError> { let filtered_version_ids = filter_visible_version_ids( @@ -213,7 +219,7 @@ impl ValidateAuthorized for models::OAuthClient { pub async fn filter_visible_version_ids( versions: Vec<&Version>, user_option: &Option, - pool: &web::Data, + pool: &PgPool, redis: &RedisPool, ) -> Result, ApiError> { let mut return_versions = Vec::new(); @@ -226,13 +232,14 @@ pub async fn filter_visible_version_ids( // Get visible projects- ones we are allowed to see public versions for. let visible_project_ids = filter_visible_project_ids( - Project::get_many_ids(&project_ids, &***pool, redis) + Project::get_many_ids(&project_ids, pool, redis) .await? 
.iter() .map(|x| &x.inner) .collect(), user_option, pool, + false, ) .await?; @@ -265,7 +272,7 @@ pub async fn filter_visible_version_ids( pub async fn filter_enlisted_version_ids( versions: Vec<&Version>, user_option: &Option, - pool: &web::Data, + pool: &PgPool, redis: &RedisPool, ) -> Result, ApiError> { let mut return_versions = Vec::new(); @@ -275,7 +282,7 @@ pub async fn filter_enlisted_version_ids( // Get enlisted projects- ones we are allowed to see hidden versions for. let authorized_project_ids = filter_enlisted_projects_ids( - Project::get_many_ids(&project_ids, &***pool, redis) + Project::get_many_ids(&project_ids, pool, redis) .await? .iter() .map(|x| &x.inner) diff --git a/src/auth/mod.rs b/src/auth/mod.rs index 305743c3..bd7ac0ef 100644 --- a/src/auth/mod.rs +++ b/src/auth/mod.rs @@ -72,7 +72,7 @@ impl actix_web::ResponseError for AuthenticationError { fn error_response(&self) -> HttpResponse { HttpResponse::build(self.status_code()).json(ApiError { error: self.error_name(), - description: &self.to_string(), + description: self.to_string(), }) } } diff --git a/src/auth/oauth/errors.rs b/src/auth/oauth/errors.rs index 72a65abb..744d507c 100644 --- a/src/auth/oauth/errors.rs +++ b/src/auth/oauth/errors.rs @@ -100,7 +100,7 @@ impl actix_web::ResponseError for OAuthError { } else { HttpResponse::build(self.status_code()).json(ApiError { error: &self.error_type.error_name(), - description: &self.error_type.to_string(), + description: self.error_type.to_string(), }) } } diff --git a/src/clickhouse/mod.rs b/src/clickhouse/mod.rs index c1763dc6..a89d47f4 100644 --- a/src/clickhouse/mod.rs +++ b/src/clickhouse/mod.rs @@ -42,14 +42,15 @@ pub async fn init_client_with_database( user_id UInt64, project_id UInt64, + monetized Bool DEFAULT True, ip IPv6, country String, user_agent String, - headers Array(Tuple(String, String)), + headers Array(Tuple(String, String)) ) ENGINE = MergeTree() - PRIMARY KEY (project_id, recorded) + PRIMARY KEY (project_id, recorded, ip) " )) .execute() @@ -71,10 +72,10 @@ pub async fn init_client_with_database( ip IPv6, country String, user_agent String, - headers Array(Tuple(String, String)), + headers Array(Tuple(String, String)) ) ENGINE = MergeTree() - PRIMARY KEY (project_id, recorded) + PRIMARY KEY (project_id, recorded, ip) " )) .execute() @@ -94,10 +95,10 @@ pub async fn init_client_with_database( loader String, game_version String, - parent UInt64, + parent UInt64 ) ENGINE = MergeTree() - PRIMARY KEY (project_id, recorded) + PRIMARY KEY (project_id, recorded, user_id) " )) .execute() diff --git a/src/database/models/collection_item.rs b/src/database/models/collection_item.rs index a2c29283..1f703950 100644 --- a/src/database/models/collection_item.rs +++ b/src/database/models/collection_item.rs @@ -4,6 +4,8 @@ use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; use crate::models::collections::CollectionStatus; use chrono::{DateTime, Utc}; +use dashmap::DashMap; +use futures::TryStreamExt; use serde::{Deserialize, Serialize}; const COLLECTIONS_NAMESPACE: &str = "collections"; @@ -155,93 +157,55 @@ impl Collection { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::TryStreamExt; - - let mut redis = redis.connect().await?; - - if collection_ids.is_empty() { - return Ok(Vec::new()); - } - - let mut found_collections = Vec::new(); - let mut remaining_collections: Vec = collection_ids.to_vec(); - - if !collection_ids.is_empty() { - let collections = redis - .multi_get::( - COLLECTIONS_NAMESPACE, - 
collection_ids.iter().map(|x| x.0.to_string()), - ) - .await?; - - for collection in collections { - if let Some(collection) = - collection.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_collections.retain(|x| collection.id.0 != x.0); - found_collections.push(collection); - continue; - } - } - } + let val = redis + .get_cached_keys( + COLLECTIONS_NAMESPACE, + &collection_ids.iter().map(|x| x.0).collect::>(), + |collection_ids| async move { + let collections = sqlx::query!( + " + SELECT c.id id, c.name name, c.description description, + c.icon_url icon_url, c.color color, c.created created, c.user_id user_id, + c.updated updated, c.status status, + ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods + FROM collections c + LEFT JOIN collections_mods cm ON cm.collection_id = c.id + WHERE c.id = ANY($1) + GROUP BY c.id; + ", + &collection_ids, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, m| { + let collection = Collection { + id: CollectionId(m.id), + user_id: UserId(m.user_id), + name: m.name.clone(), + description: m.description.clone(), + icon_url: m.icon_url.clone(), + color: m.color.map(|x| x as u32), + created: m.created, + updated: m.updated, + status: CollectionStatus::from_string(&m.status), + projects: m + .mods + .unwrap_or_default() + .into_iter() + .map(ProjectId) + .collect(), + }; + + acc.insert(m.id, collection); + async move { Ok(acc) } + }) + .await?; - if !remaining_collections.is_empty() { - let collection_ids_parsed: Vec = - remaining_collections.iter().map(|x| x.0).collect(); - let db_collections: Vec = sqlx::query!( - " - SELECT c.id id, c.name name, c.description description, - c.icon_url icon_url, c.color color, c.created created, c.user_id user_id, - c.updated updated, c.status status, - ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods - FROM collections c - LEFT JOIN collections_mods cm ON cm.collection_id = c.id - WHERE c.id = ANY($1) - GROUP BY c.id; - ", - &collection_ids_parsed, + Ok(collections) + }, ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| { - let id = m.id; - - Collection { - id: CollectionId(id), - user_id: UserId(m.user_id), - name: m.name.clone(), - description: m.description.clone(), - icon_url: m.icon_url.clone(), - color: m.color.map(|x| x as u32), - created: m.created, - updated: m.updated, - status: CollectionStatus::from_string(&m.status), - projects: m - .mods - .unwrap_or_default() - .into_iter() - .map(ProjectId) - .collect(), - } - })) - }) - .try_collect::>() .await?; - for collection in db_collections { - redis - .set_serialized_to_json( - COLLECTIONS_NAMESPACE, - collection.id.0, - &collection, - None, - ) - .await?; - found_collections.push(collection); - } - } - - Ok(found_collections) + Ok(val) } pub async fn clear_cache(id: CollectionId, redis: &RedisPool) -> Result<(), DatabaseError> { diff --git a/src/database/models/ids.rs b/src/database/models/ids.rs index d7e4a97a..1adfac19 100644 --- a/src/database/models/ids.rs +++ b/src/database/models/ids.rs @@ -1,7 +1,9 @@ use super::DatabaseError; use crate::models::ids::base62_impl::to_base62; -use crate::models::ids::random_base62_rng; +use crate::models::ids::{random_base62_rng, random_base62_rng_range}; use censor::Censor; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; use serde::{Deserialize, Serialize}; use sqlx::sqlx_macros::Type; @@ -12,7 +14,7 @@ macro_rules! 
generate_ids { $vis async fn $function_name( con: &mut sqlx::Transaction<'_, sqlx::Postgres>, ) -> Result<$return_type, DatabaseError> { - let mut rng = rand::thread_rng(); + let mut rng = ChaCha20Rng::from_entropy(); let length = $id_length; let mut id = random_base62_rng(&mut rng, length); let mut retry_count = 0; @@ -41,6 +43,37 @@ macro_rules! generate_ids { }; } +macro_rules! generate_bulk_ids { + ($vis:vis $function_name:ident, $return_type:ty, $select_stmnt:literal, $id_function:expr) => { + $vis async fn $function_name( + count: usize, + con: &mut sqlx::Transaction<'_, sqlx::Postgres>, + ) -> Result, DatabaseError> { + let mut rng = rand::thread_rng(); + let mut retry_count = 0; + + // Check if ID is unique + loop { + let base = random_base62_rng_range(&mut rng, 1, 10) as i64; + let ids = (0..count).map(|x| base + x as i64).collect::>(); + + let results = sqlx::query!($select_stmnt, &ids) + .fetch_one(&mut **con) + .await?; + + if !results.exists.unwrap_or(true) { + return Ok(ids.into_iter().map(|x| $id_function(x)).collect()); + } + + retry_count += 1; + if retry_count > ID_RETRY_COUNT { + return Err(DatabaseError::RandomId); + } + } + } + }; +} + generate_ids!( pub generate_project_id, ProjectId, @@ -121,6 +154,13 @@ generate_ids!( NotificationId ); +generate_bulk_ids!( + pub generate_many_notification_ids, + NotificationId, + "SELECT EXISTS(SELECT 1 FROM notifications WHERE id = ANY($1))", + NotificationId +); + generate_ids!( pub generate_thread_id, ThreadId, diff --git a/src/database/models/image_item.rs b/src/database/models/image_item.rs index 68477304..28297c15 100644 --- a/src/database/models/image_item.rs +++ b/src/database/models/image_item.rs @@ -2,6 +2,7 @@ use super::ids::*; use crate::database::redis::RedisPool; use crate::{database::models::DatabaseError, models::images::ImageContext}; use chrono::{DateTime, Utc}; +use dashmap::DashMap; use serde::{Deserialize, Serialize}; const IMAGES_NAMESPACE: &str = "images"; @@ -180,70 +181,44 @@ impl Image { { use futures::TryStreamExt; - let mut redis = redis.connect().await?; - if image_ids.is_empty() { - return Ok(Vec::new()); - } - - let mut found_images = Vec::new(); - let mut remaining_ids = image_ids.to_vec(); - - let image_ids = image_ids.iter().map(|x| x.0).collect::>(); - - if !image_ids.is_empty() { - let images = redis - .multi_get::(IMAGES_NAMESPACE, image_ids.iter().map(|x| x.to_string())) - .await?; - for image in images { - if let Some(image) = image.and_then(|x| serde_json::from_str::(&x).ok()) { - remaining_ids.retain(|x| image.id.0 != x.0); - found_images.push(image); - continue; - } - } - } - - if !remaining_ids.is_empty() { - let db_images: Vec = sqlx::query!( - " - SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id - FROM uploaded_images - WHERE id = ANY($1) - GROUP BY id; - ", - &remaining_ids.iter().map(|x| x.0).collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|i| { - let id = i.id; - - Image { - id: ImageId(id), - url: i.url, - size: i.size as u64, - created: i.created, - owner_id: UserId(i.owner_id), - context: i.context, - project_id: i.mod_id.map(ProjectId), - version_id: i.version_id.map(VersionId), - thread_message_id: i.thread_message_id.map(ThreadMessageId), - report_id: i.report_id.map(ReportId), - } - })) - }) - .try_collect::>() - .await?; - - for image in db_images { - redis - .set_serialized_to_json(IMAGES_NAMESPACE, image.id.0, &image, None) + let val = redis.get_cached_keys( + IMAGES_NAMESPACE, + 
&image_ids.iter().map(|x| x.0).collect::>(), + |image_ids| async move { + let images = sqlx::query!( + " + SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id + FROM uploaded_images + WHERE id = ANY($1) + GROUP BY id; + ", + &image_ids, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, i| { + let img = Image { + id: ImageId(i.id), + url: i.url, + size: i.size as u64, + created: i.created, + owner_id: UserId(i.owner_id), + context: i.context, + project_id: i.mod_id.map(ProjectId), + version_id: i.version_id.map(VersionId), + thread_message_id: i.thread_message_id.map(ThreadMessageId), + report_id: i.report_id.map(ReportId), + }; + + acc.insert(i.id, img); + async move { Ok(acc) } + }) .await?; - found_images.push(image); - } - } - Ok(found_images) + Ok(images) + }, + ).await?; + + Ok(val) } pub async fn clear_cache(id: ImageId, redis: &RedisPool) -> Result<(), DatabaseError> { diff --git a/src/database/models/legacy_loader_fields.rs b/src/database/models/legacy_loader_fields.rs index 8fbb425d..adb4e463 100644 --- a/src/database/models/legacy_loader_fields.rs +++ b/src/database/models/legacy_loader_fields.rs @@ -208,6 +208,13 @@ impl<'a> MinecraftGameVersionBuilder<'a> { .fetch_one(exec) .await?; + let mut conn = redis.connect().await?; + conn.delete( + crate::database::models::loader_fields::LOADER_FIELD_ENUM_VALUES_NAMESPACE, + game_versions_enum.id.0, + ) + .await?; + Ok(LoaderFieldEnumValueId(result.id)) } } diff --git a/src/database/models/loader_fields.rs b/src/database/models/loader_fields.rs index 5f3f72d4..e31b07ee 100644 --- a/src/database/models/loader_fields.rs +++ b/src/database/models/loader_fields.rs @@ -6,6 +6,7 @@ use super::DatabaseError; use crate::database::redis::RedisPool; use chrono::DateTime; use chrono::Utc; +use dashmap::DashMap; use futures::TryStreamExt; use itertools::Itertools; use serde::{Deserialize, Serialize}; @@ -16,7 +17,7 @@ const LOADERS_LIST_NAMESPACE: &str = "loaders"; const LOADER_FIELDS_NAMESPACE: &str = "loader_fields"; const LOADER_FIELDS_NAMESPACE_ALL: &str = "loader_fields_all"; const LOADER_FIELD_ENUMS_ID_NAMESPACE: &str = "loader_field_enums"; -const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values"; +pub const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values"; #[derive(Clone, Serialize, Deserialize, Debug)] pub struct Game { @@ -380,75 +381,47 @@ impl LoaderField { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - type RedisLoaderFieldTuple = (LoaderId, Vec); - - let mut redis = redis.connect().await?; - - let mut loader_ids = loader_ids.to_vec(); - let cached_fields: Vec = redis - .multi_get::(LOADER_FIELDS_NAMESPACE, loader_ids.iter().map(|x| x.0)) - .await? 
- .into_iter() - .flatten() - .filter_map(|x: String| serde_json::from_str::(&x).ok()) - .collect(); - - let mut found_loader_fields = HashMap::new(); - if !cached_fields.is_empty() { - for (loader_id, fields) in cached_fields { - if loader_ids.contains(&loader_id) { - found_loader_fields.insert(loader_id, fields); - loader_ids.retain(|x| x != &loader_id); - } - } - } - - if !loader_ids.is_empty() { - let result = sqlx::query!( - " - SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id - FROM loader_fields lf - LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id - WHERE lfl.loader_id = ANY($1) - ", - &loader_ids.iter().map(|x| x.0).collect::>() - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().and_then(|r| { - Some((LoaderId(r.loader_id) ,LoaderField { - id: LoaderFieldId(r.id), - field_type: LoaderFieldType::build(&r.field_type, r.enum_type)?, - field: r.field, - optional: r.optional, - min_val: r.min_val, - max_val: r.max_val, - })) - })) - }) - .try_collect::>() - .await?; - - let result: Vec = result - .into_iter() - .fold( - HashMap::new(), - |mut acc: HashMap>, x| { - acc.entry(x.0).or_default().push(x.1); - acc - }, + let val = redis.get_cached_keys_raw( + LOADER_FIELDS_NAMESPACE, + &loader_ids.iter().map(|x| x.0).collect::>(), + |loader_ids| async move { + let result = sqlx::query!( + " + SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id + FROM loader_fields lf + LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id + WHERE lfl.loader_id = ANY($1) + ", + &loader_ids, ) - .into_iter() - .collect_vec(); - - for (k, v) in result.into_iter() { - redis - .set_serialized_to_json(LOADER_FIELDS_NAMESPACE, k.0, (k, &v), None) + .fetch(exec) + .try_fold(DashMap::new(), |acc: DashMap>, r| { + if let Some(field_type) = LoaderFieldType::build(&r.field_type, r.enum_type) { + let loader_field = LoaderField { + id: LoaderFieldId(r.id), + field_type, + field: r.field, + optional: r.optional, + min_val: r.min_val, + max_val: r.max_val, + }; + + acc.entry(r.loader_id) + .or_default() + .push(loader_field); + } + + async move { + Ok(acc) + } + }) .await?; - found_loader_fields.insert(k, v); - } - } - Ok(found_loader_fields) + + Ok(result) + }, + ).await?; + + Ok(val.into_iter().map(|x| (LoaderId(x.0), x.1)).collect()) } // Gets all fields for a given loader(s) @@ -597,71 +570,51 @@ impl LoaderFieldEnumValue { loader_field_enum_ids: &[LoaderFieldEnumId], exec: E, redis: &RedisPool, - ) -> Result)>, DatabaseError> + ) -> Result>, DatabaseError> where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - let mut redis = redis.connect().await?; - let mut found_enums = Vec::new(); - let mut remaining_enums: Vec = loader_field_enum_ids.to_vec(); - - if !remaining_enums.is_empty() { - let enums = redis - .multi_get::( - LOADER_FIELD_ENUM_VALUES_NAMESPACE, - loader_field_enum_ids.iter().map(|x| x.0), + let val = redis.get_cached_keys_raw( + LOADER_FIELD_ENUM_VALUES_NAMESPACE, + &loader_field_enum_ids.iter().map(|x| x.0).collect::>(), + |loader_field_enum_ids| async move { + let values = sqlx::query!( + " + SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values + WHERE enum_id = ANY($1) + ORDER BY enum_id, ordering, created DESC + ", + &loader_field_enum_ids ) - .await?; - - for lfe in enums { - if let Some(lfe) = lfe.and_then(|x| { - serde_json::from_str::<(LoaderFieldEnumId, Vec)>(&x).ok() - }) { - 
remaining_enums.retain(|x| lfe.0 .0 != x.0); - found_enums.push(lfe.1); - continue; - } - } - } + .fetch(exec) + .try_fold(DashMap::new(), |acc: DashMap>, c| { + let value = LoaderFieldEnumValue { + id: LoaderFieldEnumValueId(c.id), + enum_id: LoaderFieldEnumId(c.enum_id), + value: c.value, + ordering: c.ordering, + created: c.created, + metadata: c.metadata.unwrap_or_default(), + }; + + acc.entry(c.enum_id) + .or_default() + .push(value); + + async move { + Ok(acc) + } + }) + .await?; - let remaining_enums = remaining_enums.iter().map(|x| x.0).collect::>(); - let result = sqlx::query!( - " - SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values - WHERE enum_id = ANY($1) - ORDER BY enum_id, ordering, created DESC - ", - &remaining_enums - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|c| LoaderFieldEnumValue { - id: LoaderFieldEnumValueId(c.id), - enum_id: LoaderFieldEnumId(c.enum_id), - value: c.value, - ordering: c.ordering, - created: c.created, - metadata: c.metadata.unwrap_or_default(), - })) - }) - .try_collect::>() - .await?; + Ok(values) + }, + ).await?; - // Convert from an Vec to a Vec<(LoaderFieldEnumId, Vec)> - let cachable_enum_sets: Vec<(LoaderFieldEnumId, Vec)> = result - .clone() - .into_iter() - .group_by(|x| x.enum_id) // we sort by enum_id, so this will group all values of the same enum_id together + Ok(val .into_iter() - .map(|(k, v)| (k, v.collect::>().to_vec())) - .collect(); - for (k, v) in cachable_enum_sets.iter() { - redis - .set_serialized_to_json(LOADER_FIELD_ENUM_VALUES_NAMESPACE, k.0, v, None) - .await?; - } - - Ok(cachable_enum_sets) + .map(|x| (LoaderFieldEnumId(x.0), x.1)) + .collect()) } // Matches filter against metadata of enum values diff --git a/src/database/models/mod.rs b/src/database/models/mod.rs index eb931f7d..eafde1b4 100644 --- a/src/database/models/mod.rs +++ b/src/database/models/mod.rs @@ -48,4 +48,6 @@ pub enum DatabaseError { SerdeCacheError(#[from] serde_json::Error), #[error("Schema error: {0}")] SchemaError(String), + #[error("Timeout when waiting for cache subscriber")] + CacheTimeout, } diff --git a/src/database/models/notification_item.rs b/src/database/models/notification_item.rs index 206c5373..49d2fe1f 100644 --- a/src/database/models/notification_item.rs +++ b/src/database/models/notification_item.rs @@ -3,7 +3,6 @@ use crate::database::{models::DatabaseError, redis::RedisPool}; use crate::models::notifications::NotificationBody; use chrono::{DateTime, Utc}; use futures::TryStreamExt; -use itertools::Itertools; use serde::{Deserialize, Serialize}; const USER_NOTIFICATIONS_NAMESPACE: &str = "user_notifications"; @@ -46,37 +45,15 @@ impl NotificationBuilder { transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, redis: &RedisPool, ) -> Result<(), DatabaseError> { - let mut notifications = Vec::new(); - for user in users { - let id = generate_notification_id(&mut *transaction).await?; - - notifications.push(Notification { - id, - user_id: user, - body: self.body.clone(), - read: false, - created: Utc::now(), - }); - } - - Notification::insert_many(¬ifications, transaction, redis).await?; - - Ok(()) - } -} + let notification_ids = + generate_many_notification_ids(users.len(), &mut *transaction).await?; -impl Notification { - pub async fn insert_many( - notifications: &[Notification], - transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, - redis: &RedisPool, - ) -> Result<(), DatabaseError> { - let notification_ids = notifications.iter().map(|n| n.id.0).collect_vec(); - 
let user_ids = notifications.iter().map(|n| n.user_id.0).collect_vec(); - let bodies = notifications + let body = serde_json::value::to_value(&self.body)?; + let bodies = notification_ids .iter() - .map(|n| Ok(serde_json::value::to_value(n.body.clone())?)) - .collect::, DatabaseError>>()?; + .map(|_| body.clone()) + .collect::>(); + sqlx::query!( " INSERT INTO notifications ( @@ -84,22 +61,23 @@ impl Notification { ) SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::jsonb[]) ", - ¬ification_ids[..], - &user_ids[..], + ¬ification_ids + .into_iter() + .map(|x| x.0) + .collect::>()[..], + &users.iter().map(|x| x.0).collect::>()[..], &bodies[..], ) .execute(&mut **transaction) .await?; - Notification::clear_user_notifications_cache( - notifications.iter().map(|n| &n.user_id), - redis, - ) - .await?; + Notification::clear_user_notifications_cache(&users, redis).await?; Ok(()) } +} +impl Notification { pub async fn get<'a, 'b, E>( id: NotificationId, executor: E, diff --git a/src/database/models/organization_item.rs b/src/database/models/organization_item.rs index c0c08949..7f9a9073 100644 --- a/src/database/models/organization_item.rs +++ b/src/database/models/organization_item.rs @@ -1,7 +1,8 @@ -use crate::{ - database::redis::RedisPool, - models::ids::base62_impl::{parse_base62, to_base62}, -}; +use crate::{database::redis::RedisPool, models::ids::base62_impl::parse_base62}; +use dashmap::DashMap; +use futures::TryStreamExt; +use std::fmt::{Debug, Display}; +use std::hash::Hash; use super::{ids::*, TeamMember}; use serde::{Deserialize, Serialize}; @@ -97,7 +98,7 @@ impl Organization { Self::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( organization_strings: &[T], exec: E, redis: &RedisPool, @@ -105,120 +106,56 @@ impl Organization { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::stream::TryStreamExt; - - let mut redis = redis.connect().await?; - - if organization_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_organizations = Vec::new(); - let mut remaining_strings = organization_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut organization_ids = organization_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - organization_ids.append( - &mut redis - .multi_get::( - ORGANIZATIONS_TITLES_NAMESPACE, - organization_strings + let val = redis + .get_cached_keys_with_slug( + ORGANIZATIONS_NAMESPACE, + ORGANIZATIONS_TITLES_NAMESPACE, + false, + organization_strings, + |ids| async move { + let org_ids: Vec = ids .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids + .into_iter() .map(|x| x.to_string().to_lowercase()) - .collect::>(), - ) - .await? 
- .into_iter() - .flatten() - .collect(), - ); - - if !organization_ids.is_empty() { - let organizations = redis - .multi_get::( - ORGANIZATIONS_NAMESPACE, - organization_ids.iter().map(|x| x.to_string()), - ) - .await?; - - for organization in organizations { - if let Some(organization) = - organization.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings.retain(|x| { - &to_base62(organization.id.0 as u64) != x - && organization.slug.to_lowercase() != x.to_lowercase() - }); - found_organizations.push(organization); - continue; - } - } - } - - if !remaining_strings.is_empty() { - let organization_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - - let organizations: Vec = sqlx::query!( - " - SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color - FROM organizations o - WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2) - GROUP BY o.id; - ", - &organization_ids_parsed, - &remaining_strings - .into_iter() - .map(|x| x.to_string().to_lowercase()) - .collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| Organization { - id: OrganizationId(m.id), - slug: m.slug, - name: m.name, - team_id: TeamId(m.team_id), - description: m.description, - icon_url: m.icon_url, - color: m.color.map(|x| x as u32), - })) - }) - .try_collect::>() - .await?; - - for organization in organizations { - redis - .set_serialized_to_json( - ORGANIZATIONS_NAMESPACE, - organization.id.0, - &organization, - None, - ) - .await?; - redis - .set( - ORGANIZATIONS_TITLES_NAMESPACE, - &organization.slug.to_lowercase(), - &organization.id.0.to_string(), - None, + .collect::>(); + + let organizations = sqlx::query!( + " + SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color + FROM organizations o + WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2) + GROUP BY o.id; + ", + &org_ids, + &slugs, ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, m| { + let org = Organization { + id: OrganizationId(m.id), + slug: m.slug.clone(), + name: m.name, + team_id: TeamId(m.team_id), + description: m.description, + icon_url: m.icon_url, + color: m.color.map(|x| x as u32), + }; + + acc.insert(m.id, (Some(m.slug), org)); + async move { Ok(acc) } + }) .await?; - found_organizations.push(organization); - } - } + Ok(organizations) + }, + ) + .await?; - Ok(found_organizations) + Ok(val) } // Gets organization associated with a project ID, if it exists and there is one diff --git a/src/database/models/pat_item.rs b/src/database/models/pat_item.rs index 9352d637..4e83e12b 100644 --- a/src/database/models/pat_item.rs +++ b/src/database/models/pat_item.rs @@ -1,10 +1,14 @@ use super::ids::*; use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; -use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use crate::models::ids::base62_impl::parse_base62; use crate::models::pats::Scopes; use chrono::{DateTime, Utc}; +use dashmap::DashMap; +use futures::TryStreamExt; use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; const PATS_NAMESPACE: &str = "pats"; const PATS_TOKENS_NAMESPACE: &str = "pats_tokens"; @@ -51,7 +55,7 @@ impl PersonalAccessToken { Ok(()) } - pub async fn get<'a, E, T: ToString>( + pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( id: T, exec: E, redis: &RedisPool, @@ -79,7 +83,7 @@ impl PersonalAccessToken { PersonalAccessToken::get_many(&ids, exec, redis).await } - pub 
async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( pat_strings: &[T], exec: E, redis: &RedisPool, @@ -87,105 +91,53 @@ impl PersonalAccessToken { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::TryStreamExt; - - let mut redis = redis.connect().await?; - - if pat_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_pats = Vec::new(); - let mut remaining_strings = pat_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut pat_ids = pat_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - pat_ids.append( - &mut redis - .multi_get::( - PATS_TOKENS_NAMESPACE, - pat_strings.iter().map(|x| x.to_string()), - ) - .await? - .into_iter() - .flatten() - .collect(), - ); - - if !pat_ids.is_empty() { - let pats = redis - .multi_get::(PATS_NAMESPACE, pat_ids.iter().map(|x| x.to_string())) - .await?; - for pat in pats { - if let Some(pat) = - pat.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings - .retain(|x| &to_base62(pat.id.0 as u64) != x && &pat.access_token != x); - found_pats.push(pat); - continue; - } - } - } - - if !remaining_strings.is_empty() { - let pat_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let db_pats: Vec = sqlx::query!( - " - SELECT id, name, access_token, scopes, user_id, created, expires, last_used - FROM pats - WHERE id = ANY($1) OR access_token = ANY($2) - ORDER BY created DESC - ", - &pat_ids_parsed, - &remaining_strings - .into_iter() - .map(|x| x.to_string()) - .collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|x| PersonalAccessToken { - id: PatId(x.id), - name: x.name, - access_token: x.access_token, - scopes: Scopes::from_bits(x.scopes as u64).unwrap_or(Scopes::NONE), - user_id: UserId(x.user_id), - created: x.created, - expires: x.expires, - last_used: x.last_used, - })) - }) - .try_collect::>() - .await?; - - for pat in db_pats { - redis - .set_serialized_to_json(PATS_NAMESPACE, pat.id.0, &pat, None) - .await?; - redis - .set( - PATS_TOKENS_NAMESPACE, - &pat.access_token, - &pat.id.0.to_string(), - None, + let val = redis + .get_cached_keys_with_slug( + PATS_NAMESPACE, + PATS_TOKENS_NAMESPACE, + true, + pat_strings, + |ids| async move { + let pat_ids: Vec = ids + .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids.into_iter().map(|x| x.to_string()).collect::>(); + + let pats = sqlx::query!( + " + SELECT id, name, access_token, scopes, user_id, created, expires, last_used + FROM pats + WHERE id = ANY($1) OR access_token = ANY($2) + ORDER BY created DESC + ", + &pat_ids, + &slugs, ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, x| { + let pat = PersonalAccessToken { + id: PatId(x.id), + name: x.name, + access_token: x.access_token.clone(), + scopes: Scopes::from_bits(x.scopes as u64).unwrap_or(Scopes::NONE), + user_id: UserId(x.user_id), + created: x.created, + expires: x.expires, + last_used: x.last_used, + }; + + acc.insert(x.id, (Some(x.access_token), pat)); + async move { Ok(acc) } + }) .await?; - found_pats.push(pat); - } - } + Ok(pats) + }, + ) + .await?; - Ok(found_pats) + Ok(val) } pub async fn get_user_pats<'a, E>( @@ -206,14 +158,13 @@ impl PersonalAccessToken { return Ok(res.into_iter().map(PatId).collect()); } - use futures::TryStreamExt; let db_pats: Vec = sqlx::query!( " - SELECT 
id - FROM pats - WHERE user_id = $1 - ORDER BY created DESC - ", + SELECT id + FROM pats + WHERE user_id = $1 + ORDER BY created DESC + ", user_id.0, ) .fetch_many(exec) diff --git a/src/database/models/project_item.rs b/src/database/models/project_item.rs index 8a8251bc..609d9069 100644 --- a/src/database/models/project_item.rs +++ b/src/database/models/project_item.rs @@ -5,13 +5,15 @@ use super::{ids::*, User}; use crate::database::models; use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; -use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use crate::models::ids::base62_impl::parse_base62; use crate::models::projects::{MonetizationStatus, ProjectStatus}; use chrono::{DateTime, Utc}; use dashmap::{DashMap, DashSet}; use futures::TryStreamExt; use itertools::Itertools; use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; pub const PROJECTS_NAMESPACE: &str = "projects"; pub const PROJECTS_SLUGS_NAMESPACE: &str = "projects_slugs"; @@ -505,7 +507,7 @@ impl Project { Project::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( project_strings: &[T], exec: E, redis: &RedisPool, @@ -513,301 +515,253 @@ impl Project { where E: sqlx::Acquire<'a, Database = sqlx::Postgres>, { - let project_strings = project_strings - .iter() - .map(|x| x.to_string()) - .unique() - .collect::>(); - - if project_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut redis = redis.connect().await?; - let mut exec = exec.acquire().await?; - - let mut found_projects = Vec::new(); - let mut remaining_strings = project_strings.clone(); - - let mut project_ids = project_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - project_ids.append( - &mut redis - .multi_get::( - PROJECTS_SLUGS_NAMESPACE, - project_strings.iter().map(|x| x.to_string().to_lowercase()), - ) - .await? 
- .into_iter() - .flatten() - .collect(), - ); - if !project_ids.is_empty() { - let projects = redis - .multi_get::( - PROJECTS_NAMESPACE, - project_ids.iter().map(|x| x.to_string()), + let val = redis.get_cached_keys_with_slug( + PROJECTS_NAMESPACE, + PROJECTS_SLUGS_NAMESPACE, + false, + project_strings, + |ids| async move { + let mut exec = exec.acquire().await?; + let project_ids_parsed: Vec = ids + .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids + .into_iter() + .map(|x| x.to_string().to_lowercase()) + .collect::>(); + + let all_version_ids = DashSet::new(); + let versions: DashMap)>> = sqlx::query!( + " + SELECT DISTINCT mod_id, v.id as id, date_published + FROM mods m + INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3) + WHERE m.id = ANY($1) OR m.slug = ANY($2) + ", + &project_ids_parsed, + &slugs, + &*crate::models::projects::VersionStatus::iterator() + .filter(|x| x.is_listed()) + .map(|x| x.to_string()) + .collect::>() ) - .await?; - for project in projects { - if let Some(project) = - project.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings.retain(|x| { - &to_base62(project.inner.id.0 as u64) != x - && project.inner.slug.as_ref().map(|x| x.to_lowercase()) - != Some(x.to_lowercase()) - }); - found_projects.push(project); - continue; - } - } - } - if !remaining_strings.is_empty() { - let project_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let slugs = remaining_strings - .into_iter() - .map(|x| x.to_lowercase()) - .collect::>(); - - let all_version_ids = DashSet::new(); - let versions: DashMap)>> = sqlx::query!( - " - SELECT DISTINCT mod_id, v.id as id, date_published - FROM mods m - INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3) - WHERE m.id = ANY($1) OR m.slug = ANY($2) - ", - &project_ids_parsed, - &slugs, - &*crate::models::projects::VersionStatus::iterator() - .filter(|x| x.is_listed()) - .map(|x| x.to_string()) - .collect::>() - ) - .fetch(&mut *exec) - .try_fold( - DashMap::new(), - |acc: DashMap)>>, m| { - let version_id = VersionId(m.id); - let date_published = m.date_published; - all_version_ids.insert(version_id); - acc.entry(ProjectId(m.mod_id)) - .or_default() - .push((version_id, date_published)); - async move { Ok(acc) } - }, - ) - .await?; - - let loader_field_enum_value_ids = DashSet::new(); - let version_fields: DashMap> = sqlx::query!( - " - SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value - FROM versions v - INNER JOIN version_fields vf ON v.id = vf.version_id - WHERE v.id = ANY($1) - ", - &all_version_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .try_fold( - DashMap::new(), - |acc: DashMap>, m| { - let qvf = QueryVersionField { - version_id: VersionId(m.version_id), - field_id: LoaderFieldId(m.field_id), - int_value: m.int_value, - enum_value: m.enum_value.map(LoaderFieldEnumValueId), - string_value: m.string_value, - }; - - if let Some(enum_value) = m.enum_value { - loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); - } - - acc.entry(ProjectId(m.mod_id)).or_default().push(qvf); - async move { Ok(acc) } - }, - ) - .await?; + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap)>>, m| { + let version_id = VersionId(m.id); + let date_published = m.date_published; + all_version_ids.insert(version_id); + acc.entry(ProjectId(m.mod_id)) + .or_default() + .push((version_id, 
date_published)); + async move { Ok(acc) } + }, + ) + .await?; - let loader_field_enum_values: Vec = sqlx::query!( - " - SELECT DISTINCT id, enum_id, value, ordering, created, metadata - FROM loader_field_enum_values lfev - WHERE id = ANY($1) - ORDER BY enum_id, ordering, created DESC - ", - &loader_field_enum_value_ids - .iter() - .map(|x| x.0) - .collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderFieldEnumValue { - id: LoaderFieldEnumValueId(m.id), - enum_id: LoaderFieldEnumId(m.enum_id), - value: m.value, - ordering: m.ordering, - created: m.created, - metadata: m.metadata, - }) - .try_collect() - .await?; + let loader_field_enum_value_ids = DashSet::new(); + let version_fields: DashMap> = sqlx::query!( + " + SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value + FROM versions v + INNER JOIN version_fields vf ON v.id = vf.version_id + WHERE v.id = ANY($1) + ", + &all_version_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap>, m| { + let qvf = QueryVersionField { + version_id: VersionId(m.version_id), + field_id: LoaderFieldId(m.field_id), + int_value: m.int_value, + enum_value: m.enum_value.map(LoaderFieldEnumValueId), + string_value: m.string_value, + }; + + if let Some(enum_value) = m.enum_value { + loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); + } + + acc.entry(ProjectId(m.mod_id)).or_default().push(qvf); + async move { Ok(acc) } + }, + ) + .await?; - let mods_gallery: DashMap> = sqlx::query!( - " - SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering - FROM mods_gallery mg - INNER JOIN mods m ON mg.mod_id = m.id - WHERE m.id = ANY($1) OR m.slug = ANY($2) - ", - &project_ids_parsed, - &slugs - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap>, m| { - acc.entry(ProjectId(m.mod_id)) - .or_default() - .push(GalleryItem { - image_url: m.image_url, - featured: m.featured.unwrap_or(false), - name: m.name, - description: m.description, - created: m.created, + let loader_field_enum_values: Vec = sqlx::query!( + " + SELECT DISTINCT id, enum_id, value, ordering, created, metadata + FROM loader_field_enum_values lfev + WHERE id = ANY($1) + ORDER BY enum_id, ordering, created DESC + ", + &loader_field_enum_value_ids + .iter() + .map(|x| x.0) + .collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderFieldEnumValue { + id: LoaderFieldEnumValueId(m.id), + enum_id: LoaderFieldEnumId(m.enum_id), + value: m.value, ordering: m.ordering, - }); - async move { Ok(acc) } - } - ).await?; - - let links: DashMap> = sqlx::query!( - " - SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation - FROM mods_links ml - INNER JOIN mods m ON ml.joining_mod_id = m.id - INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id - WHERE m.id = ANY($1) OR m.slug = ANY($2) - ", - &project_ids_parsed, - &slugs - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap>, m| { - acc.entry(ProjectId(m.mod_id)) - .or_default() - .push(LinkUrl { - platform_id: LinkPlatformId(m.platform_id), - platform_name: m.platform_name, - url: m.url, - donation: m.donation, - }); - async move { Ok(acc) } - } - ).await?; - - #[derive(Default)] - struct VersionLoaderData { - loaders: Vec, - project_types: Vec, - games: Vec, - loader_loader_field_ids: Vec, - } + created: m.created, + metadata: m.metadata, + }) + .try_collect() + .await?; - let loader_field_ids = 
DashSet::new(); - let loaders_ptypes_games: DashMap = sqlx::query!( - " - SELECT DISTINCT mod_id, - ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, - ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, - ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, - ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields - FROM versions v - INNER JOIN loaders_versions lv ON v.id = lv.version_id - INNER JOIN loaders l ON lv.loader_id = l.id - INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id - INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id - INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id - INNER JOIN games g ON lptg.game_id = g.id - LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id - WHERE v.id = ANY($1) - GROUP BY mod_id - ", - &all_version_ids.iter().map(|x| x.0).collect::>() - ).fetch(&mut *exec) - .map_ok(|m| { - let project_id = ProjectId(m.mod_id); - - // Add loader fields to the set we need to fetch - let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); - for loader_field_id in loader_loader_field_ids.iter() { - loader_field_ids.insert(*loader_field_id); + let mods_gallery: DashMap> = sqlx::query!( + " + SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering + FROM mods_gallery mg + INNER JOIN mods m ON mg.mod_id = m.id + WHERE m.id = ANY($1) OR m.slug = ANY($2) + ", + &project_ids_parsed, + &slugs + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap>, m| { + acc.entry(ProjectId(m.mod_id)) + .or_default() + .push(GalleryItem { + image_url: m.image_url, + featured: m.featured.unwrap_or(false), + name: m.name, + description: m.description, + created: m.created, + ordering: m.ordering, + }); + async move { Ok(acc) } + } + ).await?; + + let links: DashMap> = sqlx::query!( + " + SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation + FROM mods_links ml + INNER JOIN mods m ON ml.joining_mod_id = m.id + INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id + WHERE m.id = ANY($1) OR m.slug = ANY($2) + ", + &project_ids_parsed, + &slugs + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap>, m| { + acc.entry(ProjectId(m.mod_id)) + .or_default() + .push(LinkUrl { + platform_id: LinkPlatformId(m.platform_id), + platform_name: m.platform_name, + url: m.url, + donation: m.donation, + }); + async move { Ok(acc) } + } + ).await?; + + #[derive(Default)] + struct VersionLoaderData { + loaders: Vec, + project_types: Vec, + games: Vec, + loader_loader_field_ids: Vec, } - // Add loader + loader associated data to the map - let version_loader_data = VersionLoaderData { - loaders: m.loaders.unwrap_or_default(), - project_types: m.project_types.unwrap_or_default(), - games: m.games.unwrap_or_default(), - loader_loader_field_ids, - }; - - (project_id, version_loader_data) + let loader_field_ids = DashSet::new(); + let loaders_ptypes_games: DashMap = sqlx::query!( + " + SELECT DISTINCT mod_id, + ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, + ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, + ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, + ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is 
not null) loader_fields + FROM versions v + INNER JOIN loaders_versions lv ON v.id = lv.version_id + INNER JOIN loaders l ON lv.loader_id = l.id + INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id + INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id + INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id + INNER JOIN games g ON lptg.game_id = g.id + LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id + WHERE v.id = ANY($1) + GROUP BY mod_id + ", + &all_version_ids.iter().map(|x| x.0).collect::>() + ).fetch(&mut *exec) + .map_ok(|m| { + let project_id = ProjectId(m.mod_id); + + // Add loader fields to the set we need to fetch + let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); + for loader_field_id in loader_loader_field_ids.iter() { + loader_field_ids.insert(*loader_field_id); + } + + // Add loader + loader associated data to the map + let version_loader_data = VersionLoaderData { + loaders: m.loaders.unwrap_or_default(), + project_types: m.project_types.unwrap_or_default(), + games: m.games.unwrap_or_default(), + loader_loader_field_ids, + }; - } - ).try_collect().await?; + (project_id, version_loader_data) - let loader_fields: Vec = sqlx::query!( - " - SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional - FROM loader_fields lf - WHERE id = ANY($1) - ", - &loader_field_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderField { - id: LoaderFieldId(m.id), - field: m.field, - field_type: m.field_type, - enum_type: m.enum_type.map(LoaderFieldEnumId), - min_val: m.min_val, - max_val: m.max_val, - optional: m.optional, - }) - .try_collect() - .await?; + } + ).try_collect().await?; + + let loader_fields: Vec = sqlx::query!( + " + SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional + FROM loader_fields lf + WHERE id = ANY($1) + ", + &loader_field_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderField { + id: LoaderFieldId(m.id), + field: m.field, + field_type: m.field_type, + enum_type: m.enum_type.map(LoaderFieldEnumId), + min_val: m.min_val, + max_val: m.max_val, + optional: m.optional, + }) + .try_collect() + .await?; - let db_projects: Vec = sqlx::query!( - " - SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows, - m.icon_url icon_url, m.description description, m.published published, - m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status, - m.license_url license_url, - m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body, - m.webhook_sent, m.color, - t.id thread_id, m.monetization_status monetization_status, - ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories, - ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories - FROM mods m - INNER JOIN threads t ON t.mod_id = m.id - LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id - LEFT JOIN categories c ON mc.joining_category_id = c.id - WHERE m.id = ANY($1) OR m.slug = ANY($2) - GROUP BY t.id, m.id; - ", - &project_ids_parsed, - &slugs, - ) - .fetch_many(&mut *exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| { + let projects = 
sqlx::query!( + " + SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows, + m.icon_url icon_url, m.description description, m.published published, + m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status, + m.license_url license_url, + m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body, + m.webhook_sent, m.color, + t.id thread_id, m.monetization_status monetization_status, + ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories, + ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories + FROM mods m + INNER JOIN threads t ON t.mod_id = m.id + LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id + LEFT JOIN categories c ON mc.joining_category_id = c.id + WHERE m.id = ANY($1) OR m.slug = ANY($2) + GROUP BY t.id, m.id; + ", + &project_ids_parsed, + &slugs, + ) + .fetch(&mut *exec) + .try_fold(DashMap::new(), |acc, m| { let id = m.id; let project_id = ProjectId(id); let VersionLoaderData { @@ -815,54 +769,54 @@ impl Project { project_types, games, loader_loader_field_ids, - } = loaders_ptypes_games.remove(&project_id).map(|x|x.1).unwrap_or_default(); + } = loaders_ptypes_games.remove(&project_id).map(|x|x.1).unwrap_or_default(); let mut versions = versions.remove(&project_id).map(|x| x.1).unwrap_or_default(); let mut gallery = mods_gallery.remove(&project_id).map(|x| x.1).unwrap_or_default(); let urls = links.remove(&project_id).map(|x| x.1).unwrap_or_default(); let version_fields = version_fields.remove(&project_id).map(|x| x.1).unwrap_or_default(); let loader_fields = loader_fields.iter() - .filter(|x| loader_loader_field_ids.contains(&x.id)) - .collect::>(); - - QueryProject { - inner: Project { - id: ProjectId(id), - team_id: TeamId(m.team_id), - organization_id: m.organization_id.map(OrganizationId), - name: m.name.clone(), - summary: m.summary.clone(), - downloads: m.downloads, - icon_url: m.icon_url.clone(), - published: m.published, - updated: m.updated, - license_url: m.license_url.clone(), - status: ProjectStatus::from_string( - &m.status, - ), - requested_status: m.requested_status.map(|x| ProjectStatus::from_string( - &x, - )), - license: m.license.clone(), - slug: m.slug.clone(), - description: m.description.clone(), - follows: m.follows, - moderation_message: m.moderation_message, - moderation_message_body: m.moderation_message_body, - approved: m.approved, - webhook_sent: m.webhook_sent, - color: m.color.map(|x| x as u32), - queued: m.queued, - monetization_status: MonetizationStatus::from_string( - &m.monetization_status, - ), - loaders, - }, - categories: m.categories.unwrap_or_default(), - additional_categories: m.additional_categories.unwrap_or_default(), - project_types, - games, - versions: { + .filter(|x| loader_loader_field_ids.contains(&x.id)) + .collect::>(); + + let project = QueryProject { + inner: Project { + id: ProjectId(id), + team_id: TeamId(m.team_id), + organization_id: m.organization_id.map(OrganizationId), + name: m.name.clone(), + summary: m.summary.clone(), + downloads: m.downloads, + icon_url: m.icon_url.clone(), + published: m.published, + updated: m.updated, + license_url: m.license_url.clone(), + status: ProjectStatus::from_string( + &m.status, + ), + requested_status: m.requested_status.map(|x| ProjectStatus::from_string( + 
&x, + )), + license: m.license.clone(), + slug: m.slug.clone(), + description: m.description.clone(), + follows: m.follows, + moderation_message: m.moderation_message, + moderation_message_body: m.moderation_message_body, + approved: m.approved, + webhook_sent: m.webhook_sent, + color: m.color.map(|x| x as u32), + queued: m.queued, + monetization_status: MonetizationStatus::from_string( + &m.monetization_status, + ), + loaders, + }, + categories: m.categories.unwrap_or_default(), + additional_categories: m.additional_categories.unwrap_or_default(), + project_types, + games, + versions: { // Each version is a tuple of (VersionId, DateTime) versions.sort_by(|a, b| a.1.cmp(&b.1)); versions.into_iter().map(|x| x.0).collect() @@ -872,32 +826,20 @@ impl Project { gallery }, urls, - aggregate_version_fields: VersionField::from_query_json(version_fields, &loader_fields, &loader_field_enum_values, true), - thread_id: ThreadId(m.thread_id), - }})) - }) - .try_collect::>() - .await?; - - for project in db_projects { - redis - .set_serialized_to_json(PROJECTS_NAMESPACE, project.inner.id.0, &project, None) + aggregate_version_fields: VersionField::from_query_json(version_fields, &loader_fields, &loader_field_enum_values, true), + thread_id: ThreadId(m.thread_id), + }; + + acc.insert(m.id, (m.slug, project)); + async move { Ok(acc) } + }) .await?; - if let Some(slug) = &project.inner.slug { - redis - .set( - PROJECTS_SLUGS_NAMESPACE, - &slug.to_lowercase(), - &project.inner.id.0.to_string(), - None, - ) - .await?; - } - found_projects.push(project); - } - } - Ok(found_projects) + Ok(projects) + }, + ).await?; + + Ok(val) } pub async fn get_dependencies<'a, E>( diff --git a/src/database/models/session_item.rs b/src/database/models/session_item.rs index f27af5bb..dac42b1e 100644 --- a/src/database/models/session_item.rs +++ b/src/database/models/session_item.rs @@ -1,9 +1,12 @@ use super::ids::*; use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; -use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use crate::models::ids::base62_impl::parse_base62; use chrono::{DateTime, Utc}; +use dashmap::DashMap; use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; const SESSIONS_NAMESPACE: &str = "sessions"; const SESSIONS_IDS_NAMESPACE: &str = "sessions_ids"; @@ -79,7 +82,7 @@ pub struct Session { } impl Session { - pub async fn get<'a, E, T: ToString>( + pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( id: T, exec: E, redis: &RedisPool, @@ -120,7 +123,7 @@ impl Session { Session::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( session_strings: &[T], exec: E, redis: &RedisPool, @@ -130,109 +133,60 @@ impl Session { { use futures::TryStreamExt; - let mut redis = redis.connect().await?; - - if session_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_sessions = Vec::new(); - let mut remaining_strings = session_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut session_ids = session_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - session_ids.append( - &mut redis - .multi_get::( - SESSIONS_IDS_NAMESPACE, - session_strings.iter().map(|x| x.to_string()), - ) - .await? 
- .into_iter() - .flatten() - .collect(), - ); - - if !session_ids.is_empty() { - let sessions = redis - .multi_get::( - SESSIONS_NAMESPACE, - session_ids.iter().map(|x| x.to_string()), + let val = redis.get_cached_keys_with_slug( + SESSIONS_NAMESPACE, + SESSIONS_IDS_NAMESPACE, + true, + session_strings, + |ids| async move { + let session_ids: Vec = ids + .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids + .into_iter() + .map(|x| x.to_string()) + .collect::>(); + let db_sessions = sqlx::query!( + " + SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform, + city, country, ip, user_agent + FROM sessions + WHERE id = ANY($1) OR session = ANY($2) + ORDER BY created DESC + ", + &session_ids, + &slugs, ) - .await?; - for session in sessions { - if let Some(session) = - session.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings - .retain(|x| &to_base62(session.id.0 as u64) != x && &session.session != x); - found_sessions.push(session); - continue; - } - } - } - - if !remaining_strings.is_empty() { - let session_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let db_sessions: Vec = sqlx::query!( - " - SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform, - city, country, ip, user_agent - FROM sessions - WHERE id = ANY($1) OR session = ANY($2) - ORDER BY created DESC - ", - &session_ids_parsed, - &remaining_strings.into_iter().map(|x| x.to_string()).collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|x| Session { - id: SessionId(x.id), - session: x.session, - user_id: UserId(x.user_id), - created: x.created, - last_login: x.last_login, - expires: x.expires, - refresh_expires: x.refresh_expires, - os: x.os, - platform: x.platform, - city: x.city, - country: x.country, - ip: x.ip, - user_agent: x.user_agent, - })) - }) - .try_collect::>() - .await?; - - for session in db_sessions { - redis - .set_serialized_to_json(SESSIONS_NAMESPACE, session.id.0, &session, None) - .await?; - redis - .set( - SESSIONS_IDS_NAMESPACE, - &session.session, - &session.id.0.to_string(), - None, - ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, x| { + let session = Session { + id: SessionId(x.id), + session: x.session.clone(), + user_id: UserId(x.user_id), + created: x.created, + last_login: x.last_login, + expires: x.expires, + refresh_expires: x.refresh_expires, + os: x.os, + platform: x.platform, + city: x.city, + country: x.country, + ip: x.ip, + user_agent: x.user_agent, + }; + + acc.insert(x.id, (Some(x.session), session)); + + async move { Ok(acc) } + }) .await?; - found_sessions.push(session); - } - } - Ok(found_sessions) + Ok(db_sessions) + }).await?; + + Ok(val) } pub async fn get_user_sessions<'a, E>( diff --git a/src/database/models/team_item.rs b/src/database/models/team_item.rs index 2dd0a2f7..b43fdd7b 100644 --- a/src/database/models/team_item.rs +++ b/src/database/models/team_item.rs @@ -3,6 +3,8 @@ use crate::{ database::redis::RedisPool, models::teams::{OrganizationPermissions, ProjectPermissions}, }; +use dashmap::DashMap; +use futures::TryStreamExt; use itertools::Itertools; use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; @@ -203,83 +205,56 @@ impl TeamMember { where E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy, { - use futures::stream::TryStreamExt; - if team_ids.is_empty() { return Ok(Vec::new()); } - 
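// A rough, hypothetical stand-in for the shape of the cached lookup used by the
// get_many refactors above: serve hits from the cache, run a single closure for
// the misses, backfill the cache with its results, and return the union. The
// real helper (get_cached_keys_with_slug, defined later in this diff) also
// resolves slug aliases and base62 ids and guards misses with per-key locks;
// none of that is shown here, and the function below is illustrative only.
use std::collections::HashMap;
use std::hash::Hash;

fn get_cached<K, V, F>(cache: &mut HashMap<K, V>, keys: &[K], fetch: F) -> HashMap<K, V>
where
    K: Hash + Eq + Clone,
    V: Clone,
    F: FnOnce(Vec<K>) -> HashMap<K, V>,
{
    let mut found = HashMap::new();
    let mut missing = Vec::new();

    // Partition the requested keys into cache hits and misses.
    for key in keys {
        match cache.get(key) {
            Some(val) => {
                found.insert(key.clone(), val.clone());
            }
            None => missing.push(key.clone()),
        }
    }

    // One batched fetch for everything that was not cached, then backfill.
    if !missing.is_empty() {
        for (key, val) in fetch(missing) {
            cache.insert(key.clone(), val.clone());
            found.insert(key, val);
        }
    }

    found
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(1_u64, "cached project".to_string());

    let result = get_cached(&mut cache, &[1, 2], |missing| {
        // Pretend this is the single batched database query for the misses.
        missing
            .into_iter()
            .map(|id| (id, format!("fetched project {id}")))
            .collect()
    });

    assert_eq!(result.len(), 2);
    assert!(cache.contains_key(&2)); // the miss was backfilled
}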
let mut redis = redis.connect().await?; - - let mut team_ids_parsed: Vec = team_ids.iter().map(|x| x.0).collect(); - - let mut found_teams = Vec::new(); - - let teams = redis - .multi_get::( - TEAMS_NAMESPACE, - team_ids_parsed.iter().map(|x| x.to_string()), - ) - .await?; - - for team_raw in teams { - if let Some(mut team) = team_raw - .clone() - .and_then(|x| serde_json::from_str::>(&x).ok()) - { - if let Some(team_id) = team.first().map(|x| x.team_id) { - team_ids_parsed.retain(|x| &team_id.0 != x); - } - - found_teams.append(&mut team); - continue; - } - } - - if !team_ids_parsed.is_empty() { - let teams: Vec = sqlx::query!( - " - SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions, - accepted, payouts_split, - ordering, user_id - FROM team_members - WHERE team_id = ANY($1) - ORDER BY team_id, ordering; - ", - &team_ids_parsed - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| TeamMember { - id: TeamMemberId(m.id), - team_id: TeamId(m.team_id), - role: m.member_role, - is_owner: m.is_owner, - permissions: ProjectPermissions::from_bits(m.permissions as u64) - .unwrap_or_default(), - organization_permissions: m - .organization_permissions - .map(|p| OrganizationPermissions::from_bits(p as u64).unwrap_or_default()), - accepted: m.accepted, - user_id: UserId(m.user_id), - payouts_split: m.payouts_split, - ordering: m.ordering, - })) - }) - .try_collect::>() - .await?; - - for (id, members) in &teams.into_iter().group_by(|x| x.team_id) { - let mut members = members.collect::>(); - - redis - .set_serialized_to_json(TEAMS_NAMESPACE, id.0, &members, None) + let val = redis.get_cached_keys( + TEAMS_NAMESPACE, + &team_ids.iter().map(|x| x.0).collect::>(), + |team_ids| async move { + let teams = sqlx::query!( + " + SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions, + accepted, payouts_split, + ordering, user_id + FROM team_members + WHERE team_id = ANY($1) + ORDER BY team_id, ordering; + ", + &team_ids + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc: DashMap>, m| { + let member = TeamMember { + id: TeamMemberId(m.id), + team_id: TeamId(m.team_id), + role: m.member_role, + is_owner: m.is_owner, + permissions: ProjectPermissions::from_bits(m.permissions as u64) + .unwrap_or_default(), + organization_permissions: m + .organization_permissions + .map(|p| OrganizationPermissions::from_bits(p as u64).unwrap_or_default()), + accepted: m.accepted, + user_id: UserId(m.user_id), + payouts_split: m.payouts_split, + ordering: m.ordering, + }; + + acc.entry(m.team_id) + .or_default() + .push(member); + + async move { Ok(acc) } + }) .await?; - found_teams.append(&mut members); - } - } - Ok(found_teams) + Ok(teams) + }, + ).await?; + + Ok(val.into_iter().flatten().collect()) } pub async fn clear_cache(id: TeamId, redis: &RedisPool) -> Result<(), super::DatabaseError> { @@ -311,8 +286,6 @@ impl TeamMember { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::stream::TryStreamExt; - let team_ids_parsed: Vec = team_ids.iter().map(|x| x.0).collect(); let team_members = sqlx::query!( diff --git a/src/database/models/thread_item.rs b/src/database/models/thread_item.rs index ce9a3c36..e085bb1b 100644 --- a/src/database/models/thread_item.rs +++ b/src/database/models/thread_item.rs @@ -21,13 +21,13 @@ pub struct Thread { pub messages: Vec, pub members: Vec, - pub show_in_mod_inbox: bool, } pub struct ThreadMessageBuilder { pub author_id: Option, pub body: MessageBody, pub thread_id: ThreadId, + 
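// The team hunk above, and the user and version hunks that follow, all swap
// `fetch_many` + `try_filter_map` for `fetch(...).try_fold(DashMap::new(), ...)`,
// grouping rows by their parent id while the stream is drained. A minimal
// sketch of that pattern over an in-memory stream (assumes the `dashmap`,
// `futures`, and `tokio` crates; the row data is made up):
use dashmap::DashMap;
use futures::{stream, TryStreamExt};
use std::convert::Infallible;

#[tokio::main]
async fn main() -> Result<(), Infallible> {
    // Rows as they might come back from a query: (team_id, member name).
    let rows = vec![(1_i64, "alice"), (1, "bob"), (2, "carol")];

    let grouped: DashMap<i64, Vec<&str>> = stream::iter(rows.into_iter().map(Ok::<_, Infallible>))
        .try_fold(DashMap::new(), |acc: DashMap<i64, Vec<&str>>, (team, name)| {
            // Push the row into the bucket for its team, then hand the
            // accumulator back to the stream.
            acc.entry(team).or_default().push(name);
            async move { Ok(acc) }
        })
        .await?;

    assert_eq!(grouped.get(&1).map(|members| members.len()), Some(2));
    Ok(())
}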
pub hide_identity: bool, } #[derive(Serialize, Deserialize, Clone)] @@ -37,6 +37,7 @@ pub struct ThreadMessage { pub author_id: Option, pub body: MessageBody, pub created: DateTime, + pub hide_identity: bool, } impl ThreadMessageBuilder { @@ -49,16 +50,17 @@ impl ThreadMessageBuilder { sqlx::query!( " INSERT INTO threads_messages ( - id, author_id, body, thread_id + id, author_id, body, thread_id, hide_identity ) VALUES ( - $1, $2, $3, $4 + $1, $2, $3, $4, $5 ) ", thread_message_id as ThreadMessageId, self.author_id.map(|x| x.0), serde_json::value::to_value(self.body.clone())?, self.thread_id as ThreadId, + self.hide_identity ) .execute(&mut **transaction) .await?; @@ -131,9 +133,9 @@ impl Thread { let thread_ids_parsed: Vec = thread_ids.iter().map(|x| x.0).collect(); let threads = sqlx::query!( " - SELECT t.id, t.thread_type, t.mod_id, t.report_id, t.show_in_mod_inbox, + SELECT t.id, t.thread_type, t.mod_id, t.report_id, ARRAY_AGG(DISTINCT tm.user_id) filter (where tm.user_id is not null) members, - JSONB_AGG(DISTINCT jsonb_build_object('id', tmsg.id, 'author_id', tmsg.author_id, 'thread_id', tmsg.thread_id, 'body', tmsg.body, 'created', tmsg.created)) filter (where tmsg.id is not null) messages + JSONB_AGG(DISTINCT jsonb_build_object('id', tmsg.id, 'author_id', tmsg.author_id, 'thread_id', tmsg.thread_id, 'body', tmsg.body, 'created', tmsg.created, 'hide_identity', tmsg.hide_identity)) filter (where tmsg.id is not null) messages FROM threads t LEFT OUTER JOIN threads_messages tmsg ON tmsg.thread_id = t.id LEFT OUTER JOIN threads_members tm ON tm.thread_id = t.id @@ -159,7 +161,6 @@ impl Thread { messages }, members: x.members.unwrap_or_default().into_iter().map(UserId).collect(), - show_in_mod_inbox: x.show_in_mod_inbox, })) }) .try_collect::>() @@ -229,7 +230,7 @@ impl ThreadMessage { let message_ids_parsed: Vec = message_ids.iter().map(|x| x.0).collect(); let messages = sqlx::query!( " - SELECT tm.id, tm.author_id, tm.thread_id, tm.body, tm.created + SELECT tm.id, tm.author_id, tm.thread_id, tm.body, tm.created, tm.hide_identity FROM threads_messages tm WHERE tm.id = ANY($1) ", @@ -244,6 +245,7 @@ impl ThreadMessage { body: serde_json::from_value(x.body) .unwrap_or(MessageBody::Deleted { private: false }), created: x.created, + hide_identity: x.hide_identity, })) }) .try_collect::>() diff --git a/src/database/models/user_item.rs b/src/database/models/user_item.rs index 6f821db8..06b73d52 100644 --- a/src/database/models/user_item.rs +++ b/src/database/models/user_item.rs @@ -5,8 +5,11 @@ use crate::database::redis::RedisPool; use crate::models::ids::base62_impl::{parse_base62, to_base62}; use crate::models::users::Badges; use chrono::{DateTime, Utc}; +use dashmap::DashMap; use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; const USERS_NAMESPACE: &str = "users"; const USER_USERNAMES_NAMESPACE: &str = "users_usernames"; @@ -132,7 +135,7 @@ impl User { User::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( users_strings: &[T], exec: E, redis: &RedisPool, @@ -142,123 +145,73 @@ impl User { { use futures::TryStreamExt; - let mut redis = redis.connect().await?; - - if users_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_users = Vec::new(); - let mut remaining_strings = users_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut user_ids = users_strings - .iter() - 
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - user_ids.append( - &mut redis - .multi_get::( - USER_USERNAMES_NAMESPACE, - users_strings.iter().map(|x| x.to_string().to_lowercase()), - ) - .await? - .into_iter() - .flatten() - .collect(), - ); - - if !user_ids.is_empty() { - let users = redis - .multi_get::(USERS_NAMESPACE, user_ids.iter().map(|x| x.to_string())) - .await?; - for user in users { - if let Some(user) = user.and_then(|x| serde_json::from_str::(&x).ok()) { - remaining_strings.retain(|x| { - &to_base62(user.id.0 as u64) != x - && user.username.to_lowercase() != x.to_lowercase() - }); - found_users.push(user); - continue; - } - } - } - - if !remaining_strings.is_empty() { - let user_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let db_users: Vec = sqlx::query!( - " - SELECT id, name, email, - avatar_url, username, bio, - created, role, badges, - balance, - github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id, - email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email, - venmo_handle - FROM users - WHERE id = ANY($1) OR LOWER(username) = ANY($2) - ", - &user_ids_parsed, - &remaining_strings + let val = redis.get_cached_keys_with_slug( + USERS_NAMESPACE, + USER_USERNAMES_NAMESPACE, + false, + users_strings, + |ids| async move { + let user_ids: Vec = ids + .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids .into_iter() .map(|x| x.to_string().to_lowercase()) - .collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|u| User { - id: UserId(u.id), - github_id: u.github_id, - discord_id: u.discord_id, - gitlab_id: u.gitlab_id, - google_id: u.google_id, - steam_id: u.steam_id, - microsoft_id: u.microsoft_id, - name: u.name, - email: u.email, - email_verified: u.email_verified, - avatar_url: u.avatar_url, - username: u.username, - bio: u.bio, - created: u.created, - role: u.role, - badges: Badges::from_bits(u.badges as u64).unwrap_or_default(), - balance: u.balance, - password: u.password, - paypal_id: u.paypal_id, - paypal_country: u.paypal_country, - paypal_email: u.paypal_email, - venmo_handle: u.venmo_handle, - totp_secret: u.totp_secret, - })) - }) - .try_collect::>() - .await?; - - for user in db_users { - redis - .set_serialized_to_json(USERS_NAMESPACE, user.id.0, &user, None) - .await?; - redis - .set( - USER_USERNAMES_NAMESPACE, - &user.username.to_lowercase(), - &user.id.0.to_string(), - None, - ) + .collect::>(); + + let users = sqlx::query!( + " + SELECT id, name, email, + avatar_url, username, bio, + created, role, badges, + balance, + github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id, + email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email, + venmo_handle + FROM users + WHERE id = ANY($1) OR LOWER(username) = ANY($2) + ", + &user_ids, + &slugs, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, u| { + let user = User { + id: UserId(u.id), + github_id: u.github_id, + discord_id: u.discord_id, + gitlab_id: u.gitlab_id, + google_id: u.google_id, + steam_id: u.steam_id, + microsoft_id: u.microsoft_id, + name: u.name, + email: u.email, + email_verified: u.email_verified, + avatar_url: u.avatar_url, + username: u.username.clone(), + bio: u.bio, + created: u.created, + role: u.role, + badges: Badges::from_bits(u.badges as u64).unwrap_or_default(), + balance: u.balance, + password: 
u.password, + paypal_id: u.paypal_id, + paypal_country: u.paypal_country, + paypal_email: u.paypal_email, + venmo_handle: u.venmo_handle, + totp_secret: u.totp_secret, + }; + + acc.insert(u.id, (Some(u.username), user)); + async move { Ok(acc) } + }) .await?; - found_users.push(user); - } - } - Ok(found_users) + Ok(users) + }).await?; + Ok(val) } pub async fn get_email<'a, E>(email: &str, exec: E) -> Result, sqlx::Error> diff --git a/src/database/models/version_item.rs b/src/database/models/version_item.rs index eeb6a965..5d654ab2 100644 --- a/src/database/models/version_item.rs +++ b/src/database/models/version_item.rs @@ -8,6 +8,7 @@ use crate::database::redis::RedisPool; use crate::models::projects::{FileType, VersionStatus}; use chrono::{DateTime, Utc}; use dashmap::{DashMap, DashSet}; +use futures::TryStreamExt; use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; @@ -469,301 +470,263 @@ impl Version { where E: sqlx::Acquire<'a, Database = sqlx::Postgres>, { - let version_ids = version_ids - .iter() - .unique() - .copied() - .collect::>(); - - use futures::stream::TryStreamExt; - - if version_ids.is_empty() { - return Ok(Vec::new()); - } - - let mut exec = exec.acquire().await?; - let mut redis = redis.connect().await?; - - let mut version_ids_parsed: Vec = version_ids.iter().map(|x| x.0).collect(); + let mut val = redis.get_cached_keys( + VERSIONS_NAMESPACE, + &version_ids.iter().map(|x| x.0).collect::>(), + |version_ids| async move { + let mut exec = exec.acquire().await?; + + let loader_field_enum_value_ids = DashSet::new(); + let version_fields: DashMap> = sqlx::query!( + " + SELECT version_id, field_id, int_value, enum_value, string_value + FROM version_fields + WHERE version_id = ANY($1) + ", + &version_ids + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap>, m| { + let qvf = QueryVersionField { + version_id: VersionId(m.version_id), + field_id: LoaderFieldId(m.field_id), + int_value: m.int_value, + enum_value: m.enum_value.map(LoaderFieldEnumValueId), + string_value: m.string_value, + }; + + if let Some(enum_value) = m.enum_value { + loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); + } + + acc.entry(VersionId(m.version_id)).or_default().push(qvf); + async move { Ok(acc) } + }, + ) + .await?; - let mut found_versions = Vec::new(); + #[derive(Default)] + struct VersionLoaderData { + loaders: Vec, + project_types: Vec, + games: Vec, + loader_loader_field_ids: Vec, + } - let versions = redis - .multi_get::( - VERSIONS_NAMESPACE, - version_ids_parsed - .clone() - .iter() - .map(|x| x.to_string()) - .collect::>(), - ) - .await?; + let loader_field_ids = DashSet::new(); + let loaders_ptypes_games: DashMap = sqlx::query!( + " + SELECT DISTINCT version_id, + ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, + ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, + ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, + ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields + FROM versions v + INNER JOIN loaders_versions lv ON v.id = lv.version_id + INNER JOIN loaders l ON lv.loader_id = l.id + INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id + INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id + INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id + INNER JOIN games g ON lptg.game_id = g.id + LEFT JOIN 
loader_fields_loaders lfl ON lfl.loader_id = l.id + WHERE v.id = ANY($1) + GROUP BY version_id + ", + &version_ids + ).fetch(&mut *exec) + .map_ok(|m| { + let version_id = VersionId(m.version_id); + + // Add loader fields to the set we need to fetch + let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); + for loader_field_id in loader_loader_field_ids.iter() { + loader_field_ids.insert(*loader_field_id); + } - for version in versions { - if let Some(version) = - version.and_then(|x| serde_json::from_str::(&x).ok()) - { - version_ids_parsed.retain(|x| &version.inner.id.0 != x); - found_versions.push(version); - continue; - } - } + // Add loader + loader associated data to the map + let version_loader_data = VersionLoaderData { + loaders: m.loaders.unwrap_or_default(), + project_types: m.project_types.unwrap_or_default(), + games: m.games.unwrap_or_default(), + loader_loader_field_ids, + }; + (version_id,version_loader_data) - if !version_ids_parsed.is_empty() { - let loader_field_enum_value_ids = DashSet::new(); - let version_fields: DashMap> = sqlx::query!( - " - SELECT version_id, field_id, int_value, enum_value, string_value - FROM version_fields - WHERE version_id = ANY($1) - ", - &version_ids_parsed - ) - .fetch(&mut *exec) - .try_fold( - DashMap::new(), - |acc: DashMap>, m| { - let qvf = QueryVersionField { - version_id: VersionId(m.version_id), - field_id: LoaderFieldId(m.field_id), - int_value: m.int_value, - enum_value: m.enum_value.map(LoaderFieldEnumValueId), - string_value: m.string_value, - }; - - if let Some(enum_value) = m.enum_value { - loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); } + ).try_collect().await?; + + // Fetch all loader fields from any version + let loader_fields: Vec = sqlx::query!( + " + SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional + FROM loader_fields lf + WHERE id = ANY($1) + ", + &loader_field_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderField { + id: LoaderFieldId(m.id), + field: m.field, + field_type: m.field_type, + enum_type: m.enum_type.map(LoaderFieldEnumId), + min_val: m.min_val, + max_val: m.max_val, + optional: m.optional, + }) + .try_collect() + .await?; - acc.entry(VersionId(m.version_id)).or_default().push(qvf); - async move { Ok(acc) } - }, - ) - .await?; - - #[derive(Default)] - struct VersionLoaderData { - loaders: Vec, - project_types: Vec, - games: Vec, - loader_loader_field_ids: Vec, - } - - let loader_field_ids = DashSet::new(); - let loaders_ptypes_games: DashMap = sqlx::query!( - " - SELECT DISTINCT version_id, - ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, - ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, - ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, - ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields - FROM versions v - INNER JOIN loaders_versions lv ON v.id = lv.version_id - INNER JOIN loaders l ON lv.loader_id = l.id - INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id - INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id - INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id - INNER JOIN games g ON lptg.game_id = g.id - LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id - WHERE v.id = ANY($1) - GROUP BY version_id - ", - &version_ids_parsed - ).fetch(&mut 
*exec) - .map_ok(|m| { - let version_id = VersionId(m.version_id); - - // Add loader fields to the set we need to fetch - let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); - for loader_field_id in loader_loader_field_ids.iter() { - loader_field_ids.insert(*loader_field_id); - } - - // Add loader + loader associated data to the map - let version_loader_data = VersionLoaderData { - loaders: m.loaders.unwrap_or_default(), - project_types: m.project_types.unwrap_or_default(), - games: m.games.unwrap_or_default(), - loader_loader_field_ids, - }; - (version_id,version_loader_data) + let loader_field_enum_values: Vec = sqlx::query!( + " + SELECT DISTINCT id, enum_id, value, ordering, created, metadata + FROM loader_field_enum_values lfev + WHERE id = ANY($1) + ORDER BY enum_id, ordering, created ASC + ", + &loader_field_enum_value_ids + .iter() + .map(|x| x.0) + .collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderFieldEnumValue { + id: LoaderFieldEnumValueId(m.id), + enum_id: LoaderFieldEnumId(m.enum_id), + value: m.value, + ordering: m.ordering, + created: m.created, + metadata: m.metadata, + }) + .try_collect() + .await?; + #[derive(Deserialize)] + struct Hash { + pub file_id: FileId, + pub algorithm: String, + pub hash: String, } - ).try_collect().await?; - // Fetch all loader fields from any version - let loader_fields: Vec = sqlx::query!( - " - SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional - FROM loader_fields lf - WHERE id = ANY($1) - ", - &loader_field_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderField { - id: LoaderFieldId(m.id), - field: m.field, - field_type: m.field_type, - enum_type: m.enum_type.map(LoaderFieldEnumId), - min_val: m.min_val, - max_val: m.max_val, - optional: m.optional, - }) - .try_collect() - .await?; - - let loader_field_enum_values: Vec = sqlx::query!( - " - SELECT DISTINCT id, enum_id, value, ordering, created, metadata - FROM loader_field_enum_values lfev - WHERE id = ANY($1) - ORDER BY enum_id, ordering, created ASC - ", - &loader_field_enum_value_ids - .iter() - .map(|x| x.0) - .collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderFieldEnumValue { - id: LoaderFieldEnumValueId(m.id), - enum_id: LoaderFieldEnumId(m.enum_id), - value: m.value, - ordering: m.ordering, - created: m.created, - metadata: m.metadata, - }) - .try_collect() - .await?; - - #[derive(Deserialize)] - struct Hash { - pub file_id: FileId, - pub algorithm: String, - pub hash: String, - } - - #[derive(Deserialize)] - struct File { - pub id: FileId, - pub url: String, - pub filename: String, - pub primary: bool, - pub size: u32, - pub file_type: Option, - } - - let file_ids = DashSet::new(); - let reverse_file_map = DashMap::new(); - let files : DashMap> = sqlx::query!( - " - SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type - FROM files f - WHERE f.version_id = ANY($1) - ", - &version_ids_parsed - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap>, m| { - let file = File { - id: FileId(m.id), - url: m.url, - filename: m.filename, - primary: m.is_primary, - size: m.size as u32, - file_type: m.file_type.map(|x| FileType::from_string(&x)), - }; - - file_ids.insert(FileId(m.id)); - reverse_file_map.insert(FileId(m.id), VersionId(m.version_id)); - - acc.entry(VersionId(m.version_id)) - .or_default() - .push(file); - async move { Ok(acc) } + #[derive(Deserialize)] + struct File { + pub id: FileId, + 
pub url: String, + pub filename: String, + pub primary: bool, + pub size: u32, + pub file_type: Option, } - ).await?; - let hashes: DashMap> = sqlx::query!( - " - SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash - FROM hashes - WHERE file_id = ANY($1) - ", - &file_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .try_fold(DashMap::new(), |acc: DashMap>, m| { - if let Some(found_hash) = m.hash { - let hash = Hash { - file_id: FileId(m.file_id), - algorithm: m.algorithm, - hash: found_hash, - }; - - if let Some(version_id) = reverse_file_map.get(&FileId(m.file_id)) { - acc.entry(*version_id).or_default().push(hash); + let file_ids = DashSet::new(); + let reverse_file_map = DashMap::new(); + let files : DashMap> = sqlx::query!( + " + SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type + FROM files f + WHERE f.version_id = ANY($1) + ", + &version_ids + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap>, m| { + let file = File { + id: FileId(m.id), + url: m.url, + filename: m.filename, + primary: m.is_primary, + size: m.size as u32, + file_type: m.file_type.map(|x| FileType::from_string(&x)), + }; + + file_ids.insert(FileId(m.id)); + reverse_file_map.insert(FileId(m.id), VersionId(m.version_id)); + + acc.entry(VersionId(m.version_id)) + .or_default() + .push(file); + async move { Ok(acc) } } - } - async move { Ok(acc) } - }) - .await?; - - let dependencies : DashMap> = sqlx::query!( - " - SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type - FROM dependencies d - WHERE dependent_id = ANY($1) - ", - &version_ids_parsed - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap<_,Vec>, m| { - let dependency = QueryDependency { - project_id: m.dependency_project_id.map(ProjectId), - version_id: m.dependency_version_id.map(VersionId), - file_name: m.file_name, - dependency_type: m.dependency_type, - }; - - acc.entry(VersionId(m.version_id)) - .or_default() - .push(dependency); - async move { Ok(acc) } - } - ).await?; + ).await?; + + let hashes: DashMap> = sqlx::query!( + " + SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash + FROM hashes + WHERE file_id = ANY($1) + ", + &file_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .try_fold(DashMap::new(), |acc: DashMap>, m| { + if let Some(found_hash) = m.hash { + let hash = Hash { + file_id: FileId(m.file_id), + algorithm: m.algorithm, + hash: found_hash, + }; + + if let Some(version_id) = reverse_file_map.get(&FileId(m.file_id)) { + acc.entry(*version_id).or_default().push(hash); + } + } + async move { Ok(acc) } + }) + .await?; - let db_versions: Vec = sqlx::query!( - " - SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number, - v.changelog changelog, v.date_published date_published, v.downloads downloads, - v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering - FROM versions v - WHERE v.id = ANY($1) - ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC; - ", - &version_ids_parsed - ) - .fetch_many(&mut *exec) - .try_filter_map(|e| async { - Ok(e.right().map(|v| - { + let dependencies : DashMap> = sqlx::query!( + " + SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, 
d.dependency_file_name as file_name, d.dependency_type as dependency_type + FROM dependencies d + WHERE dependent_id = ANY($1) + ", + &version_ids + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap<_,Vec>, m| { + let dependency = QueryDependency { + project_id: m.dependency_project_id.map(ProjectId), + version_id: m.dependency_version_id.map(VersionId), + file_name: m.file_name, + dependency_type: m.dependency_type, + }; + + acc.entry(VersionId(m.version_id)) + .or_default() + .push(dependency); + async move { Ok(acc) } + } + ).await?; + + let res = sqlx::query!( + " + SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number, + v.changelog changelog, v.date_published date_published, v.downloads downloads, + v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering + FROM versions v + WHERE v.id = ANY($1); + ", + &version_ids + ) + .fetch(&mut *exec) + .try_fold(DashMap::new(), |acc, v| { let version_id = VersionId(v.id); let VersionLoaderData { loaders, project_types, games, loader_loader_field_ids, - } = loaders_ptypes_games.remove(&version_id).map(|x|x.1).unwrap_or_default(); + } = loaders_ptypes_games.remove(&version_id).map(|x|x.1).unwrap_or_default(); let files = files.remove(&version_id).map(|x|x.1).unwrap_or_default(); let hashes = hashes.remove(&version_id).map(|x|x.1).unwrap_or_default(); let version_fields = version_fields.remove(&version_id).map(|x|x.1).unwrap_or_default(); let dependencies = dependencies.remove(&version_id).map(|x|x.1).unwrap_or_default(); let loader_fields = loader_fields.iter() - .filter(|x| loader_loader_field_ids.contains(&x.id)) - .collect::>(); + .filter(|x| loader_loader_field_ids.contains(&x.id)) + .collect::>(); - QueryVersion { + let query_version = QueryVersion { inner: Version { id: VersionId(v.id), project_id: ProjectId(v.mod_id), @@ -821,22 +784,20 @@ impl Version { project_types, games, dependencies, - } - })) - }) - .try_collect::>() - .await?; - - for version in db_versions { - redis - .set_serialized_to_json(VERSIONS_NAMESPACE, version.inner.id.0, &version, None) + }; + + acc.insert(v.id, query_version); + async move { Ok(acc) } + }) .await?; - found_versions.push(version); - } - } + Ok(res) + }, + ).await?; + + val.sort(); - Ok(found_versions) + Ok(val) } pub async fn get_file_from_hash<'a, 'b, E>( @@ -866,110 +827,66 @@ impl Version { where E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy, { - use futures::stream::TryStreamExt; - - let mut redis = redis.connect().await?; - - if hashes.is_empty() { - return Ok(Vec::new()); - } - - let mut file_ids_parsed = hashes.to_vec(); - - let mut found_files = Vec::new(); - - let files = redis - .multi_get::( - VERSION_FILES_NAMESPACE, - file_ids_parsed - .iter() - .map(|hash| format!("{}_{}", algorithm, hash)) - .collect::>(), - ) - .await?; - for file in files { - if let Some(mut file) = - file.and_then(|x| serde_json::from_str::>(&x).ok()) - { - file_ids_parsed.retain(|x| { - !file - .iter() - .any(|y| y.hashes.iter().any(|z| z.0 == &algorithm && z.1 == x)) - }); - found_files.append(&mut file); - continue; - } - } - - if !file_ids_parsed.is_empty() { - let db_files: Vec = sqlx::query!( - " - SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type, - JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes - FROM files f - INNER JOIN versions v on 
v.id = f.version_id - INNER JOIN hashes h on h.file_id = f.id - WHERE h.algorithm = $1 AND h.hash = ANY($2) - GROUP BY f.id, v.mod_id, v.date_published - ORDER BY v.date_published - ", - algorithm, - &file_ids_parsed.into_iter().map(|x| x.as_bytes().to_vec()).collect::>(), - ) - .fetch_many(executor) - .try_filter_map(|e| async { - Ok(e.right().map(|f| { + let val = redis.get_cached_keys( + VERSION_FILES_NAMESPACE, + &hashes.iter().map(|x| format!("{algorithm}_{x}")).collect::>(), + |file_ids| async move { + let files = sqlx::query!( + " + SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type, + JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes + FROM files f + INNER JOIN versions v on v.id = f.version_id + INNER JOIN hashes h on h.file_id = f.id + WHERE h.algorithm = $1 AND h.hash = ANY($2) + GROUP BY f.id, v.mod_id, v.date_published + ORDER BY v.date_published + ", + algorithm, + &file_ids.into_iter().flat_map(|x| x.split('_').last().map(|x| x.as_bytes().to_vec())).collect::>(), + ) + .fetch(executor) + .try_fold(DashMap::new(), |acc, f| { #[derive(Deserialize)] struct Hash { pub algorithm: String, pub hash: String, } - SingleFile { - id: FileId(f.id), - version_id: VersionId(f.version_id), - project_id: ProjectId(f.mod_id), - url: f.url, - filename: f.filename, - hashes: serde_json::from_value::>( - f.hashes.unwrap_or_default(), - ) - .ok() - .unwrap_or_default().into_iter().map(|x| (x.algorithm, x.hash)).collect(), - primary: f.is_primary, - size: f.size as u32, - file_type: f.file_type.map(|x| FileType::from_string(&x)), + let hashes = serde_json::from_value::>( + f.hashes.unwrap_or_default(), + ) + .ok() + .unwrap_or_default().into_iter().map(|x| (x.algorithm, x.hash)) + .collect::>(); + + if let Some(hash) = hashes.get(&algorithm) { + let key = format!("{algorithm}_{hash}"); + + let file = SingleFile { + id: FileId(f.id), + version_id: VersionId(f.version_id), + project_id: ProjectId(f.mod_id), + url: f.url, + filename: f.filename, + hashes, + primary: f.is_primary, + size: f.size as u32, + file_type: f.file_type.map(|x| FileType::from_string(&x)), + }; + + acc.insert(key, file); } - } - )) - }) - .try_collect::>() - .await?; - - let mut save_files: HashMap> = HashMap::new(); - for file in db_files { - for (algo, hash) in &file.hashes { - let key = format!("{}_{}", algo, hash); - - if let Some(files) = save_files.get_mut(&key) { - files.push(file.clone()); - } else { - save_files.insert(key, vec![file.clone()]); - } - } - } - - for (key, mut files) in save_files { - redis - .set_serialized_to_json(VERSION_FILES_NAMESPACE, key, &files, None) + async move { Ok(acc) } + }) .await?; - found_files.append(&mut files); + Ok(files) } - } + ).await?; - Ok(found_files) + Ok(val) } pub async fn clear_cache( diff --git a/src/database/redis.rs b/src/database/redis.rs index c80450cd..e63a37bc 100644 --- a/src/database/redis.rs +++ b/src/database/redis.rs @@ -1,10 +1,20 @@ use super::models::DatabaseError; +use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use chrono::{TimeZone, Utc}; +use dashmap::DashMap; use deadpool_redis::{Config, Runtime}; -use itertools::Itertools; -use redis::{cmd, Cmd, FromRedisValue}; -use std::fmt::Display; +use redis::{cmd, Cmd, ExistenceCheck, SetExpiry, SetOptions}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fmt::{Debug, Display}; +use std::future::Future; 
+use std::hash::Hash; +use std::pin::Pin; +use std::time::Duration; -const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes +const DEFAULT_EXPIRY: i64 = 60 * 60 * 12; // 12 hours +const ACTUAL_EXPIRY: i64 = 60 * 30; // 30 minutes #[derive(Clone)] pub struct RedisPool { @@ -47,6 +57,364 @@ impl RedisPool { meta_namespace: self.meta_namespace.clone(), }) } + + pub async fn get_cached_keys( + &self, + namespace: &str, + keys: &[K], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, DatabaseError>>, + T: Serialize + DeserializeOwned, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug, + { + Ok(self + .get_cached_keys_raw(namespace, keys, closure) + .await? + .into_iter() + .map(|x| x.1) + .collect()) + } + + pub async fn get_cached_keys_raw( + &self, + namespace: &str, + keys: &[K], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, DatabaseError>>, + T: Serialize + DeserializeOwned, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug, + { + self.get_cached_keys_raw_with_slug(namespace, None, false, keys, |ids| async move { + Ok(closure(ids) + .await? + .into_iter() + .map(|(key, val)| (key, (None::, val))) + .collect()) + }) + .await + } + + pub async fn get_cached_keys_with_slug( + &self, + namespace: &str, + slug_namespace: &str, + case_sensitive: bool, + keys: &[I], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, T)>, DatabaseError>>, + T: Serialize + DeserializeOwned, + I: Display + Hash + Eq + PartialEq + Clone + Debug, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize, + S: Display + Clone + DeserializeOwned + Serialize + Debug, + { + Ok(self + .get_cached_keys_raw_with_slug( + namespace, + Some(slug_namespace), + case_sensitive, + keys, + closure, + ) + .await? + .into_iter() + .map(|x| x.1) + .collect()) + } + + pub async fn get_cached_keys_raw_with_slug( + &self, + namespace: &str, + slug_namespace: Option<&str>, + case_sensitive: bool, + keys: &[I], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, T)>, DatabaseError>>, + T: Serialize + DeserializeOwned, + I: Display + Hash + Eq + PartialEq + Clone + Debug, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize, + S: Display + Clone + DeserializeOwned + Serialize + Debug, + { + let connection = self.connect().await?.connection; + + let ids = keys + .iter() + .map(|x| (x.to_string(), x.clone())) + .collect::>(); + + if ids.is_empty() { + return Ok(HashMap::new()); + } + + let get_cached_values = + |ids: DashMap, mut connection: deadpool_redis::Connection| async move { + let slug_ids = if let Some(slug_namespace) = slug_namespace { + cmd("MGET") + .arg( + ids.iter() + .map(|x| { + format!( + "{}_{slug_namespace}:{}", + self.meta_namespace, + if case_sensitive { + x.value().to_string() + } else { + x.value().to_string().to_lowercase() + } + ) + }) + .collect::>(), + ) + .query_async::<_, Vec>>(&mut connection) + .await? + .into_iter() + .flatten() + .collect::>() + } else { + Vec::new() + }; + + let cached_values = cmd("MGET") + .arg( + ids.iter() + .map(|x| x.value().to_string()) + .chain(ids.iter().filter_map(|x| { + parse_base62(&x.value().to_string()) + .ok() + .map(|x| x.to_string()) + })) + .chain(slug_ids) + .map(|x| format!("{}_{namespace}:{x}", self.meta_namespace)) + .collect::>(), + ) + .query_async::<_, Vec>>(&mut connection) + .await? 
+ .into_iter() + .filter_map(|x| { + x.and_then(|val| serde_json::from_str::>(&val).ok()) + .map(|val| (val.key.clone(), val)) + }) + .collect::>(); + + Ok::<_, DatabaseError>((cached_values, connection, ids)) + }; + + let current_time = Utc::now(); + let mut expired_values = HashMap::new(); + + let (cached_values_raw, mut connection, ids) = get_cached_values(ids, connection).await?; + let mut cached_values = cached_values_raw + .into_iter() + .filter_map(|(key, val)| { + if Utc.timestamp(val.iat + ACTUAL_EXPIRY, 0) < current_time { + expired_values.insert(val.key.to_string(), val); + + None + } else { + let key_str = val.key.to_string(); + ids.remove(&key_str); + + if let Ok(value) = key_str.parse::() { + let base62 = to_base62(value); + ids.remove(&base62); + } + + if let Some(ref alias) = val.alias { + ids.remove(&alias.to_string()); + } + + Some((key, val)) + } + }) + .collect::>(); + + let subscribe_ids = DashMap::new(); + + if !ids.is_empty() { + let mut pipe = redis::pipe(); + + let fetch_ids = ids.iter().map(|x| x.key().clone()).collect::>(); + + fetch_ids.iter().for_each(|key| { + pipe.atomic().set_options( + format!("{}_{namespace}:{}/lock", self.meta_namespace, key), + 100, + SetOptions::default() + .get(true) + .conditional_set(ExistenceCheck::NX) + .with_expiration(SetExpiry::EX(60)), + ); + }); + let results = pipe + .query_async::<_, Vec>>(&mut connection) + .await?; + + for (idx, key) in fetch_ids.into_iter().enumerate() { + if let Some(locked) = results.get(idx) { + if locked.is_none() { + continue; + } + } + + if let Some((key, raw_key)) = ids.remove(&key) { + if let Some(val) = expired_values.remove(&key) { + if let Some(ref alias) = val.alias { + ids.remove(&alias.to_string()); + } + + if let Ok(value) = val.key.to_string().parse::() { + let base62 = to_base62(value); + ids.remove(&base62); + } + + cached_values.insert(val.key.clone(), val); + } else { + subscribe_ids.insert(key, raw_key); + } + } + } + } + + #[allow(clippy::type_complexity)] + let mut fetch_tasks: Vec< + Pin>, DatabaseError>>>>, + > = Vec::new(); + + if !ids.is_empty() { + fetch_tasks.push(Box::pin(async { + let fetch_ids = ids.iter().map(|x| x.value().clone()).collect::>(); + + let vals = closure(fetch_ids).await?; + let mut return_values = HashMap::new(); + + let mut pipe = redis::pipe(); + if !vals.is_empty() { + for (key, (slug, value)) in vals { + let value = RedisValue { + key: key.clone(), + iat: Utc::now().timestamp(), + val: value, + alias: slug.clone(), + }; + + pipe.atomic().set_ex( + format!("{}_{namespace}:{key}", self.meta_namespace), + serde_json::to_string(&value)?, + DEFAULT_EXPIRY as u64, + ); + + if let Some(slug) = slug { + ids.remove(&slug.to_string()); + + if let Some(slug_namespace) = slug_namespace { + let actual_slug = if case_sensitive { + slug.to_string() + } else { + slug.to_string().to_lowercase() + }; + + pipe.atomic().set_ex( + format!( + "{}_{slug_namespace}:{}", + self.meta_namespace, actual_slug + ), + key.to_string(), + DEFAULT_EXPIRY as u64, + ); + + pipe.atomic().del(format!( + "{}_{namespace}:{}/lock", + self.meta_namespace, actual_slug + )); + } + } + + let key_str = key.to_string(); + ids.remove(&key_str); + + if let Ok(value) = key_str.parse::() { + let base62 = to_base62(value); + ids.remove(&base62); + + pipe.atomic() + .del(format!("{}_{namespace}:{base62}/lock", self.meta_namespace)); + } + + pipe.atomic() + .del(format!("{}_{namespace}:{key}/lock", self.meta_namespace)); + + return_values.insert(key, value); + } + } + + for (key, _) in ids { + pipe.atomic() 
+ .del(format!("{}_{namespace}:{key}/lock", self.meta_namespace)); + } + + pipe.query_async(&mut connection).await?; + + Ok(return_values) + })); + } + + if !subscribe_ids.is_empty() { + fetch_tasks.push(Box::pin(async { + let mut connection = self.pool.get().await?; + + let mut interval = tokio::time::interval(Duration::from_millis(100)); + let start = Utc::now(); + loop { + let results = cmd("MGET") + .arg( + subscribe_ids + .iter() + .map(|x| { + format!("{}_{namespace}:{}/lock", self.meta_namespace, x.key()) + }) + .collect::>(), + ) + .query_async::<_, Vec>>(&mut connection) + .await?; + + if results.into_iter().all(|x| x.is_none()) { + break; + } + + if (Utc::now() - start) > chrono::Duration::seconds(5) { + return Err(DatabaseError::CacheTimeout); + } + + interval.tick().await; + } + + let (return_values, _, _) = get_cached_values(subscribe_ids, connection).await?; + + Ok(return_values) + })); + } + + if !fetch_tasks.is_empty() { + for map in futures::future::try_join_all(fetch_tasks).await? { + for (key, value) in map { + cached_values.insert(key, value); + } + } + } + + Ok(cached_values.into_iter().map(|x| (x.0, x.1.val)).collect()) + } } impl RedisConnection { @@ -120,26 +488,6 @@ impl RedisConnection { .and_then(|x| serde_json::from_str(&x).ok())) } - pub async fn multi_get( - &mut self, - namespace: &str, - ids: impl IntoIterator, - ) -> Result>, DatabaseError> - where - R: FromRedisValue, - { - let mut cmd = cmd("MGET"); - - let ids = ids.into_iter().map(|x| x.to_string()).collect_vec(); - redis_args( - &mut cmd, - &ids.into_iter() - .map(|x| format!("{}_{}:{}", self.meta_namespace, namespace, x)) - .collect_vec(), - ); - Ok(redis_execute(&mut cmd, &mut self.connection).await?) - } - pub async fn delete(&mut self, namespace: &str, id: T1) -> Result<(), DatabaseError> where T1: Display, @@ -177,6 +525,15 @@ impl RedisConnection { } } +#[derive(Serialize, Deserialize)] +pub struct RedisValue { + key: K, + #[serde(skip_serializing_if = "Option::is_none")] + alias: Option, + iat: i64, + val: T, +} + pub fn redis_args(cmd: &mut Cmd, args: &[String]) { for arg in args { cmd.arg(arg); diff --git a/src/lib.rs b/src/lib.rs index cac75903..a1d7a5b5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,6 @@ +use std::num::NonZeroU32; use std::sync::Arc; +use std::time::Duration; use actix_web::web; use database::redis::RedisPool; @@ -6,19 +8,22 @@ use log::{info, warn}; use queue::{ analytics::AnalyticsQueue, payouts::PayoutsQueue, session::AuthQueue, socket::ActiveSockets, }; -use scheduler::Scheduler; use sqlx::Postgres; use tokio::sync::RwLock; extern crate clickhouse as clickhouse_crate; use clickhouse_crate::Client; +use governor::{Quota, RateLimiter}; +use governor::middleware::StateInformationMiddleware; use util::cors::default_cors; +use crate::queue::moderation::AutomatedModerationQueue; use crate::{ queue::payouts::process_payout, search::indexing::index_projects, util::env::{parse_strings_from_var, parse_var}, }; +use crate::util::ratelimit::KeyedRateLimiter; pub mod auth; pub mod clickhouse; @@ -26,7 +31,6 @@ pub mod database; pub mod file_hosting; pub mod models; pub mod queue; -pub mod ratelimit; pub mod routes; pub mod scheduler; pub mod search; @@ -45,13 +49,15 @@ pub struct LabrinthConfig { pub clickhouse: Client, pub file_host: Arc, pub maxmind: Arc, - pub scheduler: Arc, + pub scheduler: Arc, pub ip_salt: Pepper, pub search_config: search::SearchConfig, pub session_queue: web::Data, pub payouts_queue: web::Data, pub analytics_queue: Arc, pub active_sockets: web::Data>, + 
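// Cache entries are now wrapped in the RedisValue envelope defined in the
// redis.rs hunk above: the primary key, an optional slug alias, an issued-at
// timestamp, and the payload. Entries live in Redis for DEFAULT_EXPIRY (12 h)
// but are treated as stale after ACTUAL_EXPIRY (30 min); stale-but-present
// entries are still served to callers that find another request already
// refreshing them (holding the per-key `/lock`). A small sketch of that
// freshness test; field names follow the diff, the `is_fresh` helper and the
// generic parameter order are illustrative (assumes `chrono` and `serde` with
// the derive feature):
use chrono::Utc;
use serde::{Deserialize, Serialize};

const ACTUAL_EXPIRY: i64 = 60 * 30; // 30 minutes

#[derive(Serialize, Deserialize)]
struct RedisValue<K, S, T> {
    key: K,
    #[serde(skip_serializing_if = "Option::is_none")]
    alias: Option<S>,
    iat: i64, // unix timestamp the entry was written
    val: T,
}

impl<K, S, T> RedisValue<K, S, T> {
    // Entries older than ACTUAL_EXPIRY are refetched even though Redis would
    // keep them around until the 12-hour TTL evicts them.
    fn is_fresh(&self) -> bool {
        Utc::now().timestamp() - self.iat < ACTUAL_EXPIRY
    }
}

fn main() {
    let entry = RedisValue::<u64, String, &str> {
        key: 42,
        alias: Some("example-slug".to_string()),
        iat: Utc::now().timestamp(),
        val: "payload",
    };
    assert!(entry.is_fresh());
}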
pub automated_moderation_queue: web::Data, + pub rate_limiter: KeyedRateLimiter, } pub fn app_setup( @@ -67,8 +73,38 @@ pub fn app_setup( dotenvy::var("BIND_ADDR").unwrap() ); + let automated_moderation_queue = web::Data::new(AutomatedModerationQueue::default()); + + let automated_moderation_queue_ref = automated_moderation_queue.clone(); + let pool_ref = pool.clone(); + let redis_pool_ref = redis_pool.clone(); + actix_rt::spawn(async move { + automated_moderation_queue_ref + .task(pool_ref, redis_pool_ref) + .await; + }); + let mut scheduler = scheduler::Scheduler::new(); + let limiter: KeyedRateLimiter = Arc::new( + RateLimiter::keyed(Quota::per_minute(NonZeroU32::new(300).unwrap())) + .with_middleware::(), + ); + let limiter_clone = Arc::clone(&limiter); + scheduler.run(Duration::from_secs(60), move || { + info!( + "Clearing ratelimiter, storage size: {}", + limiter_clone.len() + ); + limiter_clone.retain_recent(); + info!( + "Done clearing ratelimiter, storage size: {}", + limiter_clone.len() + ); + + async move {} + }); + // The interval in seconds at which the local database is indexed // for searching. Defaults to 1 hour if unset. let local_index_interval = @@ -241,6 +277,8 @@ pub fn app_setup( payouts_queue, analytics_queue, active_sockets, + automated_moderation_queue, + rate_limiter: limiter, } } @@ -272,6 +310,7 @@ pub fn app_config(cfg: &mut web::ServiceConfig, labrinth_config: LabrinthConfig) .app_data(web::Data::new(labrinth_config.clickhouse.clone())) .app_data(web::Data::new(labrinth_config.maxmind.clone())) .app_data(labrinth_config.active_sockets.clone()) + .app_data(labrinth_config.automated_moderation_queue.clone()) .configure(routes::v2::config) .configure(routes::v3::config) .configure(routes::internal::config) @@ -397,5 +436,7 @@ pub fn check_env_vars() -> bool { failed |= check_var::("PAYOUTS_BUDGET"); + failed |= check_var::("FLAME_ANVIL_URL"); + failed } diff --git a/src/main.rs b/src/main.rs index e242509f..5cd49379 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,16 +1,21 @@ use actix_web::{App, HttpServer}; use actix_web_prom::PrometheusMetricsBuilder; use env_logger::Env; +use governor::middleware::StateInformationMiddleware; +use governor::{Quota, RateLimiter}; use labrinth::database::redis::RedisPool; use labrinth::file_hosting::S3Host; -use labrinth::ratelimit::errors::ARError; -use labrinth::ratelimit::memory::{MemoryStore, MemoryStoreActor}; -use labrinth::ratelimit::middleware::RateLimiter; use labrinth::search; -use labrinth::util::env::parse_var; +use labrinth::util::ratelimit::{KeyedRateLimiter, RateLimit}; use labrinth::{check_env_vars, clickhouse, database, file_hosting, queue}; use log::{error, info}; +use std::num::NonZeroU32; use std::sync::Arc; +use std::time::Duration; + +#[cfg(feature = "jemalloc")] +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; #[derive(Clone)] pub struct Pepper { @@ -86,17 +91,14 @@ async fn main() -> std::io::Result<()> { let maxmind_reader = Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap()); - let store = MemoryStore::new(); - let prometheus = PrometheusMetricsBuilder::new("labrinth") .endpoint("/metrics") .build() .expect("Failed to create prometheus metrics middleware"); let search_config = search::SearchConfig::new(None); - info!("Starting Actix HTTP server!"); - let labrinth_config = labrinth::app_setup( + let mut labrinth_config = labrinth::app_setup( pool.clone(), redis_pool.clone(), search_config.clone(), @@ -105,32 +107,14 @@ async fn main() -> std::io::Result<()> 
{ maxmind_reader.clone(), ); + info!("Starting Actix HTTP server!"); + // Init App HttpServer::new(move || { App::new() .wrap(prometheus.clone()) + .wrap(RateLimit(Arc::clone(&labrinth_config.rate_limiter))) .wrap(actix_web::middleware::Compress::default()) - .wrap( - RateLimiter::new(MemoryStoreActor::from(store.clone()).start()) - .with_identifier(|req| { - let connection_info = req.connection_info(); - let ip = - String::from(if parse_var("CLOUDFLARE_INTEGRATION").unwrap_or(false) { - if let Some(header) = req.headers().get("CF-Connecting-IP") { - header.to_str().map_err(|_| ARError::Identification)? - } else { - connection_info.peer_addr().ok_or(ARError::Identification)? - } - } else { - connection_info.peer_addr().ok_or(ARError::Identification)? - }); - - Ok(ip) - }) - .with_interval(std::time::Duration::from_secs(60)) - .with_max_requests(300) - .with_ignore_key(dotenvy::var("RATE_LIMIT_IGNORE_KEY").ok()), - ) .wrap(sentry_actix::Sentry::new()) .configure(|cfg| labrinth::app_config(cfg, labrinth_config.clone())) }) diff --git a/src/models/error.rs b/src/models/error.rs index 5ac3c607..28f737c1 100644 --- a/src/models/error.rs +++ b/src/models/error.rs @@ -4,5 +4,5 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize)] pub struct ApiError<'a> { pub error: &'a str, - pub description: &'a str, + pub description: String, } diff --git a/src/models/v3/analytics.rs b/src/models/v3/analytics.rs index 669175ad..b59254a7 100644 --- a/src/models/v3/analytics.rs +++ b/src/models/v3/analytics.rs @@ -34,6 +34,8 @@ pub struct PageView { pub user_id: u64, // Modrinth Project ID (used for payouts) pub project_id: u64, + // whether this view will be monetized / counted for payouts + pub monetized: bool, // The below information is used exclusively for data aggregation and fraud detection // (ex: page view botting). 
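A note on the new `monetized` flag introduced above: it is set to `true` at ingest in `routes/analytics.rs` further down in this diff, and cleared during the flush in `AnalyticsQueue::index`, which keeps a per-(stripped IP, project) counter in Redis with a six-hour expiry and stops counting views for payouts once that counter would pass 3; only the first view of a flush batch is ever eligible. The following is a condensed, illustrative restatement of that rule, with the Redis round-trip replaced by plain integers; the cap value and first-view-only eligibility come from the queue code below, everything else is purely for demonstration.

```rust
/// Condensed restatement of the monetization cap in AnalyticsQueue::index:
/// a (stripped IP, project) pair keeps a six-hour counter, and a flush batch
/// only leaves its first view monetized while the window total stays <= 3.
fn batch_first_view_monetized(window_count: u32, batch_len: u32) -> bool {
    const CAP: u32 = 3;
    window_count + batch_len <= CAP
}

fn main() {
    // Fresh key, single view in the batch: counted for payouts.
    assert!(batch_first_view_monetized(0, 1));
    // Later flush in the same window pushing the total past the cap:
    // every view in that batch is written with monetized = false.
    assert!(!batch_first_view_monetized(2, 5));
    println!("cap logic holds");
}
```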
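For context on the `KeyedRateLimiter`/`RateLimit` pair that replaces the actor-based limiter deleted later in this diff: the limiter built in `app_setup` is a keyed `governor` limiter with `StateInformationMiddleware`, which hands back a state snapshot on every successful check. Below is a minimal sketch of checking such a limiter per request. The `util::ratelimit` module itself is not part of the hunks shown here, so the choice of key and the header handling are assumptions; only the quota and middleware configuration mirror the diff.

```rust
use std::num::NonZeroU32;
use std::sync::Arc;

use governor::middleware::StateInformationMiddleware;
use governor::{Quota, RateLimiter};

fn main() {
    // Same quota as app_setup: 300 requests per minute per key.
    let limiter = Arc::new(
        RateLimiter::keyed(Quota::per_minute(NonZeroU32::new(300).unwrap()))
            .with_middleware::<StateInformationMiddleware>(),
    );

    // Key choice is assumed here; the real middleware presumably keys on the
    // client IP (or CF-Connecting-IP), as the removed limiter did.
    let ip = "203.0.113.7".to_string();

    match limiter.check_key(&ip) {
        // StateInformationMiddleware returns a StateSnapshot, which carries
        // enough data to populate x-ratelimit-limit / x-ratelimit-remaining,
        // the headers the deleted src/ratelimit middleware set by hand.
        Ok(snapshot) => println!(
            "allowed, {} of {} left in this window",
            snapshot.remaining_burst_capacity(),
            snapshot.quota().burst_size()
        ),
        // The denial carries the earliest time the key may try again.
        Err(_not_until) => println!("rate limited"),
    }
}
```

`retain_recent()`, called from the 60-second scheduler job in `app_setup`, is what keeps the keyed store from growing without bound between requests.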
diff --git a/src/models/v3/ids.rs b/src/models/v3/ids.rs index 73d0c32c..d2a6672d 100644 --- a/src/models/v3/ids.rs +++ b/src/models/v3/ids.rs @@ -38,11 +38,15 @@ pub fn random_base62(n: usize) -> u64 { /// This method panics if `n` is 0 or greater than 11, since a `u64` /// can only represent up to 11 character base62 strings pub fn random_base62_rng(rng: &mut R, n: usize) -> u64 { + random_base62_rng_range(rng, n, n) +} + +pub fn random_base62_rng_range(rng: &mut R, n_min: usize, n_max: usize) -> u64 { use rand::Rng; - assert!(n > 0 && n <= 11); + assert!(n_min > 0 && n_max <= 11 && n_min <= n_max); // gen_range is [low, high): max value is `MULTIPLES[n] - 1`, // which is n characters long when encoded - rng.gen_range(MULTIPLES[n - 1]..MULTIPLES[n]) + rng.gen_range(MULTIPLES[n_min - 1]..MULTIPLES[n_max]) } const MULTIPLES: [u64; 12] = [ diff --git a/src/models/v3/organizations.rs b/src/models/v3/organizations.rs index 11a0f72d..f2817e36 100644 --- a/src/models/v3/organizations.rs +++ b/src/models/v3/organizations.rs @@ -5,7 +5,7 @@ use super::{ use serde::{Deserialize, Serialize}; /// The ID of a team -#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)] #[serde(from = "Base62Id")] #[serde(into = "Base62Id")] pub struct OrganizationId(pub u64); diff --git a/src/models/v3/pack.rs b/src/models/v3/pack.rs index c73def00..49e22ca3 100644 --- a/src/models/v3/pack.rs +++ b/src/models/v3/pack.rs @@ -18,7 +18,7 @@ pub struct PackFormat { pub dependencies: std::collections::HashMap, } -#[derive(Serialize, Deserialize, Validate, Eq, PartialEq, Debug)] +#[derive(Serialize, Deserialize, Validate, Eq, PartialEq, Debug, Clone)] #[serde(rename_all = "camelCase")] pub struct PackFile { pub path: String, @@ -54,7 +54,7 @@ fn validate_download_url(values: &[String]) -> Result<(), validator::ValidationE Ok(()) } -#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, Debug)] +#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, Debug, Clone)] #[serde(rename_all = "camelCase", from = "String")] pub enum PackFileHash { Sha1, @@ -72,7 +72,7 @@ impl From for PackFileHash { } } -#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, Debug)] +#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, Debug, Clone)] #[serde(rename_all = "camelCase")] pub enum EnvType { Client, diff --git a/src/models/v3/pats.rs b/src/models/v3/pats.rs index d4ef6e28..4de7e7c8 100644 --- a/src/models/v3/pats.rs +++ b/src/models/v3/pats.rs @@ -5,7 +5,7 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; /// The ID of a team -#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)] #[serde(from = "Base62Id")] #[serde(into = "Base62Id")] pub struct PatId(pub u64); diff --git a/src/models/v3/projects.rs b/src/models/v3/projects.rs index 5bb0710b..8e75d079 100644 --- a/src/models/v3/projects.rs +++ b/src/models/v3/projects.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use validator::Validate; /// The ID of a specific project, encoded as base62 for usage in the API -#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug, Hash)] #[serde(from = "Base62Id")] #[serde(into = "Base62Id")] pub struct ProjectId(pub u64); diff --git a/src/models/v3/sessions.rs b/src/models/v3/sessions.rs index 9cfb6d50..46a8a69a 100644 --- a/src/models/v3/sessions.rs +++ 
b/src/models/v3/sessions.rs @@ -3,7 +3,7 @@ use crate::models::users::UserId; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; -#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)] #[serde(from = "Base62Id")] #[serde(into = "Base62Id")] pub struct SessionId(pub u64); diff --git a/src/models/v3/threads.rs b/src/models/v3/threads.rs index 5a18de7f..2a7436ab 100644 --- a/src/models/v3/threads.rs +++ b/src/models/v3/threads.rs @@ -32,6 +32,7 @@ pub struct ThreadMessage { pub author_id: Option, pub body: MessageBody, pub created: DateTime, + pub hide_identity: bool, } #[derive(Serialize, Deserialize, Clone)] @@ -114,24 +115,17 @@ impl Thread { }) .map(|x| ThreadMessage { id: x.id.into(), - author_id: if users - .iter() - .find(|y| x.author_id == Some(y.id.into())) - .map(|x| x.role.is_mod() && !user.role.is_mod()) - .unwrap_or(false) - { + author_id: if x.hide_identity && !user.role.is_mod() { None } else { x.author_id.map(|x| x.into()) }, body: x.body, created: x.created, + hide_identity: x.hide_identity, }) .collect(), - members: users - .into_iter() - .filter(|x| !x.role.is_mod() || user.role.is_mod()) - .collect(), + members: users, } } } diff --git a/src/queue/analytics.rs b/src/queue/analytics.rs index 0d0fa869..f1fd9121 100644 --- a/src/queue/analytics.rs +++ b/src/queue/analytics.rs @@ -5,12 +5,15 @@ use crate::routes::ApiError; use dashmap::{DashMap, DashSet}; use redis::cmd; use sqlx::PgPool; +use std::collections::HashMap; +use std::net::Ipv6Addr; const DOWNLOADS_NAMESPACE: &str = "downloads"; +const VIEWS_NAMESPACE: &str = "views"; pub struct AnalyticsQueue { - views_queue: DashSet, - downloads_queue: DashMap, + views_queue: DashMap<(u64, u64), Vec>, + downloads_queue: DashMap<(u64, u64), Download>, playtime_queue: DashSet, } @@ -24,26 +27,37 @@ impl Default for AnalyticsQueue { impl AnalyticsQueue { pub fn new() -> Self { AnalyticsQueue { - views_queue: DashSet::with_capacity(1000), + views_queue: DashMap::with_capacity(1000), downloads_queue: DashMap::with_capacity(1000), playtime_queue: DashSet::with_capacity(1000), } } - pub fn add_view(&self, page_view: PageView) { - self.views_queue.insert(page_view); + fn strip_ip(ip: Ipv6Addr) -> u64 { + if let Some(ip) = ip.to_ipv4_mapped() { + let octets = ip.octets(); + u64::from_be_bytes([octets[0], octets[1], octets[2], octets[3], 0, 0, 0, 0]) + } else { + let octets = ip.octets(); + u64::from_be_bytes([ + octets[0], octets[1], octets[2], octets[3], octets[4], octets[5], octets[6], + octets[7], + ]) + } } + pub fn add_view(&self, page_view: PageView) { + let ip_stripped = Self::strip_ip(page_view.ip); + + self.views_queue + .entry((ip_stripped, page_view.project_id)) + .or_default() + .push(page_view); + } pub fn add_download(&self, download: Download) { - let ip_stripped = if let Some(ip) = download.ip.to_ipv4_mapped() { - let octets = ip.octets(); - u64::from_be_bytes([0, 0, 0, 0, octets[0], octets[1], octets[2], octets[3]]) - } else { - let octets = download.ip.octets(); - u64::from_be_bytes([0, 0, 0, 0, octets[0], octets[1], octets[2], octets[3]]) - }; + let ip_stripped = Self::strip_ip(download.ip); self.downloads_queue - .insert(format!("{}-{}", ip_stripped, download.project_id), download); + .insert((ip_stripped, download.project_id), download); } pub fn add_playtime(&self, playtime: Playtime) { @@ -65,16 +79,6 @@ impl AnalyticsQueue { let playtime_queue = self.playtime_queue.clone(); self.playtime_queue.clear(); - if 
!views_queue.is_empty() { - let mut views = client.insert("views")?; - - for view in views_queue { - views.write(&view).await?; - } - - views.end().await?; - } - if !playtime_queue.is_empty() { let mut playtimes = client.insert("playtime")?; @@ -85,6 +89,76 @@ impl AnalyticsQueue { playtimes.end().await?; } + if !views_queue.is_empty() { + let mut views_keys = Vec::new(); + let mut raw_views = Vec::new(); + + for (key, views) in views_queue { + views_keys.push(key); + raw_views.push((views, true)); + } + + let mut redis = redis.pool.get().await.map_err(DatabaseError::RedisPool)?; + + let results = cmd("MGET") + .arg( + views_keys + .iter() + .map(|x| format!("{}:{}-{}", VIEWS_NAMESPACE, x.0, x.1)) + .collect::>(), + ) + .query_async::<_, Vec>>(&mut redis) + .await + .map_err(DatabaseError::CacheError)?; + + let mut pipe = redis::pipe(); + for (idx, count) in results.into_iter().enumerate() { + let key = &views_keys[idx]; + + let new_count = if let Some((views, monetized)) = raw_views.get_mut(idx) { + if let Some(count) = count { + if count > 3 { + *monetized = false; + continue; + } + + if (count + views.len() as u32) > 3 { + *monetized = false; + } + + count + (views.len() as u32) + } else { + views.len() as u32 + } + } else { + 1 + }; + + pipe.atomic().set_ex( + format!("{}:{}-{}", VIEWS_NAMESPACE, key.0, key.1), + new_count, + 6 * 60 * 60, + ); + } + pipe.query_async(&mut *redis) + .await + .map_err(DatabaseError::CacheError)?; + + let mut views = client.insert("views")?; + + for (all_views, monetized) in raw_views { + for (idx, mut view) in all_views.into_iter().enumerate() { + if idx != 0 || !monetized { + view.monetized = false; + } + + views.write(&view).await?; + } + } + + views.end().await?; + } + if !downloads_queue.is_empty() { let mut downloads_keys = Vec::new(); let raw_downloads = DashMap::new(); @@ -100,7 +174,7 @@ impl AnalyticsQueue { .arg( downloads_keys .iter() - .map(|x| format!("{}:{}", DOWNLOADS_NAMESPACE, x)) + .map(|x| format!("{}:{}-{}", DOWNLOADS_NAMESPACE, x.0, x.1)) .collect::>(), ) .query_async::<_, Vec>>(&mut redis) @@ -123,7 +197,7 @@ impl AnalyticsQueue { }; pipe.atomic().set_ex( - format!("{}:{}", DOWNLOADS_NAMESPACE, key), + format!("{}:{}-{}", DOWNLOADS_NAMESPACE, key.0, key.1), new_count, 6 * 60 * 60, ); @@ -132,37 +206,46 @@ impl AnalyticsQueue { .await .map_err(DatabaseError::CacheError)?; - let version_ids = raw_downloads - .iter() - .map(|x| x.version_id as i64) - .collect::>(); - let project_ids = raw_downloads - .iter() - .map(|x| x.project_id as i64) - .collect::>(); - let mut transaction = pool.begin().await?; let mut downloads = client.insert("downloads")?; + let mut version_downloads: HashMap = HashMap::new(); + let mut project_downloads: HashMap = HashMap::new(); + for (_, download) in raw_downloads { + *version_downloads + .entry(download.version_id as i64) + .or_default() += 1; + *project_downloads + .entry(download.project_id as i64) + .or_default() += 1; + downloads.write(&download).await?; } - sqlx::query!( - "UPDATE versions - SET downloads = downloads + 1 - WHERE id = ANY($1)", - &version_ids + sqlx::query( + " + UPDATE versions v + SET downloads = v.downloads + x.amount + FROM unnest($1::BIGINT[], $2::int[]) AS x(id, amount) + WHERE v.id = x.id + ", ) + .bind(version_downloads.keys().copied().collect::>()) + .bind(version_downloads.values().copied().collect::>()) .execute(&mut *transaction) .await?; - sqlx::query!( - "UPDATE mods - SET downloads = downloads + 1 - WHERE id = ANY($1)", - &project_ids + sqlx::query( + " + UPDATE mods m + 
SET downloads = m.downloads + x.amount + FROM unnest($1::BIGINT[], $2::int[]) AS x(id, amount) + WHERE m.id = x.id + ", ) + .bind(project_downloads.keys().copied().collect::>()) + .bind(project_downloads.values().copied().collect::>()) .execute(&mut *transaction) .await?; diff --git a/src/queue/mod.rs b/src/queue/mod.rs index 9501640b..7ccf81c0 100644 --- a/src/queue/mod.rs +++ b/src/queue/mod.rs @@ -1,5 +1,6 @@ pub mod analytics; pub mod maxmind; +pub mod moderation; pub mod payouts; pub mod session; pub mod socket; diff --git a/src/queue/moderation.rs b/src/queue/moderation.rs new file mode 100644 index 00000000..761b6f4f --- /dev/null +++ b/src/queue/moderation.rs @@ -0,0 +1,881 @@ +use crate::auth::checks::filter_visible_versions; +use crate::database; +use crate::database::models::notification_item::NotificationBuilder; +use crate::database::models::thread_item::ThreadMessageBuilder; +use crate::database::redis::RedisPool; +use crate::models::ids::ProjectId; +use crate::models::notifications::NotificationBody; +use crate::models::pack::{PackFile, PackFileHash, PackFormat}; +use crate::models::projects::ProjectStatus; +use crate::models::threads::MessageBody; +use crate::routes::ApiError; +use dashmap::DashSet; +use itertools::Itertools; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use std::collections::HashMap; +use std::io::{Cursor, Read}; +use std::time::Duration; +use zip::ZipArchive; + +const AUTOMOD_ID: i64 = 0; + +pub struct ModerationMessages { + pub messages: Vec, + pub version_specific: HashMap>, +} + +impl ModerationMessages { + pub fn is_empty(&self) -> bool { + self.messages.is_empty() && self.version_specific.is_empty() + } + + pub fn markdown(&self, auto_mod: bool) -> String { + let mut str = "".to_string(); + + for message in &self.messages { + str.push_str(&format!("## {}\n", message.header())); + str.push_str(&format!("{}\n", message.body())); + str.push('\n'); + } + + for (version_num, messages) in &self.version_specific { + for message in messages { + str.push_str(&format!( + "## Version {}: {}\n", + version_num, + message.header() + )); + str.push_str(&format!("{}\n", message.body())); + str.push('\n'); + } + } + + if auto_mod { + str.push_str("
\n\n"); + str.push_str("🤖 This is an automated message generated by AutoMod (BETA). If you are facing issues, please [contact support](https://support.modrinth.com)."); + } + + str + } + + pub fn should_reject(&self, first_time: bool) -> bool { + self.messages.iter().any(|x| x.rejectable(first_time)) + || self + .version_specific + .values() + .any(|x| x.iter().any(|x| x.rejectable(first_time))) + } + + pub fn approvable(&self) -> bool { + self.messages.iter().all(|x| x.approvable()) + && self + .version_specific + .values() + .all(|x| x.iter().all(|x| x.approvable())) + } +} + +pub enum ModerationMessage { + MissingGalleryImage, + NoPrimaryFile, + NoSideTypes, + PackFilesNotAllowed { + files: HashMap, + incomplete: bool, + }, + MissingLicense, + MissingCustomLicenseUrl { + license: String, + }, +} + +impl ModerationMessage { + pub fn rejectable(&self, first_time: bool) -> bool { + match self { + ModerationMessage::NoPrimaryFile => true, + ModerationMessage::PackFilesNotAllowed { files, incomplete } => { + (!incomplete || first_time) + && files.values().any(|x| match x.status { + ApprovalType::Yes => false, + ApprovalType::WithAttributionAndSource => false, + ApprovalType::WithAttribution => false, + ApprovalType::No => first_time, + ApprovalType::PermanentNo => true, + ApprovalType::Unidentified => first_time, + }) + } + ModerationMessage::MissingGalleryImage => true, + ModerationMessage::MissingLicense => true, + ModerationMessage::MissingCustomLicenseUrl { .. } => true, + ModerationMessage::NoSideTypes => true, + } + } + + pub fn approvable(&self) -> bool { + match self { + ModerationMessage::NoPrimaryFile => false, + ModerationMessage::PackFilesNotAllowed { files, .. } => { + files.values().all(|x| x.status.approved()) + } + ModerationMessage::MissingGalleryImage => false, + ModerationMessage::MissingLicense => false, + ModerationMessage::MissingCustomLicenseUrl { .. } => false, + ModerationMessage::NoSideTypes => false, + } + } + + pub fn header(&self) -> &'static str { + match self { + ModerationMessage::NoPrimaryFile => "No primary files", + ModerationMessage::PackFilesNotAllowed { .. } => "Copyrighted Content", + ModerationMessage::MissingGalleryImage => "Missing Gallery Images", + ModerationMessage::MissingLicense => "Missing License", + ModerationMessage::MissingCustomLicenseUrl { .. } => "Missing License URL", + ModerationMessage::NoSideTypes => "Missing Environment Information", + } + } + + pub fn body(&self) -> String { + match self { + ModerationMessage::NoPrimaryFile => "Please attach a file to this version. All files on Modrinth must have files associated with their versions.\n".to_string(), + ModerationMessage::PackFilesNotAllowed { files, .. } => { + let mut str = "".to_string(); + str.push_str("This pack redistributes copyrighted material. 
Please refer to [Modrinth's guide on obtaining modpack permissions](https://docs.modrinth.com/modpacks/permissions) for more information.\n\n"); + + let mut attribute_mods = Vec::new(); + let mut no_mods = Vec::new(); + let mut permanent_no_mods = Vec::new(); + let mut unidentified_mods = Vec::new(); + for (_, approval) in files.iter() { + match approval.status { + ApprovalType::Yes | ApprovalType::WithAttributionAndSource => {} + ApprovalType::WithAttribution => attribute_mods.push(&approval.file_name), + ApprovalType::No => no_mods.push(&approval.file_name), + ApprovalType::PermanentNo => permanent_no_mods.push(&approval.file_name), + ApprovalType::Unidentified => unidentified_mods.push(&approval.file_name), + } + } + + fn print_mods(projects: Vec<&String>, headline: &str, val: &mut String) { + if projects.is_empty() { return } + + val.push_str(&format!("{headline}\n\n")); + + for project in &projects { + let additional_text = if project.contains("ftb-quests") { + Some("Heracles") + } else if project.contains("ftb-ranks") || project.contains("ftb-essentials") { + Some("Prometheus") + } else if project.contains("ftb-teams") { + Some("Argonauts") + } else if project.contains("ftb-chunks") { + Some("Cadmus") + } else { + None + }; + + val.push_str(&if let Some(additional_text) = additional_text { + format!("- {project}(consider using [{additional_text}](https://modrinth.com/mod/{}) instead)\n", additional_text.to_lowercase()) + } else { + format!("- {project}\n") + }) + } + + if !projects.is_empty() { + val.push('\n'); + } + } + + print_mods(attribute_mods, "The following content has attribution requirements, meaning that you must link back to the page where you originally found this content in your modpack description or version changelog (e.g. linking a mod's CurseForge page if you got it from CurseForge):", &mut str); + print_mods(no_mods, "The following content is not allowed in Modrinth modpacks due to licensing restrictions. Please contact the author(s) directly for permission or remove the content from your modpack:", &mut str); + print_mods(permanent_no_mods, "The following content is not allowed in Modrinth modpacks, regardless of permission obtained. This may be because it breaks Modrinth's content rules or because the authors, upon being contacted for permission, have declined. Please remove the content from your modpack:", &mut str); + print_mods(unidentified_mods, "The following content could not be identified. Please provide proof of its origin along with proof that you have permission to include it:", &mut str); + + str + }, + ModerationMessage::MissingGalleryImage => "We ask that resource packs like yours show off their content using images in the Gallery, or optionally in the Description, in order to effectively and clearly inform users of the content in your pack per section 2.1 of [Modrinth's content rules](https://modrinth.com/legal/rules#general-expectations).\n +Keep in mind that you should:\n +- Set a featured image that best represents your pack. +- Ensure all your images have titles that accurately label the image, and optionally, details on the contents of the image in the images Description. +- Upload any relevant images in your Description to your Gallery tab for best results.".to_string(), + ModerationMessage::MissingLicense => "You must select a License before your project can be published publicly, having a License associated with your project is important to protecting your rights and allowing others to use your content as you intend. 
For more information, you can see our [Guide to Licensing Mods]().".to_string(), + ModerationMessage::MissingCustomLicenseUrl { license } => format!("It looks like you've selected the License \"{license}\" without providing a valid License link. When using a custom License you must provide a link directly to the License in the License Link field."), + ModerationMessage::NoSideTypes => "Your project's side types are currently set to Unknown on both sides. Please set accurate side types!".to_string(), + } + } +} + +pub struct AutomatedModerationQueue { + pub projects: DashSet, +} + +impl Default for AutomatedModerationQueue { + fn default() -> Self { + Self { + projects: DashSet::new(), + } + } +} + +impl AutomatedModerationQueue { + pub async fn task(&self, pool: PgPool, redis: RedisPool) { + loop { + let projects = self.projects.clone(); + self.projects.clear(); + + for project in projects { + async { + let project = + database::Project::get_id((project).into(), &pool, &redis).await?; + + if let Some(project) = project { + let res = async { + let mut mod_messages = ModerationMessages { + messages: vec![], + version_specific: HashMap::new(), + }; + + if project.project_types.iter().any(|x| ["mod", "modpack"].contains(&&**x)) && !project.aggregate_version_fields.iter().any(|x| ["server_only", "client_only", "client_and_server", "singleplayer"].contains(&&*x.field_name)) { + mod_messages.messages.push(ModerationMessage::NoSideTypes); + } + + if project.inner.license == "LicenseRef-Unknown" || project.inner.license == "LicenseRef-" { + mod_messages.messages.push(ModerationMessage::MissingLicense); + } else if project.inner.license.starts_with("LicenseRef-") && project.inner.license != "LicenseRef-All-Rights-Reserved" && project.inner.license_url.is_none() { + mod_messages.messages.push(ModerationMessage::MissingCustomLicenseUrl { license: project.inner.license.clone() }); + } + + if (project.project_types.contains(&"resourcepack".to_string()) || project.project_types.contains(&"shader".to_string())) && project.gallery_items.is_empty() { + mod_messages.messages.push(ModerationMessage::MissingGalleryImage); + } + + let versions = + database::Version::get_many(&project.versions, &pool, &redis) + .await? + .into_iter() + // we only support modpacks at this time + .filter(|x| x.project_types.contains(&"modpack".to_string())) + .collect::>(); + + for version in versions { + let primary_file = version.files.iter().find_or_first(|x| x.primary); + + if let Some(primary_file) = primary_file { + let data = reqwest::get(&primary_file.url).await?.bytes().await?; + + let reader = Cursor::new(data); + let mut zip = ZipArchive::new(reader)?; + + let pack: PackFormat = { + let mut file = + if let Ok(file) = zip.by_name("modrinth.index.json") { + file + } else { + continue; + }; + + let mut contents = String::new(); + file.read_to_string(&mut contents)?; + + serde_json::from_str(&contents)? 
+ }; + + // sha1, pack file, file path, murmur + let mut hashes: Vec<( + String, + Option, + String, + Option, + )> = pack + .files + .clone() + .into_iter() + .flat_map(|x| { + let hash = x.hashes.get(&PackFileHash::Sha1); + + if let Some(hash) = hash { + let path = x.path.clone(); + Some((hash.clone(), Some(x), path, None)) + } else { + None + } + }) + .collect(); + + for i in 0..zip.len() { + let mut file = zip.by_index(i)?; + + if file.name().starts_with("overrides/mods") + || file.name().starts_with("client-overrides/mods") + || file.name().starts_with("server-overrides/mods") + || file.name().starts_with("overrides/shaderpacks") + || file.name().starts_with("client-overrides/shaderpacks") + || file.name().starts_with("overrides/resourcepacks") + || file.name().starts_with("client-overrides/resourcepacks") + { + if file.name().matches('/').count() > 2 || file.name().ends_with(".txt") { + continue; + } + + let mut contents = Vec::new(); + file.read_to_end(&mut contents)?; + + let hash = sha1::Sha1::from(&contents).hexdigest(); + let murmur = hash_flame_murmur32(contents); + + hashes.push(( + hash, + None, + file.name().to_string(), + Some(murmur), + )); + } + } + + let files = database::models::Version::get_files_from_hash( + "sha1".to_string(), + &hashes.iter().map(|x| x.0.clone()).collect::>(), + &pool, + &redis, + ) + .await?; + + let version_ids = + files.iter().map(|x| x.version_id).collect::>(); + let versions_data = filter_visible_versions( + database::models::Version::get_many( + &version_ids, + &pool, + &redis, + ) + .await?, + &None, + &pool, + &redis, + ) + .await?; + + let mut final_hashes = HashMap::new(); + + for version in versions_data { + for file in + files.iter().filter(|x| x.version_id == version.id.into()) + { + if let Some(hash) = file.hashes.get(&"sha1".to_string()) { + if let Some((index, (sha1, _, file_name, _))) = hashes + .iter() + .enumerate() + .find(|(_, (value, _, _, _))| value == hash) + { + final_hashes + .insert(sha1.clone(), IdentifiedFile { status: ApprovalType::Yes, file_name: file_name.clone() }); + + hashes.remove(index); + } + } + } + } + + // All files are on Modrinth, so we don't send any messages + if hashes.is_empty() { + sqlx::query!( + " + UPDATE files + SET metadata = $1 + WHERE id = $2 + ", + serde_json::to_value(&MissingMetadata { + identified: final_hashes, + flame_files: Default::default(), + unknown_files: Default::default(), + })?, + primary_file.id.0 + ) + .execute(&pool) + .await?; + + continue; + } + + let rows = sqlx::query!( + " + SELECT encode(mef.sha1, 'escape') sha1, mel.status status + FROM moderation_external_files mef + INNER JOIN moderation_external_licenses mel ON mef.external_license_id = mel.id + WHERE mef.sha1 = ANY($1) + ", + &hashes.iter().map(|x| x.0.as_bytes().to_vec()).collect::>() + ) + .fetch_all(&pool) + .await?; + + for row in rows { + if let Some(sha1) = row.sha1 { + if let Some((index, (sha1, _, file_name, _))) = hashes.iter().enumerate().find(|(_, (value, _, _, _))| value == &sha1) { + final_hashes.insert(sha1.clone(), IdentifiedFile { file_name: file_name.clone(), status: ApprovalType::from_string(&row.status).unwrap_or(ApprovalType::Unidentified) }); + hashes.remove(index); + } + } + } + + if hashes.is_empty() { + let metadata = MissingMetadata { + identified: final_hashes, + flame_files: Default::default(), + unknown_files: Default::default(), + }; + + sqlx::query!( + " + UPDATE files + SET metadata = $1 + WHERE id = $2 + ", + serde_json::to_value(&metadata)?, + primary_file.id.0 + ) + .execute(&pool) + 
.await?; + + if metadata.identified.values().any(|x| x.status != ApprovalType::Yes && x.status != ApprovalType::WithAttributionAndSource) { + let val = mod_messages.version_specific.entry(version.inner.version_number).or_default(); + val.push(ModerationMessage::PackFilesNotAllowed {files: metadata.identified, incomplete: false }); + } + continue; + } + + let client = reqwest::Client::new(); + let res = client + .post(format!("{}/v1/fingerprints", dotenvy::var("FLAME_ANVIL_URL")?)) + .json(&serde_json::json!({ + "fingerprints": hashes.iter().filter_map(|x| x.3).collect::>() + })) + .send() + .await?.text() + .await?; + + let flame_hashes = serde_json::from_str::>(&res)? + .data + .exact_matches + .into_iter() + .map(|x| x.file) + .collect::>(); + + let mut flame_files = Vec::new(); + + for file in flame_hashes { + let hash = file + .hashes + .iter() + .find(|x| x.algo == 1) + .map(|x| x.value.clone()); + + if let Some(hash) = hash { + flame_files.push((hash, file.mod_id)) + } + } + + let rows = sqlx::query!( + " + SELECT mel.id, mel.flame_project_id, mel.status status + FROM moderation_external_licenses mel + WHERE mel.flame_project_id = ANY($1) + ", + &flame_files.iter().map(|x| x.1 as i32).collect::>() + ) + .fetch_all(&pool).await?; + + let mut insert_hashes = Vec::new(); + let mut insert_ids = Vec::new(); + + for row in rows { + if let Some((curse_index, (hash, _flame_id))) = flame_files.iter().enumerate().find(|(_, x)| Some(x.1 as i32) == row.flame_project_id) { + if let Some((index, (sha1, _, file_name, _))) = hashes.iter().enumerate().find(|(_, (value, _, _, _))| value == hash) { + final_hashes.insert(sha1.clone(), IdentifiedFile { + file_name: file_name.clone(), + status: ApprovalType::from_string(&row.status).unwrap_or(ApprovalType::Unidentified), + }); + + insert_hashes.push(hash.clone().as_bytes().to_vec()); + insert_ids.push(row.id); + + hashes.remove(index); + flame_files.remove(curse_index); + } + } + } + + if !insert_ids.is_empty() && !insert_hashes.is_empty() { + sqlx::query!( + " + INSERT INTO moderation_external_files (sha1, external_license_id) + SELECT * FROM UNNEST ($1::bytea[], $2::bigint[]) + ON CONFLICT (sha1) DO NOTHING + ", + &insert_hashes[..], + &insert_ids[..] + ) + .execute(&pool) + .await?; + } + + if hashes.is_empty() { + let metadata = MissingMetadata { + identified: final_hashes, + flame_files: Default::default(), + unknown_files: Default::default(), + }; + + sqlx::query!( + " + UPDATE files + SET metadata = $1 + WHERE id = $2 + ", + serde_json::to_value(&metadata)?, + primary_file.id.0 + ) + .execute(&pool) + .await?; + + if metadata.identified.values().any(|x| x.status != ApprovalType::Yes && x.status != ApprovalType::WithAttributionAndSource) { + let val = mod_messages.version_specific.entry(version.inner.version_number).or_default(); + val.push(ModerationMessage::PackFilesNotAllowed {files: metadata.identified, incomplete: false }); + } + + continue; + } + + let flame_projects = if flame_files.is_empty() { + Vec::new() + } else { + let res = client + .post(format!("{}v1/mods", dotenvy::var("FLAME_ANVIL_URL")?)) + .json(&serde_json::json!({ + "modIds": flame_files.iter().map(|x| x.1).collect::>() + })) + .send() + .await? 
+ .text() + .await?; + + serde_json::from_str::>>(&res)?.data + }; + + let mut missing_metadata = MissingMetadata { + identified: final_hashes, + flame_files: HashMap::new(), + unknown_files: HashMap::new(), + }; + + for (sha1, _pack_file, file_name, _mumur2) in hashes { + let flame_file = flame_files.iter().find(|x| x.0 == sha1); + + if let Some((_, flame_project_id)) = flame_file { + if let Some(project) = flame_projects.iter().find(|x| &x.id == flame_project_id) { + missing_metadata.flame_files.insert(sha1, MissingMetadataFlame { + title: project.name.clone(), + file_name, + url: project.links.website_url.clone(), + id: *flame_project_id, + }); + + continue; + } + } + + missing_metadata.unknown_files.insert(sha1, file_name); + } + + sqlx::query!( + " + UPDATE files + SET metadata = $1 + WHERE id = $2 + ", + serde_json::to_value(&missing_metadata)?, + primary_file.id.0 + ) + .execute(&pool) + .await?; + + if missing_metadata.identified.values().any(|x| x.status != ApprovalType::Yes && x.status != ApprovalType::WithAttributionAndSource) { + let val = mod_messages.version_specific.entry(version.inner.version_number).or_default(); + val.push(ModerationMessage::PackFilesNotAllowed {files: missing_metadata.identified, incomplete: true }); + } + } else { + let val = mod_messages.version_specific.entry(version.inner.version_number).or_default(); + val.push(ModerationMessage::NoPrimaryFile); + } + } + + if !mod_messages.is_empty() { + let first_time = database::models::Thread::get(project.thread_id, &pool).await? + .map(|x| x.messages.iter().all(|x| x.author_id == Some(database::models::UserId(AUTOMOD_ID)) || x.hide_identity)) + .unwrap_or(true); + + let mut transaction = pool.begin().await?; + let id = ThreadMessageBuilder { + author_id: Some(database::models::UserId(AUTOMOD_ID)), + body: MessageBody::Text { + body: mod_messages.markdown(true), + private: false, + replying_to: None, + associated_images: vec![], + }, + thread_id: project.thread_id, + hide_identity: false, + } + .insert(&mut transaction) + .await?; + + let members = database::models::TeamMember::get_from_team_full( + project.inner.team_id, + &pool, + &redis, + ) + .await?; + + if mod_messages.should_reject(first_time) { + ThreadMessageBuilder { + author_id: Some(database::models::UserId(AUTOMOD_ID)), + body: MessageBody::StatusChange { + new_status: ProjectStatus::Rejected, + old_status: project.inner.status, + }, + thread_id: project.thread_id, + hide_identity: false, + } + .insert(&mut transaction) + .await?; + + NotificationBuilder { + body: NotificationBody::StatusChange { + project_id: project.inner.id.into(), + old_status: project.inner.status, + new_status: ProjectStatus::Rejected, + }, + } + .insert_many(members.into_iter().map(|x| x.user_id).collect(), &mut transaction, &redis) + .await?; + + if let Ok(webhook_url) = dotenvy::var("MODERATION_DISCORD_WEBHOOK") { + crate::util::webhook::send_discord_webhook( + project.inner.id.into(), + &pool, + &redis, + webhook_url, + Some( + format!( + "**[AutoMod]({}/user/AutoMod)** changed project status from **{}** to **Rejected**", + dotenvy::var("SITE_URL")?, + &project.inner.status.as_friendly_str(), + ) + .to_string(), + ), + ) + .await + .ok(); + } + + sqlx::query!( + " + UPDATE mods + SET status = 'rejected' + WHERE id = $1 + ", + project.inner.id.0 + ) + .execute(&pool) + .await?; + + database::models::Project::clear_cache( + project.inner.id, + project.inner.slug.clone(), + None, + &redis, + ) + .await?; + } else { + NotificationBuilder { + body: 
NotificationBody::ModeratorMessage { + thread_id: project.thread_id.into(), + message_id: id.into(), + project_id: Some(project.inner.id.into()), + report_id: None, + }, + } + .insert_many( + members.into_iter().map(|x| x.user_id).collect(), + &mut transaction, + &redis, + ) + .await?; + } + + transaction.commit().await?; + } + + Ok::<(), ApiError>(()) + }.await; + + if let Err(err) = res { + let err = err.as_api_error(); + + let mut str = String::new(); + str.push_str("## Internal AutoMod Error\n\n"); + str.push_str(&format!("Error code: {}\n\n", err.error)); + str.push_str(&format!("Error description: {}\n\n", err.description)); + + let mut transaction = pool.begin().await?; + ThreadMessageBuilder { + author_id: Some(database::models::UserId(AUTOMOD_ID)), + body: MessageBody::Text { + body: str, + private: true, + replying_to: None, + associated_images: vec![], + }, + thread_id: project.thread_id, + hide_identity: false, + } + .insert(&mut transaction) + .await?; + transaction.commit().await?; + } + } + + Ok::<(), ApiError>(()) + }.await.ok(); + } + + tokio::time::sleep(Duration::from_secs(5)).await + } + } +} + +#[derive(Serialize, Deserialize)] +pub struct MissingMetadata { + pub identified: HashMap, + pub flame_files: HashMap, + pub unknown_files: HashMap, +} + +#[derive(Serialize, Deserialize)] +pub struct IdentifiedFile { + pub file_name: String, + pub status: ApprovalType, +} + +#[derive(Serialize, Deserialize)] +pub struct MissingMetadataFlame { + pub title: String, + pub file_name: String, + pub url: String, + pub id: u32, +} + +#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq, Debug)] +#[serde(rename_all = "kebab-case")] +pub enum ApprovalType { + Yes, + WithAttributionAndSource, + WithAttribution, + No, + PermanentNo, + Unidentified, +} + +impl ApprovalType { + fn approved(&self) -> bool { + match self { + ApprovalType::Yes => true, + ApprovalType::WithAttributionAndSource => true, + ApprovalType::WithAttribution => true, + ApprovalType::No => false, + ApprovalType::PermanentNo => false, + ApprovalType::Unidentified => false, + } + } + + pub fn from_string(string: &str) -> Option { + match string { + "yes" => Some(ApprovalType::Yes), + "with-attribution-and-source" => Some(ApprovalType::WithAttributionAndSource), + "with-attribution" => Some(ApprovalType::WithAttribution), + "no" => Some(ApprovalType::No), + "permanent-no" => Some(ApprovalType::PermanentNo), + "unidentified" => Some(ApprovalType::Unidentified), + _ => None, + } + } + + pub(crate) fn as_str(&self) -> &'static str { + match self { + ApprovalType::Yes => "yes", + ApprovalType::WithAttributionAndSource => "with-attribution-and-source", + ApprovalType::WithAttribution => "with-attribution", + ApprovalType::No => "no", + ApprovalType::PermanentNo => "permanent-no", + ApprovalType::Unidentified => "unidentified", + } + } +} + +#[derive(Deserialize, Serialize)] +pub struct FlameResponse { + pub data: T, +} + +#[derive(Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct FingerprintResponse { + pub exact_matches: Vec, +} + +#[derive(Deserialize, Serialize)] +pub struct FingerprintMatch { + pub id: u32, + pub file: FlameFile, +} + +#[derive(Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct FlameFile { + pub id: u32, + pub mod_id: u32, + pub hashes: Vec, + pub file_fingerprint: u32, +} + +#[derive(Deserialize, Serialize, Debug)] +pub struct FlameFileHash { + pub value: String, + pub algo: u32, +} + +#[derive(Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] 
+pub struct FlameProject { + pub id: u32, + pub name: String, + pub slug: String, + pub links: FlameLinks, +} + +#[derive(Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct FlameLinks { + pub website_url: String, +} + +fn hash_flame_murmur32(input: Vec) -> u32 { + murmur2::murmur2( + &input + .into_iter() + .filter(|x| *x != 9 && *x != 10 && *x != 13 && *x != 32) + .collect::>(), + 1, + ) +} diff --git a/src/queue/payouts.rs b/src/queue/payouts.rs index fe341616..eed9079e 100644 --- a/src/queue/payouts.rs +++ b/src/queue/payouts.rs @@ -1,4 +1,3 @@ -use crate::models::ids::UserId; use crate::models::payouts::{ PayoutDecimal, PayoutInterval, PayoutMethod, PayoutMethodFee, PayoutMethodType, }; @@ -8,6 +7,7 @@ use crate::{database::redis::RedisPool, models::projects::MonetizationStatus}; use base64::Engine; use chrono::{DateTime, Datelike, Duration, Utc, Weekday}; use dashmap::DashMap; +use futures::TryStreamExt; use reqwest::Method; use rust_decimal::Decimal; use serde::de::DeserializeOwned; @@ -16,13 +16,12 @@ use serde_json::Value; use sqlx::postgres::PgQueryResult; use sqlx::PgPool; use std::collections::HashMap; -use std::sync::Arc; use tokio::sync::{Mutex, RwLock}; pub struct PayoutsQueue { credential: RwLock>, payout_options: RwLock>, - payouts_locks: DashMap>>, + pub payouts_locks: Mutex<()>, } #[derive(Clone)] @@ -49,7 +48,7 @@ impl PayoutsQueue { PayoutsQueue { credential: RwLock::new(None), payout_options: RwLock::new(None), - payouts_locks: DashMap::new(), + payouts_locks: Mutex::new(()), } } @@ -346,8 +345,14 @@ impl PayoutsQueue { "OEFTMSBA5ELH", "A3CQK6UHNV27", ]; - const SUPPORTED_METHODS: &[&str] = - &["merchant_cards", "visa", "bank", "ach", "visa_card"]; + const SUPPORTED_METHODS: &[&str] = &[ + "merchant_cards", + "merchant_card", + "visa", + "bank", + "ach", + "visa_card", + ]; if !SUPPORTED_METHODS.contains(&&*product.category) || BLACKLISTED_IDS.contains(&&*product.id) @@ -506,13 +511,6 @@ impl PayoutsQueue { Ok(options.options) } - - pub fn lock_user_payouts(&self, user_id: UserId) -> Arc> { - self.payouts_locks - .entry(user_id) - .or_insert_with(|| Arc::new(Mutex::new(()))) - .clone() - } } pub async fn process_payout( @@ -552,7 +550,7 @@ pub async fn process_payout( r#" SELECT COUNT(1) page_views, project_id FROM views - WHERE (recorded BETWEEN ? AND ?) AND (project_id != 0) + WHERE (recorded BETWEEN ? AND ?) AND (project_id != 0) AND (monetized = TRUE) GROUP BY project_id ORDER BY page_views DESC "#, @@ -561,7 +559,7 @@ pub async fn process_payout( .bind(end.timestamp()) .fetch_all::(), client - .query("SELECT COUNT(1) FROM views WHERE (recorded BETWEEN ? AND ?) AND (project_id != 0)") + .query("SELECT COUNT(1) FROM views WHERE (recorded BETWEEN ? AND ?) 
AND (project_id != 0) AND (monetized = TRUE)") .bind(start.timestamp()) .bind(end.timestamp()) .fetch_one::(), @@ -631,12 +629,22 @@ pub async fn process_payout( FROM mods m INNER JOIN organizations o ON m.organization_id = o.id INNER JOIN team_members tm on o.team_id = tm.team_id AND tm.accepted = TRUE - WHERE m.id = ANY($1) AND m.monetization_status = $2 AND m.organization_id IS NOT NULL + WHERE m.id = ANY($1) AND m.monetization_status = $2 AND m.status = ANY($3) AND m.organization_id IS NOT NULL ", &project_ids, MonetizationStatus::Monetized.as_str(), + &*crate::models::projects::ProjectStatus::iterator() + .filter(|x| !x.is_hidden()) + .map(|x| x.to_string()) + .collect::>(), ) - .fetch_all(&mut *transaction) + .fetch(&mut *transaction) + .try_fold(DashMap::new(), |acc: DashMap>, r| { + acc.entry(r.id) + .or_default() + .insert(r.user_id, r.payouts_split); + async move { Ok(acc) } + }) .await?; let project_team_members = sqlx::query!( @@ -644,25 +652,36 @@ pub async fn process_payout( SELECT m.id id, tm.user_id user_id, tm.payouts_split payouts_split FROM mods m INNER JOIN team_members tm on m.team_id = tm.team_id AND tm.accepted = TRUE - WHERE m.id = ANY($1) AND m.monetization_status = $2 + WHERE m.id = ANY($1) AND m.monetization_status = $2 AND m.status = ANY($3) ", &project_ids, MonetizationStatus::Monetized.as_str(), + &*crate::models::projects::ProjectStatus::iterator() + .filter(|x| !x.is_hidden()) + .map(|x| x.to_string()) + .collect::>(), + ) + .fetch(&mut *transaction) + .try_fold( + DashMap::new(), + |acc: DashMap>, r| { + acc.entry(r.id) + .or_default() + .insert(r.user_id, r.payouts_split); + async move { Ok(acc) } + }, ) - .fetch_all(&mut *transaction) .await?; for project_id in project_ids { let team_members: HashMap = project_team_members - .iter() - .filter(|r| r.id == project_id) - .map(|r| (r.user_id, r.payouts_split)) - .collect(); + .remove(&project_id) + .unwrap_or((0, HashMap::new())) + .1; let org_team_members: HashMap = project_org_members - .iter() - .filter(|r| r.id == project_id) - .map(|r| (r.user_id, r.payouts_split)) - .collect(); + .remove(&project_id) + .unwrap_or((0, HashMap::new())) + .1; let mut all_team_members = vec![]; @@ -707,6 +726,7 @@ pub async fn process_payout( let mut clear_cache_users = Vec::new(); let (mut insert_user_ids, mut insert_project_ids, mut insert_payouts, mut insert_starts) = (Vec::new(), Vec::new(), Vec::new(), Vec::new()); + let mut update_user_balance: HashMap = HashMap::new(); for (id, project) in projects_map { if let Some(value) = &multipliers.values.get(&(id as u64)) { let project_multiplier: Decimal = @@ -724,17 +744,7 @@ pub async fn process_payout( insert_payouts.push(payout); insert_starts.push(start); - sqlx::query!( - " - UPDATE users - SET balance = balance + $1 - WHERE id = $2 - ", - payout, - user_id - ) - .execute(&mut *transaction) - .await?; + *update_user_balance.entry(user_id).or_default() += payout; clear_cache_users.push(user_id); } @@ -743,6 +753,26 @@ pub async fn process_payout( } } + let (mut update_user_ids, mut update_user_balances) = (Vec::new(), Vec::new()); + + for (user_id, payout) in update_user_balance { + update_user_ids.push(user_id); + update_user_balances.push(payout); + } + + sqlx::query!( + " + UPDATE users u + SET balance = u.balance + v.amount + FROM unnest($1::BIGINT[], $2::NUMERIC[]) AS v(id, amount) + WHERE u.id = v.id + ", + &update_user_ids, + &update_user_balances + ) + .execute(&mut *transaction) + .await?; + sqlx::query!( " INSERT INTO payouts_values (user_id, mod_id, amount, 
created) diff --git a/src/ratelimit/errors.rs b/src/ratelimit/errors.rs deleted file mode 100644 index ef103117..00000000 --- a/src/ratelimit/errors.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Errors that can occur during middleware processing stage -use crate::models::error::ApiError; -use actix_web::ResponseError; -use log::*; -use thiserror::Error; - -/// Custom error type. Useful for logging and debugging different kinds of errors. -/// This type can be converted to Actix Error, which defaults to -/// InternalServerError -/// -#[derive(Debug, Error)] -pub enum ARError { - /// Read/Write error on store - #[error("read/write operation failed: {0}")] - ReadWrite(String), - - /// Identifier error - #[error("client identification failed")] - Identification, - /// Limited Error - #[error("You are being rate-limited. Please wait {reset} seconds. {remaining}/{max_requests} remaining.")] - Limited { - max_requests: usize, - remaining: usize, - reset: u64, - }, -} - -impl ResponseError for ARError { - fn error_response(&self) -> actix_web::HttpResponse { - match self { - Self::Limited { - max_requests, - remaining, - reset, - } => { - let mut response = actix_web::HttpResponse::TooManyRequests(); - response.insert_header(("x-ratelimit-limit", max_requests.to_string())); - response.insert_header(("x-ratelimit-remaining", remaining.to_string())); - response.insert_header(("x-ratelimit-reset", reset.to_string())); - response.json(ApiError { - error: "ratelimit_error", - description: &self.to_string(), - }) - } - _ => actix_web::HttpResponse::build(self.status_code()).json(ApiError { - error: "ratelimit_error", - description: &self.to_string(), - }), - } - } -} diff --git a/src/ratelimit/memory.rs b/src/ratelimit/memory.rs deleted file mode 100644 index ee52ca6a..00000000 --- a/src/ratelimit/memory.rs +++ /dev/null @@ -1,243 +0,0 @@ -//! 
In memory store for rate limiting -use actix::prelude::*; -use dashmap::DashMap; -use futures::future::{self}; -use log::*; -use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - -use crate::ratelimit::errors::ARError; -use crate::ratelimit::{ActorMessage, ActorResponse}; - -/// Type used to create a concurrent hashmap store -#[derive(Clone)] -pub struct MemoryStore { - inner: Arc>, -} - -impl Default for MemoryStore { - fn default() -> Self { - Self::new() - } -} - -impl MemoryStore { - /// Create a new hashmap - /// - /// # Example - /// ```rust - /// use labrinth::ratelimit::memory::MemoryStore; - /// - /// let store = MemoryStore::new(); - /// ``` - pub fn new() -> Self { - debug!("Creating new MemoryStore"); - MemoryStore { - inner: Arc::new(DashMap::::new()), - } - } -} - -/// Actor for memory store -pub struct MemoryStoreActor { - inner: Arc>, -} - -impl From for MemoryStoreActor { - fn from(store: MemoryStore) -> Self { - MemoryStoreActor { inner: store.inner } - } -} - -impl MemoryStoreActor { - /// Starts the memory actor and returns it's address - pub fn start(self) -> Addr { - debug!("Started memory store"); - Supervisor::start(|_| self) - } -} - -impl Actor for MemoryStoreActor { - type Context = Context; -} - -impl Supervised for MemoryStoreActor { - fn restarting(&mut self, _: &mut Self::Context) { - debug!("Restarting memory store"); - } -} - -impl Handler for MemoryStoreActor { - type Result = ActorResponse; - fn handle(&mut self, msg: ActorMessage, ctx: &mut Self::Context) -> Self::Result { - match msg { - ActorMessage::Set { key, value, expiry } => { - debug!("Inserting key {} with expiry {}", &key, &expiry.as_secs()); - let future_key = String::from(&key); - let now = SystemTime::now(); - let now = now.duration_since(UNIX_EPOCH).unwrap(); - self.inner.insert(key, (value, now + expiry)); - ctx.notify_later(ActorMessage::Remove(future_key), expiry); - ActorResponse::Set(Box::pin(future::ready(Ok(())))) - } - ActorMessage::Update { key, value } => match self.inner.get_mut(&key) { - Some(mut c) => { - let val_mut: &mut (usize, Duration) = c.value_mut(); - if val_mut.0 > value { - val_mut.0 -= value; - } else { - val_mut.0 = 0; - } - let new_val = val_mut.0; - ActorResponse::Update(Box::pin(future::ready(Ok(new_val)))) - } - None => ActorResponse::Update(Box::pin(future::ready(Err(ARError::ReadWrite( - "memory store: read failed!".to_string(), - ))))), - }, - ActorMessage::Get(key) => { - if self.inner.contains_key(&key) { - let val = match self.inner.get(&key) { - Some(c) => c, - None => { - return ActorResponse::Get(Box::pin(future::ready(Err( - ARError::ReadWrite("memory store: read failed!".to_string()), - )))) - } - }; - let val = val.value().0; - ActorResponse::Get(Box::pin(future::ready(Ok(Some(val))))) - } else { - ActorResponse::Get(Box::pin(future::ready(Ok(None)))) - } - } - ActorMessage::Expire(key) => { - let c = match self.inner.get(&key) { - Some(d) => d, - None => { - return ActorResponse::Expire(Box::pin(future::ready(Err( - ARError::ReadWrite("memory store: read failed!".to_string()), - )))) - } - }; - let dur = c.value().1; - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - let res = dur.checked_sub(now).unwrap_or_else(|| Duration::new(0, 0)); - ActorResponse::Expire(Box::pin(future::ready(Ok(res)))) - } - ActorMessage::Remove(key) => { - debug!("Removing key: {}", &key); - let val = match self.inner.remove::(&key) { - Some(c) => c, - None => { - return ActorResponse::Remove(Box::pin(future::ready(Err( - 
ARError::ReadWrite("memory store: remove failed!".to_string()), - )))) - } - }; - let val = val.1; - ActorResponse::Remove(Box::pin(future::ready(Ok(val.0)))) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[actix_rt::test] - async fn test_set() { - let store = MemoryStore::new(); - let addr = MemoryStoreActor::from(store.clone()).start(); - let res = addr - .send(ActorMessage::Set { - key: "hello".to_string(), - value: 30usize, - expiry: Duration::from_secs(5), - }) - .await; - let res = res.expect("Failed to send msg"); - match res { - ActorResponse::Set(c) => match c.await { - Ok(()) => {} - Err(e) => panic!("Shouldn't happen {}", &e), - }, - _ => panic!("Shouldn't happen!"), - } - } - - #[actix_rt::test] - async fn test_get() { - let store = MemoryStore::new(); - let addr = MemoryStoreActor::from(store.clone()).start(); - let expiry = Duration::from_secs(5); - let res = addr - .send(ActorMessage::Set { - key: "hello".to_string(), - value: 30usize, - expiry, - }) - .await; - let res = res.expect("Failed to send msg"); - match res { - ActorResponse::Set(c) => match c.await { - Ok(()) => {} - Err(e) => panic!("Shouldn't happen {}", &e), - }, - _ => panic!("Shouldn't happen!"), - } - let res2 = addr.send(ActorMessage::Get("hello".to_string())).await; - let res2 = res2.expect("Failed to send msg"); - match res2 { - ActorResponse::Get(c) => match c.await { - Ok(d) => { - let d = d.unwrap(); - assert_eq!(d, 30usize); - } - Err(e) => panic!("Shouldn't happen {}", &e), - }, - _ => panic!("Shouldn't happen!"), - }; - } - - #[actix_rt::test] - async fn test_expiry() { - let store = MemoryStore::new(); - let addr = MemoryStoreActor::from(store.clone()).start(); - let expiry = Duration::from_secs(3); - let res = addr - .send(ActorMessage::Set { - key: "hello".to_string(), - value: 30usize, - expiry, - }) - .await; - let res = res.expect("Failed to send msg"); - match res { - ActorResponse::Set(c) => match c.await { - Ok(()) => {} - Err(e) => panic!("Shouldn't happen {}", &e), - }, - _ => panic!("Shouldn't happen!"), - } - assert!(addr.connected()); - - let res3 = addr.send(ActorMessage::Expire("hello".to_string())).await; - let res3 = res3.expect("Failed to send msg"); - match res3 { - ActorResponse::Expire(c) => match c.await { - Ok(dur) => { - let now = Duration::from_secs(3); - if dur > now || dur > now + Duration::from_secs(4) { - panic!("Expiry is invalid!"); - } - } - Err(e) => { - panic!("Shouldn't happen: {}", &e); - } - }, - _ => panic!("Shouldn't happen!"), - }; - } -} diff --git a/src/ratelimit/middleware.rs b/src/ratelimit/middleware.rs deleted file mode 100644 index 495dcad5..00000000 --- a/src/ratelimit/middleware.rs +++ /dev/null @@ -1,260 +0,0 @@ -use crate::ratelimit::errors::ARError; -use crate::ratelimit::{ActorMessage, ActorResponse}; -use actix::dev::*; -use actix_web::{ - dev::{Service, ServiceRequest, ServiceResponse, Transform}, - error::Error as AWError, - http::header::{HeaderName, HeaderValue}, -}; -use futures::future::{ok, Ready}; -use log::*; -use std::{ - cell::RefCell, - future::Future, - ops::Fn, - pin::Pin, - rc::Rc, - task::{Context, Poll}, - time::Duration, -}; - -type RateLimiterIdentifier = Rc Result + 'static>>; - -pub struct RateLimiter -where - T: Handler + Send + Sync + 'static, - T::Context: ToEnvelope, -{ - interval: Duration, - max_requests: usize, - store: Addr, - identifier: RateLimiterIdentifier, - ignore_key: Option, -} - -impl RateLimiter -where - T: Handler + Send + Sync + 'static, - ::Context: ToEnvelope, -{ - /// Creates a new 
instance of `RateLimiter` with the provided address of `StoreActor`. - pub fn new(store: Addr) -> Self { - let identifier = |req: &ServiceRequest| { - let connection_info = req.connection_info(); - let ip = connection_info.peer_addr().ok_or(ARError::Identification)?; - Ok(String::from(ip)) - }; - RateLimiter { - interval: Duration::from_secs(0), - max_requests: 0, - store, - identifier: Rc::new(Box::new(identifier)), - ignore_key: None, - } - } - - /// Specify the interval. The counter for a client is reset after this interval - pub fn with_interval(mut self, interval: Duration) -> Self { - self.interval = interval; - self - } - - /// Specify the maximum number of requests allowed in the given interval. - pub fn with_max_requests(mut self, max_requests: usize) -> Self { - self.max_requests = max_requests; - self - } - - /// Sets key which can be used to bypass rate-limiter - pub fn with_ignore_key(mut self, ignore_key: Option) -> Self { - self.ignore_key = ignore_key; - self - } - - /// Function to get the identifier for the client request - pub fn with_identifier Result + 'static>( - mut self, - identifier: F, - ) -> Self { - self.identifier = Rc::new(Box::new(identifier)); - self - } -} - -impl Transform for RateLimiter -where - T: Handler + Send + Sync + 'static, - T::Context: ToEnvelope, - S: Service, Error = AWError> + 'static, - S::Future: 'static, - B: 'static, -{ - type Response = ServiceResponse; - type Error = S::Error; - type Transform = RateLimitMiddleware; - type InitError = (); - type Future = Ready>; - - fn new_transform(&self, service: S) -> Self::Future { - ok(RateLimitMiddleware { - service: Rc::new(RefCell::new(service)), - store: self.store.clone(), - max_requests: self.max_requests, - interval: self.interval.as_secs(), - identifier: self.identifier.clone(), - ignore_key: self.ignore_key.clone(), - }) - } -} - -/// Service factory for RateLimiter -pub struct RateLimitMiddleware -where - S: 'static, - T: Handler + 'static, -{ - service: Rc>, - store: Addr, - // Exists here for the sole purpose of knowing the max_requests and interval from RateLimiter - max_requests: usize, - interval: u64, - identifier: RateLimiterIdentifier, - ignore_key: Option, -} - -impl Service for RateLimitMiddleware -where - T: Handler + 'static, - S: Service, Error = AWError> + 'static, - S::Future: 'static, - B: 'static, - T::Context: ToEnvelope, -{ - type Response = ServiceResponse; - type Error = S::Error; - type Future = Pin>>>; - - fn poll_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.service.borrow_mut().poll_ready(cx) - } - - fn call(&self, req: ServiceRequest) -> Self::Future { - let store = self.store.clone(); - let srv = self.service.clone(); - let max_requests = self.max_requests; - let interval = Duration::from_secs(self.interval); - let identifier = self.identifier.clone(); - let ignore_key = self.ignore_key.clone(); - Box::pin(async move { - let identifier: String = (identifier)(&req)?; - - if let Some(ignore_key) = ignore_key { - if let Some(key) = req.headers().get("x-ratelimit-key") { - if key.to_str().ok().unwrap_or_default() == &*ignore_key { - let fut = srv.call(req); - let res = fut.await?; - return Ok(res); - } - } - } - - let remaining: ActorResponse = store - .send(ActorMessage::Get(String::from(&identifier))) - .await - .map_err(|_| ARError::Identification)?; - match remaining { - ActorResponse::Get(opt) => { - let opt = opt.await?; - if let Some(c) = opt { - // Existing entry in store - let expiry = store - .send(ActorMessage::Expire(String::from(&identifier))) - 
.await - .map_err(|_| ARError::ReadWrite("Setting timeout".to_string()))?; - let reset: Duration = match expiry { - ActorResponse::Expire(dur) => dur.await?, - _ => unreachable!(), - }; - if c == 0 { - info!("Limit exceeded for client: {}", &identifier); - Err(ARError::Limited { - max_requests, - remaining: c, - reset: reset.as_secs(), - } - .into()) - } else { - // Decrement value - let res: ActorResponse = store - .send(ActorMessage::Update { - key: identifier, - value: 1, - }) - .await - .map_err(|_| { - ARError::ReadWrite("Decrementing ratelimit".to_string()) - })?; - let updated_value: usize = match res { - ActorResponse::Update(c) => c.await?, - _ => unreachable!(), - }; - // Execute the request - let fut = srv.call(req); - let mut res = fut.await?; - let headers = res.headers_mut(); - // Safe unwraps, since usize is always convertible to string - headers.insert( - HeaderName::from_static("x-ratelimit-limit"), - HeaderValue::from_str(max_requests.to_string().as_str())?, - ); - headers.insert( - HeaderName::from_static("x-ratelimit-remaining"), - HeaderValue::from_str(updated_value.to_string().as_str())?, - ); - headers.insert( - HeaderName::from_static("x-ratelimit-reset"), - HeaderValue::from_str(reset.as_secs().to_string().as_str())?, - ); - Ok(res) - } - } else { - // New client, create entry in store - let current_value = max_requests - 1; - let res = store - .send(ActorMessage::Set { - key: String::from(&identifier), - value: current_value, - expiry: interval, - }) - .await - .map_err(|_| ARError::ReadWrite("Creating store entry".to_string()))?; - match res { - ActorResponse::Set(c) => c.await?, - _ => unreachable!(), - } - let fut = srv.call(req); - let mut res = fut.await?; - let headers = res.headers_mut(); - // Safe unwraps, since usize is always convertible to string - headers.insert( - HeaderName::from_static("x-ratelimit-limit"), - HeaderValue::from_str(max_requests.to_string().as_str()).unwrap(), - ); - headers.insert( - HeaderName::from_static("x-ratelimit-remaining"), - HeaderValue::from_str(current_value.to_string().as_str()).unwrap(), - ); - headers.insert( - HeaderName::from_static("x-ratelimit-reset"), - HeaderValue::from_str(interval.as_secs().to_string().as_str()).unwrap(), - ); - Ok(res) - } - } - _ => { - unreachable!(); - } - } - }) - } -} diff --git a/src/ratelimit/mod.rs b/src/ratelimit/mod.rs deleted file mode 100644 index 2d659c87..00000000 --- a/src/ratelimit/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::future::Future; -use std::marker::Send; -use std::pin::Pin; -use std::time::Duration; - -use crate::ratelimit::errors::ARError; -use actix::dev::*; - -pub mod errors; -pub mod memory; -/// The code for this module was directly taken from https://github.com/TerminalWitchcraft/actix-ratelimit -/// with some modifications including upgrading it to Actix 4! -pub mod middleware; - -/// Represents message that can be handled by a `StoreActor` -pub enum ActorMessage { - /// Get the remaining count based on the provided identifier - Get(String), - /// Set the count of the client identified by `key` to `value` valid for `expiry` - Set { - key: String, - value: usize, - expiry: Duration, - }, - /// Change the value of count for the client identified by `key` by `value` - Update { key: String, value: usize }, - /// Get the expiration time for the client. 
- Expire(String), - /// Remove the client from the store - Remove(String), -} - -impl Message for ActorMessage { - type Result = ActorResponse; -} - -/// Wrapper type for `Pin>` type -pub type Output = Pin> + Send>>; - -/// Represents data returned in response to `Messages` by a `StoreActor` -pub enum ActorResponse { - /// Returned in response to [Messages::Get](enum.Messages.html) - Get(Output>), - /// Returned in response to [Messages::Set](enum.Messages.html) - Set(Output<()>), - /// Returned in response to [Messages::Update](enum.Messages.html) - Update(Output), - /// Returned in response to [Messages::Expire](enum.Messages.html) - Expire(Output), - /// Returned in response to [Messages::Remove](enum.Messages.html) - Remove(Output), -} - -impl MessageResponse for ActorResponse -where - A: Actor, - M: actix::Message, -{ - fn handle(self, _: &mut A::Context, tx: Option>) { - if let Some(tx) = tx { - let _ = tx.send(self); - } - } -} diff --git a/src/routes/analytics.rs b/src/routes/analytics.rs index 04db14be..1d28f863 100644 --- a/src/routes/analytics.rs +++ b/src/routes/analytics.rs @@ -118,6 +118,7 @@ pub async fn page_view_ingest( .into_iter() .filter(|x| !FILTERED_HEADERS.contains(&&*x.0)) .collect(), + monetized: true, }; if let Some(segments) = url.path_segments() { diff --git a/src/routes/internal/mod.rs b/src/routes/internal/mod.rs index 81ac4c9b..5c1d782a 100644 --- a/src/routes/internal/mod.rs +++ b/src/routes/internal/mod.rs @@ -1,5 +1,6 @@ pub(crate) mod admin; pub mod flows; +pub mod moderation; pub mod pats; pub mod session; @@ -12,10 +13,10 @@ pub fn config(cfg: &mut actix_web::web::ServiceConfig) { actix_web::web::scope("_internal") .wrap(default_cors()) .configure(admin::config) - // TODO: write tests that catch these .configure(oauth_clients::config) .configure(session::config) .configure(flows::config) - .configure(pats::config), + .configure(pats::config) + .configure(moderation::config), ); } diff --git a/src/routes/internal/moderation.rs b/src/routes/internal/moderation.rs new file mode 100644 index 00000000..0918c638 --- /dev/null +++ b/src/routes/internal/moderation.rs @@ -0,0 +1,313 @@ +use super::ApiError; +use crate::database; +use crate::database::redis::RedisPool; +use crate::models::ids::random_base62; +use crate::models::projects::ProjectStatus; +use crate::queue::moderation::{ApprovalType, IdentifiedFile, MissingMetadata}; +use crate::queue::session::AuthQueue; +use crate::{auth::check_is_moderator_from_headers, models::pats::Scopes}; +use actix_web::{web, HttpRequest, HttpResponse}; +use serde::Deserialize; +use sqlx::PgPool; +use std::collections::HashMap; + +pub fn config(cfg: &mut web::ServiceConfig) { + cfg.route("moderation/projects", web::get().to(get_projects)); + cfg.route("moderation/project/{id}", web::get().to(get_project_meta)); + cfg.route("moderation/project", web::post().to(set_project_meta)); +} + +#[derive(Deserialize)] +pub struct ResultCount { + #[serde(default = "default_count")] + pub count: i16, +} + +fn default_count() -> i16 { + 100 +} + +pub async fn get_projects( + req: HttpRequest, + pool: web::Data, + redis: web::Data, + count: web::Query, + session_queue: web::Data, +) -> Result { + check_is_moderator_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::PROJECT_READ]), + ) + .await?; + + use futures::stream::TryStreamExt; + + let project_ids = sqlx::query!( + " + SELECT id FROM mods + WHERE status = $1 + ORDER BY queued ASC + LIMIT $2; + ", + ProjectStatus::Processing.as_str(), + count.count as i64 + 
) + .fetch_many(&**pool) + .try_filter_map(|e| async { Ok(e.right().map(|m| database::models::ProjectId(m.id))) }) + .try_collect::>() + .await?; + + let projects: Vec<_> = database::Project::get_many_ids(&project_ids, &**pool, &redis) + .await? + .into_iter() + .map(crate::models::projects::Project::from) + .collect(); + + Ok(HttpResponse::Ok().json(projects)) +} + +pub async fn get_project_meta( + req: HttpRequest, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, + info: web::Path<(String,)>, +) -> Result { + check_is_moderator_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::PROJECT_READ]), + ) + .await?; + + let project_id = info.into_inner().0; + let project = database::models::Project::get(&project_id, &**pool, &redis).await?; + + if let Some(project) = project { + let rows = sqlx::query!( + " + SELECT + f.metadata, v.id version_id + FROM versions v + INNER JOIN files f ON f.version_id = v.id + WHERE v.mod_id = $1 + ", + project.inner.id.0 + ) + .fetch_all(&**pool) + .await?; + + let mut merged = MissingMetadata { + identified: HashMap::new(), + flame_files: HashMap::new(), + unknown_files: HashMap::new(), + }; + + let mut check_hashes = Vec::new(); + let mut check_flames = Vec::new(); + + for row in rows { + if let Some(metadata) = row + .metadata + .and_then(|x| serde_json::from_value::(x).ok()) + { + merged.identified.extend(metadata.identified); + merged.flame_files.extend(metadata.flame_files); + merged.unknown_files.extend(metadata.unknown_files); + + check_hashes.extend(merged.flame_files.keys().cloned()); + check_hashes.extend(merged.unknown_files.keys().cloned()); + check_flames.extend(merged.flame_files.values().map(|x| x.id as i32)); + } + } + + let rows = sqlx::query!( + " + SELECT encode(mef.sha1, 'escape') sha1, mel.status status + FROM moderation_external_files mef + INNER JOIN moderation_external_licenses mel ON mef.external_license_id = mel.id + WHERE mef.sha1 = ANY($1) + ", + &check_hashes + .iter() + .map(|x| x.as_bytes().to_vec()) + .collect::>() + ) + .fetch_all(&**pool) + .await?; + + for row in rows { + if let Some(sha1) = row.sha1 { + if let Some(val) = merged.flame_files.remove(&sha1) { + merged.identified.insert( + sha1, + IdentifiedFile { + file_name: val.file_name, + status: ApprovalType::from_string(&row.status) + .unwrap_or(ApprovalType::Unidentified), + }, + ); + } else if let Some(val) = merged.unknown_files.remove(&sha1) { + merged.identified.insert( + sha1, + IdentifiedFile { + file_name: val, + status: ApprovalType::from_string(&row.status) + .unwrap_or(ApprovalType::Unidentified), + }, + ); + } + } + } + + let rows = sqlx::query!( + " + SELECT mel.id, mel.flame_project_id, mel.status status + FROM moderation_external_licenses mel + WHERE mel.flame_project_id = ANY($1) + ", + &check_flames, + ) + .fetch_all(&**pool) + .await?; + + for row in rows { + if let Some(sha1) = merged + .flame_files + .iter() + .find(|x| Some(x.1.id as i32) == row.flame_project_id) + .map(|x| x.0.clone()) + { + if let Some(val) = merged.flame_files.remove(&sha1) { + merged.identified.insert( + sha1, + IdentifiedFile { + file_name: val.file_name.clone(), + status: ApprovalType::from_string(&row.status) + .unwrap_or(ApprovalType::Unidentified), + }, + ); + } + } + } + + Ok(HttpResponse::Ok().json(merged)) + } else { + Err(ApiError::NotFound) + } +} + +#[derive(Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum Judgement { + Flame { + id: i32, + status: ApprovalType, + link: String, + title: String, + }, + 
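// For reference, the `judgements` body handled by `set_project_meta` below is a map of
// file sha1 hashes to one of these internally tagged variants (serde tag = "type",
// snake_case). A minimal sketch of the JSON shape; the hashes, status strings, and other
// values are illustrative assumptions, not taken from this change:
//
//     {
//         "1b2c3d...": {
//             "type": "flame",
//             "id": 12345,
//             "status": "approved",
//             "link": "https://example.com/project",
//             "title": "Some External Project"
//         },
//         "9f8e7d...": {
//             "type": "unknown",
//             "status": "unidentified",
//             "proof": null,
//             "link": null,
//             "title": null
//         }
//     }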
Unknown { + status: ApprovalType, + proof: Option, + link: Option, + title: Option, + }, +} + +pub async fn set_project_meta( + req: HttpRequest, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, + judgements: web::Json>, +) -> Result { + check_is_moderator_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::PROJECT_READ]), + ) + .await?; + + let mut transaction = pool.begin().await?; + + let mut ids = Vec::new(); + let mut titles = Vec::new(); + let mut statuses = Vec::new(); + let mut links = Vec::new(); + let mut proofs = Vec::new(); + let mut flame_ids = Vec::new(); + + let mut file_hashes = Vec::new(); + + for (hash, judgement) in judgements.0 { + let id = random_base62(8); + + let (title, status, link, proof, flame_id) = match judgement { + Judgement::Flame { + id, + status, + link, + title, + } => ( + Some(title), + status, + Some(link), + Some("See Flame page/license for permission".to_string()), + Some(id), + ), + Judgement::Unknown { + status, + proof, + link, + title, + } => (title, status, link, proof, None), + }; + + ids.push(id as i64); + titles.push(title); + statuses.push(status.as_str()); + links.push(link); + proofs.push(proof); + flame_ids.push(flame_id); + file_hashes.push(hash); + } + + sqlx::query( + " + INSERT INTO moderation_external_licenses (id, title, status, link, proof, flame_project_id) + SELECT * FROM UNNEST ($1::bigint[], $2::varchar[], $3::varchar[], $4::varchar[], $5::varchar[], $6::integer[]) + " + ) + .bind(&ids[..]) + .bind(&titles[..]) + .bind(&statuses[..]) + .bind(&links[..]) + .bind(&proofs[..]) + .bind(&flame_ids[..]) + .execute(&mut *transaction) + .await?; + + sqlx::query( + " + INSERT INTO moderation_external_files (sha1, external_license_id) + SELECT * FROM UNNEST ($1::bytea[], $2::bigint[]) + ON CONFLICT (sha1) + DO NOTHING + ", + ) + .bind(&file_hashes[..]) + .bind(&ids[..]) + .execute(&mut *transaction) + .await?; + + transaction.commit().await?; + + Ok(HttpResponse::NoContent().finish()) +} diff --git a/src/routes/maven.rs b/src/routes/maven.rs index aeb9bb88..37cfe17d 100644 --- a/src/routes/maven.rs +++ b/src/routes/maven.rs @@ -92,7 +92,7 @@ pub async fn maven_metadata( .map(|x| x.1) .ok(); - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? { return Err(ApiError::NotFound); } @@ -286,7 +286,7 @@ pub async fn version_file( .map(|x| x.1) .ok(); - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? { return Err(ApiError::NotFound); } @@ -347,7 +347,7 @@ pub async fn version_file_sha1( .map(|x| x.1) .ok(); - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? { return Err(ApiError::NotFound); } @@ -389,7 +389,7 @@ pub async fn version_file_sha512( .map(|x| x.1) .ok(); - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? 
{ return Err(ApiError::NotFound); } diff --git a/src/routes/mod.rs b/src/routes/mod.rs index 18581eaa..be706988 100644 --- a/src/routes/mod.rs +++ b/src/routes/mod.rs @@ -123,8 +123,50 @@ pub enum ApiError { Mail(#[from] crate::auth::email::MailError), #[error("Error while rerouting request: {0}")] Reroute(#[from] reqwest::Error), + #[error("Unable to read Zip Archive: {0}")] + Zip(#[from] zip::result::ZipError), + #[error("IO Error: {0}")] + Io(#[from] std::io::Error), #[error("Resource not found")] NotFound, + #[error("You are being rate-limited. Please wait {0} milliseconds. 0/{1} remaining.")] + RateLimitError(u128, u32), +} + +impl ApiError { + pub fn as_api_error<'a>(&self) -> crate::models::error::ApiError<'a> { + crate::models::error::ApiError { + error: match self { + ApiError::Env(..) => "environment_error", + ApiError::SqlxDatabase(..) => "database_error", + ApiError::Database(..) => "database_error", + ApiError::Authentication(..) => "unauthorized", + ApiError::CustomAuthentication(..) => "unauthorized", + ApiError::Xml(..) => "xml_error", + ApiError::Json(..) => "json_error", + ApiError::Search(..) => "search_error", + ApiError::Indexing(..) => "indexing_error", + ApiError::FileHosting(..) => "file_hosting_error", + ApiError::InvalidInput(..) => "invalid_input", + ApiError::Validation(..) => "invalid_input", + ApiError::Payments(..) => "payments_error", + ApiError::Discord(..) => "discord_error", + ApiError::Turnstile => "turnstile_error", + ApiError::Decoding(..) => "decoding_error", + ApiError::ImageParse(..) => "invalid_image", + ApiError::PasswordHashing(..) => "password_hashing_error", + ApiError::PasswordStrengthCheck(..) => "strength_check_error", + ApiError::Mail(..) => "mail_error", + ApiError::Clickhouse(..) => "clickhouse_error", + ApiError::Reroute(..) => "reroute_error", + ApiError::NotFound => "not_found", + ApiError::Zip(..) => "zip_error", + ApiError::Io(..) => "io_error", + ApiError::RateLimitError(..) => "ratelimit_error", + }, + description: self.to_string(), + } + } } impl actix_web::ResponseError for ApiError { @@ -153,37 +195,13 @@ impl actix_web::ResponseError for ApiError { ApiError::Mail(..) => StatusCode::INTERNAL_SERVER_ERROR, ApiError::Reroute(..) => StatusCode::INTERNAL_SERVER_ERROR, ApiError::NotFound => StatusCode::NOT_FOUND, + ApiError::Zip(..) => StatusCode::BAD_REQUEST, + ApiError::Io(..) => StatusCode::BAD_REQUEST, + ApiError::RateLimitError(..) => StatusCode::TOO_MANY_REQUESTS, } } fn error_response(&self) -> HttpResponse { - HttpResponse::build(self.status_code()).json(crate::models::error::ApiError { - error: match self { - ApiError::Env(..) => "environment_error", - ApiError::SqlxDatabase(..) => "database_error", - ApiError::Database(..) => "database_error", - ApiError::Authentication(..) => "unauthorized", - ApiError::CustomAuthentication(..) => "unauthorized", - ApiError::Xml(..) => "xml_error", - ApiError::Json(..) => "json_error", - ApiError::Search(..) => "search_error", - ApiError::Indexing(..) => "indexing_error", - ApiError::FileHosting(..) => "file_hosting_error", - ApiError::InvalidInput(..) => "invalid_input", - ApiError::Validation(..) => "invalid_input", - ApiError::Payments(..) => "payments_error", - ApiError::Discord(..) => "discord_error", - ApiError::Turnstile => "turnstile_error", - ApiError::Decoding(..) => "decoding_error", - ApiError::ImageParse(..) => "invalid_image", - ApiError::PasswordHashing(..) => "password_hashing_error", - ApiError::PasswordStrengthCheck(..) => "strength_check_error", - ApiError::Mail(..) 
=> "mail_error", - ApiError::Clickhouse(..) => "clickhouse_error", - ApiError::Reroute(..) => "reroute_error", - ApiError::NotFound => "not_found", - }, - description: &self.to_string(), - }) + HttpResponse::build(self.status_code()).json(self.as_api_error()) } } diff --git a/src/routes/not_found.rs b/src/routes/not_found.rs index aa01aac9..2da930bd 100644 --- a/src/routes/not_found.rs +++ b/src/routes/not_found.rs @@ -4,7 +4,7 @@ use actix_web::{HttpResponse, Responder}; pub async fn not_found() -> impl Responder { let data = ApiError { error: "not_found", - description: "the requested route does not exist", + description: "the requested route does not exist".to_string(), }; HttpResponse::NotFound().json(data) diff --git a/src/routes/updates.rs b/src/routes/updates.rs index f5688a91..e3e9c7fa 100644 --- a/src/routes/updates.rs +++ b/src/routes/updates.rs @@ -57,7 +57,7 @@ pub async fn forge_updates( .map(|x| x.1) .ok(); - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? { return Err(ApiError::InvalidInput(ERROR.to_string())); } diff --git a/src/routes/v2/moderation.rs b/src/routes/v2/moderation.rs index fac02f15..db4517a8 100644 --- a/src/routes/v2/moderation.rs +++ b/src/routes/v2/moderation.rs @@ -2,7 +2,7 @@ use super::ApiError; use crate::models::projects::Project; use crate::models::v2::projects::LegacyProject; use crate::queue::session::AuthQueue; -use crate::routes::v3; +use crate::routes::internal; use crate::{database::redis::RedisPool, routes::v2_reroute}; use actix_web::{get, web, HttpRequest, HttpResponse}; use serde::Deserialize; @@ -30,11 +30,11 @@ pub async fn get_projects( count: web::Query, session_queue: web::Data, ) -> Result { - let response = v3::moderation::get_projects( + let response = internal::moderation::get_projects( req, pool.clone(), redis.clone(), - web::Query(v3::moderation::ResultCount { count: count.count }), + web::Query(internal::moderation::ResultCount { count: count.count }), session_queue, ) .await diff --git a/src/routes/v2/projects.rs b/src/routes/v2/projects.rs index 512ad920..f5ce258c 100644 --- a/src/routes/v2/projects.rs +++ b/src/routes/v2/projects.rs @@ -7,6 +7,7 @@ use crate::models::projects::{ }; use crate::models::v2::projects::{DonationLink, LegacyProject, LegacySideType, LegacyVersion}; use crate::models::v2::search::LegacySearchResults; +use crate::queue::moderation::AutomatedModerationQueue; use crate::queue::session::AuthQueue; use crate::routes::v3::projects::ProjectIds; use crate::routes::{v2_reroute, v3, ApiError}; @@ -380,6 +381,7 @@ pub struct EditProject { } #[patch("{id}")] +#[allow(clippy::too_many_arguments)] pub async fn project_edit( req: HttpRequest, info: web::Path<(String,)>, @@ -388,6 +390,7 @@ pub async fn project_edit( new_project: web::Json, redis: web::Data, session_queue: web::Data, + moderation_queue: web::Data, ) -> Result { let v2_new_project = new_project.into_inner(); let client_side = v2_new_project.client_side; @@ -494,6 +497,7 @@ pub async fn project_edit( web::Json(new_project), redis.clone(), session_queue.clone(), + moderation_queue, ) .await .or_else(v2_reroute::flatten_404_error)?; diff --git a/src/routes/v2/threads.rs b/src/routes/v2/threads.rs index fd0c2ec6..7b3c4f71 100644 --- a/src/routes/v2/threads.rs +++ b/src/routes/v2/threads.rs @@ -14,10 +14,8 @@ use sqlx::PgPool; pub fn config(cfg: &mut web::ServiceConfig) { cfg.service( web::scope("thread") - .service(moderation_inbox) .service(thread_get) - 
.service(thread_send_message) - .service(thread_read), + .service(thread_send_message), ); cfg.service(web::scope("message").service(message_delete)); cfg.service(threads_get); @@ -102,44 +100,6 @@ pub async fn thread_send_message( .or_else(v2_reroute::flatten_404_error) } -#[get("inbox")] -pub async fn moderation_inbox( - req: HttpRequest, - pool: web::Data, - redis: web::Data, - session_queue: web::Data, -) -> Result { - let response = v3::threads::moderation_inbox(req, pool, redis, session_queue) - .await - .or_else(v2_reroute::flatten_404_error)?; - - // Convert response to V2 format - match v2_reroute::extract_ok_json::>(response).await { - Ok(threads) => { - let threads = threads - .into_iter() - .map(LegacyThread::from) - .collect::>(); - Ok(HttpResponse::Ok().json(threads)) - } - Err(response) => Ok(response), - } -} - -#[post("{id}/read")] -pub async fn thread_read( - req: HttpRequest, - info: web::Path<(ThreadId,)>, - pool: web::Data, - redis: web::Data, - session_queue: web::Data, -) -> Result { - // Returns NoContent, so we don't need to convert the response - v3::threads::thread_read(req, info, pool, redis, session_queue) - .await - .or_else(v2_reroute::flatten_404_error) -} - #[delete("{id}")] pub async fn message_delete( req: HttpRequest, diff --git a/src/routes/v2/version_file.rs b/src/routes/v2/version_file.rs index 05224fe6..24e01270 100644 --- a/src/routes/v2/version_file.rs +++ b/src/routes/v2/version_file.rs @@ -248,33 +248,22 @@ pub struct ManyUpdateData { #[post("update")] pub async fn update_files( - req: HttpRequest, pool: web::Data, redis: web::Data, update_data: web::Json, - session_queue: web::Data, ) -> Result { let update_data = update_data.into_inner(); - let mut loader_fields = HashMap::new(); - let mut game_versions = vec![]; - for gv in update_data.game_versions.into_iter().flatten() { - game_versions.push(serde_json::json!(gv.clone())); - } - if !game_versions.is_empty() { - loader_fields.insert("game_versions".to_string(), game_versions); - } let update_data = v3::version_file::ManyUpdateData { loaders: update_data.loaders.clone(), version_types: update_data.version_types.clone(), - loader_fields: Some(loader_fields), + game_versions: update_data.game_versions.clone(), algorithm: update_data.algorithm, hashes: update_data.hashes, }; - let response = - v3::version_file::update_files(req, pool, redis, web::Json(update_data), session_queue) - .await - .or_else(v2_reroute::flatten_404_error)?; + let response = v3::version_file::update_files(pool, redis, web::Json(update_data)) + .await + .or_else(v2_reroute::flatten_404_error)?; // Convert response to V2 format match v2_reroute::extract_ok_json::>(response).await { diff --git a/src/routes/v3/mod.rs b/src/routes/v3/mod.rs index a5165fec..23a92a00 100644 --- a/src/routes/v3/mod.rs +++ b/src/routes/v3/mod.rs @@ -6,7 +6,6 @@ use serde_json::json; pub mod analytics_get; pub mod collections; pub mod images; -pub mod moderation; pub mod notifications; pub mod organizations; pub mod payouts; @@ -31,7 +30,6 @@ pub fn config(cfg: &mut web::ServiceConfig) { .configure(analytics_get::config) .configure(collections::config) .configure(images::config) - .configure(moderation::config) .configure(notifications::config) .configure(organizations::config) .configure(project_creation::config) diff --git a/src/routes/v3/moderation.rs b/src/routes/v3/moderation.rs deleted file mode 100644 index 8b72e036..00000000 --- a/src/routes/v3/moderation.rs +++ /dev/null @@ -1,65 +0,0 @@ -use super::ApiError; -use crate::database; -use 
crate::database::redis::RedisPool; -use crate::models::projects::ProjectStatus; -use crate::queue::session::AuthQueue; -use crate::{auth::check_is_moderator_from_headers, models::pats::Scopes}; -use actix_web::{web, HttpRequest, HttpResponse}; -use serde::Deserialize; -use sqlx::PgPool; - -pub fn config(cfg: &mut web::ServiceConfig) { - cfg.route("moderation/projects", web::get().to(get_projects)); -} - -#[derive(Deserialize)] -pub struct ResultCount { - #[serde(default = "default_count")] - pub count: i16, -} - -fn default_count() -> i16 { - 100 -} - -pub async fn get_projects( - req: HttpRequest, - pool: web::Data, - redis: web::Data, - count: web::Query, - session_queue: web::Data, -) -> Result { - check_is_moderator_from_headers( - &req, - &**pool, - &redis, - &session_queue, - Some(&[Scopes::PROJECT_READ]), - ) - .await?; - - use futures::stream::TryStreamExt; - - let project_ids = sqlx::query!( - " - SELECT id FROM mods - WHERE status = $1 - ORDER BY queued ASC - LIMIT $2; - ", - ProjectStatus::Processing.as_str(), - count.count as i64 - ) - .fetch_many(&**pool) - .try_filter_map(|e| async { Ok(e.right().map(|m| database::models::ProjectId(m.id))) }) - .try_collect::>() - .await?; - - let projects: Vec<_> = database::Project::get_many_ids(&project_ids, &**pool, &redis) - .await? - .into_iter() - .map(crate::models::projects::Project::from) - .collect(); - - Ok(HttpResponse::Ok().json(projects)) -} diff --git a/src/routes/v3/oauth_clients.rs b/src/routes/v3/oauth_clients.rs index 03e50b9c..32cec006 100644 --- a/src/routes/v3/oauth_clients.rs +++ b/src/routes/v3/oauth_clients.rs @@ -14,12 +14,6 @@ use sqlx::PgPool; use validator::Validate; use super::ApiError; -use crate::{ - auth::checks::ValidateAllAuthorized, - file_hosting::FileHost, - models::{ids::base62_impl::parse_base62, oauth_clients::DeleteOAuthClientQueryParam}, - util::routes::read_from_payload, -}; use crate::{ auth::{checks::ValidateAuthorized, get_user_from_headers}, database::{ @@ -40,6 +34,11 @@ use crate::{ routes::v3::project_creation::CreateError, util::validate::validation_errors_to_string, }; +use crate::{ + file_hosting::FileHost, + models::{ids::base62_impl::parse_base62, oauth_clients::DeleteOAuthClientQueryParam}, + util::routes::read_from_payload, +}; use crate::database::models::oauth_client_item::OAuthClient as DBOAuthClient; use crate::models::ids::OAuthClientId as ApiOAuthClientId; @@ -80,10 +79,13 @@ pub async fn get_user_clients( let target_user = User::get(&info.into_inner(), &**pool, &redis).await?; if let Some(target_user) = target_user { + if target_user.id != current_user.id.into() && !current_user.role.is_admin() { + return Err(ApiError::CustomAuthentication( + "You do not have permission to see the OAuth clients of this user!".to_string(), + )); + } + let clients = OAuthClient::get_all_user_clients(target_user.id, &**pool).await?; - clients - .iter() - .validate_all_authorized(Some(¤t_user))?; let response = clients .into_iter() @@ -98,13 +100,10 @@ pub async fn get_user_clients( #[get("app/{id}")] pub async fn get_client( - req: HttpRequest, id: web::Path, pool: web::Data, - redis: web::Data, - session_queue: web::Data, ) -> Result { - let clients = get_clients_inner(&[id.into_inner()], req, pool, redis, session_queue).await?; + let clients = get_clients_inner(&[id.into_inner()], pool).await?; if let Some(client) = clients.into_iter().next() { Ok(HttpResponse::Ok().json(client)) } else { @@ -114,11 +113,8 @@ pub async fn get_client( #[get("apps")] pub async fn get_clients( - req: HttpRequest, 
info: web::Query, pool: web::Data, - redis: web::Data, - session_queue: web::Data, ) -> Result { let ids: Vec<_> = info .ids @@ -126,7 +122,7 @@ pub async fn get_clients( .map(|id| parse_base62(id).map(ApiOAuthClientId)) .collect::>()?; - let clients = get_clients_inner(&ids, req, pool, redis, session_queue).await?; + let clients = get_clients_inner(&ids, pool).await?; Ok(HttpResponse::Ok().json(clients)) } @@ -583,26 +579,10 @@ async fn edit_redirects( pub async fn get_clients_inner( ids: &[ApiOAuthClientId], - req: HttpRequest, pool: web::Data, - redis: web::Data, - session_queue: web::Data, ) -> Result, ApiError> { - let current_user = get_user_from_headers( - &req, - &**pool, - &redis, - &session_queue, - Some(&[Scopes::SESSION_ACCESS]), - ) - .await? - .1; - let ids: Vec = ids.iter().map(|i| (*i).into()).collect(); let clients = OAuthClient::get_many(&ids, &**pool).await?; - clients - .iter() - .validate_all_authorized(Some(¤t_user))?; Ok(clients.into_iter().map(|c| c.into()).collect_vec()) } diff --git a/src/routes/v3/organizations.rs b/src/routes/v3/organizations.rs index 9d29b0a1..48b2867b 100644 --- a/src/routes/v3/organizations.rs +++ b/src/routes/v3/organizations.rs @@ -85,7 +85,7 @@ pub async fn organization_projects_get( let projects_data = crate::database::models::Project::get_many_ids(&project_ids, &**pool, &redis).await?; - let projects = filter_visible_projects(projects_data, ¤t_user, &pool).await?; + let projects = filter_visible_projects(projects_data, ¤t_user, &pool, true).await?; Ok(HttpResponse::Ok().json(projects)) } diff --git a/src/routes/v3/payouts.rs b/src/routes/v3/payouts.rs index 981384da..dc713ee2 100644 --- a/src/routes/v3/payouts.rs +++ b/src/routes/v3/payouts.rs @@ -128,9 +128,7 @@ pub async fn paypal_webhook( .await?; if let Some(result) = result { - let mtx = - payouts.lock_user_payouts(crate::models::ids::UserId(result.user_id as u64)); - let _guard = mtx.lock().await; + let _guard = payouts.payouts_locks.lock().await; sqlx::query!( " @@ -249,9 +247,7 @@ pub async fn tremendous_webhook( .await?; if let Some(result) = result { - let mtx = - payouts.lock_user_payouts(crate::models::ids::UserId(result.user_id as u64)); - let _guard = mtx.lock().await; + let _guard = payouts.payouts_locks.lock().await; sqlx::query!( " @@ -371,8 +367,7 @@ pub async fn create_payout( )); } - let mtx = payouts_queue.lock_user_payouts(user.id.into()); - let _guard = mtx.lock().await; + let _guard = payouts_queue.payouts_locks.lock().await; if user.balance < body.amount || body.amount < Decimal::ZERO { return Err(ApiError::InvalidInput( diff --git a/src/routes/v3/project_creation.rs b/src/routes/v3/project_creation.rs index 4e5203bf..536aaf38 100644 --- a/src/routes/v3/project_creation.rs +++ b/src/routes/v3/project_creation.rs @@ -12,7 +12,7 @@ use crate::models::pats::Scopes; use crate::models::projects::{ License, Link, MonetizationStatus, ProjectId, ProjectStatus, VersionId, VersionStatus, }; -use crate::models::teams::ProjectPermissions; +use crate::models::teams::{OrganizationPermissions, ProjectPermissions}; use crate::models::threads::ThreadType; use crate::models::users::UserId; use crate::queue::session::AuthQueue; @@ -137,7 +137,7 @@ impl actix_web::ResponseError for CreateError { CreateError::ImageError(..) => "invalid_image", CreateError::RerouteError(..) 
=> "reroute_error", }, - description: &self.to_string(), + description: self.to_string(), }) } } @@ -614,7 +614,30 @@ async fn project_create_inner( let mut members = vec![]; - if project_create_data.organization_id.is_none() { + if let Some(organization_id) = project_create_data.organization_id { + let org = models::Organization::get_id(organization_id.into(), pool, redis) + .await? + .ok_or_else(|| { + CreateError::InvalidInput("Invalid organization ID specified!".to_string()) + })?; + + let team_member = + models::TeamMember::get_from_user_id(org.team_id, current_user.id.into(), pool) + .await?; + + let perms = + OrganizationPermissions::get_permissions_by_role(¤t_user.role, &team_member); + + if !perms + .map(|x| x.contains(OrganizationPermissions::ADD_PROJECT)) + .unwrap_or(false) + { + return Err(CreateError::CustomAuthenticationError( + "You do not have the permissions to create projects in this organization!" + .to_string(), + )); + } + } else { members.push(models::team_item::TeamMemberBuilder { user_id: current_user.id.into(), role: crate::models::teams::DEFAULT_ROLE.to_owned(), @@ -626,7 +649,6 @@ async fn project_create_inner( ordering: 0, }) } - let team = models::team_item::TeamBuilder { members }; let team_id = team.insert(&mut *transaction).await?; diff --git a/src/routes/v3/projects.rs b/src/routes/v3/projects.rs index 8a426e3f..cc8ccffe 100644 --- a/src/routes/v3/projects.rs +++ b/src/routes/v3/projects.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::sync::Arc; -use crate::auth::checks::is_visible_project; +use crate::auth::checks::{filter_visible_versions, is_visible_project}; use crate::auth::{filter_visible_projects, get_user_from_headers}; use crate::database::models::notification_item::NotificationBuilder; use crate::database::models::project_item::{GalleryItem, ModCategory}; @@ -20,6 +20,7 @@ use crate::models::projects::{ }; use crate::models::teams::ProjectPermissions; use crate::models::threads::MessageBody; +use crate::queue::moderation::AutomatedModerationQueue; use crate::queue::session::AuthQueue; use crate::routes::ApiError; use crate::search::indexing::remove_documents; @@ -137,7 +138,7 @@ pub async fn projects_get( .map(|x| x.1) .ok(); - let projects = filter_visible_projects(projects_data, &user_option, &pool).await?; + let projects = filter_visible_projects(projects_data, &user_option, &pool, false).await?; Ok(HttpResponse::Ok().json(projects)) } @@ -164,7 +165,7 @@ pub async fn project_get( .ok(); if let Some(data) = project_data { - if is_visible_project(&data.inner, &user_option, &pool).await? { + if is_visible_project(&data.inner, &user_option, &pool, false).await? 
{ return Ok(HttpResponse::Ok().json(Project::from(data))); } } @@ -229,6 +230,7 @@ pub struct EditProject { pub monetization_status: Option, } +#[allow(clippy::too_many_arguments)] pub async fn project_edit( req: HttpRequest, info: web::Path<(String,)>, @@ -237,6 +239,7 @@ pub async fn project_edit( new_project: web::Json, redis: web::Data, session_queue: web::Data, + moderation_queue: web::Data, ) -> Result { let user = get_user_from_headers( &req, @@ -352,16 +355,9 @@ pub async fn project_edit( .execute(&mut *transaction) .await?; - sqlx::query!( - " - UPDATE threads - SET show_in_mod_inbox = FALSE - WHERE id = $1 - ", - project_item.thread_id as db_ids::ThreadId, - ) - .execute(&mut *transaction) - .await?; + moderation_queue + .projects + .insert(project_item.inner.id.into()); } if status.is_approved() && !project_item.inner.status.is_approved() { @@ -457,6 +453,7 @@ pub async fn project_edit( old_status: project_item.inner.status, }, thread_id: project_item.thread_id, + hide_identity: true, } .insert(&mut transaction) .await?; @@ -971,7 +968,7 @@ pub async fn dependency_list( .ok(); if let Some(project) = result { - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? { return Err(ApiError::NotFound); } @@ -1004,14 +1001,10 @@ pub async fn dependency_list( ) .await?; - let mut projects = projects_result - .into_iter() - .map(models::projects::Project::from) - .collect::>(); - let mut versions = versions_result - .into_iter() - .map(models::projects::Version::from) - .collect::>(); + let mut projects = + filter_visible_projects(projects_result, &user_option, &pool, false).await?; + let mut versions = + filter_visible_versions(versions_result, &user_option, &pool, &redis).await?; projects.sort_by(|a, b| b.published.cmp(&a.published)); projects.dedup_by(|a, b| a.id == b.id); @@ -2064,7 +2057,7 @@ pub async fn project_follow( let user_id: db_ids::UserId = user.id.into(); let project_id: db_ids::ProjectId = result.inner.id; - if !is_visible_project(&result.inner, &Some(user), &pool).await? { + if !is_visible_project(&result.inner, &Some(user), &pool, false).await? { return Err(ApiError::NotFound); } @@ -2215,7 +2208,7 @@ pub async fn project_get_organization( ApiError::InvalidInput("The specified project does not exist!".to_string()) })?; - if !is_visible_project(&result.inner, ¤t_user, &pool).await? { + if !is_visible_project(&result.inner, ¤t_user, &pool, false).await? { Err(ApiError::InvalidInput( "The specified project does not exist!".to_string(), )) diff --git a/src/routes/v3/reports.rs b/src/routes/v3/reports.rs index 3e8aa20d..1a88e1ca 100644 --- a/src/routes/v3/reports.rs +++ b/src/routes/v3/reports.rs @@ -435,6 +435,7 @@ pub async fn report_edit( MessageBody::ThreadClosure }, thread_id: report.thread_id, + hide_identity: true, } .insert(&mut transaction) .await?; @@ -450,18 +451,6 @@ pub async fn report_edit( ) .execute(&mut *transaction) .await?; - - sqlx::query!( - " - UPDATE threads - SET show_in_mod_inbox = $1 - WHERE id = $2 - ", - !(edit_closed || report.closed), - report.thread_id.0, - ) - .execute(&mut *transaction) - .await?; } // delete any images no longer in the body diff --git a/src/routes/v3/teams.rs b/src/routes/v3/teams.rs index 9191b20b..bb93ebe7 100644 --- a/src/routes/v3/teams.rs +++ b/src/routes/v3/teams.rs @@ -60,7 +60,7 @@ pub async fn team_members_get_project( .map(|x| x.1) .ok(); - if !is_visible_project(&project.inner, ¤t_user, &pool).await? 
{ + if !is_visible_project(&project.inner, ¤t_user, &pool, false).await? { return Err(ApiError::NotFound); } let members_data = diff --git a/src/routes/v3/threads.rs b/src/routes/v3/threads.rs index 87907878..4cb6aaac 100644 --- a/src/routes/v3/threads.rs +++ b/src/routes/v3/threads.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::auth::{check_is_moderator_from_headers, get_user_from_headers}; +use crate::auth::get_user_from_headers; use crate::database; use crate::database::models::image_item; use crate::database::models::notification_item::NotificationBuilder; @@ -24,10 +24,8 @@ use sqlx::PgPool; pub fn config(cfg: &mut web::ServiceConfig) { cfg.service( web::scope("thread") - .route("inbox", web::get().to(moderation_inbox)) .route("{id}", web::get().to(thread_get)) - .route("{id}", web::post().to(thread_send_message)) - .route("{id}/read", web::post().to(thread_read)), + .route("{id}", web::post().to(thread_send_message)), ); cfg.service(web::scope("message").route("{id}", web::delete().to(message_delete))); cfg.route("threads", web::get().to(threads_get)); @@ -252,7 +250,13 @@ pub async fn filter_authorized_threads( &mut thread .messages .iter() - .filter_map(|x| x.author_id) + .filter_map(|x| { + if x.hide_identity && !user.role.is_mod() { + None + } else { + x.author_id + } + }) .collect::>(), ); @@ -299,7 +303,13 @@ pub async fn thread_get( &mut data .messages .iter() - .filter_map(|x| x.author_id) + .filter_map(|x| { + if x.hide_identity && !user.role.is_mod() { + None + } else { + x.author_id + } + }) .collect::>(), ); @@ -429,11 +439,12 @@ pub async fn thread_send_message( author_id: Some(user.id.into()), body: new_message.body.clone(), thread_id: thread.id, + hide_identity: user.role.is_mod(), } .insert(&mut transaction) .await?; - let mod_notif = if let Some(project_id) = thread.project_id { + if let Some(project_id) = thread.project_id { let project = database::models::Project::get_id(project_id, &**pool, &redis).await?; if let Some(project) = project { @@ -461,8 +472,6 @@ pub async fn thread_send_message( .await?; } } - - !user.role.is_mod() } else if let Some(report_id) = thread.report_id { let report = database::models::report_item::Report::get(report_id, &**pool).await?; @@ -486,23 +495,7 @@ pub async fn thread_send_message( .await?; } } - - !user.role.is_mod() - } else { - false - }; - - sqlx::query!( - " - UPDATE threads - SET show_in_mod_inbox = $1 - WHERE id = $2 - ", - mod_notif, - thread.id.0, - ) - .execute(&mut *transaction) - .await?; + } if let MessageBody::Text { associated_images, .. 
@@ -552,72 +545,6 @@ pub async fn thread_send_message( } } -pub async fn moderation_inbox( - req: HttpRequest, - pool: web::Data, - redis: web::Data, - session_queue: web::Data, -) -> Result { - let user = check_is_moderator_from_headers( - &req, - &**pool, - &redis, - &session_queue, - Some(&[Scopes::THREAD_READ]), - ) - .await?; - let ids = sqlx::query!( - " - SELECT id - FROM threads - WHERE show_in_mod_inbox = TRUE - " - ) - .fetch_many(&**pool) - .try_filter_map(|e| async { Ok(e.right().map(|m| database::models::ThreadId(m.id))) }) - .try_collect::>() - .await?; - - let threads_data = database::models::Thread::get_many(&ids, &**pool).await?; - let threads = filter_authorized_threads(threads_data, &user, &pool, &redis).await?; - Ok(HttpResponse::Ok().json(threads)) -} - -pub async fn thread_read( - req: HttpRequest, - info: web::Path<(ThreadId,)>, - pool: web::Data, - redis: web::Data, - session_queue: web::Data, -) -> Result { - check_is_moderator_from_headers( - &req, - &**pool, - &redis, - &session_queue, - Some(&[Scopes::THREAD_READ]), - ) - .await?; - - let id = info.into_inner().0; - let mut transaction = pool.begin().await?; - - sqlx::query!( - " - UPDATE threads - SET show_in_mod_inbox = FALSE - WHERE id = $1 - ", - id.0 as i64, - ) - .execute(&mut *transaction) - .await?; - - transaction.commit().await?; - - Ok(HttpResponse::NoContent().body("")) -} - pub async fn message_delete( req: HttpRequest, info: web::Path<(ThreadMessageId,)>, diff --git a/src/routes/v3/users.rs b/src/routes/v3/users.rs index 455e6fb0..f2cb1629 100644 --- a/src/routes/v3/users.rs +++ b/src/routes/v3/users.rs @@ -69,7 +69,7 @@ pub async fn projects_list( let projects: Vec<_> = crate::database::Project::get_many_ids(&project_data, &**pool, &redis).await?; - let projects = filter_visible_projects(projects, &user, &pool).await?; + let projects = filter_visible_projects(projects, &user, &pool, true).await?; Ok(HttpResponse::Ok().json(projects)) } else { Err(ApiError::NotFound) diff --git a/src/routes/v3/version_file.rs b/src/routes/v3/version_file.rs index 8d3885c8..36e2f881 100644 --- a/src/routes/v3/version_file.rs +++ b/src/routes/v3/version_file.rs @@ -9,6 +9,8 @@ use crate::models::teams::ProjectPermissions; use crate::queue::session::AuthQueue; use crate::{database, models}; use actix_web::{web, HttpRequest, HttpResponse}; +use dashmap::DashMap; +use futures::TryStreamExt; use itertools::Itertools; use serde::{Deserialize, Serialize}; use sqlx::PgPool; @@ -283,6 +285,7 @@ pub async fn get_projects_from_hashes( database::models::Project::get_many_ids(&project_ids, &**pool, &redis).await?, &user_option, &pool, + false, ) .await?; @@ -304,27 +307,14 @@ pub struct ManyUpdateData { pub algorithm: Option, // Defaults to calculation based on size of hash pub hashes: Vec, pub loaders: Option>, - pub loader_fields: Option>>, + pub game_versions: Option>, pub version_types: Option>, } pub async fn update_files( - req: HttpRequest, pool: web::Data, redis: web::Data, update_data: web::Json, - session_queue: web::Data, ) -> Result { - let user_option = get_user_from_headers( - &req, - &**pool, - &redis, - &session_queue, - Some(&[Scopes::VERSION_READ]), - ) - .await - .map(|x| x.1) - .ok(); - let algorithm = update_data .algorithm .clone() @@ -337,16 +327,36 @@ pub async fn update_files( ) .await?; - let projects = database::models::Project::get_many_ids( - &files.iter().map(|x| x.project_id).collect::>(), - &**pool, - &redis, + // TODO: de-hardcode this and actually use version fields system + let 
update_version_ids = sqlx::query!( + " + SELECT v.id version_id, v.mod_id mod_id + FROM versions v + INNER JOIN version_fields vf ON vf.field_id = 3 AND v.id = vf.version_id + INNER JOIN loader_field_enum_values lfev ON vf.enum_value = lfev.id AND (cardinality($2::varchar[]) = 0 OR lfev.value = ANY($2::varchar[])) + INNER JOIN loaders_versions lv ON lv.version_id = v.id + INNER JOIN loaders l on lv.loader_id = l.id AND (cardinality($3::varchar[]) = 0 OR l.loader = ANY($3::varchar[])) + WHERE v.mod_id = ANY($1) AND (cardinality($4::varchar[]) = 0 OR v.version_type = ANY($4)) + ORDER BY v.date_published ASC + ", + &files.iter().map(|x| x.project_id.0).collect::>(), + &update_data.game_versions.clone().unwrap_or_default(), + &update_data.loaders.clone().unwrap_or_default(), + &update_data.version_types.clone().unwrap_or_default().iter().map(|x| x.to_string()).collect::>(), ) - .await?; - let all_versions = database::models::Version::get_many( - &projects - .iter() - .flat_map(|x| x.versions.clone()) + .fetch(&**pool) + .try_fold(DashMap::new(), |acc : DashMap<_,Vec>, m| { + acc.entry(database::models::ProjectId(m.mod_id)) + .or_default() + .push(database::models::VersionId(m.version_id)); + async move { Ok(acc) } + }) + .await?; + + let versions = database::models::Version::get_many( + &update_version_ids + .into_iter() + .filter_map(|x| x.1.last().copied()) .collect::>(), &**pool, &redis, @@ -354,50 +364,16 @@ pub async fn update_files( .await?; let mut response = HashMap::new(); - - for project in projects { - for file in files.iter().filter(|x| x.project_id == project.inner.id) { - let version = all_versions - .iter() - .filter(|x| x.inner.project_id == file.project_id) - .filter(|x| { - // TODO: Behaviour here is repeated in a few other filtering places, should be abstracted - let mut bool = true; - - if let Some(version_types) = &update_data.version_types { - bool &= version_types - .iter() - .any(|y| y.as_str() == x.inner.version_type); - } - if let Some(loaders) = &update_data.loaders { - bool &= x.loaders.iter().any(|y| loaders.contains(y)); - } - if let Some(loader_fields) = &update_data.loader_fields { - for (key, values) in loader_fields { - bool &= if let Some(x_vf) = - x.version_fields.iter().find(|y| y.field_name == *key) - { - values.iter().any(|v| x_vf.value.contains_json_value(v)) - } else { - true - }; - } - } - - bool - }) - .sorted() - .last(); - - if let Some(version) = version { - if is_visible_version(&version.inner, &user_option, &pool, &redis).await? { - if let Some(hash) = file.hashes.get(&algorithm) { - response.insert( - hash.clone(), - models::projects::Version::from(version.clone()), - ); - } - } + for file in files { + if let Some(version) = versions + .iter() + .find(|x| x.inner.project_id == file.project_id) + { + if let Some(hash) = file.hashes.get(&algorithm) { + response.insert( + hash.clone(), + models::projects::Version::from(version.clone()), + ); } } } diff --git a/src/routes/v3/versions.rs b/src/routes/v3/versions.rs index 1ec821f0..ab9697b9 100644 --- a/src/routes/v3/versions.rs +++ b/src/routes/v3/versions.rs @@ -80,7 +80,7 @@ pub async fn version_project_get_helper( .ok(); if let Some(project) = result { - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? 
{ return Err(ApiError::NotFound); } @@ -354,13 +354,10 @@ pub async fn version_edit_helper( } if let Some(dependencies) = &new_version.dependencies { - // TODO: Re-add this exclusions when modpack also has separate dependency retrieval that was removed from validators - // if let Some(project) = project_item { - // if project.project_type != "modpack" { sqlx::query!( " - DELETE FROM dependencies WHERE dependent_id = $1 - ", + DELETE FROM dependencies WHERE dependent_id = $1 + ", id as database::models::ids::VersionId, ) .execute(&mut *transaction) @@ -378,8 +375,6 @@ pub async fn version_edit_helper( DependencyBuilder::insert_many(builders, version_item.inner.id, &mut transaction) .await?; - // } - // } } if !new_version.fields.is_empty() { @@ -724,7 +719,7 @@ pub async fn version_list( .ok(); if let Some(project) = result { - if !is_visible_project(&project.inner, &user_option, &pool).await? { + if !is_visible_project(&project.inner, &user_option, &pool, false).await? { return Err(ApiError::NotFound); } diff --git a/src/scheduler.rs b/src/scheduler.rs index 68cb593b..63487882 100644 --- a/src/scheduler.rs +++ b/src/scheduler.rs @@ -19,9 +19,9 @@ impl Scheduler { } pub fn run(&mut self, interval: std::time::Duration, mut task: F) - where - F: FnMut() -> R + Send + 'static, - R: std::future::Future + Send + 'static, + where + F: FnMut() -> R + Send + 'static, + R: std::future::Future + Send + 'static, { let future = IntervalStream::new(actix_rt::time::interval(interval)) .for_each_concurrent(2, move |_| task()); @@ -207,4 +207,4 @@ async fn update_versions( } Ok(()) -} +} \ No newline at end of file diff --git a/src/search/indexing/mod.rs b/src/search/indexing/mod.rs index 05919a1b..35b7f72c 100644 --- a/src/search/indexing/mod.rs +++ b/src/search/indexing/mod.rs @@ -115,14 +115,19 @@ pub async fn get_indexes_for_indexing( let client = config.make_client(); let project_name = config.get_index_name("projects", next); let project_filtered_name = config.get_index_name("projects_filtered", next); - let projects_index = create_or_update_index(&client, &project_name, Some(&[ - "words", - "typo", - "proximity", - "attribute", - "exactness", - "sort", - ]),).await?; + let projects_index = create_or_update_index( + &client, + &project_name, + Some(&[ + "words", + "typo", + "proximity", + "attribute", + "exactness", + "sort", + ]), + ) + .await?; let projects_filtered_index = create_or_update_index( &client, &project_filtered_name, diff --git a/src/search/mod.rs b/src/search/mod.rs index 3b6f2c6b..e5d105bd 100644 --- a/src/search/mod.rs +++ b/src/search/mod.rs @@ -52,7 +52,7 @@ impl actix_web::ResponseError for SearchError { SearchError::InvalidIndex(..) => "invalid_input", SearchError::FormatError(..) 
=> "invalid_input", }, - description: &self.to_string(), + description: self.to_string(), }) } } diff --git a/src/util/mod.rs b/src/util/mod.rs index 03512d3e..b7271c70 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -7,6 +7,7 @@ pub mod env; pub mod ext; pub mod guards; pub mod img; +pub mod ratelimit; pub mod redis; pub mod routes; pub mod validate; diff --git a/src/util/ratelimit.rs b/src/util/ratelimit.rs new file mode 100644 index 00000000..74c7cf5b --- /dev/null +++ b/src/util/ratelimit.rs @@ -0,0 +1,167 @@ +use governor::clock::{Clock, DefaultClock}; +use governor::{middleware, state, RateLimiter}; +use std::str::FromStr; +use std::sync::Arc; + +use crate::routes::ApiError; +use crate::util::env::parse_var; +use actix_web::{ + body::EitherBody, + dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform}, + Error, ResponseError, +}; +use futures_util::future::LocalBoxFuture; +use futures_util::future::{ready, Ready}; + +pub type KeyedRateLimiter = + Arc, DefaultClock, MW>>; + +pub struct RateLimit(pub KeyedRateLimiter); + +impl Transform for RateLimit +where + S: Service, Error = Error>, + S::Future: 'static, + B: 'static, +{ + type Response = ServiceResponse>; + type Error = Error; + type Transform = RateLimitService; + type InitError = (); + type Future = Ready>; + + fn new_transform(&self, service: S) -> Self::Future { + ready(Ok(RateLimitService { + service, + rate_limiter: Arc::clone(&self.0), + })) + } +} + +#[doc(hidden)] +pub struct RateLimitService { + service: S, + rate_limiter: KeyedRateLimiter, +} + +impl Service for RateLimitService +where + S: Service, Error = Error>, + S::Future: 'static, + B: 'static, +{ + type Response = ServiceResponse>; + type Error = Error; + type Future = LocalBoxFuture<'static, Result>; + + forward_ready!(service); + + fn call(&self, req: ServiceRequest) -> Self::Future { + if let Some(key) = req.headers().get("x-ratelimit-key") { + if key.to_str().ok() == dotenvy::var("RATE_LIMIT_IGNORE_KEY").ok().as_deref() { + let res = self.service.call(req); + + return Box::pin(async move { + let service_response = res.await?; + Ok(service_response.map_into_left_body()) + }); + } + } + + let conn_info = req.connection_info().clone(); + let ip = if parse_var("CLOUDFLARE_INTEGRATION").unwrap_or(false) { + if let Some(header) = req.headers().get("CF-Connecting-IP") { + header.to_str().ok() + } else { + conn_info.peer_addr() + } + } else { + conn_info.peer_addr() + }; + + if let Some(ip) = ip { + let ip = ip.to_string(); + + match self.rate_limiter.check_key(&ip) { + Ok(snapshot) => { + let fut = self.service.call(req); + + Box::pin(async move { + match fut.await { + Ok(mut service_response) => { + // Now you have a mutable reference to the ServiceResponse, so you can modify its headers. + let headers = service_response.headers_mut(); + headers.insert( + actix_web::http::header::HeaderName::from_str( + "x-ratelimit-limit", + ) + .unwrap(), + snapshot.quota().burst_size().get().into(), + ); + headers.insert( + actix_web::http::header::HeaderName::from_str( + "x-ratelimit-remaining", + ) + .unwrap(), + snapshot.remaining_burst_capacity().into(), + ); + + headers.insert( + actix_web::http::header::HeaderName::from_str( + "x-ratelimit-reset", + ) + .unwrap(), + snapshot + .quota() + .burst_size_replenished_in() + .as_secs() + .into(), + ); + + // Return the modified response as Ok. 
+ Ok(service_response.map_into_left_body()) + } + Err(e) => { + // Handle error case + Err(e) + } + } + }) + } + Err(negative) => { + let wait_time = negative.wait_time_from(DefaultClock::default().now()); + + let mut response = ApiError::RateLimitError( + wait_time.as_millis(), + negative.quota().burst_size().get(), + ) + .error_response(); + + let headers = response.headers_mut(); + + headers.insert( + actix_web::http::header::HeaderName::from_str("x-ratelimit-limit").unwrap(), + negative.quota().burst_size().get().into(), + ); + headers.insert( + actix_web::http::header::HeaderName::from_str("x-ratelimit-remaining") + .unwrap(), + 0.into(), + ); + headers.insert( + actix_web::http::header::HeaderName::from_str("x-ratelimit-reset").unwrap(), + wait_time.as_secs().into(), + ); + + Box::pin(async { Ok(req.into_response(response.map_into_right_body())) }) + } + } + } else { + let response = + ApiError::CustomAuthentication("Unable to obtain user IP address!".to_string()) + .error_response(); + + Box::pin(async { Ok(req.into_response(response.map_into_right_body())) }) + } + } +} diff --git a/tests/common/api_v3/version.rs b/tests/common/api_v3/version.rs index 34ec443f..005a61e3 100644 --- a/tests/common/api_v3/version.rs +++ b/tests/common/api_v3/version.rs @@ -338,9 +338,7 @@ impl ApiVersion for ApiV3 { json["loaders"] = serde_json::to_value(loaders).unwrap(); } if let Some(game_versions) = game_versions { - json["loader_fields"] = json!({ - "game_versions": game_versions, - }); + json["game_versions"] = serde_json::to_value(game_versions).unwrap(); } if let Some(version_types) = version_types { json["version_types"] = serde_json::to_value(version_types).unwrap(); diff --git a/tests/oauth_clients.rs b/tests/oauth_clients.rs index 75257a69..7bed86d5 100644 --- a/tests/oauth_clients.rs +++ b/tests/oauth_clients.rs @@ -112,21 +112,6 @@ async fn get_oauth_client_for_client_creator_succeeds() { .await; } -#[actix_rt::test] -async fn get_oauth_client_for_unrelated_user_fails() { - with_test_environment(None, |env: TestEnvironment| async move { - let DummyOAuthClientAlpha { client_id, .. } = env.dummy.oauth_client_alpha.clone(); - - let resp = env - .api - .get_oauth_client(client_id.clone(), FRIEND_USER_PAT) - .await; - - assert_status!(&resp, StatusCode::UNAUTHORIZED); - }) - .await; -} - #[actix_rt::test] async fn can_delete_oauth_client() { with_test_environment(None, |env: TestEnvironment| async move { diff --git a/tests/pats.rs b/tests/pats.rs index d9ab3226..07b130f9 100644 --- a/tests/pats.rs +++ b/tests/pats.rs @@ -115,6 +115,8 @@ pub async fn pat_full_test() { "expires": Utc::now() + Duration::days(1), // no longer expired! 
})) .to_request(); + + println!("PAT ID FOR TEST: {}", id); let resp = test_env.call(req).await; assert_status!(&resp, StatusCode::NO_CONTENT); assert_eq!(mock_pat_test(access_token).await, 200); // Works again diff --git a/tests/project.rs b/tests/project.rs index 74170565..99c68c3b 100644 --- a/tests/project.rs +++ b/tests/project.rs @@ -69,7 +69,10 @@ async fn test_get_project() { .unwrap() .unwrap(); let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap(); - assert_eq!(cached_project["inner"]["slug"], json!(alpha_project_slug)); + assert_eq!( + cached_project["val"]["inner"]["slug"], + json!(alpha_project_slug) + ); // Make the request again, this time it should be cached let resp = api.get_project(alpha_project_id, USER_USER_PAT).await; diff --git a/tests/scopes.rs b/tests/scopes.rs index 7ebc637b..387d7121 100644 --- a/tests/scopes.rs +++ b/tests/scopes.rs @@ -836,43 +836,6 @@ pub async fn thread_scopes() { .test(req_gen, thread_write) .await .unwrap(); - - // Check moderation inbox - // Uses moderator PAT, as only moderators can see the moderation inbox - let req_gen = - |pat: Option| async move { api.get_moderation_inbox(pat.as_deref()).await }; - let (_, success) = ScopeTest::new(&test_env) - .with_user_id(MOD_USER_ID_PARSED) - .test(req_gen, thread_read) - .await - .unwrap(); - let thread_id: &str = success[0]["id"].as_str().unwrap(); - - // Moderator 'read' thread - // Uses moderator PAT, as only moderators can see the moderation inbox - let req_gen = - |pat: Option| async move { api.read_thread(thread_id, pat.as_deref()).await }; - ScopeTest::new(&test_env) - .with_user_id(MOD_USER_ID_PARSED) - .test(req_gen, thread_read) - .await - .unwrap(); - - // Delete that message - // First, get message id - let resp = api.get_thread(thread_id, USER_USER_PAT).await; - let success: serde_json::Value = test::read_body_json(resp).await; - let thread_message_id = success["messages"][0]["id"].as_str().unwrap(); - - let req_gen = |pat: Option| async move { - api.delete_thread_message(thread_message_id, pat.as_deref()) - .await - }; - ScopeTest::new(&test_env) - .with_user_id(MOD_USER_ID_PARSED) - .test(req_gen, thread_write) - .await - .unwrap(); }) .await; } diff --git a/tests/version.rs b/tests/version.rs index de587831..f482bc35 100644 --- a/tests/version.rs +++ b/tests/version.rs @@ -55,7 +55,7 @@ async fn test_get_version() { .unwrap(); let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap(); assert_eq!( - cached_project["inner"]["project_id"], + cached_project["val"]["inner"]["project_id"], json!(parse_base62(alpha_project_id).unwrap()) ); @@ -617,6 +617,7 @@ async fn version_ordering_for_specified_orderings_orders_lower_order_first() { USER_USER_PAT, ) .await; + assert_common_version_ids(&versions, vec![new_version_id, alpha_version_id]); }) .await;
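// Note on the reworked bulk update endpoint: after this change the v2 handler passes
// `game_versions` straight through to v3's `ManyUpdateData` instead of packing them into
// a `loader_fields` map, and the v3 handler resolves the newest matching version per
// project with a single SQL query. An illustrative request body for the update-files
// route; the hash, loader, and version values are assumptions, not taken from this change:
//
//     {
//         "hashes": ["ab12cd34..."],
//         "algorithm": "sha1",
//         "loaders": ["fabric"],
//         "game_versions": ["1.20.1"],
//         "version_types": ["release"]
//     }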