From 069a5660ddc4ae4239d2d5fbdabd4dce9b9885ae Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Sun, 20 Oct 2024 11:38:11 +0200 Subject: [PATCH 1/8] aiken 1.1 stdlib 2 --- aiken.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aiken.lock b/aiken.lock index 990f2f8..a55f193 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1725207797, nanos_since_epoch = 651057000 }, "d79382d2b6ecb3aee9b0755c31d8a5bbafe88a7b3706d7fb8a52fd4d05818501"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1729416025, nanos_since_epoch = 103074239 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] From 3ca1b0a684469890078ae53c06ff59dc4bbbbd8f Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Thu, 7 Nov 2024 15:20:08 +0100 Subject: [PATCH 2/8] Initial structure --- aiken.lock | 2 +- lib/calculation/process.ak | 17 +- lib/calculation/shared.ak | 2 + .../sub_conditions/default/process.ak | 306 +++++++ .../sub_conditions/default/verify_datum.ak | 11 + lib/calculation/sub_conditions/shared.ak | 287 ++++++ .../sub_conditions/trading_hours/process.ak | 42 + .../trading_hours/verify_datum.ak | 10 + lib/shared.ak | 84 +- lib/types/condition_pool.ak | 109 +++ lib/types/conditions/default.ak | 6 + lib/types/conditions/default_trading_hours.ak | 8 + validators/condition_pool.ak | 860 ++++++++++++++++++ validators/conditions/default.ak | 141 +++ .../conditions/default_trading_hours.ak | 153 ++++ validators/pool.ak | 88 +- validators/tests/pool.ak | 8 +- 17 files changed, 2034 insertions(+), 100 deletions(-) create mode 100644 lib/calculation/sub_conditions/default/process.ak create mode 100644 lib/calculation/sub_conditions/default/verify_datum.ak create mode 100644 lib/calculation/sub_conditions/shared.ak create mode 100644 lib/calculation/sub_conditions/trading_hours/process.ak create mode 100644 
lib/calculation/sub_conditions/trading_hours/verify_datum.ak create mode 100644 lib/types/condition_pool.ak create mode 100644 lib/types/conditions/default.ak create mode 100644 lib/types/conditions/default_trading_hours.ak create mode 100644 validators/condition_pool.ak create mode 100644 validators/conditions/default.ak create mode 100644 validators/conditions/default_trading_hours.ak diff --git a/aiken.lock b/aiken.lock index a55f193..b285a5e 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1729416025, nanos_since_epoch = 103074239 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1730989129, nanos_since_epoch = 210284116 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] diff --git a/lib/calculation/process.ak b/lib/calculation/process.ak index 51927e6..6a45e41 100644 --- a/lib/calculation/process.ak +++ b/lib/calculation/process.ak @@ -88,7 +88,10 @@ pub fn pool_input_to_state( } /// If the order is restricted to a specific pool, then make sure pool_ident matches that; otherwise just return true -fn validate_pool_id(order_pool_ident: Option, pool_ident: Ident) -> Bool { +pub fn validate_pool_id( + order_pool_ident: Option, + pool_ident: Ident, +) -> Bool { when order_pool_ident is { Some(i) -> i == pool_ident None -> True @@ -575,12 +578,9 @@ test process_orders_test() { } let valid_range = interval.between(1, 2) - let input_order = - [(0, None, 0)] - let inputs = - [input] - let outputs = - [output] + let input_order = [(0, None, 0)] + let inputs = [input] + let outputs = [output] let new_a, @@ -715,8 +715,7 @@ test process_30_shuffled_orders_test() { (20, None, 0), (29, None, 0), (19, None, 0), (21, None, 0), (9, None, 0), (25, None, 0), (6, None, 0), (4, None, 0), (3, None, 0), (15, None, 0), ] - let outputs = - [output] + let outputs = [output] let new_a, diff --git 
a/lib/calculation/shared.ak b/lib/calculation/shared.ak index 8456c69..9e68595 100644 --- a/lib/calculation/shared.ak +++ b/lib/calculation/shared.ak @@ -150,3 +150,5 @@ pub fn small_pow2(exponent: Int) -> Int { math.pow2(exponent) } } + +pub const millis_per_day = 86_400_000 diff --git a/lib/calculation/sub_conditions/default/process.ak b/lib/calculation/sub_conditions/default/process.ak new file mode 100644 index 0000000..0918541 --- /dev/null +++ b/lib/calculation/sub_conditions/default/process.ak @@ -0,0 +1,306 @@ +use aiken/collection/dict.{Dict} +use aiken/collection/list +use aiken/crypto.{DataHash, ScriptHash, VerificationKeyHash} +use aiken/interval +use calculation/process.{process_orders} +use calculation/sub_conditions/shared.{minted_correct_pool_tokens} as sub_conditions_shared +use cardano/address.{Address, Credential} +use cardano/assets.{AssetName, Lovelace, PolicyId, Value} +use cardano/transaction.{Input, Output, ValidityRange} +use shared.{ + AssetClass, Ident, count_orders, has_expected_pool_value, pool_lp_name, +} +use types/order.{SignedStrategyExecution} +use types/settings.{SettingsDatum} + +pub fn scoop_default( + settings_datum: SettingsDatum, + inputs: List, + pool_script_hash: ScriptHash, + actual_ask_fees_per_10_thousand: Int, + actual_bid_fees_per_10_thousand: Int, + pool_input: Output, + actual_identifier: Ident, + validity_range: ValidityRange, + withdrawals: Pairs, + datums: Dict, + input_order: List<(Int, Option, Int)>, + outputs: List, + actual_protocol_fees: Int, + mint: Value, + extra_signatories: List, + signatory_index: Int, + scooper_index: Int, + pool_output_value: Value, + actual_circulating_lp: Int, + actual_assets: (AssetClass, AssetClass), + actual_market_open: Int, + pool_output_address: Address, + assets: (AssetClass, AssetClass), + protocol_fees: Int, + identifier: Ident, + circulating_lp: Int, + market_open: Int, + bid_fees_per_10_thousand: Int, + ask_fees_per_10_thousand: Int, +) -> Bool { + // Deconstruct the 
settings datum with the fields we need for a scoop + let SettingsDatum { + authorized_scoopers, + base_fee, + simple_fee, + strategy_fee, + .. + } = settings_datum + + // Do a simple scan over the orders to count up the number of orders we'll be processing + // This is unavoidable, because it's part of making sure that the provided redeemer set isn't + // excluding orders + let real_order_count = count_orders(inputs) + // Calculate the portion of the fee that each order will be + // entitled to pay; + // Because the division is rounded down, we add real_order_count and subtact 1 + // to ensure that we take the ceiling instead, and round in the protocols favor. + let amortized_base_fee = + ( base_fee + real_order_count - 1 ) / real_order_count + // Make sure it's not negative, for example if base_fee was negative + expect amortized_base_fee >= 0 + + // Construct the initial pool state from the datum and the locked values + // This intermediate state will be updated as we process each order, allowing us to do a scan over each input + // In particular, it calculates what fees we should be charging (because of the linear fee decay) and the actual tradable reserves + // (excluding protocol fees, which shouldn't factor into the price) + // Note: this abomination is brought to you by the fact that constructing and destructuring structs + // is expensive, so it's cheaper to have **massive** lambdas / continuations + let + pool_policy_a, + pool_asset_name_a, + pool_quantity_a, + pool_policy_b, + pool_asset_name_b, + pool_quantity_b, + pool_policy_lp, + pool_asset_name_lp, + pool_quantity_lp, + bid_fees, + ask_fees, + initial_protocol_fees, + <- + default_pool_input_to_state( + pool_script_hash, + assets, + protocol_fees, + identifier, + circulating_lp, + bid_fees_per_10_thousand, + ask_fees_per_10_thousand, + pool_input, + ) + + // Process the orders in order, and decide the final pool state we should see + // This also counts up the number of simple / strategy orders, 
which let us compute the effective protocol fee. + // for optimization purposes, there are quite a lot of parameters, and their interaction is quite subtle + let + final_a, + final_b, + final_lp, + simple_count, + strategy_count, + <- + process_orders( + actual_identifier, + // The pool identifier, so we can check that each order is for this pool + validity_range, + // The validity range of the transaction, so we can check strategies haven't expired + withdrawals, + // Include the withdrawals, in case a strategy has some kind of attached script condition + datums, + // The datums, so we can look up the datum of each order (which may be inline, but may also be in the datums dict) + // The initial pool state, such as the reserves and circulating LP + pool_policy_a, + pool_asset_name_a, + pool_quantity_a, + pool_policy_b, + pool_asset_name_b, + pool_quantity_b, + pool_policy_lp, + pool_asset_name_lp, + pool_quantity_lp, + input_order, + // The input ordering specified by the scooper + bid_fees, + // The liquidity provider fee to charge for bids (A -> B), in parts per 10,000 (basis points) + ask_fees, + // ... for Ask (swap B -> A) + amortized_base_fee, + // The base fee split by the number of orders, paid for each user + simple_fee, + // The fee to charge for each "simple" order (swap, deposit, withdrawal, etc.) 
+ strategy_fee, + // The fee to charge for each "strategy" order + 0, + // The previous index we processed, intitially 0; this lets us detect if we need to "restart" the input list + inputs, + // *All* inputs, so we can start over at the beginning of the list if we want + inputs, + // *Remaining* inputs, so we can advance through the list one by one so long as the orders are in order + list.drop(outputs, 1), + // The list of outputs we should be comparing orders against + 0, + // A uniqueness bit-flag, to detect which orders have already been processed; see lib/calculation/InputSorting.md + 0, + // The accumulated count of "simple" orders, for calculating the fee; set to 0 to start, but incremented in each recursion + 0, + ) + // The accumulated count of "strategy" orders, see line above. + // We need to make sure that the number of orders matches the amount that we processed + // so the scooper doesn't "under-report" the orders and steal the funds on the order + expect simple_count + strategy_count == real_order_count + + // We calculate the expected total collected protocol fee + // We multiply amortized_base_fee, which everyone paid, by the number of orders + // and then the respective fees for each simple order and strategy order + let expected_fees_collected = + amortized_base_fee * real_order_count + simple_count * simple_fee + strategy_count * strategy_fee + + // Make sure we actually increased the protocol fee by exactly this amount + expect actual_protocol_fees == initial_protocol_fees + expected_fees_collected + + // The pool should have all of the scooper fees, and the quantity of each token of the outcome + // Note that initializing the state with `-transaction.fee` means this gets subracted out of the protocol fees + // TODO: do we need to account for this? it seems to have gotten lost in some changes. 
+ expect + minted_correct_pool_tokens( + pool_script_hash, + mint, + identifier, + circulating_lp, + final_lp, + ) + + // Check that the scooper is authorized; the protocol can allow *any* scoopers, or limit it to a set of actors + // It's safe to use values provided in the redeemer to efficiently skip to the expected scooper / expected signature + // because at the end of the day, we just care that the scooper has signed the transaction. If the scooper provides + // anything but the correct indexes, it'll just fail the transaction. + expect + when authorized_scoopers is { + Some(authorized_scoopers) -> { + // OPTIMIZATION: skip 10 entries at a time + // OPTIMIZATION: assume scooper is first extra_signatory? have to assume there will only ever be one extra_signatory + expect Some(scooper_sig) = list.at(extra_signatories, signatory_index) + expect Some(scooper) = list.at(authorized_scoopers, scooper_index) + // must be an authorized scooper + scooper_sig == scooper + } + _ -> True + } + + // the market must have opened; this allows projects to pre-create their pool, potentially across multiple protocols, and allows + // people to open orders ahead of time, and avoids things like sniping bots, etc. + // TODO: should we *only* prevent swaps / withdrawals? would it be ok to allow deposits? + // TODO: should we have a "blackout period", where withdrawals are prevented, similar to IPOs? + expect interval.is_entirely_after(validity_range, market_open) + + // We also check that the pool output has the right value (as mentioned above) + // In particular, the pool must have: + // - the pool NFT + // - the correctly adjusted assets from swapping, deposits, withdrawals, etc. 
+ // - an additional amount of ADA corresponding to the protocol fees + // - NOTHING ELSE; This is important because someone could add tons of junk tokens and increase the execution units, potentially even freezing the UTXO + expect + has_expected_pool_value( + pool_script_hash, + actual_identifier, + pool_output_value, + pool_policy_a, + pool_asset_name_a, + final_a, + pool_policy_b, + pool_asset_name_b, + final_b, + final_lp, + actual_protocol_fees, + ) + // Now, we check various things about the output datum to ensure they're each correct. + // Check that the datum correctly records the final circulating LP, accounting for any deposits and withdrawals + // In particular, this is important because that circulating supply is exaclty what determines the users ownership of assets in the pool + // If this gets out of sync with what we've actually minted, then users will be able to either redeem assets they aren't entitled to, + // or be unable to access funds they are entitled to. + expect actual_circulating_lp == final_lp + + // Make sure the protocol fees have been correctly updated + expect actual_protocol_fees == initial_protocol_fees + expected_fees_collected + + // And make sure each of these fields is unchanged + and { + identifier == actual_identifier, + assets == actual_assets, + bid_fees_per_10_thousand == actual_bid_fees_per_10_thousand, + ask_fees_per_10_thousand == actual_ask_fees_per_10_thousand, + market_open == actual_market_open, + // Finally, make sure we don't change the stake credential; this can only be done when withdrawing fees, by the treasury administrator + pool_input.address.stake_credential == pool_output_address.stake_credential, + } +} + +/// Construct the initial pool state for processing a set of orders +pub fn default_pool_input_to_state( + pool_token_policy: PolicyId, + assets: (AssetClass, AssetClass), + protocol_fees: Int, + identifier: Ident, + circulating_lp: Int, + bid_fees_per_10_thousand: Int, + ask_fees_per_10_thousand: Int, 
+ input: Output, + continuation: fn( + PolicyId, + AssetName, + Int, + PolicyId, + AssetName, + Int, + PolicyId, + AssetName, + Int, + Int, + Int, + Int, + ) -> + Bool, +) -> Bool { + let (asset_a, asset_b) = assets + let (asset_a_policy_id, asset_a_name) = asset_a + let (asset_b_policy_id, asset_b_name) = asset_b + // If asset_a is ADA, then we need to not consider the protocol fees as part of this + // We don't have to check asset_b, because assets are guaranteed to be in lexicographical order. + let min_utxo = + if asset_a_policy_id == assets.ada_policy_id { + protocol_fees + } else { + 0 + } + // Get the maximum of market_open and the transaction valid from so we can calculate the fees correctly + // Note: we use valid_from, as this favors the protocol: you pay the fees for the *earliest* moment your order *could* have executed. + // Scoopers could in theory set a wide validity range to cause users to overpay, but this should be considered malicious activity and + // get the scooper removed from the list of valid scoopers / ignore scooper rewards + // TODO: we could solve this by enforcing a validity range, and checking the length is within 4 hours. + let bid_fees = bid_fees_per_10_thousand + let ask_fees = ask_fees_per_10_thousand + // Then construct the pool state. We include the assets here, instead of just the reserves, so we can check the values of each order + // TODO: we could potentially save quite a bit by not passing around this object, and passing around a lot of parameters instead... 
+ continuation( + asset_a_policy_id, + asset_a_name, + assets.quantity_of(input.value, asset_a_policy_id, asset_a_name) - min_utxo, + asset_b_policy_id, + asset_b_name, + assets.quantity_of(input.value, asset_b_policy_id, asset_b_name), + pool_token_policy, + pool_lp_name(identifier), + circulating_lp, + bid_fees, + ask_fees, + protocol_fees, + ) +} diff --git a/lib/calculation/sub_conditions/default/verify_datum.ak b/lib/calculation/sub_conditions/default/verify_datum.ak new file mode 100644 index 0000000..b86d1bb --- /dev/null +++ b/lib/calculation/sub_conditions/default/verify_datum.ak @@ -0,0 +1,11 @@ +use shared + +pub fn default_verify_datum( + bid_fees_per_10_thousand: Int, + ask_fees_per_10_thousand: Int, +) -> Bool { + and { + shared.fees_in_legal_range(bid_fees_per_10_thousand), + shared.fees_in_legal_range(ask_fees_per_10_thousand), + } +} diff --git a/lib/calculation/sub_conditions/shared.ak b/lib/calculation/sub_conditions/shared.ak new file mode 100644 index 0000000..2fad000 --- /dev/null +++ b/lib/calculation/sub_conditions/shared.ak @@ -0,0 +1,287 @@ +use aiken/collection/dict.{Dict} +use aiken/crypto.{Blake2b_256, Hash} +use calculation/process.{validate_pool_id} +use calculation/shared.{check_and_set_unique, + unsafe_fast_index_skip_with_tail} as calculation_shared +use calculation/withdrawal +use cardano/assets.{AssetName, PolicyId, Value} +use cardano/transaction.{Input, Output} +use shared.{AssetClass, Ident, datum_of, is_script, pool_lp_name} +use types/order.{Destination, Order, OrderDatum, SignedStrategyExecution} + +/// Construct the initial pool state for processing a set of orders +pub fn condition_pool_input_to_state( + pool_token_policy: PolicyId, + assets: (AssetClass, AssetClass), + protocol_fees: Int, + identifier: Ident, + circulating_lp: Int, + input: Output, + continuation: fn( + PolicyId, + AssetName, + Int, + PolicyId, + AssetName, + Int, + PolicyId, + AssetName, + Int, + Int, + ) -> + Bool, +) -> Bool { + let (asset_a, asset_b) 
= assets + let (asset_a_policy_id, asset_a_name) = asset_a + let (asset_b_policy_id, asset_b_name) = asset_b + // If asset_a is ADA, then we need to not consider the protocol fees as part of this + // We don't have to check asset_b, because assets are guaranteed to be in lexicographical order. + let min_utxo = + if asset_a_policy_id == assets.ada_policy_id { + protocol_fees + } else { + 0 + } + + // Then construct the pool state. We include the assets here, instead of just the reserves, so we can check the values of each order + // TODO: we could potentially save quite a bit by not passing around this object, and passing around a lot of parameters instead... + continuation( + asset_a_policy_id, + asset_a_name, + assets.quantity_of(input.value, asset_a_policy_id, asset_a_name) - min_utxo, + asset_b_policy_id, + asset_b_name, + assets.quantity_of(input.value, asset_b_policy_id, asset_b_name), + pool_token_policy, + pool_lp_name(identifier), + circulating_lp, + protocol_fees, + ) +} + +/// Process a single order, comparing it to the output to ensure it was executed faithfully, and returning the new pool state +/// +/// Most of the parameters here are for performance reasons, to avoid destructuring objects, since thats very expensive +pub fn process_withdrawal_order( + // The pool state as of the time the order is executed; If we process multiple orders, this gets passed through each time + pool_policy_a: PolicyId, + pool_asset_name_a: AssetName, + pool_quantity_a: Int, + pool_policy_b: PolicyId, + pool_asset_name_b: AssetName, + pool_quantity_b: Int, + pool_policy_lp: PolicyId, + pool_asset_name_lp: AssetName, + pool_quantity_lp: Int, + // The input being processed + input: Output, + // The details of the order to execute, such as whether it's a swap, the limit, etc. 
+ details: Order, + // The max protocol fee that *can* be charged from the order; depending on how big the batch size is, some may be returned as a rebate, but this lets the user limit the maximum that gets charged in case the protocol fees in the settings change + max_protocol_fee: Int, + // The destination where the result of the order must be sent; useful for chaining transactions, as it lets you specify a datum for the output + destination: Destination, + // The base fee, divided among all the participants in the scoop + amortized_base_fee: Int, + // The amount to charge for simple vs strategy orders, taken from the settings + simple_fee: Int, + // A list of outputs, so we can destructure the next output + // TODO: we can probably avoid returning the outputs, and just return a boolean + outputs: List, + // A continuation to call with the next pool state and the list of outputs; this is more efficient than constructing an object and tuples + continuation: fn(Int, Int, Int, List) -> Bool, +) -> Bool { + // Returns the updated pool state, the correct list of outputs to resume from, and total fee charged by the order + expect order.Withdrawal(amount) = details + expect [output, ..rest_outputs] = outputs + // Make sure the scooper can only take up to the max fee the user has agreed to + // (See above) + let fee = amortized_base_fee + simple_fee + expect max_protocol_fee >= fee + // Calculate and validate the result of a withdrawal + let + new_a, + new_b, + new_lp, + <- + withdrawal.do_withdrawal( + pool_policy_a, + pool_asset_name_a, + pool_quantity_a, + pool_policy_b, + pool_asset_name_b, + pool_quantity_b, + pool_policy_lp, + pool_asset_name_lp, + pool_quantity_lp, + input, + amount, + destination, + fee, + output, + ) + continuation(new_a, new_b, new_lp, rest_outputs) +} + +/// Recursively process all orders in the correct order +/// There's a lot of parameters here, mostly for efficiency (though with some redundancies being removed in another branch) +pub fn 
process_withdrawal_orders( + // The pool identifier we're processing, so we can check the order if it has a specific pool + this_pool_ident: Ident, + // The datums in the witness set, in case we need to lookup a non-inline datum + datums: Dict, Data>, + // The initial / current pool state, passed recursively as we process each order + pool_policy_a: PolicyId, + pool_asset_name_a: AssetName, + pool_quantity_a: Int, + pool_policy_b: PolicyId, + pool_asset_name_b: AssetName, + pool_quantity_b: Int, + pool_policy_lp: PolicyId, + pool_asset_name_lp: AssetName, + pool_quantity_lp: Int, + // The list of remaining indices into the inputs, specifying which orders to process + input_order: List<(Int, Option, Int)>, + // The protocol base fee, split across each order + amortized_base_fee: Int, + // The simple and strategy fees from the settings datum + simple_fee: Int, + // The previous order we processed, to check if we need to restart the loop; TODO: we actually pass +1 from this, and i'm not sure how to explain why we do this... + prev_index: Int, + // *all* inputs on the transaction, in case we need to start over from the beginning (i.e. 
wrap around) + all_inputs: List, + // Just the remaining inputs in the list, in case it's more efficient to keep walking from here + remaining_inputs: List, + // The list of remaining outputs to compare the orders against; usually we pass the `tail` of this list recursively, but in the case of donations with no change, we pass outputs through unchanged + outputs: List, + // A number that, when interpreted as a bit flag, indicates which orders we've already processed; used to check if an order is processed more than once (see InputSorting.md) + uniqueness_flag: Int, + // A continuation to call with the final pool state; more efficient than constructing tuples / objects + continuation: fn(Int, Int, Int) -> Bool, +) -> Bool { + // Returns the final pool state, and the count of each order type + // The main "pump" of the recursive loop is the input_order, which is a set of indices into the inputs list + // specified by the scooper for the order to process each order in. + // Once we've reached the end of the list, we can return, but otherwise + when input_order is { + [] -> continuation(pool_quantity_a, pool_quantity_b, pool_quantity_lp) + [(idx, _, _), ..rest] -> { + // First, it's important to check that each order is processed only once; + // This is quite subtle, so check InputSorting.md for a full explanation + let next_uniqueness_flag = check_and_set_unique(uniqueness_flag, idx) + + // Then, we identify where to find the inputs; in particular, to avoid "starting from the beginning" every single time + // when indices are monotonic through the list, we can just continue to advance through the list + // so, all_inputs will always contain the full list of inputs + // while remaining_inputs will just contain the ones "after" the last one we processed. 
+ // So, here, we check if we can continue down this path, or if we need to start from the beginning again + let next_input_list = + if idx >= prev_index { + unsafe_fast_index_skip_with_tail(remaining_inputs, idx - prev_index) + } else { + unsafe_fast_index_skip_with_tail(all_inputs, idx) + } + + expect [input_to_process, ..rest_of_input_list] = next_input_list + let Input { output: order, .. } = input_to_process + + // It's important that we fail if we ever try to process a UTXO from a wallet address + // This is a bit unfortunate, because it means we can't support processing orders directly out of a users wallet + // but is important, because we rely on this to check that every order is processed. + // If we didn't do this check, a scooper could include a UTXO from their wallet, and leave a *real* order un-processed, and steal those users funds. + expect is_script(order.address.payment_credential) + + // Find the datum that is associated with this order; we allow that datum to be either inline, or in the witness set, + // to aid in composibility with other protocols + // We also check that the datum is in the format we expect; + // Note: we don't actually check the order address anywhere!! As long as it's a script, and the datum is in the correct format, we're good. + // This lets us upgrade the order contract, or add other types of orders over time. + expect Some(datum) = datum_of(datums, order) + expect datum: OrderDatum = datum + let OrderDatum { pool_ident, destination, max_protocol_fee, details, .. } = + datum + // Make sure we're allowed to process this order (i.e. 
if the user specified a specific pool, we have to honor that) + expect validate_pool_id(pool_ident, this_pool_ident) + // And finally, process this one individual order and compute the next state + // Note that we get back next_orders here, which is needed if we process a donation that has no change UTXO + let + new_a, + new_b, + new_lp, + next_orders, + <- + process_withdrawal_order( + pool_policy_a, + pool_asset_name_a, + pool_quantity_a, + pool_policy_b, + pool_asset_name_b, + pool_quantity_b, + pool_policy_lp, + pool_asset_name_lp, + pool_quantity_lp, + order, + details, + max_protocol_fee, + destination, + amortized_base_fee, + simple_fee, + outputs, + ) + + // And recursively process the rest of the orders + process_withdrawal_orders( + this_pool_ident, + datums, + pool_policy_a, + pool_asset_name_a, + new_a, + pool_policy_b, + pool_asset_name_b, + new_b, + pool_policy_lp, + pool_asset_name_lp, + new_lp, + rest, + // This advances to the next element from input_order + amortized_base_fee, + simple_fee, + idx + 1, + // This is the "previous index" within the input list; TODO: I'm not actually sure why we add 1? + all_inputs, + // See the notes above about all_inputs vs remaining_inputs + rest_of_input_list, + next_orders, + next_uniqueness_flag, + continuation, + ) + } + } +} + +/// This is responsible for checking that the minting value on the transaction is valid +/// based on the pool state, the policy ID, and the initial incoming datum. 
+pub fn minted_correct_pool_tokens( + pool_policy_id: PolicyId, + mint: Value, + identifier: Ident, + circulating_lp: Int, + quantity_lp: Int, +) -> Bool { + // Unwrap the silly MintedValue special type + // Note also we only look at the tokens with this policyID + // so that we can still mint other tokens + let minted_tokens = assets.tokens(mint, pool_policy_id) + + // If the initial datum has the same circulating LP as the outcome, then we expect no minted tokens + // Otherwise, the minted tokens should be exactly the pool LP tokens and nothing else + // TODO: confirm that the "minting 0 ada" problem doesn't apply here; we have real-world transactions, so I doubt it does, but I want to confirm. + // TODO: This should allow minting of other tokens, from other policy IDs, perhaps, for composibility? + if circulating_lp == quantity_lp { + dict.is_empty(minted_tokens) + } else { + dict.to_pairs(minted_tokens) == [ + Pair(pool_lp_name(identifier), quantity_lp - circulating_lp), + ] + } +} diff --git a/lib/calculation/sub_conditions/trading_hours/process.ak b/lib/calculation/sub_conditions/trading_hours/process.ak new file mode 100644 index 0000000..116bae2 --- /dev/null +++ b/lib/calculation/sub_conditions/trading_hours/process.ak @@ -0,0 +1,42 @@ +use aiken/interval.{Finite, Interval, IntervalBound} +use calculation/shared.{millis_per_day} +use cardano/transaction.{ValidityRange} + +pub fn scoop_trading_hours( + validity_range: ValidityRange, + open_time: Int, + close_time: Int, +) -> Bool { + expect Interval { + lower_bound: IntervalBound { bound_type: Finite(low_val), .. }, + upper_bound: IntervalBound { bound_type: Finite(high_val), .. 
}, + } = validity_range + let low_val_d = low_val % millis_per_day + let high_val_d = high_val % millis_per_day + let validity_less_than_1_day = high_val - low_val < millis_per_day + and { + validity_less_than_1_day, + time_between_start_finish(low_val_d, open_time, close_time), + time_between_start_finish(high_val_d, open_time, close_time), + } +} + +fn time_between_start_finish(time: Int, start: Int, finish: Int) -> Bool { + if start < finish { + and { + time > start, + time < finish, + } + } else { + or { + and { + time < start, + time < finish, + }, + and { + time > start, + time > finish, + }, + } + } +} diff --git a/lib/calculation/sub_conditions/trading_hours/verify_datum.ak b/lib/calculation/sub_conditions/trading_hours/verify_datum.ak new file mode 100644 index 0000000..325f319 --- /dev/null +++ b/lib/calculation/sub_conditions/trading_hours/verify_datum.ak @@ -0,0 +1,10 @@ +use calculation/shared.{millis_per_day} + +pub fn trading_hours_verify_datum(open_time: Int, close_time: Int) -> Bool { + and { + open_time >= 0, + open_time < millis_per_day, + close_time >= 0, + close_time < millis_per_day, + } +} diff --git a/lib/shared.ak b/lib/shared.ak index 9146973..c08a9fa 100644 --- a/lib/shared.ak +++ b/lib/shared.ak @@ -4,7 +4,7 @@ use aiken/collection/list use aiken/crypto.{Blake2b_256, Hash} use aiken/primitive/bytearray use cardano/address.{Credential, Script} -use cardano/assets.{AssetName, PolicyId} +use cardano/assets.{AssetName, PolicyId, Value, ada_policy_id} use cardano/transaction.{ DatumHash, InlineDatum, Input, NoDatum, Output, OutputReference, Transaction, find_input, @@ -51,6 +51,88 @@ pub fn datum_of( } } +/// Check that the UTXO contents are correct given a specific pool outcome +/// In particular, it must have the final A reserves, the final B reserves, the pool NFT, and the protocol fees +pub fn has_expected_pool_value( + pool_script_hash: PolicyId, + identifier: Ident, + output_value: Value, + pool_policy_a: PolicyId, + pool_asset_name_a: 
AssetName, + pool_quantity_a: Int, + pool_policy_b: PolicyId, + pool_asset_name_b: AssetName, + pool_quantity_b: Int, + final_lp: Int, + final_protocol_fees: Int, +) -> Bool { + // Asset A *could* be ADA; in which case there should be 3 tokens on the output + // (ADA, Asset B, and the NFT) + if pool_policy_a == ada_policy_id { + let actual = + list.foldl( + assets.flatten(output_value), + // (token count, lovelace amount, token b amount, pool nft amount) + (0, 0, 0, 0), + fn(asset, acc) { + let token_count = acc.1st + 1 + if asset.1st == pool_policy_a { + (token_count, acc.2nd + asset.3rd, acc.3rd, acc.4th) + } else if asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { + (token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th) + } else { + expect asset == (pool_script_hash, pool_nft_name(identifier), 1) + (token_count, acc.2nd, acc.3rd, acc.4th + 1) + } + }, + ) + // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token + let expected = + if final_lp == 0 { + expect pool_quantity_a == 0 + expect pool_quantity_b == 0 + (2, final_protocol_fees, 0, 1) + } else { + (3, final_protocol_fees + pool_quantity_a, pool_quantity_b, 1) + } + // Rather than constructing a value directly (which can be expensive) + // we can just compare the expected token count and amounts with a single pass over the value + expected == actual + } else { + // Asset A isn't ADA, Asset B will *never* be ADA; in this case, there should be 4 tokens on the output: + // ADA, the Pool NFT, Asset A, and Asset B + let actual = + list.foldl( + assets.flatten(output_value), + // (token count, lovelace amount, token a amount, token b amount, pool nft amount) + (0, 0, 0, 0, 0), + fn(asset, acc) { + let token_count = acc.1st + 1 + if asset.1st == ada_policy_id { + (token_count, acc.2nd + asset.3rd, acc.3rd, acc.4th, acc.5th) + } else if asset.1st == pool_policy_a && asset.2nd == pool_asset_name_a { + (token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th, acc.5th) + } else if 
asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { + (token_count, acc.2nd, acc.3rd, acc.4th + asset.3rd, acc.5th) + } else { + expect asset == (pool_script_hash, pool_nft_name(identifier), 1) + (token_count, acc.2nd, acc.3rd, acc.4th, acc.5th + 1) + } + }, + ) + // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token + let expected = + if final_lp == 0 { + expect pool_quantity_a == 0 + expect pool_quantity_b == 0 + (2, final_protocol_fees, 0, 0, 1) + } else { + (4, final_protocol_fees, pool_quantity_a, pool_quantity_b, 1) + } + expected == actual + } +} + /// Find the **input** (which was the output of some other transaction) for which we're actually evaluating the script to determine if it is spendable /// Also called "own_input" in places pub fn spent_output( diff --git a/lib/types/condition_pool.ak b/lib/types/condition_pool.ak new file mode 100644 index 0000000..f791235 --- /dev/null +++ b/lib/types/condition_pool.ak @@ -0,0 +1,109 @@ +use aiken/collection/list +use aiken/crypto.{ScriptHash} +use cardano/transaction.{InlineDatum, Output} +use shared.{AssetClass, Ident} +use sundae/multisig +use types/order.{SignedStrategyExecution} + +/// The current state of a AMM liquidity pool at a UTXO. +pub type ConditionPoolDatum { + /// the unique identifier of the pool. Produced by hashing one of the input UTXOs used to produce the pool + /// to ensure uniqueness. 
+ identifier: Ident, + /// The two asset IDs that this pool can exchange, in alphabetical order + /// Used to validate that the assets being swapped are indeed the intended assets + assets: (AssetClass, AssetClass), + /// The total number of LP tokens in circulation + /// Maintains the following two invariants on each deposit or withdrawal: + /// - circulating_lp is always equal to the number of LP tokens that have been minted and are in circulation + /// - A users LP tokens (or burned LP tokens), as a percentage of the circulating LP tokens, represent the percentage of assets they just deposited or withdrew. + circulating_lp: Int, + // An optional multisig condition under which the protocol fees can be updated + fee_manager: Option, + /// The UNIX millisecond timestamp at which trading against the pool should be allowed + /// TODO: deposits and arguably withdrawals should be processed before the market open + market_open: Int, + /// The amount of ADA on the UTXO that is set aside by collecting protocol fees + /// This should be increased on each scoop to represent collecting fees; deducted from the reserve amount (if one of the tokens in the pair is ADA) + /// to calculate the swap amounts, and decreased when some amount is withdrawn. + /// Note that this also allows us to conveniently sidestep minUTXO woes, because the creator of the pool can set the initial protocol fees to whatever minUTXO is needed + /// and withdrawals never have to be for the full amount. + /// TODO: should we add a field to the settings object to set a minimum initial protocol_fees on pool mint? 
+ protocol_fees: Int, + /// The condition script that needs to be present in the withdrawals + condition: ScriptHash, + /// If the condition needs extra data it can be stored in this field + condition_datum: Data, +} + +/// A pool UTXO can be spent for two purposes: +pub type ConditionPoolRedeemer { + /// Execute a scoop, a batch of transactions all processed at once + ConditionPoolScoop { + /// The index in the transaction extra_signatories that represents the "scooper" signature + /// This lets us quickly skip to the scooper if there are multiple signatures on the transaction for some reason + /// And this is safe to do, because at the end of the day the value pointed at has to be in the list of authorized scoopers anyway + signatory_index: Int, + /// The index in the list of authorized scoopers on the settings datum of the scooper processing the order + /// This is so that we can quickly skip to the correct scooper in the list of authorized scoopers + /// And this is safe to do, because at the end of the day, the pointed at scooper must have signed the transaction + scooper_index: Int, + /// The order to process the transaction inputs in, and optionally the signed strategy execution to execute for strategy orders + /// This is because the cardano ledger re-orders transaction inputs when building the script context; so this could end up processing + /// users' orders out of order, and open the protocol to sandwich attacks and line-cutting. + /// Much of the complexity of the protocol comes from ensuring this list is processed both efficiently and safely. 
+ input_order: List<(Int, Option, Int)>, + /// A withdrawal only scoop should always be allowed, no matter the condition + withdrawal_only: Bool, + } + /// Withdraw the earned protocol fees into the treasury, or update the pool + /// fees + ConditionManage +} + +/// We use the pool mint script for two different purposes +pub type ConditionPoolMintRedeemer { + /// to mint LP tokens for the user for a specific pool (by identifier) + ConditionMintLP { identifier: Ident } + /// to mint the actual pool itself + ConditionCreatePool { + /// The assets to create the pool with; + /// provided so we can enforce that they are paid to the pool output, and the pool datum is constructed correctly + assets: (AssetClass, AssetClass), + /// The index in the outputs that corresponds to the pool output + /// This is done so we can efficiently skip to the correct output + /// This is safe because we validate that the datum, value, and pool token are paid into this UTXO + pool_output: Int, + /// The index in the outputs that the corresponding CIP-68 metadata token is paid to + /// Note that the tokens are structured as such: + /// - A CIP-68 (222) token, indicating an NFT, held by the pool script address + /// - A CIP-68 (333) token, indicating a fungible token, paid anywhere the user wants; this is the LP token + /// - A CIP-68 (111) token; Whichever UTXO holds this token can set a datum that determines on-chain metadata like decimals, name, and image + /// That token is given to a "metadata administrator" who can set the appropriate metadata, and update it if needed. 
+ /// This is done so we can efficiently skip to the correct metadata output + /// This is safe because we validate that the token is paid to the metadata admin + metadata_output: Int, + } + /// to burn the pool NFT (when permitted by the spending validator) + ConditionBurnPool { identifier: Ident } +} + +/// Manage settings about a pool (used against the pool manage script) +pub type ConditionManageRedeemer { + // Withdraw some subset of the fees in the pool, paid into the treasury + ConditionWithdrawFees { amount: Int, treasury_output: Int, pool_input: Int } + // Update the percentage fee the pool charges + ConditionUpdateCondition { pool_input: Int } +} + +pub fn find_pool_output(outputs: List) -> (Output, ConditionPoolDatum) { + // Find the pool output; we can assume the pool output is the first output, because: + // - The ledger doesn't reorder outputs, just inputs + // - We check that the address is correct, so if the first output was to a different contract, we would fail + // - We check that the datum is the correct type, meaning we can't construct an invalid pool output + // - Later, we check that the pool output has the correct value, meaning it *must* contain the pool token, so we can't pay to the pool script multiple times + expect Some(pool_output) = list.head(outputs) + expect InlineDatum(output_datum) = pool_output.datum + expect output_datum: ConditionPoolDatum = output_datum + (pool_output, output_datum) +} diff --git a/lib/types/conditions/default.ak b/lib/types/conditions/default.ak new file mode 100644 index 0000000..3a69350 --- /dev/null +++ b/lib/types/conditions/default.ak @@ -0,0 +1,6 @@ +pub type DefaultDatum { + /// The basis points to charge on each trade for bid (A -> B) and ask (B -> A) orders + /// For example, a 1% fee would be represented as 100 (out of 10,000), and a 0.3% fee would be represented as 30 + bid_fees_per_10_thousand: Int, + ask_fees_per_10_thousand: Int, +} diff --git a/lib/types/conditions/default_trading_hours.ak 
b/lib/types/conditions/default_trading_hours.ak new file mode 100644 index 0000000..41f1254 --- /dev/null +++ b/lib/types/conditions/default_trading_hours.ak @@ -0,0 +1,8 @@ +pub type DefaultTradingHoursDatum { + /// The basis points to charge on each trade for bid (A -> B) and ask (B -> A) orders + /// For example, a 1% fee would be represented as 100 (out of 10,000), and a 0.3% fee would be represented as 30 + bid_fees_per_10_thousand: Int, + ask_fees_per_10_thousand: Int, + open_time: Int, + close_time: Int, +} diff --git a/validators/condition_pool.ak b/validators/condition_pool.ak new file mode 100644 index 0000000..80e4eb0 --- /dev/null +++ b/validators/condition_pool.ak @@ -0,0 +1,860 @@ +use aiken/collection/list +use aiken/collection/pairs +use aiken/crypto.{ScriptHash} +use aiken/interval +use aiken/primitive/bytearray +use calculation/sub_conditions/shared.{ + condition_pool_input_to_state, minted_correct_pool_tokens, + process_withdrawal_orders, +} as sub_conditions_shared +use cardano/address.{Address, Inline, Script} +use cardano/assets.{AssetName, PolicyId, Value, ada_policy_id} +use cardano/script_context.{ScriptContext} +use cardano/transaction.{ + InlineDatum, Input, Output, OutputReference, Transaction, Withdraw, +} +use shared.{ + AssetClass, Ident, count_orders, own_input_index, pool_nft_name, spent_output, +} +use sundae/multisig +use types/condition_pool.{ + ConditionBurnPool, ConditionCreatePool, ConditionManage, + ConditionManageRedeemer, ConditionMintLP, ConditionPoolDatum, + ConditionPoolMintRedeemer, ConditionPoolRedeemer, ConditionPoolScoop, + ConditionUpdateCondition, ConditionWithdrawFees, find_pool_output, +} as types_pool +use types/settings.{SettingsDatum, find_settings_datum} + +/// The core / base "pooled AMM" script for the SundaeSwap v3 protocol +/// +/// Parameterized by the Settings policy ID, which makes the script unique, as well as lets us validate / read global settings. 
+/// +/// This script is responsible for: +/// - Guarding access to the pools assets on behalf of the depositors +/// - Enabling and executing a batch of orders against those assets, implementing a standard 'AMM' swap protocol +/// - Serving as a minting policy, minting the Pool NFT and LP tokens +/// - Accumulating protocol fees on behalf of the protocol +/// - Allowing protocol rewards to be withdrawn to a DAO treasury +/// +/// It does so by allowing a permissioned set of entities to "scoop" a batch of orders, and ensuring that each one +/// pays out to the appropriate destination. +/// +/// This set of people is permissioned to prevent classes of "sandwich" attacks, wherein a malicious actor could +/// execute and match orders in their *own* favor, rather than the users favor. +/// +/// Also of particular sensitivity is the optimizations applied; To achieve high-throughput, low-cost DeFi, executing +/// each order independently suffers from some amount of overhead. If, for example, the execution costs are split into: +/// A - The costs that need to be run in each transaction, regardless of what is executed +/// B - The costs that need to be executed for each order +/// C - The costs exclusive to batching, such as sorting the orders +/// +/// It is likely, then, that protocols converge on the per-order cost of batching being lower than un-batched variants, i.e. 
+/// +/// A + B*n + C < (A + B) * n +validator condition_pool( + manage_stake_script_hash: ScriptHash, + settings_policy_id: PolicyId, +) { + spend( + datum: Option, + redeemer: ConditionPoolRedeemer, + out_ref: OutputReference, + transaction: Transaction, + ) { + expect Some(datum) = datum + // First, we destructure the transaction right upfront, because field access is O(n), + // and we want access to these fields with just a single pass over the transaction + // This will be a common pattern throughout the scripts + // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) + let Transaction { + inputs, + outputs, + reference_inputs, + mint, + datums, + extra_signatories, + validity_range, + withdrawals, + .. + } = transaction + + // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction + let pool_input = spent_output(transaction, out_ref) + + // And pattern match to get the pool script hash; in particular, this can be used to find the pool output, + // *and* to know the policy ID of pool tokens, because this is a dual spending/minting validator. + expect Script(pool_script_hash) = pool_input.address.payment_credential + + // The protocol configures many global settings via a "settings" UTXO, updatable by certain administrators + // This is included as a reference input, so we have a utility to check the reference inputs for the settings NFT + // Note: it's important to check for the NFT, because checking just for the address would let someone pay random funds to the settings address. 
+ let settings_datum = + find_settings_datum(reference_inputs, settings_policy_id) + + // Then, there are two different actions that can be taken against a pool: + // - Scooping a batch of orders + // - Withdrawing protocol fees to the treasury + when redeemer is { + // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed + ConditionPoolScoop { + signatory_index, + scooper_index, + input_order, + withdrawal_only, + } -> { + // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop + let ( + Output { address: pool_output_address, value: pool_output_value, .. }, + ConditionPoolDatum { + identifier: actual_identifier, + circulating_lp: actual_circulating_lp, + protocol_fees: actual_protocol_fees, + assets: actual_assets, + market_open: actual_market_open, + condition: actual_condition, + condition_datum: actual_condition_datum, + .. + }, + ) = find_pool_output(outputs) + + // Ensure that the pool output is to the same payment credential; This is critical, because it ensures that the pool NFT + // or liquidity aren't paid to some other script in control of an attacker. + // Note that we check the stake credential is correctly updated (or not) in the various redeemer cases below. + // We also check that the pool output has the correct output, which ensures it contains the pool NFT, + // meaning this can't just be a "token output" with the correct payment credential, but everything paid elsewhere. + expect + pool_output_address.payment_credential == Script(pool_script_hash) + + when withdrawal_only is { + True -> { + // Deconstruct the settings datum with the fields we need for a scoop + let SettingsDatum { authorized_scoopers, base_fee, simple_fee, .. 
} = + settings_datum + + // Do a simple scan over the orders to count up the number of orders we'll be processing + // This is unavoidable, because it's part of making sure that the provided redeemer set isn't + // excluding orders + let real_order_count = count_orders(inputs) + // Calculate the portion of the fee that each order will be + // entitled to pay; + // Because the division is rounded down, we add real_order_count and subtract 1 + // to ensure that we take the ceiling instead, and round in the protocol's favor. + let amortized_base_fee = + ( base_fee + real_order_count - 1 ) / real_order_count + // Make sure it's not negative, for example if base_fee was negative + expect amortized_base_fee >= 0 + + expect InlineDatum(pool_input_datum) = pool_input.datum + expect ConditionPoolDatum { + assets, + protocol_fees, + identifier, + circulating_lp, + .. + } = pool_input_datum + + // Construct the initial pool state from the datum and the locked values + // This intermediate state will be updated as we process each order, allowing us to do a scan over each input + // In particular, it calculates what fees we should be charging (because of the linear fee decay) and the actual tradable reserves + // (excluding protocol fees, which shouldn't factor into the price) + // Note: this abomination is brought to you by the fact that constructing and destructuring structs + // is expensive, so it's cheaper to have **massive** lambdas / continuations + let + pool_policy_a, + pool_asset_name_a, + pool_quantity_a, + pool_policy_b, + pool_asset_name_b, + pool_quantity_b, + pool_policy_lp, + pool_asset_name_lp, + pool_quantity_lp, + initial_protocol_fees, + <- + condition_pool_input_to_state( + pool_script_hash, + assets, + protocol_fees, + identifier, + circulating_lp, + pool_input, + ) + + // Process the orders in order, and decide the final pool state we should see + // This also counts up the number of simple / strategy orders, which let us compute the effective protocol fee. 
+ // for optimization purposes, there are quite a lot of parameters, and their interaction is quite subtle + let + final_a, + final_b, + final_lp, + <- + process_withdrawal_orders( + actual_identifier, + // The pool identifier, so we can check that each order is for this pool + datums, + // The datums, so we can look up the datum of each order (which may be inline, but may also be in the datums dict) + // The initial pool state, such as the reserves and circulating LP + pool_policy_a, + pool_asset_name_a, + pool_quantity_a, + pool_policy_b, + pool_asset_name_b, + pool_quantity_b, + pool_policy_lp, + pool_asset_name_lp, + pool_quantity_lp, + input_order, + // The input ordering specified by the scooper + amortized_base_fee, + // The base fee split by the number of orders, paid for each user + simple_fee, + // The fee to charge for each "simple" order (swap, deposit, withdrawal, etc.) + 0, + // The previous index we processed, intitially 0; this lets us detect if we need to "restart" the input list + inputs, + // *All* inputs, so we can start over at the beginning of the list if we want + inputs, + // *Remaining* inputs, so we can advance through the list one by one so long as the orders are in order + list.drop(outputs, 1), + // The list of outputs we should be comparing orders against + 0, + ) + + // A uniqueness bit-flag, to detect which orders have already been processed; see lib/calculation/InputSorting.md + // The accumulated count of "strategy" orders, see line above. + // We need to make sure that the number of orders matches the amount that we processed + // so the scooper doesn't "under-report" the orders and steal the funds on the order + // Is this needed for withdrawal only? 
+ // expect simple_count + strategy_count == real_order_count + // We calculate the expected total collected protocol fee + // We multiply amortized_base_fee, which everyone paid, by the number of orders + // and then the respective fees for each simple order and strategy order + let expected_fees_collected = + amortized_base_fee * real_order_count + real_order_count * simple_fee + + // Make sure we actually increased the protocol fee by exactly this amount + expect + actual_protocol_fees == initial_protocol_fees + expected_fees_collected + + // The pool should have all of the scooper fees, and the quantity of each token of the outcome + // Note that initializing the state with `-transaction.fee` means this gets subtracted out of the protocol fees + // TODO: do we need to account for this? it seems to have gotten lost in some changes. + expect + minted_correct_pool_tokens( + pool_script_hash, + mint, + identifier, + circulating_lp, + final_lp, + ) + + // Check that the scooper is authorized; the protocol can allow *any* scoopers, or limit it to a set of actors + // It's safe to use values provided in the redeemer to efficiently skip to the expected scooper / expected signature + // because at the end of the day, we just care that the scooper has signed the transaction. If the scooper provides + // anything but the correct indexes, it'll just fail the transaction. + expect + when authorized_scoopers is { + Some(authorized_scoopers) -> { + // OPTIMIZATION: skip 10 entries at a time + // OPTIMIZATION: assume scooper is first extra_signatory? 
have to assume there will only ever be one extra_signatory + expect Some(scooper_sig) = + list.at(extra_signatories, signatory_index) + expect Some(scooper) = + list.at(authorized_scoopers, scooper_index) + // must be an authorized scooper + scooper_sig == scooper + } + _ -> True + } + + // the market must have opened; this allows projects to pre-create their pool, potentially across multiple protocols, and allows + // people to open orders ahead of time, and avoids things like sniping bots, etc. + // TODO: should we *only* prevent swaps / withdrawals? would it be ok to allow deposits? + // TODO: should we have a "blackout period", where withdrawals are prevented, similar to IPOs? + expect interval.is_entirely_after(validity_range, datum.market_open) + + // We also check that the pool output has the right value (as mentioned above) + // In particular, the pool must have: + // - the pool NFT + // - the correctly adjusted assets from swapping, deposits, withdrawals, etc. + // - an additional amount of ADA corresponding to the protocol fees + // - NOTHING ELSE; This is important because someone could add tons of junk tokens and increase the execution units, potentially even freezing the UTXO + expect + has_expected_pool_value( + pool_script_hash, + actual_identifier, + pool_output_value, + pool_policy_a, + pool_asset_name_a, + final_a, + pool_policy_b, + pool_asset_name_b, + final_b, + final_lp, + actual_protocol_fees, + ) + // Now, we check various things about the output datum to ensure they're each correct. + // Check that the datum correctly records the final circulating LP, accounting for any deposits and withdrawals + // In particular, this is important because that circulating supply is exaclty what determines the users ownership of assets in the pool + // If this gets out of sync with what we've actually minted, then users will be able to either redeem assets they aren't entitled to, + // or be unable to access funds they are entitled to. 
+ expect actual_circulating_lp == final_lp + + // Make sure the protocol fees have been correctly updated + expect + actual_protocol_fees == initial_protocol_fees + expected_fees_collected + + // And make sure each of these fields is unchanged + and { + datum.identifier == actual_identifier, + datum.assets == actual_assets, + datum.condition == actual_condition, + datum.condition_datum == actual_condition_datum, + datum.market_open == actual_market_open, + // Finally, make sure we don't change the stake credential; this can only be done when withdrawing fees, by the treasury administrator + pool_input.address.stake_credential == pool_output_address.stake_credential, + } + } + False -> { + expect Some(..) = + pairs.get_first(withdrawals, address.Script(actual_condition)) + True + } + } + } + ConditionManage -> + // There must be a redeemer for a (stake-script) withdrawal against the manage stake script, + // and the redeemer must correctly point at the pool UTXO + pairs.foldl( + transaction.redeemers, + False, + fn(script_purpose, redeemer, acc) { + when script_purpose is { + Withdraw(Script(script)) -> { + let is_valid_manage_script_invoke = + if script == manage_stake_script_hash { + expect redeemer: ConditionManageRedeemer = redeemer + let redeemer_pool_input = + when redeemer is { + ConditionUpdateCondition { pool_input } -> pool_input + ConditionWithdrawFees { pool_input, .. 
} -> pool_input + } + let input_index = own_input_index(transaction, out_ref) + // Manage redeemer must have the correct index of this pool input + input_index == redeemer_pool_input + } else { + False + } + acc || is_valid_manage_script_invoke + } + _ -> acc + } + }, + ) + } + } + + mint( + r: ConditionPoolMintRedeemer, + own_policy_id: PolicyId, + transaction: Transaction, + ) { + // When minting, we can be doing one of two things: minting the pool itself, or minting the LP token + when r is { + // For creating a new pool, one of our design objectives was to avoid requiring interaction with any global + // "factory", as in v1; this created a lot of contention, and didn't serve its original goal of ensuring that + // the (pair, fee) was unique. + ConditionCreatePool(assets, pool_output_ix, metadata_output_ix) -> { + // And grab the pool output + expect Some(pool_output) = list.at(transaction.outputs, pool_output_ix) + + // Check that the pool datum is inline, because a datum hash could brick this pool + expect InlineDatum(d) = pool_output.datum + expect pool_output_datum: ConditionPoolDatum = d + + // The assets on the pool must be sorted + // This is partially to make off-chain indexing easier, and as an on-chain optimization, + // so we can always assume that, if ADA is one of the pairs, it's the first asset. + // This also prevents creating an X/X pool, since we require the assets to be strictly less than + // Note, it is a NON GOAL to enforce that any specific (pair, fee) combination is unique; while aggregating liquidity + // can be beneficial for capital efficiency, in practice that will happen anyway, and smaller pools can be useful as + // a pressure release valve. Additionally, we have bigger plans around interesting order splitting modes of operation that + // make that less important. 
+ let (asset_a, asset_b) = assets + let coin_pair_ordering_is_canonical = + compare_asset_class(asset_a, asset_b) == Less + + // We use the first input ref spent in this transaction to uniquely identify the pool + // This is a trick widely used to produce NFTs, and ensures that we can generate a hash that is unique to this pool + expect Some(first_input) = list.at(transaction.inputs, 0) + let first_input_index = + int_to_ident(first_input.output_reference.output_index) + + // Calculate the identifier by hashing the txRef of the first input + // this makes the pool unique + // With CIP-68 identifiers (which take up 4 bytes), we have 28 bytes of space; + // ideally we'd use blake2b-224, which is exactly a 224-bit (28-byte) hash, but that won't be on-chain until plutus v3; + // So, we use blake2b-256 and drop a few bytes. Now, does this compromise the uniqueness guarantees? + // It is strongly believed by the cryptographer community that blake2b-256 provides indifferentiability from random oracles + // [https://eprint.iacr.org/2013/322.pdf] + // which allows us to treat the 256 bits as uniformly sampled. This means that there are 2^256 equally likely values, and the + // chance of collision is 1 in 2^256, unfathomably small. + // Even when used in bulk, the birthday paradox means that you would need to generate 2^128 different hashes before you had even a 50% chance of collision. + // This is known as "128 bit security", and is considered a gold standard of cryptography. + // So, how does dropping 4 bytes (32 bits) from this hash impact that analysis? + // There are still 2^224 possible values, meaning it offers 112-bit security, still considered "unbreakable". + // As a comparison, at the time of this writing the entire bitcoin network is performing 500 exa-hashes per second, or + // 500 * 10^18 hashes per second. 
If the entire bitcoin network was directed towards trying to generate a collision on pool ident, it would take + // Roughly 10^13 seconds, or 317,098 years to have a 50% chance of collision. Not to mention the cost of storing and comparing to detect if you'd performed + // a collision, or the cost of grinding UTXOs on cardano to give fresh inputs to the pool script. + let new_pool_id = + first_input.output_reference.transaction_id + |> bytearray.concat(#"23") // '#' character + |> bytearray.concat(first_input_index) + |> crypto.blake2b_256 + |> bytearray.drop(4) + + // With that pool identifier, we can attach 3 different CIP-68 pool identifiers: + // - (100) indicates a tracking token, for the purposes of on-chain metadata read by off-chain infra, so we can provide a nice experience to the users wallet for example + // - (222) indicates the pool NFT, which uniquely identifies the UTXO that holds pool assets + // - (333) indicates the fungible LP token, which represents a percentage ownership of the pool + let (new_pool_ref_token, new_pool_nft_token, new_pool_lp_token) = + shared.pool_token_names(new_pool_id) + + // Then, find the settings datum, so we can ensure the reference token is paid to the metadata admin + let reference_inputs = transaction.reference_inputs + let settings_datum = + find_settings_datum(reference_inputs, settings_policy_id) + + // Grab the initial reserves of each token by looking at what's paid to the UTXO + let coin_a_amt = + assets.quantity_of(pool_output.value, asset_a.1st, asset_a.2nd) + let coin_b_amt = + assets.quantity_of(pool_output.value, asset_b.1st, asset_b.2nd) + + // Ensure that the pool pays the pool creation fee, if any, by ensuring that the initial protocol_fees value is greater than or equal to the fee + expect + pool_output_datum.protocol_fees >= settings_datum.pool_creation_fee + + // Only ada has a null policy id. 
If coin A is ada, subtract the initial protocol_fees setting from the coin A amount + // rider from the output to get the true amount in the pool. + let coin_a_amt_sans_protocol_fees = + if bytearray.is_empty(asset_a.1st) { + coin_a_amt - pool_output_datum.protocol_fees + } else { + coin_a_amt + } + + // Check that the quantity of LP tokens is correct; In particular, we adopt Uniswaps convention of + // using the sqrt of the product of the two values for the initial number of LP tokens to mint.. + // This helps minimize precision loss: it gives decent initial liquidity values for a range of + // sizes of pools, such that an individual LP token is granular enough for depositing and withdrawing for most users. + // In particular, though, we don't calculate the sqrt here, which is an expensive function; we instead verify that the + // amount minted is valid by checking that it squares to the correct product + let initial_lq = pool_output_datum.circulating_lp + expect + shared.is_sqrt(coin_a_amt_sans_protocol_fees * coin_b_amt, initial_lq) + + // And check that we mint the correct tokens, and nothing else. 
+ let expected_mint = + shared.to_value((own_policy_id, new_pool_ref_token, 1)) + |> assets.merge( + shared.to_value((own_policy_id, new_pool_nft_token, 1)), + ) + |> assets.merge( + shared.to_value((own_policy_id, new_pool_lp_token, initial_lq)), + ) + let mint_is_correct = transaction.mint == expected_mint + + // Confirm that the correct funds (asset A, asset B, the correct amount of ADA, and the pool NFT) get paid to the pool output + let funds_spent_to_pool = + has_expected_pool_value( + own_policy_id, + new_pool_id, + pool_output.value, + asset_a.1st, + asset_a.2nd, + coin_a_amt_sans_protocol_fees, + asset_b.1st, + asset_b.2nd, + coin_b_amt, + initial_lq, + pool_output_datum.protocol_fees, + ) + + // Make sure we send the pool metadata token to the metadata admin + // We use an index from the redeemer to skip to the right output, in case there are multiple outputs to the metadata admin + // This is safe to do for the usual reasons: if they point at a UTXO without the ref token, the transaction will fail. 
+ expect Some(metadata_output) = + list.at(transaction.outputs, metadata_output_ix) + expect metadata_output.address == settings_datum.metadata_admin + expect + assets.quantity_of( + metadata_output.value, + own_policy_id, + new_pool_ref_token, + ) == 1 + + // We also check that the datum on the metadata output is void; It would be complex and inflexible to enforce any particular structure on this, so we + // instead leave it to the metadata admin to spend the output and provide it the correct datum; We also don't want to leave it unspecified, because + // 1) the metadata admin might actually be a script address, in which case having no datum will permanently lock the metadata + // 2) the pool minter might include malicious metadata, such as an icon pointing at hardcore porn; until the metadata admin spent it, this would appear in users' wallets, + // and potentially even on access UIs for the Sundae protocol + expect metadata_output.datum == InlineDatum(Void) + + expect Some(..) = + pairs.get_first( + transaction.withdrawals, + address.Script(pool_output_datum.condition), + ) + + // And check that the datum is initialized correctly; This is part of why we have a minting policy handling this, + // as it allows us to authenticate the provenance of the datum. 
+ // A datum is valid so long as + // - the pool identifier is set correctly + // - the assets are set correctly + // - the initial circulating supply is set correctly + // - the initial and final fees per 10,000 are both non-negative (>= 0%) + // - the initial and final fees per 10,000 are both less than or equal to 10000 (<= 100%) + let pool_output_datum_correct = and { + pool_output_datum.identifier == new_pool_id, + pool_output_datum.assets == (asset_a, asset_b), + pool_output_datum.circulating_lp == initial_lq, + } + + // Make sure that the pool output is paid into own_policy_id (the pool script, remember this is a multivalidator) + // and that one of the valid staking addresses is attached + expect pool_output.address.payment_credential == Script(own_policy_id) + expect + list.any( + settings_datum.authorized_staking_keys, + fn(a) { pool_output.address.stake_credential == Some(Inline(a)) }, + ) + + // And then check each of the conditions above as the condition for minting + and { + coin_pair_ordering_is_canonical, + mint_is_correct, + funds_spent_to_pool, + pool_output_datum_correct, + } + } + // When minting an LP token, we just need to make sure the pool script is being spent, as it will enforce the correct + // name and quantity of the LP tokens. + // + // To do that, we could check for the pool NFT on the inputs, but this is expensive, especially if the pool input ends up being one of the last. + // So instead we check that the pool NFT is in the first output (this is safe to assume because it's unique, and if it's in any other output it will fail) + // and that we're not minting the pool token (i.e. 
someone could "pretend" to mint LP tokens, but also mint the pool token to make it look like a scoop) + // + // So, lets enumerate the possible cases: + // - We use the CreatePool redeemer; this checks that *only* the correct pool token and correct number of LP tokens are minted + // - We use the MintLP redeemer; this checks that the pool token (which is unique and locked in the pool script) is in the outputs, and not minted + // - the pool script checks that only the correct number of LP tokens, and nothing else under this policy ID, are minted + // And the impossible cases: + // - During CreatePool, it would be impossible to mint multiple of the same pool tokens; a different pool token; a different number of LP tokens; or a different pool's LP tokens + // - During MintLP, it would be impossible to mint the relevant pool token; thus, the pool script must run, and thus it will be impossible to mint another pool token, a different pool + // ident pool token, a different quantity of LP tokens, or a different pools LP tokens + ConditionMintLP(pool_ident) -> { + let pool_nft_name = shared.pool_nft_name(pool_ident) + expect Some(pool_output) = list.head(transaction.outputs) + and { + ( + pool_output.value + |> assets.quantity_of(own_policy_id, pool_nft_name) + ) == 1, + ( + transaction.mint + |> assets.quantity_of(own_policy_id, pool_nft_name) + ) == 0, + } + } + ConditionBurnPool(pool_ident) -> { + // Burning an asset is only possible when spending it, so if we enforce + // that the mints consist of exactly 1 burn for the specified pool NFT + // then we can defer to the pool spending validator + let pool_nft_name = shared.pool_nft_name(pool_ident) + let expected_mint = shared.to_value((own_policy_id, pool_nft_name, -1)) + transaction.mint == expected_mint + } + } + } + + else(_) { + fail + } +} + +/// Check that the UTXO contents are correct given a specific pool outcome +/// In particular, it must have the final A reserves, the final B reserves, the pool NFT, and the 
protocol fees +pub fn has_expected_pool_value( + pool_script_hash: PolicyId, + identifier: Ident, + output_value: Value, + pool_policy_a: PolicyId, + pool_asset_name_a: AssetName, + pool_quantity_a: Int, + pool_policy_b: PolicyId, + pool_asset_name_b: AssetName, + pool_quantity_b: Int, + final_lp: Int, + final_protocol_fees: Int, +) -> Bool { + // Asset A *could* be ADA; in which case there should be 3 tokens on the output + // (ADA, Asset B, and the NFT) + if pool_policy_a == ada_policy_id { + let actual = + list.foldl( + assets.flatten(output_value), + // (token count, lovelace amount, token b amount, pool nft amount) + (0, 0, 0, 0), + fn(asset, acc) { + let token_count = acc.1st + 1 + if asset.1st == pool_policy_a { + (token_count, acc.2nd + asset.3rd, acc.3rd, acc.4th) + } else if asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { + (token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th) + } else { + expect asset == (pool_script_hash, pool_nft_name(identifier), 1) + (token_count, acc.2nd, acc.3rd, acc.4th + 1) + } + }, + ) + // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token + let expected = + if final_lp == 0 { + expect pool_quantity_a == 0 + expect pool_quantity_b == 0 + (2, final_protocol_fees, 0, 1) + } else { + (3, final_protocol_fees + pool_quantity_a, pool_quantity_b, 1) + } + // Rather than constructing a value directly (which can be expensive) + // we can just compare the expected token count and amounts with a single pass over the value + expected == actual + } else { + // Asset A isn't ADA, Asset B will *never* be ADA; in this case, there should be 4 tokens on the output: + // ADA, the Pool NFT, Asset A, and Asset B + let actual = + list.foldl( + assets.flatten(output_value), + // (token count, lovelace amount, token a amount, token b amount, pool nft amount) + (0, 0, 0, 0, 0), + fn(asset, acc) { + let token_count = acc.1st + 1 + if asset.1st == ada_policy_id { + (token_count, acc.2nd + asset.3rd, acc.3rd, 
acc.4th, acc.5th) + } else if asset.1st == pool_policy_a && asset.2nd == pool_asset_name_a { + (token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th, acc.5th) + } else if asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { + (token_count, acc.2nd, acc.3rd, acc.4th + asset.3rd, acc.5th) + } else { + expect asset == (pool_script_hash, pool_nft_name(identifier), 1) + (token_count, acc.2nd, acc.3rd, acc.4th, acc.5th + 1) + } + }, + ) + // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token + let expected = + if final_lp == 0 { + expect pool_quantity_a == 0 + expect pool_quantity_b == 0 + (2, final_protocol_fees, 0, 0, 1) + } else { + (4, final_protocol_fees, pool_quantity_a, pool_quantity_b, 1) + } + expected == actual + } +} + +// Compare two policy IDs to determine a sort order; used in particular to enforce an ordering on the assets in the pool +fn compare_asset_class(a: AssetClass, b: AssetClass) { + let (a_policy, a_token) = a + let (b_policy, b_token) = b + when bytearray.compare(a_policy, b_policy) is { + Less -> Less + Equal -> bytearray.compare(a_token, b_token) + Greater -> Greater + } +} + +// Convert a specific integer (like a UTXO index) into a byte array, so we can construct a hashable string when minting the pool +pub fn int_to_ident(n: Int) -> Ident { + expect n < 256 + bytearray.push(#"", n) +} + +// In order to keep the script size small for the pool script, we defer some functions to a separate stake script; +// when the treasury administrator attempts to withdraw fees, or the fee manager attempts to update the pool fee, +// it only checks for this particular script hash. 
This script, using the withdraw 0 trick, then checks the correct invariants +validator manage(settings_policy_id: PolicyId) { + else(ctx: ScriptContext) { + expect redeemer: ConditionManageRedeemer = ctx.redeemer + let transaction = ctx.transaction + let Transaction { + inputs, + outputs, + reference_inputs, + mint, + extra_signatories, + validity_range, + withdrawals, + .. + } = transaction + + let settings_datum = + find_settings_datum(reference_inputs, settings_policy_id) + + when redeemer is { + // In order to withdraw `amount` fees into `treasury_output` utxo, looking at `pool_input` + ConditionWithdrawFees { amount, treasury_output, pool_input } -> { + // Find the pool input; note that we don't look for the pool NFT here, because if someone + // spends with an unauthenticated UTXO, it will fail the spend script; and if someone + // spends with a different script address, this script can't do anything fishy, + // just enforces some things about the outputs + // We also can't pull this out of the when, because we don't have the pool_input index yet + expect Some(pool_input) = list.at(inputs, pool_input) + let pool_input = pool_input.output + expect InlineDatum(datum) = pool_input.datum + expect datum: ConditionPoolDatum = datum + expect Script(pool_script_hash) = pool_input.address.payment_credential + let ConditionPoolDatum { + circulating_lp: initial_circulating_lp, + protocol_fees: initial_protocol_fees, + .. + } = datum + // Make sure we withdraw *only* up to what we've earned + // We allow less than, so that you can leave some behind for the minUTXO cost, or continuing to earn staking rewards, etc. 
+ expect amount <= initial_protocol_fees + + // Only the treasury administrator is allowed to withdraw the fees, to prevent DDOS, and because of the allowance below + expect + multisig.satisfied( + settings_datum.treasury_admin, + extra_signatories, + validity_range, + withdrawals, + ) + + // Asking the DAO to approve every single cost individually would be a small cognitive DDOS on the community + // Instead, the DAO can set an "allowance", which is a percentage of each withdrawal that is entrusted to the + // treasury administrator to pay those basic fees, such as paying the scoopers, or running incentive programs, etc. + // + // In particular, it's a percentage, to ensure that splitting up the withdrawals into multiple transactions doesn't + // allow them to game that withdrawal. + let allowance = + amount * settings_datum.treasury_allowance.1st / settings_datum.treasury_allowance.2nd + let to_treasury = amount - allowance + + // And, we must pay everything except the allowance amount to the treasury address + // We use the `treasury_output` index to skip to it quickly, rather than scanning for the output + // TODO: should we instead sum all the values at the treasury output, to allow + // paying out in multiple UTXOs for some reason? + expect Some(treasury_output) = list.at(outputs, treasury_output) + expect treasury_output.address == settings_datum.treasury_address + // We ensure that it's sent with the Void datum. This is because the treasury is likely to be a script address + // and scripts are unspendable without a datum; We also don't have any notion of what the "correct" datum would be + // so we just enforce it to void. If the output datum ever needed a specific datum, we would have to use a proxy address + // that worked with the void datum, and paid to the real treasury with the correct datum. + // TODO: should we just let the treasury admin specify the datum on the redeemer? Or include it in the settings? 
+ expect treasury_output.datum == InlineDatum(Void) + // And make sure we pay at least that much in that output. It could be more, for example to donate other ADA from other sources to that address + expect assets.lovelace_of(treasury_output.value) >= to_treasury + + if initial_circulating_lp == 0 { + // If there is no liquidity, just require the treasury admin to withdraw everything + expect amount == initial_protocol_fees + // If circulating_lp is 0, all of the assets have been withdrawn, and so the UTXO will be + // ADA (for the treasury fees) and the pool NFT; so we can very cleverly check that the pool + // NFT is burned by negating the input, and stripping off the lovelace + expect + mint == assets.negate(assets.without_lovelace(pool_input.value)) + True + } else { + let ( + Output { + address: pool_output_address, + value: pool_output_value, + .. + }, + output_datum, + ) = find_pool_output(outputs) + expect + pool_output_address.payment_credential == Script(pool_script_hash) + + // As part of withdrawing, we should decrease the protocol fees by the amount we're withdrawing + // but, importantly, *nothing else*; so we construct a datum with everything from the initial datum, plus the protocol fees updated + let expected_datum = + ConditionPoolDatum { + ..datum, + protocol_fees: initial_protocol_fees - amount, + } + expect output_datum == expected_datum + + // Now, check that the pool output decreases *only* by the amount we're withdrawing, and not by fewer or greater ADA + let expected_output_value = + assets.merge(pool_input.value, assets.from_lovelace(-amount)) + expect pool_output_value == expected_output_value + expect + list.any( + settings_datum.authorized_staking_keys, + fn(a) { pool_output_address.stake_credential == Some(Inline(a)) }, + ) + True + } + } + // To update the pool fees for the pool at `pool_input`... 
+ ConditionUpdateCondition { pool_input } -> { + // Find the pool input; note that we don't look for the pool NFT here, because if someone + // spends with an unauthenticated UTXO, it will fail the spend script; and if someone + // spends with a different script address, this script can't do anything fishy, + // just enforces some things about the outputs + // This is duplicated code with the other branch, but only because we don't have pool_input yet + expect Some(pool_input) = list.at(inputs, pool_input) + let pool_input = pool_input.output + expect InlineDatum(datum) = pool_input.datum + expect datum: ConditionPoolDatum = datum + // We need the pool output to check that only the fees or fee manager are updated + let ( + Output { address: pool_output_address, value: pool_output_value, .. }, + pool_output_datum, + ) = find_pool_output(outputs) + + let ConditionPoolDatum { + condition: output_condition, + fee_manager: output_fee_manager, + .. + } = pool_output_datum + + let expected_datum = + ConditionPoolDatum { + ..datum, + condition: output_condition, + fee_manager: output_fee_manager, + } + expect pool_output_datum == expected_datum + + expect Some(..) 
= + pairs.get_first( + transaction.withdrawals, + address.Script(output_condition), + ) + + // Check that the *current* fee manager approves the update + expect Some(fee_manager) = datum.fee_manager + expect + multisig.satisfied( + fee_manager, + extra_signatories, + validity_range, + withdrawals, + ) + + // And make sure we don't touch the assets on the pool input; they must be spent back into the same script + and { + pool_output_address == pool_input.address, + pool_output_value == pool_input.value, + } + } + } + } +} diff --git a/validators/conditions/default.ak b/validators/conditions/default.ak new file mode 100644 index 0000000..04d90b6 --- /dev/null +++ b/validators/conditions/default.ak @@ -0,0 +1,141 @@ +use aiken/collection/list +use calculation/sub_conditions/default/process.{scoop_default} +use calculation/sub_conditions/default/verify_datum.{default_verify_datum} +use cardano/address.{Credential, Script} +use cardano/assets.{PolicyId} +use cardano/transaction.{InlineDatum, Input, Output, Transaction} +use types/condition_pool.{ + ConditionManage, ConditionPoolDatum, ConditionPoolRedeemer, ConditionPoolScoop, + find_pool_output, +} +use types/conditions/default.{DefaultDatum} as default_types +use types/settings.{find_settings_datum} + +validator default(settings_policy_id: PolicyId) { + withdraw( + redeemer: ConditionPoolRedeemer, + _account: Credential, + transaction: Transaction, + ) { + // First, we destructure the transaction right upfront, because field access is O(n), + // and we want access to these fields with just a single pass over the transaction + // This will be a common pattern throughout the scripts + // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) + let Transaction { + inputs, + outputs, + reference_inputs, + mint, + datums, + extra_signatories, + validity_range, + withdrawals, + .. 
+ } = transaction + + // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop + let ( + Output { address: pool_output_address, value: pool_output_value, .. }, + ConditionPoolDatum { + identifier: actual_identifier, + circulating_lp: actual_circulating_lp, + protocol_fees: actual_protocol_fees, + assets: actual_assets, + market_open: actual_market_open, + condition_datum: actual_condition_datum, + .. + }, + ) = find_pool_output(outputs) + + expect DefaultDatum { + bid_fees_per_10_thousand: actual_bid_fees_per_10_thousand, + ask_fees_per_10_thousand: actual_ask_fees_per_10_thousand, + } = actual_condition_datum + + when redeemer is { + ConditionPoolScoop { .. } | ConditionManage -> { + // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction + expect Some(Input(_, pool_input)) = + list.find( + inputs, + fn(input) { input.output.address == pool_output_address }, + ) + + // And pattern match to get the pool script hash; in particular, this can be used to find the pool output, + // *and* to know the policy ID of pool tokens, because this is a dual spending/minting validator. + expect Script(pool_script_hash) = pool_input.address.payment_credential + + expect InlineDatum(pool_input_datum) = pool_input.datum + + expect ConditionPoolDatum { + identifier, + circulating_lp, + protocol_fees, + assets, + market_open, + condition_datum, + .. + } = pool_input_datum + + expect DefaultDatum { + bid_fees_per_10_thousand, + ask_fees_per_10_thousand, + } = condition_datum + + // The protocol configures many global settings via a "settings" UTXO, updatable by certain administrators + // This is included as a reference input, so we have a utility to check the reference inputs for the settings NFT + // Note: it's important to check for the NFT, because checking just for the address would let someone pay random funds to the settings address. 
+ let settings_datum = + find_settings_datum(reference_inputs, settings_policy_id) + + // Then, there are two different actions that can be taken against a pool: + // - Scooping a batch of orders + // - Withdrawing protocol fees to the treasury + when redeemer is { + // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed + ConditionPoolScoop { signatory_index, scooper_index, input_order, .. } -> + scoop_default( + settings_datum, + inputs, + pool_script_hash, + actual_ask_fees_per_10_thousand, + actual_bid_fees_per_10_thousand, + pool_input, + actual_identifier, + validity_range, + withdrawals, + datums, + input_order, + outputs, + actual_protocol_fees, + mint, + extra_signatories, + signatory_index, + scooper_index, + pool_output_value, + actual_circulating_lp, + actual_assets, + actual_market_open, + pool_output_address, + assets, + protocol_fees, + identifier, + circulating_lp, + market_open, + bid_fees_per_10_thousand, + ask_fees_per_10_thousand, + ) + ConditionManage -> + default_verify_datum( + bid_fees_per_10_thousand, + ask_fees_per_10_thousand, + ) + } + } + } + } + + else(_) { + fail + } +} diff --git a/validators/conditions/default_trading_hours.ak b/validators/conditions/default_trading_hours.ak new file mode 100644 index 0000000..535bc66 --- /dev/null +++ b/validators/conditions/default_trading_hours.ak @@ -0,0 +1,153 @@ +use aiken/collection/list +use calculation/sub_conditions/default/process.{scoop_default} as default_process +use calculation/sub_conditions/default/verify_datum.{default_verify_datum} as default_verify_datum +use calculation/sub_conditions/trading_hours/process.{scoop_trading_hours} as trading_hours_process +use calculation/sub_conditions/trading_hours/verify_datum.{ + trading_hours_verify_datum, +} as trading_hours_verify_datum +use cardano/address.{Credential, Script} +use cardano/assets.{PolicyId} +use cardano/transaction.{InlineDatum, Input, 
Output, Transaction} +use types/condition_pool.{ + ConditionManage, ConditionPoolDatum, ConditionPoolRedeemer, ConditionPoolScoop, + find_pool_output, +} +use types/conditions/default_trading_hours.{DefaultTradingHoursDatum} as default_types +use types/settings.{find_settings_datum} + +validator default_trading_hours(settings_policy_id: PolicyId) { + withdraw( + redeemer: ConditionPoolRedeemer, + _account: Credential, + transaction: Transaction, + ) { + // First, we destructure the transaction right upfront, because field access is O(n), + // and we want access to these fields with just a single pass over the transaction + // This will be a common pattern throughout the scripts + // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) + let Transaction { + inputs, + outputs, + reference_inputs, + mint, + datums, + extra_signatories, + validity_range, + withdrawals, + .. + } = transaction + + // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop + let ( + Output { address: pool_output_address, value: pool_output_value, .. }, + ConditionPoolDatum { + identifier: actual_identifier, + circulating_lp: actual_circulating_lp, + protocol_fees: actual_protocol_fees, + assets: actual_assets, + market_open: actual_market_open, + condition_datum: actual_condition_datum, + .. + }, + ) = find_pool_output(outputs) + + expect DefaultTradingHoursDatum { + bid_fees_per_10_thousand: actual_bid_fees_per_10_thousand, + ask_fees_per_10_thousand: actual_ask_fees_per_10_thousand, + .. + } = actual_condition_datum + + when redeemer is { + ConditionPoolScoop { .. 
} | ConditionManage -> { + // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction + expect Some(Input(_, pool_input)) = + list.find( + inputs, + fn(input) { input.output.address == pool_output_address }, + ) + + // And pattern match to get the pool script hash; in particular, this can be used to find the pool output, + // *and* to know the policy ID of pool tokens, because this is a dual spending/minting validator. + expect Script(pool_script_hash) = pool_input.address.payment_credential + + expect InlineDatum(pool_input_datum) = pool_input.datum + + expect ConditionPoolDatum { + identifier, + circulating_lp, + protocol_fees, + assets, + market_open, + condition_datum, + .. + } = pool_input_datum + + expect DefaultTradingHoursDatum { + bid_fees_per_10_thousand, + ask_fees_per_10_thousand, + open_time, + close_time, + } = condition_datum + + // The protocol configures many global settings via a "settings" UTXO, updatable by certain administrators + // This is included as a reference input, so we have a utility to check the reference inputs for the settings NFT + // Note: it's important to check for the NFT, because checking just for the address would let someone pay random funds to the settings address. + let settings_datum = + find_settings_datum(reference_inputs, settings_policy_id) + + // Then, there are two different actions that can be taken against a pool: + // - Scooping a batch of orders + // - Withdrawing protocol fees to the treasury + when redeemer is { + // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed + ConditionPoolScoop { signatory_index, scooper_index, input_order, .. 
} -> + and { + scoop_default( + settings_datum, + inputs, + pool_script_hash, + actual_ask_fees_per_10_thousand, + actual_bid_fees_per_10_thousand, + pool_input, + actual_identifier, + validity_range, + withdrawals, + datums, + input_order, + outputs, + actual_protocol_fees, + mint, + extra_signatories, + signatory_index, + scooper_index, + pool_output_value, + actual_circulating_lp, + actual_assets, + actual_market_open, + pool_output_address, + assets, + protocol_fees, + identifier, + circulating_lp, + market_open, + bid_fees_per_10_thousand, + ask_fees_per_10_thousand, + ), + scoop_trading_hours(validity_range, open_time, close_time), + } + ConditionManage -> and { + default_verify_datum( + bid_fees_per_10_thousand, + ask_fees_per_10_thousand, + ), + trading_hours_verify_datum(open_time, close_time), + } + } + } + } + } + + else(_) { + fail + } +} diff --git a/validators/pool.ak b/validators/pool.ak index efe23ee..063a7e1 100644 --- a/validators/pool.ak +++ b/validators/pool.ak @@ -6,14 +6,14 @@ use aiken/interval use aiken/primitive/bytearray use calculation/process.{pool_input_to_state, process_orders} use cardano/address.{Address, Inline, Script} -use cardano/assets.{AssetName, PolicyId, Value, ada_policy_id} +use cardano/assets.{PolicyId, Value} use cardano/script_context.{ScriptContext} use cardano/transaction.{ InlineDatum, Input, Output, OutputReference, Transaction, Withdraw, } use shared.{ - AssetClass, Ident, count_orders, own_input_index, pool_lp_name, pool_nft_name, - spent_output, + AssetClass, Ident, count_orders, has_expected_pool_value, own_input_index, + pool_lp_name, spent_output, } use sundae/multisig use types/pool.{ @@ -598,88 +598,6 @@ fn minted_correct_pool_tokens( } } -/// Check that the UTXO contents are correct given a specific pool outcome -/// In particular, it must have the final A reserves, the final B reserves, the pool NFT, and the protocol fees -pub fn has_expected_pool_value( - pool_script_hash: PolicyId, - identifier: Ident, - 
output_value: Value, - pool_policy_a: PolicyId, - pool_asset_name_a: AssetName, - pool_quantity_a: Int, - pool_policy_b: PolicyId, - pool_asset_name_b: AssetName, - pool_quantity_b: Int, - final_lp: Int, - final_protocol_fees: Int, -) -> Bool { - // Asset A *could* be ADA; in which case there should be 3 tokens on the output - // (ADA, Asset B, and the NFT) - if pool_policy_a == ada_policy_id { - let actual = - list.foldl( - assets.flatten(output_value), - // (token count, lovelace amount, token b amount, pool nft amount) - (0, 0, 0, 0), - fn(asset, acc) { - let token_count = acc.1st + 1 - if asset.1st == pool_policy_a { - (token_count, acc.2nd + asset.3rd, acc.3rd, acc.4th) - } else if asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { - (token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th) - } else { - expect asset == (pool_script_hash, pool_nft_name(identifier), 1) - (token_count, acc.2nd, acc.3rd, acc.4th + 1) - } - }, - ) - // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token - let expected = - if final_lp == 0 { - expect pool_quantity_a == 0 - expect pool_quantity_b == 0 - (2, final_protocol_fees, 0, 1) - } else { - (3, final_protocol_fees + pool_quantity_a, pool_quantity_b, 1) - } - // Rather than constructing a value directly (which can be expensive) - // we can just compare the expected token count and amounts with a single pass over the value - expected == actual - } else { - // Asset A isn't ADA, Asset B will *never* be ADA; in this case, there should be 4 tokens on the output: - // ADA, the Pool NFT, Asset A, and Asset B - let actual = - list.foldl( - assets.flatten(output_value), - // (token count, lovelace amount, token a amount, token b amount, pool nft amount) - (0, 0, 0, 0, 0), - fn(asset, acc) { - let token_count = acc.1st + 1 - if asset.1st == ada_policy_id { - (token_count, acc.2nd + asset.3rd, acc.3rd, acc.4th, acc.5th) - } else if asset.1st == pool_policy_a && asset.2nd == pool_asset_name_a { - 
(token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th, acc.5th) - } else if asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { - (token_count, acc.2nd, acc.3rd, acc.4th + asset.3rd, acc.5th) - } else { - expect asset == (pool_script_hash, pool_nft_name(identifier), 1) - (token_count, acc.2nd, acc.3rd, acc.4th, acc.5th + 1) - } - }, - ) - // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token - let expected = - if final_lp == 0 { - expect pool_quantity_a == 0 - expect pool_quantity_b == 0 - (2, final_protocol_fees, 0, 0, 1) - } else { - (4, final_protocol_fees, pool_quantity_a, pool_quantity_b, 1) - } - expected == actual - } -} - // Compare two policy IDs to determine a sort order; used in particular to enforce an ordering on the assets in the pool fn compare_asset_class(a: AssetClass, b: AssetClass) { let (a_policy, a_token) = a diff --git a/validators/tests/pool.ak b/validators/tests/pool.ak index 5c63135..3a44b59 100644 --- a/validators/tests/pool.ak +++ b/validators/tests/pool.ak @@ -15,7 +15,7 @@ use cardano/transaction.{ Transaction, Withdraw, } use pool as pool_validator -use shared.{pool_lp_name} +use shared.{has_expected_pool_value, pool_lp_name} use sundae/multisig use tests/constants use tests/examples/ex_settings.{ @@ -1406,7 +1406,7 @@ test has_expected_pool_value_test() { } let protocol_fees = 2_000_000 - pool_validator.has_expected_pool_value( + has_expected_pool_value( constants.pool_script_hash, constants.pool_ident, pool_value, @@ -1453,7 +1453,7 @@ test has_expected_pool_value_test2() { } let protocol_fees = 2_000_000 - pool_validator.has_expected_pool_value( + has_expected_pool_value( constants.pool_script_hash, constants.pool_ident, pool_value, @@ -1482,7 +1482,7 @@ test has_expected_pool_value_withdraw_all() { } let protocol_fees = 3_000_000 - pool_validator.has_expected_pool_value( + has_expected_pool_value( constants.pool_script_hash, constants.pool_ident, pool_value, From 
4da83695f8925eb6107cc950a16bec07f020935c Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Tue, 12 Nov 2024 14:59:00 +0100 Subject: [PATCH 3/8] simplified and permissioned types --- aiken.lock | 2 +- lib/calculation/process.ak | 45 +- .../sub_conditions/default/process.ak | 306 ------- .../sub_conditions/default/verify_datum.ak | 11 - lib/calculation/sub_conditions/shared.ak | 287 ------ .../sub_conditions/trading_hours/process.ak | 42 - .../trading_hours/verify_datum.ak | 10 - lib/tests/examples/ex_pool.ak | 2 + lib/types/condition_pool.ak | 109 --- lib/types/conditions/default.ak | 6 - lib/types/conditions/default_trading_hours.ak | 8 - lib/types/conditions/permissioned.ak | 27 + lib/types/conditions/trading_hours.ak | 4 + lib/types/pool.ak | 3 + validators/condition_pool.ak | 860 ------------------ validators/conditions/default.ak | 141 --- .../conditions/default_trading_hours.ak | 153 ---- validators/conditions/trading_hours.ak | 99 ++ validators/oracle.ak | 2 + validators/pool.ak | 34 +- validators/tests/pool.ak | 20 + 21 files changed, 216 insertions(+), 1955 deletions(-) delete mode 100644 lib/calculation/sub_conditions/default/process.ak delete mode 100644 lib/calculation/sub_conditions/default/verify_datum.ak delete mode 100644 lib/calculation/sub_conditions/shared.ak delete mode 100644 lib/calculation/sub_conditions/trading_hours/process.ak delete mode 100644 lib/calculation/sub_conditions/trading_hours/verify_datum.ak delete mode 100644 lib/types/condition_pool.ak delete mode 100644 lib/types/conditions/default.ak delete mode 100644 lib/types/conditions/default_trading_hours.ak create mode 100644 lib/types/conditions/permissioned.ak create mode 100644 lib/types/conditions/trading_hours.ak delete mode 100644 validators/condition_pool.ak delete mode 100644 validators/conditions/default.ak delete mode 100644 validators/conditions/default_trading_hours.ak create mode 100644 validators/conditions/trading_hours.ak diff --git a/aiken.lock 
b/aiken.lock index b285a5e..1cbf34e 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1730989129, nanos_since_epoch = 210284116 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1731417833, nanos_since_epoch = 808697597 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] diff --git a/lib/calculation/process.ak b/lib/calculation/process.ak index 6a45e41..b49aa38 100644 --- a/lib/calculation/process.ak +++ b/lib/calculation/process.ak @@ -1,5 +1,6 @@ use aiken/cbor use aiken/collection/dict.{Dict} +use aiken/collection/list use aiken/crypto.{Blake2b_256, Hash} use aiken/interval use calculation/deposit @@ -142,7 +143,7 @@ pub fn process_order( // TODO: we can probably avoid returning the outputs, and just return a boolean outputs: List, // A continuation to call with the next pool state and the list of outputs; this is more efficient than constructing an object and tuples - continuation: fn(Int, Int, Int, List) -> Bool, + continuation: fn(Int, Int, Int, List, Bool) -> Bool, ) -> Bool { // Returns the updated pool state, the correct list of outputs to resume from, and total fee charged by the order when details is { @@ -214,7 +215,13 @@ pub fn process_order( min_received, output, ) - continuation(new_pool_a, new_pool_b, pool_quantity_lp, rest_outputs) + continuation( + new_pool_a, + new_pool_b, + pool_quantity_lp, + rest_outputs, + False, + ) } order.Deposit(assets) -> { expect [output, ..rest_outputs] = outputs @@ -244,7 +251,7 @@ pub fn process_order( fee, output, ) - continuation(new_a, new_b, new_lp, rest_outputs) + continuation(new_a, new_b, new_lp, rest_outputs, False) } order.Withdrawal(amount) -> { expect [output, ..rest_outputs] = outputs @@ -274,7 +281,7 @@ pub fn process_order( fee, output, ) - continuation(new_a, new_b, new_lp, rest_outputs) + continuation(new_a, new_b, 
new_lp, rest_outputs, True) } // NOTE: we decided not to implement zap, for time constraints, and because a zap can be easily implemented as a chained order, as it is in V1 // The cheaper fees the DAO voted on should make this acceptable @@ -308,9 +315,9 @@ pub fn process_order( // so we can skip over it // TODO: can we just return used_output here instead of passing around the lists of outputs? if used_output { - continuation(new_a, new_b, pool_quantity_lp, rest_outputs) + continuation(new_a, new_b, pool_quantity_lp, rest_outputs, False) } else { - continuation(new_a, new_b, pool_quantity_lp, outputs) + continuation(new_a, new_b, pool_quantity_lp, outputs, False) } } order.Record(policy) -> { @@ -324,6 +331,7 @@ pub fn process_order( pool_quantity_b, pool_quantity_lp, rest_outputs, + False, ) } } @@ -373,8 +381,9 @@ pub fn process_orders( // A recursive aggregator for the number of "simple" and "strategy" orders we see; used for computing the fee without traversing the list independently, since we're already walking this list simple_count: Int, strategy_count: Int, + withdrawal_only: Bool, // A continuation to call with the final pool state; more efficient than constructing tuples / objects - continuation: fn(Int, Int, Int, Int, Int) -> Bool, + continuation: fn(Int, Int, Int, Int, Int, Bool) -> Bool, ) -> Bool { // Returns the final pool state, and the count of each order type // The main "pump" of the recursive loop is the input_order, which is a set of indices into the inputs list @@ -388,6 +397,7 @@ pub fn process_orders( pool_quantity_lp, simple_count, strategy_count, + withdrawal_only, ) [(idx, sse, _), ..rest] -> { // First, it's important to check that each order is processed only once; @@ -439,6 +449,7 @@ pub fn process_orders( new_b, new_lp, next_orders, + new_withdrawal_only, <- process_order( pool_policy_a, @@ -498,6 +509,7 @@ pub fn process_orders( next_uniqueness_flag, next_simple_count, next_strategy_count, + new_withdrawal_only && withdrawal_only, 
continuation, ) } @@ -588,6 +600,7 @@ test process_orders_test() { new_lp, simple, strategies, + withdrawal_only, <- process_orders( #"", @@ -616,6 +629,7 @@ test process_orders_test() { 0, 0, 0, + True, ) expect new_a == 1_001_000_000 @@ -623,6 +637,7 @@ test process_orders_test() { expect new_lp == 1_000_000_000 expect simple == 1 expect strategies == 0 + expect withdrawal_only == False True } @@ -723,6 +738,7 @@ test process_30_shuffled_orders_test() { new_lp, simple, strategies, + withdrawal_only, <- process_orders( #"", @@ -751,7 +767,20 @@ test process_30_shuffled_orders_test() { 0, 0, 0, + True, ) - new_a == 1_030_000_000 && new_b == 1_030_000_000 && new_lp == 1_000_000_000 && simple == 30 && strategies == 0 + new_a == 1_030_000_000 && new_b == 1_030_000_000 && new_lp == 1_000_000_000 && simple == 30 && strategies == 0 && !withdrawal_only +} + +pub fn find_pool_output(outputs: List) -> (Output, PoolDatum) { + // Find the pool output; we can assume the pool output is the first output, because: + // - The ledger doesn't reorder outputs, just inputs + // - We check that the address is correct, so if the first output was to a different contract, we would fail + // - We check that the datum is the correct type, meaning we can't construct an invalid pool output + // - Later, we check that the pool output has the correct value, meaning it *must* contain the pool token, so we can't pay to the pool script multiple times + expect Some(pool_output) = list.head(outputs) + expect InlineDatum(output_datum) = pool_output.datum + expect output_datum: PoolDatum = output_datum + (pool_output, output_datum) } diff --git a/lib/calculation/sub_conditions/default/process.ak b/lib/calculation/sub_conditions/default/process.ak deleted file mode 100644 index 0918541..0000000 --- a/lib/calculation/sub_conditions/default/process.ak +++ /dev/null @@ -1,306 +0,0 @@ -use aiken/collection/dict.{Dict} -use aiken/collection/list -use aiken/crypto.{DataHash, ScriptHash, VerificationKeyHash} 
-use aiken/interval -use calculation/process.{process_orders} -use calculation/sub_conditions/shared.{minted_correct_pool_tokens} as sub_conditions_shared -use cardano/address.{Address, Credential} -use cardano/assets.{AssetName, Lovelace, PolicyId, Value} -use cardano/transaction.{Input, Output, ValidityRange} -use shared.{ - AssetClass, Ident, count_orders, has_expected_pool_value, pool_lp_name, -} -use types/order.{SignedStrategyExecution} -use types/settings.{SettingsDatum} - -pub fn scoop_default( - settings_datum: SettingsDatum, - inputs: List, - pool_script_hash: ScriptHash, - actual_ask_fees_per_10_thousand: Int, - actual_bid_fees_per_10_thousand: Int, - pool_input: Output, - actual_identifier: Ident, - validity_range: ValidityRange, - withdrawals: Pairs, - datums: Dict, - input_order: List<(Int, Option, Int)>, - outputs: List, - actual_protocol_fees: Int, - mint: Value, - extra_signatories: List, - signatory_index: Int, - scooper_index: Int, - pool_output_value: Value, - actual_circulating_lp: Int, - actual_assets: (AssetClass, AssetClass), - actual_market_open: Int, - pool_output_address: Address, - assets: (AssetClass, AssetClass), - protocol_fees: Int, - identifier: Ident, - circulating_lp: Int, - market_open: Int, - bid_fees_per_10_thousand: Int, - ask_fees_per_10_thousand: Int, -) -> Bool { - // Deconstruct the settings datum with the fields we need for a scoop - let SettingsDatum { - authorized_scoopers, - base_fee, - simple_fee, - strategy_fee, - .. 
- } = settings_datum - - // Do a simple scan over the orders to count up the number of orders we'll be processing - // This is unavoidable, because it's part of making sure that the provided redeemer set isn't - // excluding orders - let real_order_count = count_orders(inputs) - // Calculate the portion of the fee that each order will be - // entitled to pay; - // Because the division is rounded down, we add real_order_count and subtact 1 - // to ensure that we take the ceiling instead, and round in the protocols favor. - let amortized_base_fee = - ( base_fee + real_order_count - 1 ) / real_order_count - // Make sure it's not negative, for example if base_fee was negative - expect amortized_base_fee >= 0 - - // Construct the initial pool state from the datum and the locked values - // This intermediate state will be updated as we process each order, allowing us to do a scan over each input - // In particular, it calculates what fees we should be charging (because of the linear fee decay) and the actual tradable reserves - // (excluding protocol fees, which shouldn't factor into the price) - // Note: this abomination is brought to you by the fact that constructing and destructuring structs - // is expensive, so it's cheaper to have **massive** lambdas / continuations - let - pool_policy_a, - pool_asset_name_a, - pool_quantity_a, - pool_policy_b, - pool_asset_name_b, - pool_quantity_b, - pool_policy_lp, - pool_asset_name_lp, - pool_quantity_lp, - bid_fees, - ask_fees, - initial_protocol_fees, - <- - default_pool_input_to_state( - pool_script_hash, - assets, - protocol_fees, - identifier, - circulating_lp, - bid_fees_per_10_thousand, - ask_fees_per_10_thousand, - pool_input, - ) - - // Process the orders in order, and decide the final pool state we should see - // This also counts up the number of simple / strategy orders, which let us compute the effective protocol fee. 
- // for optimization purposes, there are quite a lot of parameters, and their interaction is quite subtle - let - final_a, - final_b, - final_lp, - simple_count, - strategy_count, - <- - process_orders( - actual_identifier, - // The pool identifier, so we can check that each order is for this pool - validity_range, - // The validity range of the transaction, so we can check strategies haven't expired - withdrawals, - // Include the withdrawals, in case a strategy has some kind of attached script condition - datums, - // The datums, so we can look up the datum of each order (which may be inline, but may also be in the datums dict) - // The initial pool state, such as the reserves and circulating LP - pool_policy_a, - pool_asset_name_a, - pool_quantity_a, - pool_policy_b, - pool_asset_name_b, - pool_quantity_b, - pool_policy_lp, - pool_asset_name_lp, - pool_quantity_lp, - input_order, - // The input ordering specified by the scooper - bid_fees, - // The liquidity provider fee to charge for bids (A -> B), in parts per 10,000 (basis points) - ask_fees, - // ... for Ask (swap B -> A) - amortized_base_fee, - // The base fee split by the number of orders, paid for each user - simple_fee, - // The fee to charge for each "simple" order (swap, deposit, withdrawal, etc.) 
- strategy_fee, - // The fee to charge for each "strategy" order - 0, - // The previous index we processed, intitially 0; this lets us detect if we need to "restart" the input list - inputs, - // *All* inputs, so we can start over at the beginning of the list if we want - inputs, - // *Remaining* inputs, so we can advance through the list one by one so long as the orders are in order - list.drop(outputs, 1), - // The list of outputs we should be comparing orders against - 0, - // A uniqueness bit-flag, to detect which orders have already been processed; see lib/calculation/InputSorting.md - 0, - // The accumulated count of "simple" orders, for calculating the fee; set to 0 to start, but incremented in each recursion - 0, - ) - // The accumulated count of "strategy" orders, see line above. - // We need to make sure that the number of orders matches the amount that we processed - // so the scooper doesn't "under-report" the orders and steal the funds on the order - expect simple_count + strategy_count == real_order_count - - // We calculate the expected total collected protocol fee - // We multiply amortized_base_fee, which everyone paid, by the number of orders - // and then the respective fees for each simple order and strategy order - let expected_fees_collected = - amortized_base_fee * real_order_count + simple_count * simple_fee + strategy_count * strategy_fee - - // Make sure we actually increased the protocol fee by exactly this amount - expect actual_protocol_fees == initial_protocol_fees + expected_fees_collected - - // The pool should have all of the scooper fees, and the quantity of each token of the outcome - // Note that initializing the state with `-transaction.fee` means this gets subracted out of the protocol fees - // TODO: do we need to account for this? it seems to have gotten lost in some changes. 
- expect - minted_correct_pool_tokens( - pool_script_hash, - mint, - identifier, - circulating_lp, - final_lp, - ) - - // Check that the scooper is authorized; the protocol can allow *any* scoopers, or limit it to a set of actors - // It's safe to use values provided in the redeemer to efficiently skip to the expected scooper / expected signature - // because at the end of the day, we just care that the scooper has signed the transaction. If the scooper provides - // anything but the correct indexes, it'll just fail the transaction. - expect - when authorized_scoopers is { - Some(authorized_scoopers) -> { - // OPTIMIZATION: skip 10 entries at a time - // OPTIMIZATION: assume scooper is first extra_signatory? have to assume there will only ever be one extra_signatory - expect Some(scooper_sig) = list.at(extra_signatories, signatory_index) - expect Some(scooper) = list.at(authorized_scoopers, scooper_index) - // must be an authorized scooper - scooper_sig == scooper - } - _ -> True - } - - // the market must have opened; this allows projects to pre-create their pool, potentially across multiple protocols, and allows - // people to open orders ahead of time, and avoids things like sniping bots, etc. - // TODO: should we *only* prevent swaps / withdrawals? would it be ok to allow deposits? - // TODO: should we have a "blackout period", where withdrawals are prevented, similar to IPOs? - expect interval.is_entirely_after(validity_range, market_open) - - // We also check that the pool output has the right value (as mentioned above) - // In particular, the pool must have: - // - the pool NFT - // - the correctly adjusted assets from swapping, deposits, withdrawals, etc. 
- // - an additional amount of ADA corresponding to the protocol fees - // - NOTHING ELSE; This is important because someone could add tons of junk tokens and increase the execution units, potentially even freezing the UTXO - expect - has_expected_pool_value( - pool_script_hash, - actual_identifier, - pool_output_value, - pool_policy_a, - pool_asset_name_a, - final_a, - pool_policy_b, - pool_asset_name_b, - final_b, - final_lp, - actual_protocol_fees, - ) - // Now, we check various things about the output datum to ensure they're each correct. - // Check that the datum correctly records the final circulating LP, accounting for any deposits and withdrawals - // In particular, this is important because that circulating supply is exaclty what determines the users ownership of assets in the pool - // If this gets out of sync with what we've actually minted, then users will be able to either redeem assets they aren't entitled to, - // or be unable to access funds they are entitled to. - expect actual_circulating_lp == final_lp - - // Make sure the protocol fees have been correctly updated - expect actual_protocol_fees == initial_protocol_fees + expected_fees_collected - - // And make sure each of these fields is unchanged - and { - identifier == actual_identifier, - assets == actual_assets, - bid_fees_per_10_thousand == actual_bid_fees_per_10_thousand, - ask_fees_per_10_thousand == actual_ask_fees_per_10_thousand, - market_open == actual_market_open, - // Finally, make sure we don't change the stake credential; this can only be done when withdrawing fees, by the treasury administrator - pool_input.address.stake_credential == pool_output_address.stake_credential, - } -} - -/// Construct the initial pool state for processing a set of orders -pub fn default_pool_input_to_state( - pool_token_policy: PolicyId, - assets: (AssetClass, AssetClass), - protocol_fees: Int, - identifier: Ident, - circulating_lp: Int, - bid_fees_per_10_thousand: Int, - ask_fees_per_10_thousand: Int, 
- input: Output, - continuation: fn( - PolicyId, - AssetName, - Int, - PolicyId, - AssetName, - Int, - PolicyId, - AssetName, - Int, - Int, - Int, - Int, - ) -> - Bool, -) -> Bool { - let (asset_a, asset_b) = assets - let (asset_a_policy_id, asset_a_name) = asset_a - let (asset_b_policy_id, asset_b_name) = asset_b - // If asset_a is ADA, then we need to not consider the protocol fees as part of this - // We don't have to check asset_b, because assets are guaranteed to be in lexicographical order. - let min_utxo = - if asset_a_policy_id == assets.ada_policy_id { - protocol_fees - } else { - 0 - } - // Get the maximum of market_open and the transaction valid from so we can calculate the fees correctly - // Note: we use valid_from, as this favors the protocol: you pay the fees for the *earliest* moment your order *could* have executed. - // Scoopers could in theory set a wide validity range to cause users to overpay, but this should be considered malicious activity and - // get the scooper removed from the list of valid scoopers / ignore scooper rewards - // TODO: we could solve this by enforcing a validity range, and checking the length is within 4 hours. - let bid_fees = bid_fees_per_10_thousand - let ask_fees = ask_fees_per_10_thousand - // Then construct the pool state. We include the assets here, instead of just the reserves, so we can check the values of each order - // TODO: we could potentially save quite a bit by not passing around this object, and passing around a lot of parameters instead... 
- continuation( - asset_a_policy_id, - asset_a_name, - assets.quantity_of(input.value, asset_a_policy_id, asset_a_name) - min_utxo, - asset_b_policy_id, - asset_b_name, - assets.quantity_of(input.value, asset_b_policy_id, asset_b_name), - pool_token_policy, - pool_lp_name(identifier), - circulating_lp, - bid_fees, - ask_fees, - protocol_fees, - ) -} diff --git a/lib/calculation/sub_conditions/default/verify_datum.ak b/lib/calculation/sub_conditions/default/verify_datum.ak deleted file mode 100644 index b86d1bb..0000000 --- a/lib/calculation/sub_conditions/default/verify_datum.ak +++ /dev/null @@ -1,11 +0,0 @@ -use shared - -pub fn default_verify_datum( - bid_fees_per_10_thousand: Int, - ask_fees_per_10_thousand: Int, -) -> Bool { - and { - shared.fees_in_legal_range(bid_fees_per_10_thousand), - shared.fees_in_legal_range(ask_fees_per_10_thousand), - } -} diff --git a/lib/calculation/sub_conditions/shared.ak b/lib/calculation/sub_conditions/shared.ak deleted file mode 100644 index 2fad000..0000000 --- a/lib/calculation/sub_conditions/shared.ak +++ /dev/null @@ -1,287 +0,0 @@ -use aiken/collection/dict.{Dict} -use aiken/crypto.{Blake2b_256, Hash} -use calculation/process.{validate_pool_id} -use calculation/shared.{check_and_set_unique, - unsafe_fast_index_skip_with_tail} as calculation_shared -use calculation/withdrawal -use cardano/assets.{AssetName, PolicyId, Value} -use cardano/transaction.{Input, Output} -use shared.{AssetClass, Ident, datum_of, is_script, pool_lp_name} -use types/order.{Destination, Order, OrderDatum, SignedStrategyExecution} - -/// Construct the initial pool state for processing a set of orders -pub fn condition_pool_input_to_state( - pool_token_policy: PolicyId, - assets: (AssetClass, AssetClass), - protocol_fees: Int, - identifier: Ident, - circulating_lp: Int, - input: Output, - continuation: fn( - PolicyId, - AssetName, - Int, - PolicyId, - AssetName, - Int, - PolicyId, - AssetName, - Int, - Int, - ) -> - Bool, -) -> Bool { - let (asset_a, 
asset_b) = assets - let (asset_a_policy_id, asset_a_name) = asset_a - let (asset_b_policy_id, asset_b_name) = asset_b - // If asset_a is ADA, then we need to not consider the protocol fees as part of this - // We don't have to check asset_b, because assets are guaranteed to be in lexicographical order. - let min_utxo = - if asset_a_policy_id == assets.ada_policy_id { - protocol_fees - } else { - 0 - } - - // Then construct the pool state. We include the assets here, instead of just the reserves, so we can check the values of each order - // TODO: we could potentially save quite a bit by not passing around this object, and passing around a lot of parameters instead... - continuation( - asset_a_policy_id, - asset_a_name, - assets.quantity_of(input.value, asset_a_policy_id, asset_a_name) - min_utxo, - asset_b_policy_id, - asset_b_name, - assets.quantity_of(input.value, asset_b_policy_id, asset_b_name), - pool_token_policy, - pool_lp_name(identifier), - circulating_lp, - protocol_fees, - ) -} - -/// Process a single order, comparing it to the output to ensure it was executed faithfully, and returning the new pool state -/// -/// Most of the parameters here are for performance reasons, to avoid destructuring objects, since thats very expensive -pub fn process_withdrawal_order( - // The pool state as of the time the order is executed; If we process multiple orders, this gets passed through each time - pool_policy_a: PolicyId, - pool_asset_name_a: AssetName, - pool_quantity_a: Int, - pool_policy_b: PolicyId, - pool_asset_name_b: AssetName, - pool_quantity_b: Int, - pool_policy_lp: PolicyId, - pool_asset_name_lp: AssetName, - pool_quantity_lp: Int, - // The input being processed - input: Output, - // The details of the order to execute, such as whether it's a swap, the limit, etc. 
- details: Order, - // The max protocol fee that *can* be charged from the order; depending on how big the batch size is, some may be returned as a rebate, but this lets the user limit the maximum that gets charged in case the protocol fees in the settings change - max_protocol_fee: Int, - // The destination where the result of the order must be sent; useful for chaining transactions, as it lets you specify a datum for the output - destination: Destination, - // The base fee, divided among all the participants in the scoop - amortized_base_fee: Int, - // The amount to charge for simple vs strategy orders, taken from the settings - simple_fee: Int, - // A list of outputs, so we can destructure the next output - // TODO: we can probably avoid returning the outputs, and just return a boolean - outputs: List, - // A continuation to call with the next pool state and the list of outputs; this is more efficient than constructing an object and tuples - continuation: fn(Int, Int, Int, List) -> Bool, -) -> Bool { - // Returns the updated pool state, the correct list of outputs to resume from, and total fee charged by the order - expect order.Withdrawal(amount) = details - expect [output, ..rest_outputs] = outputs - // Make sure the scooper can only take up to the max fee the user has agreed to - // (See above) - let fee = amortized_base_fee + simple_fee - expect max_protocol_fee >= fee - // Calculate and validate the result of a withdrawal - let - new_a, - new_b, - new_lp, - <- - withdrawal.do_withdrawal( - pool_policy_a, - pool_asset_name_a, - pool_quantity_a, - pool_policy_b, - pool_asset_name_b, - pool_quantity_b, - pool_policy_lp, - pool_asset_name_lp, - pool_quantity_lp, - input, - amount, - destination, - fee, - output, - ) - continuation(new_a, new_b, new_lp, rest_outputs) -} - -/// Recursively process all orders in the correct order -/// There's a lot of parameters here, mostly for efficiency (though with some redundancies being removed in another branch) -pub fn 
process_withdrawal_orders( - // The pool identifier we're processing, so we can check the order if it has a specific pool - this_pool_ident: Ident, - // The datums in the witness set, in case we need to lookup a non-inline datum - datums: Dict, Data>, - // The initial / current pool state, passed recursively as we process each order - pool_policy_a: PolicyId, - pool_asset_name_a: AssetName, - pool_quantity_a: Int, - pool_policy_b: PolicyId, - pool_asset_name_b: AssetName, - pool_quantity_b: Int, - pool_policy_lp: PolicyId, - pool_asset_name_lp: AssetName, - pool_quantity_lp: Int, - // The list of remaining indices into the inputs, specifying which orders to process - input_order: List<(Int, Option, Int)>, - // The protocol base fee, split across each order - amortized_base_fee: Int, - // The simple and strategy fees from the settings datum - simple_fee: Int, - // The previous order we processed, to check if we need to restart the loop; TODO: we actually pass +1 from this, and i'm not sure how to explain why we do this... - prev_index: Int, - // *all* inputs on the transaction, in case we need to start over from the beginning (i.e. 
wrap around) - all_inputs: List, - // Just the remaining inputs in the list, in case it's more efficient to keep walking from here - remaining_inputs: List, - // The list of remaining outputs to compare the orders against; usually we pass the `tail` of this list recursively, but in the case of donations with no change, we pass outputs through unchanged - outputs: List, - // A number that, when interpreted as a bit flag, indicates which orders we've already processed; used to check if an order is processed more than once (see InputSorting.md) - uniqueness_flag: Int, - // A continuation to call with the final pool state; more efficient than constructing tuples / objects - continuation: fn(Int, Int, Int) -> Bool, -) -> Bool { - // Returns the final pool state, and the count of each order type - // The main "pump" of the recursive loop is the input_order, which is a set of indices into the inputs list - // specified by the scooper for the order to process each order in. - // Once we've reached the end of the list, we can return, but otherwise - when input_order is { - [] -> continuation(pool_quantity_a, pool_quantity_b, pool_quantity_lp) - [(idx, _, _), ..rest] -> { - // First, it's important to check that each order is processed only once; - // This is quite subtle, so check InputSorting.md for a full explanation - let next_uniqueness_flag = check_and_set_unique(uniqueness_flag, idx) - - // Then, we identify where to find the inputs; in particular, to avoid "starting from the beginning" every single time - // when indices are monotonic through the list, we can just continue to advance through the list - // so, all_inputs will always contain the full list of inputs - // while remaining_inputs will just contain the ones "after" the last one we processed. 
- // So, here, we check if we can continue down this path, or if we need to start from the beginning again - let next_input_list = - if idx >= prev_index { - unsafe_fast_index_skip_with_tail(remaining_inputs, idx - prev_index) - } else { - unsafe_fast_index_skip_with_tail(all_inputs, idx) - } - - expect [input_to_process, ..rest_of_input_list] = next_input_list - let Input { output: order, .. } = input_to_process - - // It's important that we fail if we ever try to process a UTXO from a wallet address - // This is a bit unfortunate, because it means we can't support processing orders directly out of a users wallet - // but is important, because we rely on this to check that every order is processed. - // If we didn't do this check, a scooper could include a UTXO from their wallet, and leave a *real* order un-processed, and steal those users funds. - expect is_script(order.address.payment_credential) - - // Find the datum that is associated with this order; we allow that datum to be either inline, or in the witness set, - // to aid in composibility with other protocols - // We also check that the datum is in the format we expect; - // Note: we don't actually check the order address anywhere!! As long as it's a script, and the datum is in the correct format, we're good. - // This lets us upgrade the order contract, or add other types of orders over time. - expect Some(datum) = datum_of(datums, order) - expect datum: OrderDatum = datum - let OrderDatum { pool_ident, destination, max_protocol_fee, details, .. } = - datum - // Make sure we're allowed to process this order (i.e. 
if the user specified a specific pool, we have to honor that) - expect validate_pool_id(pool_ident, this_pool_ident) - // And finally, process this one individual order and compute the next state - // Note that we get back next_orders here, which is needed if we process a donation that has no change UTXO - let - new_a, - new_b, - new_lp, - next_orders, - <- - process_withdrawal_order( - pool_policy_a, - pool_asset_name_a, - pool_quantity_a, - pool_policy_b, - pool_asset_name_b, - pool_quantity_b, - pool_policy_lp, - pool_asset_name_lp, - pool_quantity_lp, - order, - details, - max_protocol_fee, - destination, - amortized_base_fee, - simple_fee, - outputs, - ) - - // And recursively process the rest of the orders - process_withdrawal_orders( - this_pool_ident, - datums, - pool_policy_a, - pool_asset_name_a, - new_a, - pool_policy_b, - pool_asset_name_b, - new_b, - pool_policy_lp, - pool_asset_name_lp, - new_lp, - rest, - // This advances to the next element from input_order - amortized_base_fee, - simple_fee, - idx + 1, - // This is the "previous index" within the input list; TODO: I'm not actually sure why we add 1? - all_inputs, - // See the notes above about all_inputs vs remaining_inputs - rest_of_input_list, - next_orders, - next_uniqueness_flag, - continuation, - ) - } - } -} - -/// This is responsible for checking that the minting value on the transaction is valid -/// based on the pool state, the policy ID, and the initial incoming datum. 
-pub fn minted_correct_pool_tokens( - pool_policy_id: PolicyId, - mint: Value, - identifier: Ident, - circulating_lp: Int, - quantity_lp: Int, -) -> Bool { - // Unwrap the silly MintedValue special type - // Note also we only look at the tokens with this policyID - // so that we can still mint other tokens - let minted_tokens = assets.tokens(mint, pool_policy_id) - - // If the initial datum has the same circulating LP as the outcome, then we expect no minted tokens - // Otherwise, the minted tokens should be exactly the pool LP tokens and nothing else - // TODO: confirm that the "minting 0 ada" problem doesn't apply here; we have real-world transactions, so I doubt it does, but I want to confirm. - // TODO: This should allow minting of other tokens, from other policy IDs, perhaps, for composibility? - if circulating_lp == quantity_lp { - dict.is_empty(minted_tokens) - } else { - dict.to_pairs(minted_tokens) == [ - Pair(pool_lp_name(identifier), quantity_lp - circulating_lp), - ] - } -} diff --git a/lib/calculation/sub_conditions/trading_hours/process.ak b/lib/calculation/sub_conditions/trading_hours/process.ak deleted file mode 100644 index 116bae2..0000000 --- a/lib/calculation/sub_conditions/trading_hours/process.ak +++ /dev/null @@ -1,42 +0,0 @@ -use aiken/interval.{Finite, Interval, IntervalBound} -use calculation/shared.{millis_per_day} -use cardano/transaction.{ValidityRange} - -pub fn scoop_trading_hours( - validity_range: ValidityRange, - open_time: Int, - close_time: Int, -) -> Bool { - expect Interval { - lower_bound: IntervalBound { bound_type: Finite(low_val), .. }, - upper_bound: IntervalBound { bound_type: Finite(high_val), .. 
}, - } = validity_range - let low_val_d = low_val % millis_per_day - let high_val_d = high_val % millis_per_day - let validity_less_than_1_day = high_val - low_val < millis_per_day - and { - validity_less_than_1_day, - time_between_start_finish(low_val_d, open_time, close_time), - time_between_start_finish(high_val_d, open_time, close_time), - } -} - -fn time_between_start_finish(time: Int, start: Int, finish: Int) -> Bool { - if start < finish { - and { - time > start, - time < finish, - } - } else { - or { - and { - time < start, - time < finish, - }, - and { - time > start, - time > finish, - }, - } - } -} diff --git a/lib/calculation/sub_conditions/trading_hours/verify_datum.ak b/lib/calculation/sub_conditions/trading_hours/verify_datum.ak deleted file mode 100644 index 325f319..0000000 --- a/lib/calculation/sub_conditions/trading_hours/verify_datum.ak +++ /dev/null @@ -1,10 +0,0 @@ -use calculation/shared.{millis_per_day} - -pub fn trading_hours_verify_datum(open_time: Int, close_time: Int) -> Bool { - and { - open_time >= 0, - open_time < millis_per_day, - close_time >= 0, - close_time < millis_per_day, - } -} diff --git a/lib/tests/examples/ex_pool.ak b/lib/tests/examples/ex_pool.ak index 1f39bcf..dd4bb6d 100644 --- a/lib/tests/examples/ex_pool.ak +++ b/lib/tests/examples/ex_pool.ak @@ -20,6 +20,8 @@ fn mk_pool_datum() -> PoolDatum { fee_manager: None, market_open: 100, protocol_fees: 10000000, + condition: None, + condition_datum: None, } } diff --git a/lib/types/condition_pool.ak b/lib/types/condition_pool.ak deleted file mode 100644 index f791235..0000000 --- a/lib/types/condition_pool.ak +++ /dev/null @@ -1,109 +0,0 @@ -use aiken/collection/list -use aiken/crypto.{ScriptHash} -use cardano/transaction.{InlineDatum, Output} -use shared.{AssetClass, Ident} -use sundae/multisig -use types/order.{SignedStrategyExecution} - -/// The current state of a AMM liquidity pool at a UTXO. -pub type ConditionPoolDatum { - /// the unique identifier of the pool. 
Produced by hashing one of the input UTXOs used to produce the pool - /// to ensure uniqueness. - identifier: Ident, - /// The two asset IDs that this pool can exchange, in alphabetical order - /// Used to validate that the assets being swapped are indeed the intended assets - assets: (AssetClass, AssetClass), - /// The total number of LP tokens in circulation - /// Maintains the following two invariants on each deposit or withdrawal: - /// - circulating_lp is always equal to the number of LP tokens that have been minted and are in circulation - /// - A users LP tokens (or burned LP tokens), as a percentage of the circulating LP tokens, represent the percentage of assets they just deposited or withdrew. - circulating_lp: Int, - // An optional multisig condition under which the protocol fees can be updated - fee_manager: Option, - /// The UNIX millisecond timestamp at which trading against the pool should be allowed - /// TODO: deposits and arguably withdrawals should be processed before the market open - market_open: Int, - /// The amount of ADA on the UTXO that is set aside by collecting protocol fees - /// This should be increased on each scoop to represent collecting fees; deducted from the reserve amount (if one of the tokens in the pair is ADA) - /// to calculate the swap amounts, and decreased when some amount is withdrawn. - /// Note that this also allows us to conveniently sidestep minUTXO woes, because the creator of the pool can set the initial protocol fees to whatever minUTXO is needed - /// and withdrawals never have to be for the full amount. - /// TODO: should we add a field to the settings object to set a minimum initial protocol_fees on pool mint? 
- protocol_fees: Int, - /// The condition script that needs to be present in the withdrawals - condition: ScriptHash, - /// If the condition needs extra data it can be stored in this field - condition_datum: Data, -} - -/// A pool UTXO can be spent for two purposes: -pub type ConditionPoolRedeemer { - /// Execute a scoop, a batch of transactions all processed at once - ConditionPoolScoop { - /// The index in the transaction extra_signatories that represents the "scooper" signature - /// This lets us quickly skip to the scooper if there are multiple signatures on the transaciton for some reason - /// And this is safe to do, because at the end of the day the value pointed at has to be in the list of authorized scoopers anyway - signatory_index: Int, - /// The index in the list of authorized scoopers on the settings datum of the scooper processing the order - /// This is so that we can quickly skip to the correct scooper in the list of authorized scoopers - /// And this is safe to do, because at the end of the day, the pointed at scooper must have signed the transaction - scooper_index: Int, - /// The order to process the transaction inputs in, and optionally the signed strategy execution to execute for strategy orders - /// This is because the cardano ledger re-orders transaction inputs when building the script context; so this could end up processing - /// users orders out of order, and open the protocol to sandwich attacks and line-cutting. - /// Much of the complexity of the protocol comes from ensuring this list is processed both efficiently and safely. 
- input_order: List<(Int, Option, Int)>, - /// A withdrawal only scoop does should always be allowed, no matter the condition - withdrawal_only: Bool, - } - /// Withdraw the earned protocol fees into the treasury, or update the pool - /// fees - ConditionManage -} - -/// We use the pool mint script for two different purposes -pub type ConditionPoolMintRedeemer { - /// to mint LP tokens for the user for a specific pool (by identifier) - ConditionMintLP { identifier: Ident } - /// to mint the actual pool itself - ConditionCreatePool { - /// The assets to create the pool with; - /// provided so we can enforce that they are paid to the pool output, and the pool datum is constructed correctly - assets: (AssetClass, AssetClass), - /// The index in the outputs that corresponds to the pool output - /// This is done so we can efficiently skip to the correct output - /// This is safe because we validate that the datum, value, and pool token are paid into this UTXO - pool_output: Int, - /// The index in the outputs that the corresponding CIP-68 metadata token is paid to - /// Note that the tokens are structured as such: - /// - A CIP-68 (222) token, indicating an NFT, held by the pool script address - /// - A CIP-68 (333) token, indicating a fungible token, paid anywhere the user wants; this is the LP token - /// - A CIP-68 (111) token; Whichever UTXO holds this token can set a datum that determines on-chain metadata like decimals, name, and image - /// That token is given to a "metadata administrator" who can set the appropriate metadata, and update it if needed. 
- /// This is done so we can efficiently skip to the correct metadata output - /// This is safe because we validate that the token is paid to the metadata admin - metadata_output: Int, - } - /// to burn the pool NFT (when permitted by the spending validator) - ConditionBurnPool { identifier: Ident } -} - -/// Manage settings about a pool (used against the pool manage script) -pub type ConditionManageRedeemer { - // Withdraw some subset of the fees in the pool, paid into the treasury - ConditionWithdrawFees { amount: Int, treasury_output: Int, pool_input: Int } - // Update the percentage fee the pool charges - ConditionUpdateCondition { pool_input: Int } -} - -pub fn find_pool_output(outputs: List) -> (Output, ConditionPoolDatum) { - // Find the pool output; we can assume the pool output is the first output, because: - // - The ledger doesn't reorder outputs, just inputs - // - We check that the address is correct, so if the first output was to a different contract, we would fail - // - We check that the datum is the correct type, meaning we can't construct an invalid pool output - // - Later, we check that the pool output has the correct value, meaning it *must* contain the pool token, so we can't pay to the pool script multiple times - expect Some(pool_output) = list.head(outputs) - expect InlineDatum(output_datum) = pool_output.datum - expect output_datum: ConditionPoolDatum = output_datum - (pool_output, output_datum) -} diff --git a/lib/types/conditions/default.ak b/lib/types/conditions/default.ak deleted file mode 100644 index 3a69350..0000000 --- a/lib/types/conditions/default.ak +++ /dev/null @@ -1,6 +0,0 @@ -pub type DefaultDatum { - /// The basis points to charge on each trade for bid (A -> B) and ask (B -> A) orders - /// For example, a 1% fee would be represented as 100 (out of 10,000), and a 0.3% fee would be represented as 30 - bid_fees_per_10_thousand: Int, - ask_fees_per_10_thousand: Int, -} diff --git a/lib/types/conditions/default_trading_hours.ak 
b/lib/types/conditions/default_trading_hours.ak deleted file mode 100644 index 41f1254..0000000 --- a/lib/types/conditions/default_trading_hours.ak +++ /dev/null @@ -1,8 +0,0 @@ -pub type DefaultTradingHoursDatum { - /// The basis points to charge on each trade for bid (A -> B) and ask (B -> A) orders - /// For example, a 1% fee would be represented as 100 (out of 10,000), and a 0.3% fee would be represented as 30 - bid_fees_per_10_thousand: Int, - ask_fees_per_10_thousand: Int, - open_time: Int, - close_time: Int, -} diff --git a/lib/types/conditions/permissioned.ak b/lib/types/conditions/permissioned.ak new file mode 100644 index 0000000..ebee31b --- /dev/null +++ b/lib/types/conditions/permissioned.ak @@ -0,0 +1,27 @@ +use aiken/crypto.{ + Blake2b_256, Hash, Signature, VerificationKey, VerificationKeyHash, +} +use cardano/address.{Address} +use cardano/transaction.{ValidityRange} +use shared.{Ident} + +pub type ComplianceToken { + // The users DID Identifier + did: Ident, + //The users public key + user_key: VerificationKey, + //The destination address + destination: Address, + //Blake2b-256 hash of the cbor serialized details from the order + order_hash: Hash, + //A valid range + validity_range: ValidityRange, + //The public key of the oracle + oracle_key: VerificationKey, + //A signature from the compliance oracle for fields 1-6 + oracle_signature: Signature, +} + +pub type PermissionedDatum { + whitelisted_oracles: List, +} diff --git a/lib/types/conditions/trading_hours.ak b/lib/types/conditions/trading_hours.ak new file mode 100644 index 0000000..f9dba7f --- /dev/null +++ b/lib/types/conditions/trading_hours.ak @@ -0,0 +1,4 @@ +pub type TradingHoursDatum { + open_time: Int, + close_time: Int, +} diff --git a/lib/types/pool.ak b/lib/types/pool.ak index 82e797b..c7ec240 100644 --- a/lib/types/pool.ak +++ b/lib/types/pool.ak @@ -1,3 +1,4 @@ +use aiken/crypto.{ScriptHash} use shared.{AssetClass, Ident} use sundae/multisig use 
types/order.{SignedStrategyExecution} @@ -31,6 +32,8 @@ pub type PoolDatum { /// and withdrawals never have to be for the full amount. /// TODO: should we add a field to the settings object to set a minimum initial protocol_fees on pool mint? protocol_fees: Int, + condition: Option, + condition_datum: Option, } /// A pool UTXO can be spent for two purposes: diff --git a/validators/condition_pool.ak b/validators/condition_pool.ak deleted file mode 100644 index 80e4eb0..0000000 --- a/validators/condition_pool.ak +++ /dev/null @@ -1,860 +0,0 @@ -use aiken/collection/list -use aiken/collection/pairs -use aiken/crypto.{ScriptHash} -use aiken/interval -use aiken/primitive/bytearray -use calculation/sub_conditions/shared.{ - condition_pool_input_to_state, minted_correct_pool_tokens, - process_withdrawal_orders, -} as sub_conditions_shared -use cardano/address.{Address, Inline, Script} -use cardano/assets.{AssetName, PolicyId, Value, ada_policy_id} -use cardano/script_context.{ScriptContext} -use cardano/transaction.{ - InlineDatum, Input, Output, OutputReference, Transaction, Withdraw, -} -use shared.{ - AssetClass, Ident, count_orders, own_input_index, pool_nft_name, spent_output, -} -use sundae/multisig -use types/condition_pool.{ - ConditionBurnPool, ConditionCreatePool, ConditionManage, - ConditionManageRedeemer, ConditionMintLP, ConditionPoolDatum, - ConditionPoolMintRedeemer, ConditionPoolRedeemer, ConditionPoolScoop, - ConditionUpdateCondition, ConditionWithdrawFees, find_pool_output, -} as types_pool -use types/settings.{SettingsDatum, find_settings_datum} - -/// The core / base "pooled AMM" script for the SundaeSwap v3 protocol -/// -/// Parameterized by the Settings policy ID, which makes the script unique, as well as lets us validate / read global settings. 
-/// -/// This script is responsible for: -/// - Guarding access to the pools assets on behalf of the depositors -/// - Enabling and executing a batch of orders against those assets, implementing a standard 'AMM' swap protocol -/// - Serving as a minting policy, minting the Pool NFT and LP tokens -/// - Accumulating protocol fees on behalf of the protocol -/// - Allowing protocol rewards to be withdrawn to a DAO treasury -/// -/// It does so by allowing a permissioned set of entities to "scoop" a batch of orders, and ensuring that each one -/// pays out to the appropriate destination. -/// -/// This set of people is permissioned to prevent classes of "sandwich" attacks, wherein a malicious actor could -/// execute and match orders in their *own* favor, rather than the users favor. -/// -/// Also of particular sensitivity is the optimizations applied; To achieve high-throughput, low-cost DeFi, executing -/// each order independently suffers from some amount of overhead. If, for example, the execution costs are split into: -/// A - The costs that need to be run in each transaction, regardless of what is executed -/// B - The costs that need to be executed for each order -/// C - The costs exclusive to batching, such as sorting the orders -/// -/// It is likely, then, that protocols converge on the per-order cost of batching being lower than un-batched variants, i.e. 
-/// -/// A + B*n + C < (A + B) * n -validator condition_pool( - manage_stake_script_hash: ScriptHash, - settings_policy_id: PolicyId, -) { - spend( - datum: Option, - redeemer: ConditionPoolRedeemer, - out_ref: OutputReference, - transaction: Transaction, - ) { - expect Some(datum) = datum - // First, we destructure the transaction right upfront, because field access is O(n), - // and we want access to these fields with just a single pass over the transaction - // This will be a common pattern throughout the scripts - // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) - let Transaction { - inputs, - outputs, - reference_inputs, - mint, - datums, - extra_signatories, - validity_range, - withdrawals, - .. - } = transaction - - // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction - let pool_input = spent_output(transaction, out_ref) - - // And pattern match to get the pool script hash; in particular, this can be used to find the pool output, - // *and* to know the policy ID of pool tokens, because this is a dual spending/minting validator. - expect Script(pool_script_hash) = pool_input.address.payment_credential - - // The protocol configures many global settings via a "settings" UTXO, updatable by certain administrators - // This is included as a reference input, so we have a utility to check the reference inputs for the settings NFT - // Note: it's important to check for the NFT, because checking just for the address would let someone pay random funds to the settings address. 
- let settings_datum = - find_settings_datum(reference_inputs, settings_policy_id) - - // Then, there are two different actions that can be taken against a pool: - // - Scooping a batch of orders - // - Withdrawing protocol fees to the treasury - when redeemer is { - // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed - ConditionPoolScoop { - signatory_index, - scooper_index, - input_order, - withdrawal_only, - } -> { - // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop - let ( - Output { address: pool_output_address, value: pool_output_value, .. }, - ConditionPoolDatum { - identifier: actual_identifier, - circulating_lp: actual_circulating_lp, - protocol_fees: actual_protocol_fees, - assets: actual_assets, - market_open: actual_market_open, - condition: actual_condition, - condition_datum: actual_condition_datum, - .. - }, - ) = find_pool_output(outputs) - - // Ensure that the pool output is to the same payment credential; This is critical, because it ensures that the pool NFT - // or liquidity aren't paid to some other script in control of an attacker. - // Note that we check the stake credential is correctly updated (or not) in the various redeemer cases below. - // We also check that the pool output has the correct output, which ensures it contains the pool NFT, - // meaning this can't just be a "token output" with the correct payment credential, but everything paid elsewhere. - expect - pool_output_address.payment_credential == Script(pool_script_hash) - - when withdrawal_only is { - True -> { - // Deconstruct the settings datum with the fields we need for a scoop - let SettingsDatum { authorized_scoopers, base_fee, simple_fee, .. 
} = - settings_datum - - // Do a simple scan over the orders to count up the number of orders we'll be processing - // This is unavoidable, because it's part of making sure that the provided redeemer set isn't - // excluding orders - let real_order_count = count_orders(inputs) - // Calculate the portion of the fee that each order will be - // entitled to pay; - // Because the division is rounded down, we add real_order_count and subtact 1 - // to ensure that we take the ceiling instead, and round in the protocols favor. - let amortized_base_fee = - ( base_fee + real_order_count - 1 ) / real_order_count - // Make sure it's not negative, for example if base_fee was negative - expect amortized_base_fee >= 0 - - expect InlineDatum(pool_input_datum) = pool_input.datum - expect ConditionPoolDatum { - assets, - protocol_fees, - identifier, - circulating_lp, - .. - } = pool_input_datum - - // Construct the initial pool state from the datum and the locked values - // This intermediate state will be updated as we process each order, allowing us to do a scan over each input - // In particular, it calculates what fees we should be charging (because of the linear fee decay) and the actual tradable reserves - // (excluding protocol fees, which shouldn't factor into the price) - // Note: this abomination is brought to you by the fact that constructing and destructuring structs - // is expensive, so it's cheaper to have **massive** lambdas / continuations - let - pool_policy_a, - pool_asset_name_a, - pool_quantity_a, - pool_policy_b, - pool_asset_name_b, - pool_quantity_b, - pool_policy_lp, - pool_asset_name_lp, - pool_quantity_lp, - initial_protocol_fees, - <- - condition_pool_input_to_state( - pool_script_hash, - assets, - protocol_fees, - identifier, - circulating_lp, - pool_input, - ) - - // Process the orders in order, and decide the final pool state we should see - // This also counts up the number of simple / strategy orders, which let us compute the effective protocol fee. 
- // for optimization purposes, there are quite a lot of parameters, and their interaction is quite subtle - let - final_a, - final_b, - final_lp, - <- - process_withdrawal_orders( - actual_identifier, - // The pool identifier, so we can check that each order is for this pool - datums, - // The datums, so we can look up the datum of each order (which may be inline, but may also be in the datums dict) - // The initial pool state, such as the reserves and circulating LP - pool_policy_a, - pool_asset_name_a, - pool_quantity_a, - pool_policy_b, - pool_asset_name_b, - pool_quantity_b, - pool_policy_lp, - pool_asset_name_lp, - pool_quantity_lp, - input_order, - // The input ordering specified by the scooper - amortized_base_fee, - // The base fee split by the number of orders, paid for each user - simple_fee, - // The fee to charge for each "simple" order (swap, deposit, withdrawal, etc.) - 0, - // The previous index we processed, intitially 0; this lets us detect if we need to "restart" the input list - inputs, - // *All* inputs, so we can start over at the beginning of the list if we want - inputs, - // *Remaining* inputs, so we can advance through the list one by one so long as the orders are in order - list.drop(outputs, 1), - // The list of outputs we should be comparing orders against - 0, - ) - - // A uniqueness bit-flag, to detect which orders have already been processed; see lib/calculation/InputSorting.md - // The accumulated count of "strategy" orders, see line above. - // We need to make sure that the number of orders matches the amount that we processed - // so the scooper doesn't "under-report" the orders and steal the funds on the order - // Is this needed for withdrawal only? 
- // expect simple_count + strategy_count == real_order_count - // We calculate the expected total collected protocol fee - // We multiply amortized_base_fee, which everyone paid, by the number of orders - // and then the respective fees for each simple order and strategy order - let expected_fees_collected = - amortized_base_fee * real_order_count + real_order_count * simple_fee - - // Make sure we actually increased the protocol fee by exactly this amount - expect - actual_protocol_fees == initial_protocol_fees + expected_fees_collected - - // The pool should have all of the scooper fees, and the quantity of each token of the outcome - // Note that initializing the state with `-transaction.fee` means this gets subracted out of the protocol fees - // TODO: do we need to account for this? it seems to have gotten lost in some changes. - expect - minted_correct_pool_tokens( - pool_script_hash, - mint, - identifier, - circulating_lp, - final_lp, - ) - - // Check that the scooper is authorized; the protocol can allow *any* scoopers, or limit it to a set of actors - // It's safe to use values provided in the redeemer to efficiently skip to the expected scooper / expected signature - // because at the end of the day, we just care that the scooper has signed the transaction. If the scooper provides - // anything but the correct indexes, it'll just fail the transaction. - expect - when authorized_scoopers is { - Some(authorized_scoopers) -> { - // OPTIMIZATION: skip 10 entries at a time - // OPTIMIZATION: assume scooper is first extra_signatory? 
have to assume there will only ever be one extra_signatory - expect Some(scooper_sig) = - list.at(extra_signatories, signatory_index) - expect Some(scooper) = - list.at(authorized_scoopers, scooper_index) - // must be an authorized scooper - scooper_sig == scooper - } - _ -> True - } - - // the market must have opened; this allows projects to pre-create their pool, potentially across multiple protocols, and allows - // people to open orders ahead of time, and avoids things like sniping bots, etc. - // TODO: should we *only* prevent swaps / withdrawals? would it be ok to allow deposits? - // TODO: should we have a "blackout period", where withdrawals are prevented, similar to IPOs? - expect interval.is_entirely_after(validity_range, datum.market_open) - - // We also check that the pool output has the right value (as mentioned above) - // In particular, the pool must have: - // - the pool NFT - // - the correctly adjusted assets from swapping, deposits, withdrawals, etc. - // - an additional amount of ADA corresponding to the protocol fees - // - NOTHING ELSE; This is important because someone could add tons of junk tokens and increase the execution units, potentially even freezing the UTXO - expect - has_expected_pool_value( - pool_script_hash, - actual_identifier, - pool_output_value, - pool_policy_a, - pool_asset_name_a, - final_a, - pool_policy_b, - pool_asset_name_b, - final_b, - final_lp, - actual_protocol_fees, - ) - // Now, we check various things about the output datum to ensure they're each correct. - // Check that the datum correctly records the final circulating LP, accounting for any deposits and withdrawals - // In particular, this is important because that circulating supply is exaclty what determines the users ownership of assets in the pool - // If this gets out of sync with what we've actually minted, then users will be able to either redeem assets they aren't entitled to, - // or be unable to access funds they are entitled to. 
- expect actual_circulating_lp == final_lp - - // Make sure the protocol fees have been correctly updated - expect - actual_protocol_fees == initial_protocol_fees + expected_fees_collected - - // And make sure each of these fields is unchanged - and { - datum.identifier == actual_identifier, - datum.assets == actual_assets, - datum.condition == actual_condition, - datum.condition_datum == actual_condition_datum, - datum.market_open == actual_market_open, - // Finally, make sure we don't change the stake credential; this can only be done when withdrawing fees, by the treasury administrator - pool_input.address.stake_credential == pool_output_address.stake_credential, - } - } - False -> { - expect Some(..) = - pairs.get_first(withdrawals, address.Script(actual_condition)) - True - } - } - } - ConditionManage -> - // There must be a redeemer for a (stake-script) withdrawal against the manage stake script, - // and the redeemer must correctly point at the pool UTXO - pairs.foldl( - transaction.redeemers, - False, - fn(script_purpose, redeemer, acc) { - when script_purpose is { - Withdraw(Script(script)) -> { - let is_valid_manage_script_invoke = - if script == manage_stake_script_hash { - expect redeemer: ConditionManageRedeemer = redeemer - let redeemer_pool_input = - when redeemer is { - ConditionUpdateCondition { pool_input } -> pool_input - ConditionWithdrawFees { pool_input, .. 
} -> pool_input - } - let input_index = own_input_index(transaction, out_ref) - // Manage redeemer must have the correct index of this pool input - input_index == redeemer_pool_input - } else { - False - } - acc || is_valid_manage_script_invoke - } - _ -> acc - } - }, - ) - } - } - - mint( - r: ConditionPoolMintRedeemer, - own_policy_id: PolicyId, - transaction: Transaction, - ) { - // When minting, we can be doing one of two things: minting the pool itself, or minting the LP token - when r is { - // For creating a new pool, one of our design objectives was to avoid requiring interaction with any global - // "factory", as in v1; this created a lot of contention, and didn't serve it's original goal of ensuring that - // the (pair, fee) was unique. - ConditionCreatePool(assets, pool_output_ix, metadata_output_ix) -> { - // And grab the pool output - expect Some(pool_output) = list.at(transaction.outputs, pool_output_ix) - - // Check that the pool datum is inline, because a datum hash could brick this pool - expect InlineDatum(d) = pool_output.datum - expect pool_output_datum: ConditionPoolDatum = d - - // The assets on the pool must be sorted - // This is partially to make off-chain indexing easier, and as an on-chain optimization, - // so we can always assume that, if ADA is one of the pairs, it's the first asset. - // This also prevents creating an X/X pool, since we require the assets to be stricly less than - // Note, it is a NON GOAL to enforce that any specific (pair, fee) combination is unique; while aggregating liquidity - // can be beneficial for capital efficiency, in practice that will happen anyway, and smaller pools can be useful as - // a pressure release valve. Additionally, we have bigger plans around interesting order splitting modes of operation that - // make that less important. 
- let (asset_a, asset_b) = assets - let coin_pair_ordering_is_canonical = - compare_asset_class(asset_a, asset_b) == Less - - // We use the first input ref spent in this transaction to uniquely identify the pool - // This is a trick widely used to produce NFTs, and ensures that we can generate a hash that is unique to this pool - expect Some(first_input) = list.at(transaction.inputs, 0) - let first_input_index = - int_to_ident(first_input.output_reference.output_index) - - // Calculate the identifier by hashing the txRef of the first input - // this makes the pool unique - // With CIP-68 identifiers (which take up 4 bytes), we have 28 bytes of space; - // ideally we'd use blake2b-224, which is exactly a 224 byte hash, but that won't be on-chain until plutus v3; - // So, we use blake2b-256 and drop a few bytes. Now, does this compromise the uniqueness guarantees? - // It is strongly believed by the cryptographer community that blake2b-256 provides indifferentiability from random oracles - // [https://eprint.iacr.org/2013/322.pdf] - // which allows us to treat the 256 bits as uniformly sampled. This means that there are 2^256 equally likely values, and the - // chance of collision is 1 in 2^256, unfathomably small. - // Even when used in bulk, the birthday paradox means that you would need to generate 2^128 different hashes before you had even a 50% chance of collision. - // This is known as "128 bit security", and is considered a gold standard of cryptography. - // So, how does dropping 4 bytes (32 bits) from this hash impact that analysis? - // There are still 2^224 possible values, meaning it offers 2^112 bit security, still considered "unbreakable". - // As a comparison, at the time of this writing the entire bitcoin network is performing 500 exa-hashes per second, or - // 500 * 10^18 hashes per second. 
If the entire bitcoin network was directed towards trying to generate a collision on pool ident, it would take - // Roughly 10^13 seconds, or 317,098 years to have a 50% chance of collision. Not to mention the cost of storing and comparing to detect if you'd performed - // a collision, or the cost of grinding UTXOs on cardano to give fresh inputs to the pool script. - let new_pool_id = - first_input.output_reference.transaction_id - |> bytearray.concat(#"23") // '#' character - |> bytearray.concat(first_input_index) - |> crypto.blake2b_256 - |> bytearray.drop(4) - - // With that pool identifier, we can attach 3 different CIP-68 pool identifiers: - // - (100) indicates a tracking token, for the purposes of on-chain metadata read by off-chain infra, so we can provide a nice experience to the users wallet for example - // - (222) indicates the pool NFT, which uniquely identifies the UTXO that holds pool assets - // - (333) indicates the fungible LP token, which represents a percentage ownership of the pool - let (new_pool_ref_token, new_pool_nft_token, new_pool_lp_token) = - shared.pool_token_names(new_pool_id) - - // Then, find the settings datum, so we can ensure the reference token is paid to the metadata admin - let reference_inputs = transaction.reference_inputs - let settings_datum = - find_settings_datum(reference_inputs, settings_policy_id) - - // Grab the initial reserves of each token by looking at what's paid to the UTXO - let coin_a_amt = - assets.quantity_of(pool_output.value, asset_a.1st, asset_a.2nd) - let coin_b_amt = - assets.quantity_of(pool_output.value, asset_b.1st, asset_b.2nd) - - // Ensure that the pool pays the pool creation fee, if any, by ensuring that the initial protocol_fees value is greater than or equal to the fee - expect - pool_output_datum.protocol_fees >= settings_datum.pool_creation_fee - - // Only ada has a null policy id. 
If coin A is ada, subtract the initial protocol_fees setting from the coin A amount - // rider from the output to get the true amount in the pool. - let coin_a_amt_sans_protocol_fees = - if bytearray.is_empty(asset_a.1st) { - coin_a_amt - pool_output_datum.protocol_fees - } else { - coin_a_amt - } - - // Check that the quantity of LP tokens is correct; In particular, we adopt Uniswaps convention of - // using the sqrt of the product of the two values for the initial number of LP tokens to mint.. - // This helps minimize precision loss: it gives decent initial liquidity values for a range of - // sizes of pools, such that an individual LP token is granular enough for depositing and withdrawing for most users. - // In particular, though, we don't calculate the sqrt here, which is an expensive function; we instead verify that the - // amount minted is valid by checking that it squares to the correct product - let initial_lq = pool_output_datum.circulating_lp - expect - shared.is_sqrt(coin_a_amt_sans_protocol_fees * coin_b_amt, initial_lq) - - // And check that we mint the correct tokens, and nothing else. 
- let expected_mint = - shared.to_value((own_policy_id, new_pool_ref_token, 1)) - |> assets.merge( - shared.to_value((own_policy_id, new_pool_nft_token, 1)), - ) - |> assets.merge( - shared.to_value((own_policy_id, new_pool_lp_token, initial_lq)), - ) - let mint_is_correct = transaction.mint == expected_mint - - // Confirm that the correct funds (asset A, asset B, the correct amount of ADA, and the pool NFT) get paid to the pool output - let funds_spent_to_pool = - has_expected_pool_value( - own_policy_id, - new_pool_id, - pool_output.value, - asset_a.1st, - asset_a.2nd, - coin_a_amt_sans_protocol_fees, - asset_b.1st, - asset_b.2nd, - coin_b_amt, - initial_lq, - pool_output_datum.protocol_fees, - ) - - // Make sure we send the pool metadata token to the metadata admin - // We use an index from the redeemer to skip to the right output, in case there are multiple outputs to the metadata admin - // This is safe to do for the usual reasons: if they point at a UTXO without the ref token, the transaction will fail. 
- expect Some(metadata_output) = - list.at(transaction.outputs, metadata_output_ix) - expect metadata_output.address == settings_datum.metadata_admin - expect - assets.quantity_of( - metadata_output.value, - own_policy_id, - new_pool_ref_token, - ) == 1 - - // We also check that the datum on the metadata output is void; It would be complex and in-flexible to enforce any particular structure on this, so we - // instead leave it to the metadata admin to spend the output and provide it the correct datum; We also don't want to leave it unspecified, because - // 1) the metadata admin might actually be a script address, in which case having no datum will permanently lock the metadata - // 2) the pool minter might include malicious metadata, such as an icon pointing at hardcore porn; until the metadata admin spent it, this would appear in users wallets, - // and potentially even on access UIs for the Sundae protocol - expect metadata_output.datum == InlineDatum(Void) - - expect Some(..) = - pairs.get_first( - transaction.withdrawals, - address.Script(pool_output_datum.condition), - ) - - // And check that the datum is initialized correctly; This is part of why we have a minting policy handling this, - // as it allows us to authenticate the providence of the datum. 
- // A datum is valid so long as - // - the pool identifier is set correctly - // - the assets is set correctly - // - the initial circulating supply is set correctly - // - the initial and final fees per 10,000 are both non-negative (>= 0%) - // - the intitial and final fees per 10,000 are both less than or equal to 10000 (<= 100%) - let pool_output_datum_correct = and { - pool_output_datum.identifier == new_pool_id, - pool_output_datum.assets == (asset_a, asset_b), - pool_output_datum.circulating_lp == initial_lq, - } - - // Make sure that the pool output is paid into own_policy_id (the pool script, remember this is a multivalidator) - // and that one of the valid staking addresses is attached - expect pool_output.address.payment_credential == Script(own_policy_id) - expect - list.any( - settings_datum.authorized_staking_keys, - fn(a) { pool_output.address.stake_credential == Some(Inline(a)) }, - ) - - // And then check each of the conditions above as the condition for minting - and { - coin_pair_ordering_is_canonical, - mint_is_correct, - funds_spent_to_pool, - pool_output_datum_correct, - } - } - // When minting an LP token, we just need to make sure the pool script is being spent, as it will enforce the correct - // name and quantity of the LP tokens. - // - // To do that, we could check for the pool NFT on the inputs, but this is expensive, especially if the pool input ends up being one of the last. - // So instead we check that the pool NFT is in the first output (this is safe to assume because it's unique, and if it's in any other output it will fail) - // and that we're not minting the pool token (i.e. 
someone could "pretend" to mint LP tokens, but also mint the pool token to make it look like a scoop) - // - // So, lets enumerate the possible cases: - // - We use the CreatePool redeemer; this checks that *only* the correct pool token and correct number of LP tokens are minted - // - We use the MintLP redeemer; this checks that the pool token (which is unique and locked in the pool script) is in the outputs, and not minted - // - the pool script checks that only the correct number of LP tokens, and nothing else under this policy ID, are minted - // And the impossible cases: - // - During CreatePool, it would be impossible to mint multiple of the same pool tokens; a different pool token; a different number of LP tokens; or a different pool's LP tokens - // - During MintLP, it would be impossible to mint the relevant pool token; thus, the pool script must run, and thus it will be impossible to mint another pool token, a different pool - // ident pool token, a different quantity of LP tokens, or a different pools LP tokens - ConditionMintLP(pool_ident) -> { - let pool_nft_name = shared.pool_nft_name(pool_ident) - expect Some(pool_output) = list.head(transaction.outputs) - and { - ( - pool_output.value - |> assets.quantity_of(own_policy_id, pool_nft_name) - ) == 1, - ( - transaction.mint - |> assets.quantity_of(own_policy_id, pool_nft_name) - ) == 0, - } - } - ConditionBurnPool(pool_ident) -> { - // Burning an asset is only possible when spending it, so if we enforce - // that the mints consist of exactly 1 burn for the specified pool NFT - // then we can defer to the pool spending validator - let pool_nft_name = shared.pool_nft_name(pool_ident) - let expected_mint = shared.to_value((own_policy_id, pool_nft_name, -1)) - transaction.mint == expected_mint - } - } - } - - else(_) { - fail - } -} - -/// Check that the UTXO contents are correct given a specific pool outcome -/// In particular, it must have the final A reserves, the final B reserves, the pool NFT, and the 
protocol fees -pub fn has_expected_pool_value( - pool_script_hash: PolicyId, - identifier: Ident, - output_value: Value, - pool_policy_a: PolicyId, - pool_asset_name_a: AssetName, - pool_quantity_a: Int, - pool_policy_b: PolicyId, - pool_asset_name_b: AssetName, - pool_quantity_b: Int, - final_lp: Int, - final_protocol_fees: Int, -) -> Bool { - // Asset A *could* be ADA; in which case there should be 3 tokens on the output - // (ADA, Asset B, and the NFT) - if pool_policy_a == ada_policy_id { - let actual = - list.foldl( - assets.flatten(output_value), - // (token count, lovelace amount, token b amount, pool nft amount) - (0, 0, 0, 0), - fn(asset, acc) { - let token_count = acc.1st + 1 - if asset.1st == pool_policy_a { - (token_count, acc.2nd + asset.3rd, acc.3rd, acc.4th) - } else if asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { - (token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th) - } else { - expect asset == (pool_script_hash, pool_nft_name(identifier), 1) - (token_count, acc.2nd, acc.3rd, acc.4th + 1) - } - }, - ) - // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token - let expected = - if final_lp == 0 { - expect pool_quantity_a == 0 - expect pool_quantity_b == 0 - (2, final_protocol_fees, 0, 1) - } else { - (3, final_protocol_fees + pool_quantity_a, pool_quantity_b, 1) - } - // Rather than constructing a value directly (which can be expensive) - // we can just compare the expected token count and amounts with a single pass over the value - expected == actual - } else { - // Asset A isn't ADA, Asset B will *never* be ADA; in this case, there should be 4 tokens on the output: - // ADA, the Pool NFT, Asset A, and Asset B - let actual = - list.foldl( - assets.flatten(output_value), - // (token count, lovelace amount, token a amount, token b amount, pool nft amount) - (0, 0, 0, 0, 0), - fn(asset, acc) { - let token_count = acc.1st + 1 - if asset.1st == ada_policy_id { - (token_count, acc.2nd + asset.3rd, acc.3rd, 
acc.4th, acc.5th) - } else if asset.1st == pool_policy_a && asset.2nd == pool_asset_name_a { - (token_count, acc.2nd, acc.3rd + asset.3rd, acc.4th, acc.5th) - } else if asset.1st == pool_policy_b && asset.2nd == pool_asset_name_b { - (token_count, acc.2nd, acc.3rd, acc.4th + asset.3rd, acc.5th) - } else { - expect asset == (pool_script_hash, pool_nft_name(identifier), 1) - (token_count, acc.2nd, acc.3rd, acc.4th, acc.5th + 1) - } - }, - ) - // If we're withdrawing the last bit of liquidity, we just have ADA and the pool token - let expected = - if final_lp == 0 { - expect pool_quantity_a == 0 - expect pool_quantity_b == 0 - (2, final_protocol_fees, 0, 0, 1) - } else { - (4, final_protocol_fees, pool_quantity_a, pool_quantity_b, 1) - } - expected == actual - } -} - -// Compare two policy IDs to determine a sort order; used in particular to enforce an ordering on the assets in the pool -fn compare_asset_class(a: AssetClass, b: AssetClass) { - let (a_policy, a_token) = a - let (b_policy, b_token) = b - when bytearray.compare(a_policy, b_policy) is { - Less -> Less - Equal -> bytearray.compare(a_token, b_token) - Greater -> Greater - } -} - -// Convert a specific integer (like a UTXO index) into a byte array, so we can construct a hashable string when minting the pool -pub fn int_to_ident(n: Int) -> Ident { - expect n < 256 - bytearray.push(#"", n) -} - -// In order to keep the script size small for the pool script, we defer some functions to a separate stake script; -// when the treasury administrator attempts to withdraw fees, or the fee manager attempts to update the pool fee, -// it only checks for this particular script hash. 
This script, using the withdraw 0 trick, then checks the correct invariants -validator manage(settings_policy_id: PolicyId) { - else(ctx: ScriptContext) { - expect redeemer: ConditionManageRedeemer = ctx.redeemer - let transaction = ctx.transaction - let Transaction { - inputs, - outputs, - reference_inputs, - mint, - extra_signatories, - validity_range, - withdrawals, - .. - } = transaction - - let settings_datum = - find_settings_datum(reference_inputs, settings_policy_id) - - when redeemer is { - // In order to withdraw `amount` fees into `treasury_output` utxo, looking at `pool_input` - ConditionWithdrawFees { amount, treasury_output, pool_input } -> { - // Find the pool input; note that we don't look for the pool NFT here, because if someone - // spends with an unauthenticated UTXO, it will fail the spend script; and if someone - // spends with a different script address, this script can't do anything fishy, - // just enforces some things about the outputs - // We also can't pull this out of the when, because we don't have the pool_input index yet - expect Some(pool_input) = list.at(inputs, pool_input) - let pool_input = pool_input.output - expect InlineDatum(datum) = pool_input.datum - expect datum: ConditionPoolDatum = datum - expect Script(pool_script_hash) = pool_input.address.payment_credential - let ConditionPoolDatum { - circulating_lp: initial_circulating_lp, - protocol_fees: initial_protocol_fees, - .. - } = datum - // Make sure we withdraw *only* up to what we've earned - // We allow less than, so that you can leave some behind for the minUTXO cost, or continuing to earn staking rewards, etc. 
- expect amount <= initial_protocol_fees - - // Only the treasury administrator is allowed to withdraw the fees, to prevent DDOS, and because of the allowance below - expect - multisig.satisfied( - settings_datum.treasury_admin, - extra_signatories, - validity_range, - withdrawals, - ) - - // Asking the DAO to approve every single cost individually would be a small cognitive DDOS on the community - // Instead, the DAO can set an "allowance", which is a percentage of each withdrawal that is entrusted to the - // treasury administrator to pay those basic fees, such as paying the scoopers, or running incentive programs, etc. - // - // In particular, it's a percentage, to ensure that splitting up the withdrawals into multiple transactions doesn't - // allow them to game that withdrawal. - let allowance = - amount * settings_datum.treasury_allowance.1st / settings_datum.treasury_allowance.2nd - let to_treasury = amount - allowance - - // And, we must pay everything except the allowance amount to the treasury address - // We use the `treasury_output` index to skip to it quickly, rather than scanning for the output - // TODO: should we instead sum all the values at the treasury output, to allow - // paying out in multiple UTXOs for some reason? - expect Some(treasury_output) = list.at(outputs, treasury_output) - expect treasury_output.address == settings_datum.treasury_address - // We ensure that it's sent with the Void datum. This is because the treasury is likely to be a script address - // and scripts are unspendable without a datum; We also don't have any notion of what the "correct" datum would be - // so we just enforce it to void. If the output datum ever needed a specific datum, we would have to use a proxy address - // that worked with the void datum, and paid to the real treasury with the correct datum. - // TODO: should we just let the treasury admin specify the datum on the redeemer? Or include it in the settings? 
- expect treasury_output.datum == InlineDatum(Void) - // And make sure we pay at least that much in that output. It could be more, for example to donate other ADA from other sources to that address - expect assets.lovelace_of(treasury_output.value) >= to_treasury - - if initial_circulating_lp == 0 { - // If there is no liquidity, just require the treasury admin to withdraw everything - expect amount == initial_protocol_fees - // If circulating_lp is 0, all of the assets have been withdrawn, and so the UTXO will be - // ADA (for the treasury fees) and the pool NFT; so we can very cleverly check that the pool - // NFT is burned by negating the input, and stripping off the lovelace - expect - mint == assets.negate(assets.without_lovelace(pool_input.value)) - True - } else { - let ( - Output { - address: pool_output_address, - value: pool_output_value, - .. - }, - output_datum, - ) = find_pool_output(outputs) - expect - pool_output_address.payment_credential == Script(pool_script_hash) - - // As part of withdrawing, we should decrease the protocol fees by the amount we're withdrawing - // but, importantly, *nothing else*; so we construct a datum with everything from the initial datum, plus the protofol fees updated - let expected_datum = - ConditionPoolDatum { - ..datum, - protocol_fees: initial_protocol_fees - amount, - } - expect output_datum == expected_datum - - // Now, check that the pool output decreases *only* by the amount we're withdrawing, and not by fewer or greater ADA - let expected_output_value = - assets.merge(pool_input.value, assets.from_lovelace(-amount)) - expect pool_output_value == expected_output_value - expect - list.any( - settings_datum.authorized_staking_keys, - fn(a) { pool_output_address.stake_credential == Some(Inline(a)) }, - ) - True - } - } - // To update the pool fees for the pool at `pool_input`... 
- ConditionUpdateCondition { pool_input } -> { - // Find the pool input; note that we don't look for the pool NFT here, because if someone - // spends with an unauthenticated UTXO, it will fail the spend script; and if someone - // spends with a different script address, this script can't do anything fishy, - // just enforces some things about the outputs - // This is duplicated code with the other branch, but only because we don't have pool_input yet - expect Some(pool_input) = list.at(inputs, pool_input) - let pool_input = pool_input.output - expect InlineDatum(datum) = pool_input.datum - expect datum: ConditionPoolDatum = datum - // We need the pool output to check that only the fees or fee manager are updated - let ( - Output { address: pool_output_address, value: pool_output_value, .. }, - pool_output_datum, - ) = find_pool_output(outputs) - - let ConditionPoolDatum { - condition: output_condition, - fee_manager: output_fee_manager, - .. - } = pool_output_datum - - let expected_datum = - ConditionPoolDatum { - ..datum, - condition: output_condition, - fee_manager: output_fee_manager, - } - expect pool_output_datum == expected_datum - - expect Some(..) 
= - pairs.get_first( - transaction.withdrawals, - address.Script(output_condition), - ) - - // Check that the *current* fee manager approves the update - expect Some(fee_manager) = datum.fee_manager - expect - multisig.satisfied( - fee_manager, - extra_signatories, - validity_range, - withdrawals, - ) - - // And make sure we don't touch the assets on the pool input; they must be spent back into the same script - and { - pool_output_address == pool_input.address, - pool_output_value == pool_input.value, - } - } - } - } -} diff --git a/validators/conditions/default.ak b/validators/conditions/default.ak deleted file mode 100644 index 04d90b6..0000000 --- a/validators/conditions/default.ak +++ /dev/null @@ -1,141 +0,0 @@ -use aiken/collection/list -use calculation/sub_conditions/default/process.{scoop_default} -use calculation/sub_conditions/default/verify_datum.{default_verify_datum} -use cardano/address.{Credential, Script} -use cardano/assets.{PolicyId} -use cardano/transaction.{InlineDatum, Input, Output, Transaction} -use types/condition_pool.{ - ConditionManage, ConditionPoolDatum, ConditionPoolRedeemer, ConditionPoolScoop, - find_pool_output, -} -use types/conditions/default.{DefaultDatum} as default_types -use types/settings.{find_settings_datum} - -validator default(settings_policy_id: PolicyId) { - withdraw( - redeemer: ConditionPoolRedeemer, - _account: Credential, - transaction: Transaction, - ) { - // First, we destructure the transaction right upfront, because field access is O(n), - // and we want access to these fields with just a single pass over the transaction - // This will be a common pattern throughout the scripts - // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) - let Transaction { - inputs, - outputs, - reference_inputs, - mint, - datums, - extra_signatories, - validity_range, - withdrawals, - .. 
- } = transaction - - // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop - let ( - Output { address: pool_output_address, value: pool_output_value, .. }, - ConditionPoolDatum { - identifier: actual_identifier, - circulating_lp: actual_circulating_lp, - protocol_fees: actual_protocol_fees, - assets: actual_assets, - market_open: actual_market_open, - condition_datum: actual_condition_datum, - .. - }, - ) = find_pool_output(outputs) - - expect DefaultDatum { - bid_fees_per_10_thousand: actual_bid_fees_per_10_thousand, - ask_fees_per_10_thousand: actual_ask_fees_per_10_thousand, - } = actual_condition_datum - - when redeemer is { - ConditionPoolScoop { .. } | ConditionManage -> { - // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction - expect Some(Input(_, pool_input)) = - list.find( - inputs, - fn(input) { input.output.address == pool_output_address }, - ) - - // And pattern match to get the pool script hash; in particular, this can be used to find the pool output, - // *and* to know the policy ID of pool tokens, because this is a dual spending/minting validator. - expect Script(pool_script_hash) = pool_input.address.payment_credential - - expect InlineDatum(pool_input_datum) = pool_input.datum - - expect ConditionPoolDatum { - identifier, - circulating_lp, - protocol_fees, - assets, - market_open, - condition_datum, - .. - } = pool_input_datum - - expect DefaultDatum { - bid_fees_per_10_thousand, - ask_fees_per_10_thousand, - } = condition_datum - - // The protocol configures many global settings via a "settings" UTXO, updatable by certain administrators - // This is included as a reference input, so we have a utility to check the reference inputs for the settings NFT - // Note: it's important to check for the NFT, because checking just for the address would let someone pay random funds to the settings address. 
- let settings_datum = - find_settings_datum(reference_inputs, settings_policy_id) - - // Then, there are two different actions that can be taken against a pool: - // - Scooping a batch of orders - // - Withdrawing protocol fees to the treasury - when redeemer is { - // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed - ConditionPoolScoop { signatory_index, scooper_index, input_order, .. } -> - scoop_default( - settings_datum, - inputs, - pool_script_hash, - actual_ask_fees_per_10_thousand, - actual_bid_fees_per_10_thousand, - pool_input, - actual_identifier, - validity_range, - withdrawals, - datums, - input_order, - outputs, - actual_protocol_fees, - mint, - extra_signatories, - signatory_index, - scooper_index, - pool_output_value, - actual_circulating_lp, - actual_assets, - actual_market_open, - pool_output_address, - assets, - protocol_fees, - identifier, - circulating_lp, - market_open, - bid_fees_per_10_thousand, - ask_fees_per_10_thousand, - ) - ConditionManage -> - default_verify_datum( - bid_fees_per_10_thousand, - ask_fees_per_10_thousand, - ) - } - } - } - } - - else(_) { - fail - } -} diff --git a/validators/conditions/default_trading_hours.ak b/validators/conditions/default_trading_hours.ak deleted file mode 100644 index 535bc66..0000000 --- a/validators/conditions/default_trading_hours.ak +++ /dev/null @@ -1,153 +0,0 @@ -use aiken/collection/list -use calculation/sub_conditions/default/process.{scoop_default} as default_process -use calculation/sub_conditions/default/verify_datum.{default_verify_datum} as default_verify_datum -use calculation/sub_conditions/trading_hours/process.{scoop_trading_hours} as trading_hours_process -use calculation/sub_conditions/trading_hours/verify_datum.{ - trading_hours_verify_datum, -} as trading_hours_verify_datum -use cardano/address.{Credential, Script} -use cardano/assets.{PolicyId} -use cardano/transaction.{InlineDatum, Input, 
Output, Transaction} -use types/condition_pool.{ - ConditionManage, ConditionPoolDatum, ConditionPoolRedeemer, ConditionPoolScoop, - find_pool_output, -} -use types/conditions/default_trading_hours.{DefaultTradingHoursDatum} as default_types -use types/settings.{find_settings_datum} - -validator default_trading_hours(settings_policy_id: PolicyId) { - withdraw( - redeemer: ConditionPoolRedeemer, - _account: Credential, - transaction: Transaction, - ) { - // First, we destructure the transaction right upfront, because field access is O(n), - // and we want access to these fields with just a single pass over the transaction - // This will be a common pattern throughout the scripts - // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) - let Transaction { - inputs, - outputs, - reference_inputs, - mint, - datums, - extra_signatories, - validity_range, - withdrawals, - .. - } = transaction - - // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop - let ( - Output { address: pool_output_address, value: pool_output_value, .. }, - ConditionPoolDatum { - identifier: actual_identifier, - circulating_lp: actual_circulating_lp, - protocol_fees: actual_protocol_fees, - assets: actual_assets, - market_open: actual_market_open, - condition_datum: actual_condition_datum, - .. - }, - ) = find_pool_output(outputs) - - expect DefaultTradingHoursDatum { - bid_fees_per_10_thousand: actual_bid_fees_per_10_thousand, - ask_fees_per_10_thousand: actual_ask_fees_per_10_thousand, - .. - } = actual_condition_datum - - when redeemer is { - ConditionPoolScoop { .. 
} | ConditionManage -> { - // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction - expect Some(Input(_, pool_input)) = - list.find( - inputs, - fn(input) { input.output.address == pool_output_address }, - ) - - // And pattern match to get the pool script hash; in particular, this can be used to find the pool output, - // *and* to know the policy ID of pool tokens, because this is a dual spending/minting validator. - expect Script(pool_script_hash) = pool_input.address.payment_credential - - expect InlineDatum(pool_input_datum) = pool_input.datum - - expect ConditionPoolDatum { - identifier, - circulating_lp, - protocol_fees, - assets, - market_open, - condition_datum, - .. - } = pool_input_datum - - expect DefaultTradingHoursDatum { - bid_fees_per_10_thousand, - ask_fees_per_10_thousand, - open_time, - close_time, - } = condition_datum - - // The protocol configures many global settings via a "settings" UTXO, updatable by certain administrators - // This is included as a reference input, so we have a utility to check the reference inputs for the settings NFT - // Note: it's important to check for the NFT, because checking just for the address would let someone pay random funds to the settings address. - let settings_datum = - find_settings_datum(reference_inputs, settings_policy_id) - - // Then, there are two different actions that can be taken against a pool: - // - Scooping a batch of orders - // - Withdrawing protocol fees to the treasury - when redeemer is { - // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed - ConditionPoolScoop { signatory_index, scooper_index, input_order, .. 
} -> - and { - scoop_default( - settings_datum, - inputs, - pool_script_hash, - actual_ask_fees_per_10_thousand, - actual_bid_fees_per_10_thousand, - pool_input, - actual_identifier, - validity_range, - withdrawals, - datums, - input_order, - outputs, - actual_protocol_fees, - mint, - extra_signatories, - signatory_index, - scooper_index, - pool_output_value, - actual_circulating_lp, - actual_assets, - actual_market_open, - pool_output_address, - assets, - protocol_fees, - identifier, - circulating_lp, - market_open, - bid_fees_per_10_thousand, - ask_fees_per_10_thousand, - ), - scoop_trading_hours(validity_range, open_time, close_time), - } - ConditionManage -> and { - default_verify_datum( - bid_fees_per_10_thousand, - ask_fees_per_10_thousand, - ), - trading_hours_verify_datum(open_time, close_time), - } - } - } - } - } - - else(_) { - fail - } -} diff --git a/validators/conditions/trading_hours.ak b/validators/conditions/trading_hours.ak new file mode 100644 index 0000000..22ae119 --- /dev/null +++ b/validators/conditions/trading_hours.ak @@ -0,0 +1,99 @@ +use aiken/collection/list +use aiken/interval.{Finite, Interval, IntervalBound} +use calculation/process.{find_pool_output} +use calculation/shared.{millis_per_day} +use cardano/address.{Credential} +use cardano/transaction.{InlineDatum, Input, Output, Transaction, ValidityRange} +use types/conditions/trading_hours.{TradingHoursDatum} as trading_hours_types +use types/pool.{Manage, PoolDatum, PoolRedeemer, PoolScoop} + +validator trading_hours { + withdraw( + redeemer: PoolRedeemer, + _account: Credential, + transaction: Transaction, + ) { + // First, we destructure the transaction right upfront, because field access is O(n), + // and we want access to these fields with just a single pass over the transaction + // This will be a common pattern throughout the scripts + // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) + let Transaction { 
inputs, outputs, validity_range, .. } = transaction + + // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop + let ( + Output { address: pool_output_address, .. }, + PoolDatum { condition_datum: actual_condition_datum_option, .. }, + ) = find_pool_output(outputs) + + // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction + expect Some(Input(_, pool_input)) = + list.find( + inputs, + fn(input) { input.output.address == pool_output_address }, + ) + + expect InlineDatum(pool_input_datum) = pool_input.datum + + expect PoolDatum { condition_datum: condition_datum_option, .. } = + pool_input_datum + + expect Some(condition_datum) = condition_datum_option + + expect TradingHoursDatum { open_time, close_time } = condition_datum + + // Then, there are two different actions that can be taken against a pool: + // - Scooping a batch of orders + // - Withdrawing protocol fees to the treasury + when redeemer is { + // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed + PoolScoop { .. } -> and { + scoop_trading_hours(validity_range, open_time, close_time), + actual_condition_datum_option == condition_datum_option, + } + Manage -> False + } + } + + else(_) { + fail + } +} + +fn scoop_trading_hours( + validity_range: ValidityRange, + open_time: Int, + close_time: Int, +) -> Bool { + expect Interval { + lower_bound: IntervalBound { bound_type: Finite(low_val), .. }, + upper_bound: IntervalBound { bound_type: Finite(high_val), .. 
}, + } = validity_range + let low_val_d = low_val % millis_per_day + let high_val_d = high_val % millis_per_day + let validity_less_than_1_day = high_val - low_val < millis_per_day + and { + validity_less_than_1_day, + time_between_start_finish(low_val_d, open_time, close_time), + time_between_start_finish(high_val_d, open_time, close_time), + } +} + +fn time_between_start_finish(time: Int, start: Int, finish: Int) -> Bool { + if start < finish { + and { + time > start, + time < finish, + } + } else { + or { + and { + time < start, + time < finish, + }, + and { + time > start, + time > finish, + }, + } + } +} diff --git a/validators/oracle.ak b/validators/oracle.ak index a06c8df..391856a 100644 --- a/validators/oracle.ak +++ b/validators/oracle.ak @@ -239,6 +239,8 @@ fn mint_oracle( fee_manager: None, market_open: 0, protocol_fees: 2_000_000, + condition: None, + condition_datum: None, }, ), reference_script: None, diff --git a/validators/pool.ak b/validators/pool.ak index 063a7e1..2249587 100644 --- a/validators/pool.ak +++ b/validators/pool.ak @@ -4,7 +4,7 @@ use aiken/collection/pairs use aiken/crypto.{ScriptHash} use aiken/interval use aiken/primitive/bytearray -use calculation/process.{pool_input_to_state, process_orders} +use calculation/process.{find_pool_output, pool_input_to_state, process_orders} use cardano/address.{Address, Inline, Script} use cardano/assets.{PolicyId, Value} use cardano/script_context.{ScriptContext} @@ -105,6 +105,8 @@ validator pool( bid_fees_per_10_thousand: actual_bid_fees_per_10_thousand, ask_fees_per_10_thousand: actual_ask_fees_per_10_thousand, market_open: actual_market_open, + condition: actual_condition, + condition_datum: actual_condition_datum .. 
}, ) = find_pool_output(outputs) @@ -169,6 +171,7 @@ validator pool( final_lp, simple_count, strategy_count, + withdrawal_only, <- process_orders( actual_identifier, @@ -214,6 +217,8 @@ validator pool( 0, // The accumulated count of "simple" orders, for calculating the fee; set to 0 to start, but incremented in each recursion 0, + // Flag that turns False as soon as we encounter a non withdrawal + True, ) // The accumulated count of "strategy" orders, see line above. // We need to make sure that the number of orders matches the amount that we processed @@ -254,6 +259,19 @@ validator pool( _ -> True } + expect + when datum.condition is { + Some(condition_script) -> + if !withdrawal_only { + expect Some(..) = + pairs.get_first(withdrawals, address.Script(condition_script)) + True + } else { + True + } + None -> True + } + // the market must have opened; this allows projects to pre-create their pool, potentially across multiple protocols, and allows // people to open orders ahead of time, and avoids things like sniping bots, etc. // TODO: should we *only* prevent swaps / withdrawals? would it be ok to allow deposits? 
@@ -293,6 +311,8 @@ validator pool( // And make sure each of these fields is unchanged and { + datum.condition == actual_condition, + datum.condition_datum == actual_condition_datum, datum.identifier == actual_identifier, datum.assets == actual_assets, datum.bid_fees_per_10_thousand == actual_bid_fees_per_10_thousand, @@ -560,18 +580,6 @@ validator pool( } } -fn find_pool_output(outputs: List) -> (Output, PoolDatum) { - // Find the pool output; we can assume the pool output is the first output, because: - // - The ledger doesn't reorder outputs, just inputs - // - We check that the address is correct, so if the first output was to a different contract, we would fail - // - We check that the datum is the correct type, meaning we can't construct an invalid pool output - // - Later, we check that the pool output has the correct value, meaning it *must* contain the pool token, so we can't pay to the pool script multiple times - expect Some(pool_output) = list.head(outputs) - expect InlineDatum(output_datum) = pool_output.datum - expect output_datum: PoolDatum = output_datum - (pool_output, output_datum) -} - /// This is responsible for checking that the minting value on the transaction is valid /// based on the pool state, the policy ID, and the initial incoming datum. 
fn minted_correct_pool_tokens( diff --git a/validators/tests/pool.ak b/validators/tests/pool.ak index 3a44b59..0ef8b6f 100644 --- a/validators/tests/pool.ak +++ b/validators/tests/pool.ak @@ -342,6 +342,8 @@ fn scoop(options: ScoopTestOptions) { fee_manager: None, market_open: 0, protocol_fees: 2_000_000, + condition: None, + condition_datum: None, } let pool_out_datum = PoolDatum { @@ -356,6 +358,8 @@ fn scoop(options: ScoopTestOptions) { fee_manager: None, market_open: 0, protocol_fees: 7_000_000, + condition: None, + condition_datum: None, } let pool_nft_name = shared.pool_nft_name(constants.pool_ident) let pool_address = script_address(constants.pool_script_hash) @@ -554,6 +558,8 @@ fn scoop_swap_deposit(options: ScoopTestOptions) { fee_manager: None, market_open: 0, protocol_fees: 2_000_000, + condition: None, + condition_datum: None, } let pool_out_datum = PoolDatum { @@ -568,6 +574,8 @@ fn scoop_swap_deposit(options: ScoopTestOptions) { fee_manager: None, market_open: 0, protocol_fees: 7_000_000, + condition: None, + condition_datum: None, } let pool_nft_name = shared.pool_nft_name(constants.pool_ident) let pool_address = script_address(constants.pool_script_hash) @@ -781,6 +789,8 @@ fn withdraw_fees_transaction( fee_manager: None, market_open: 0, protocol_fees, + condition: None, + condition_datum: None, } let normal_input = new_tx_input( @@ -1026,6 +1036,8 @@ fn update_pool_fees_transaction(options: ScoopTestOptions) { fee_manager, market_open: 0, protocol_fees: 2_000_000, + condition: None, + condition_datum: None, } let pool_rider = 2_000_000 // pool_test_tx_input deduplicate? 
@@ -1231,6 +1243,8 @@ fn mint_test_modify( fee_manager: None, market_open: 0, protocol_fees: 2_000_000, + condition: None, + condition_datum: None, }, ), ) @@ -1515,6 +1529,8 @@ fn evaporate_pool_tx(options: ScoopTestOptions, withdraw_amount: Int) { fee_manager: None, market_open: 0, protocol_fees: 18_000_000, + condition: None, + condition_datum: None, } // pool_test_tx_input deduplicate? let pool_input = @@ -1598,6 +1614,8 @@ test attempt_evaporate_pool_test() { fee_manager: None, market_open: 0, protocol_fees: 18_000_000, + condition: None, + condition_datum: None, } // pool_test_tx_input deduplicate? let pool_input = @@ -1667,6 +1685,8 @@ test burn_pool() { fee_manager: None, market_open: 0, protocol_fees: 2_000_000, + condition: None, + condition_datum: None, } let pool_nft_name = shared.pool_nft_name(constants.pool_ident) let pool_address = script_address(constants.pool_script_hash) From 93eed7fe1bf40d51be566ab4aced924c2bd1f321 Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Tue, 12 Nov 2024 16:28:04 +0100 Subject: [PATCH 4/8] permissioned pool --- aiken.lock | 2 +- lib/types/conditions/permissioned.ak | 14 +- validators/conditions/permissioned.ak | 236 ++++++++++++++++++++++++++ 3 files changed, 246 insertions(+), 6 deletions(-) create mode 100644 validators/conditions/permissioned.ak diff --git a/aiken.lock b/aiken.lock index 1cbf34e..9024110 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1731417833, nanos_since_epoch = 808697597 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1731422080, nanos_since_epoch = 441356988 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] diff --git a/lib/types/conditions/permissioned.ak b/lib/types/conditions/permissioned.ak index ebee31b..9197cea 100644 --- a/lib/types/conditions/permissioned.ak +++ 
b/lib/types/conditions/permissioned.ak @@ -1,25 +1,29 @@ use aiken/crypto.{ Blake2b_256, Hash, Signature, VerificationKey, VerificationKeyHash, } -use cardano/address.{Address} use cardano/transaction.{ValidityRange} use shared.{Ident} +use types/order.{Destination} pub type ComplianceToken { + token: TokenData, + //A signature from the compliance oracle for fields 1-6 + oracle_signature: Signature, +} + +pub type TokenData { // The users DID Identifier did: Ident, //The users public key - user_key: VerificationKey, + user_key: VerificationKeyHash, //The destination address - destination: Address, + destination: Destination, //Blake2b-256 hash of the cbor serialized details from the order order_hash: Hash, //A valid range validity_range: ValidityRange, //The public key of the oracle oracle_key: VerificationKey, - //A signature from the compliance oracle for fields 1-6 - oracle_signature: Signature, } pub type PermissionedDatum { diff --git a/validators/conditions/permissioned.ak b/validators/conditions/permissioned.ak new file mode 100644 index 0000000..015ee49 --- /dev/null +++ b/validators/conditions/permissioned.ak @@ -0,0 +1,236 @@ +use aiken/cbor.{serialise} +use aiken/collection/dict.{Dict} +use aiken/collection/list +use aiken/crypto.{ + Blake2b_256, Hash, VerificationKeyHash, blake2b_256, verify_ed25519_signature, +} +use aiken/interval.{Finite, Interval, IntervalBound} +use calculation/process.{find_pool_output} +use calculation/shared.{check_and_set_unique, + unsafe_fast_index_skip_with_tail} as calculation_shared +use cardano/address.{Credential} +use cardano/transaction.{InlineDatum, Input, Output, Transaction, ValidityRange} +use shared.{datum_of, is_script} +use sundae/multisig +use types/conditions/permissioned.{ComplianceToken, + PermissionedDatum} as permissioned_types +use types/order.{OrderDatum, SignedStrategyExecution} +use types/pool.{Manage, PoolDatum, PoolRedeemer, PoolScoop} + +validator permissioned { + withdraw( + redeemer: PoolRedeemer, 
+ _account: Credential, + transaction: Transaction, + ) { + // First, we destructure the transaction right upfront, because field access is O(n), + // and we want access to these fields with just a single pass over the transaction + // This will be a common pattern throughout the scripts + // (in fact, I really want a compiler optimization that detects and lifts repeated field accesses into a destructure) + let Transaction { inputs, outputs, validity_range, datums, .. } = + transaction + + // Find the pool output, the output datum, and destructure it to access the fields we need to process the scoop + let ( + Output { address: pool_output_address, .. }, + PoolDatum { condition_datum: actual_condition_datum_option, .. }, + ) = find_pool_output(outputs) + + // Then, (confusing terminology aside) find the specific pool UTXO being spent by this transaction + expect Some(Input(_, pool_input)) = + list.find( + inputs, + fn(input) { input.output.address == pool_output_address }, + ) + + expect InlineDatum(pool_input_datum) = pool_input.datum + + expect PoolDatum { condition_datum: condition_datum_option, .. } = + pool_input_datum + + expect Some(condition_datum) = condition_datum_option + + expect PermissionedDatum { whitelisted_oracles } = condition_datum + + // Then, there are two different actions that can be taken against a pool: + // - Scooping a batch of orders + // - Withdrawing protocol fees to the treasury + when redeemer is { + // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed + PoolScoop { input_order, .. 
} -> and { + scoop_permission( + whitelisted_oracles, + validity_range, + datums, + input_order, + inputs, + ), + actual_condition_datum_option == condition_datum_option, + } + Manage -> False + } + } + + else(_) { + fail + } +} + +fn scoop_permission( + oracles: List, + validity_range: ValidityRange, + datums: Dict, Data>, + input_order: List<(Int, Option, Int)>, + inputs: List, +) -> Bool { + let compliance_verified <- + process_orders( + oracles, + validity_range, + datums, + input_order, + 0, + inputs, + inputs, + 0, + True, + ) + compliance_verified +} + +/// Process a single order, comparing it to the output to ensure it was executed faithfully, and returning the new pool state +/// +/// Most of the parameters here are for performance reasons, to avoid destructuring objects, since thats very expensive +fn process_order( + // The validity range of the transaction, used to ensure the signed execution is within the correct time window + validity_range: ValidityRange, + // The details of the order to execute, such as whether it's a swap, the limit, etc. + order: OrderDatum, + // List of oracles that are whitelisted + oracles: List, +) -> Bool { + // Has an attached compliance token + expect compliance: ComplianceToken = order.extension + + // The owner of an order is the same as the public key in the token + expect multisig.Signature(owner_sig) = order.owner + + expect Interval { + lower_bound: IntervalBound { bound_type: Finite(tx_from), .. }, + upper_bound: IntervalBound { bound_type: Finite(tx_to), .. }, + } = validity_range + + expect Interval { + lower_bound: IntervalBound { bound_type: Finite(token_from), .. }, + upper_bound: IntervalBound { bound_type: Finite(token_to), .. }, + } = compliance.token.validity_range + + let oracle_key_hash = blake2b_256(compliance.token.oracle_key) + + // The oracle public key is one of those listed in the approved oracles + expect Some(..) 
= list.find(oracles, fn(o) { o == oracle_key_hash }) + and { + owner_sig == compliance.token.user_key, + // The destination of an order is the same as the one in the token + order.destination == compliance.token.destination, + // The order details hash to the same hash from the token + blake2b_256(serialise(order.details)) == compliance.token.order_hash, + // The valid range of the transaction entirely contains the valid range from the token + token_from < tx_from, + token_to > tx_to, + // The signature is valid + verify_ed25519_signature( + compliance.token.oracle_key, + serialise(compliance.token), + compliance.oracle_signature, + ), + } +} + +/// Recursively process all orders in the correct order +/// There's a lot of parameters here, mostly for efficiency (though with some redundancies being removed in another branch) +fn process_orders( + // List of oracles that are whitelisted + oracles: List, + // The transaction valid range, if we end up processing a strategy + tx_valid_range: ValidityRange, + // The datums in the witness set, in case we need to lookup a non-inline datum + datums: Dict, Data>, + // The list of remaining indices into the inputs, specifying which orders to process + input_order: List<(Int, Option, Int)>, + // The previous order we processed, to check if we need to restart the loop; TODO: we actually pass +1 from this, and i'm not sure how to explain why we do this... + prev_index: Int, + // *all* inputs on the transaction, in case we need to start over from the beginning (i.e. 
wrap around) + all_inputs: List, + // Just the remaining inputs in the list, in case it's more efficient to keep walking from here + remaining_inputs: List, + // A number that, when interpreted as a bit flag, indicates which orders we've already processed; used to check if an order is processed more than once (see InputSorting.md) + uniqueness_flag: Int, + // Flag keeping track of compliance verification + compliance_verified: Bool, + // A continuation to call with the final pool state; more efficient than constructing tuples / objects + continuation: fn(Bool) -> Bool, +) -> Bool { + // Returns the final pool state, and the count of each order type + // The main "pump" of the recursive loop is the input_order, which is a set of indices into the inputs list + // specified by the scooper for the order to process each order in. + // Once we've reached the end of the list, we can return, but otherwise + when input_order is { + [] -> continuation(True) + [(idx, _, _), ..rest] -> { + // First, it's important to check that each order is processed only once; + // This is quite subtle, so check InputSorting.md for a full explanation + let next_uniqueness_flag = check_and_set_unique(uniqueness_flag, idx) + + // Then, we identify where to find the inputs; in particular, to avoid "starting from the beginning" every single time + // when indices are monotonic through the list, we can just continue to advance through the list + // so, all_inputs will always contain the full list of inputs + // while remaining_inputs will just contain the ones "after" the last one we processed. + // So, here, we check if we can continue down this path, or if we need to start from the beginning again + let next_input_list = + if idx >= prev_index { + unsafe_fast_index_skip_with_tail(remaining_inputs, idx - prev_index) + } else { + unsafe_fast_index_skip_with_tail(all_inputs, idx) + } + + expect [input_to_process, ..rest_of_input_list] = next_input_list + let Input { output: order, .. 
} = input_to_process + + // It's important that we fail if we ever try to process a UTXO from a wallet address + // This is a bit unfortunate, because it means we can't support processing orders directly out of a users wallet + // but is important, because we rely on this to check that every order is processed. + // If we didn't do this check, a scooper could include a UTXO from their wallet, and leave a *real* order un-processed, and steal those users funds. + expect is_script(order.address.payment_credential) + + // Find the datum that is associated with this order; we allow that datum to be either inline, or in the witness set, + // to aid in composibility with other protocols + // We also check that the datum is in the format we expect; + // Note: we don't actually check the order address anywhere!! As long as it's a script, and the datum is in the correct format, we're good. + // This lets us upgrade the order contract, or add other types of orders over time. + expect Some(datum) = datum_of(datums, order) + expect datum: OrderDatum = datum + + // And finally, process this one individual order and compute the next state + // Note that we get back next_orders here, which is needed if we process a donation that has no change UTXO + let order_compliance_verified = + process_order(tx_valid_range, datum, oracles) + + // And recursively process the rest of the orders + process_orders( + oracles, + tx_valid_range, + datums, + rest, + idx + 1, + // This is the "previous index" within the input list; TODO: I'm not actually sure why we add 1? 
+ all_inputs, + // See the notes above about all_inputs vs remaining_inputs + rest_of_input_list, + next_uniqueness_flag, + order_compliance_verified && compliance_verified, + continuation, + ) + } + } +} From d5cca046ab409e93f7f6010a2cd64a263dbe93dd Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Wed, 13 Nov 2024 15:36:13 +0100 Subject: [PATCH 5/8] simplify permissioned and trading hours test cases --- aiken.lock | 2 +- validators/conditions/permissioned.ak | 143 ++++++------------------- validators/conditions/trading_hours.ak | 65 +++++++++++ 3 files changed, 96 insertions(+), 114 deletions(-) diff --git a/aiken.lock b/aiken.lock index 9024110..f231e48 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1731422080, nanos_since_epoch = 441356988 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1731506081, nanos_since_epoch = 289261445 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] diff --git a/validators/conditions/permissioned.ak b/validators/conditions/permissioned.ak index 015ee49..bb7b1fd 100644 --- a/validators/conditions/permissioned.ak +++ b/validators/conditions/permissioned.ak @@ -6,15 +6,13 @@ use aiken/crypto.{ } use aiken/interval.{Finite, Interval, IntervalBound} use calculation/process.{find_pool_output} -use calculation/shared.{check_and_set_unique, - unsafe_fast_index_skip_with_tail} as calculation_shared use cardano/address.{Credential} use cardano/transaction.{InlineDatum, Input, Output, Transaction, ValidityRange} use shared.{datum_of, is_script} use sundae/multisig use types/conditions/permissioned.{ComplianceToken, PermissionedDatum} as permissioned_types -use types/order.{OrderDatum, SignedStrategyExecution} +use types/order.{OrderDatum} use types/pool.{Manage, PoolDatum, PoolRedeemer, PoolScoop} validator permissioned { @@ -57,16 +55,23 
@@ validator permissioned { // - Withdrawing protocol fees to the treasury when redeemer is { // In the case of the scoop, the redeemer indicates which scooper is doing the scoop, and the order in which the inputs should be processed - PoolScoop { input_order, .. } -> and { - scoop_permission( - whitelisted_oracles, - validity_range, - datums, - input_order, + PoolScoop { .. } -> { + let orders = + list.filter( inputs, - ), + fn(i) { + and { + i.output != pool_input, + is_script(i.output.address.payment_credential), + } + }, + ) + |> list.map(fn(i) { i.output }) + and { + scoop_permission(whitelisted_oracles, validity_range, datums, orders), actual_condition_datum_option == condition_datum_option, } + } Manage -> False } } @@ -80,22 +85,22 @@ fn scoop_permission( oracles: List, validity_range: ValidityRange, datums: Dict, Data>, - input_order: List<(Int, Option, Int)>, - inputs: List, + orders: List, ) -> Bool { - let compliance_verified <- - process_orders( - oracles, - validity_range, - datums, - input_order, - 0, - inputs, - inputs, - 0, - True, - ) - compliance_verified + list.all( + orders, + fn(order) { + // Find the datum that is associated with this order; we allow that datum to be either inline, or in the witness set, + // to aid in composibility with other protocols + // We also check that the datum is in the format we expect; + // Note: we don't actually check the order address anywhere!! As long as it's a script, and the datum is in the correct format, we're good. + // This lets us upgrade the order contract, or add other types of orders over time. 
+ expect Some(datum) = datum_of(datums, order) + expect datum: OrderDatum = datum + + process_order(validity_range, datum, oracles) + }, + ) } /// Process a single order, comparing it to the output to ensure it was executed faithfully, and returning the new pool state @@ -146,91 +151,3 @@ fn process_order( ), } } - -/// Recursively process all orders in the correct order -/// There's a lot of parameters here, mostly for efficiency (though with some redundancies being removed in another branch) -fn process_orders( - // List of oracles that are whitelisted - oracles: List, - // The transaction valid range, if we end up processing a strategy - tx_valid_range: ValidityRange, - // The datums in the witness set, in case we need to lookup a non-inline datum - datums: Dict, Data>, - // The list of remaining indices into the inputs, specifying which orders to process - input_order: List<(Int, Option, Int)>, - // The previous order we processed, to check if we need to restart the loop; TODO: we actually pass +1 from this, and i'm not sure how to explain why we do this... - prev_index: Int, - // *all* inputs on the transaction, in case we need to start over from the beginning (i.e. 
wrap around) - all_inputs: List, - // Just the remaining inputs in the list, in case it's more efficient to keep walking from here - remaining_inputs: List, - // A number that, when interpreted as a bit flag, indicates which orders we've already processed; used to check if an order is processed more than once (see InputSorting.md) - uniqueness_flag: Int, - // Flag keeping track of compliance verification - compliance_verified: Bool, - // A continuation to call with the final pool state; more efficient than constructing tuples / objects - continuation: fn(Bool) -> Bool, -) -> Bool { - // Returns the final pool state, and the count of each order type - // The main "pump" of the recursive loop is the input_order, which is a set of indices into the inputs list - // specified by the scooper for the order to process each order in. - // Once we've reached the end of the list, we can return, but otherwise - when input_order is { - [] -> continuation(True) - [(idx, _, _), ..rest] -> { - // First, it's important to check that each order is processed only once; - // This is quite subtle, so check InputSorting.md for a full explanation - let next_uniqueness_flag = check_and_set_unique(uniqueness_flag, idx) - - // Then, we identify where to find the inputs; in particular, to avoid "starting from the beginning" every single time - // when indices are monotonic through the list, we can just continue to advance through the list - // so, all_inputs will always contain the full list of inputs - // while remaining_inputs will just contain the ones "after" the last one we processed. - // So, here, we check if we can continue down this path, or if we need to start from the beginning again - let next_input_list = - if idx >= prev_index { - unsafe_fast_index_skip_with_tail(remaining_inputs, idx - prev_index) - } else { - unsafe_fast_index_skip_with_tail(all_inputs, idx) - } - - expect [input_to_process, ..rest_of_input_list] = next_input_list - let Input { output: order, .. 
} = input_to_process - - // It's important that we fail if we ever try to process a UTXO from a wallet address - // This is a bit unfortunate, because it means we can't support processing orders directly out of a users wallet - // but is important, because we rely on this to check that every order is processed. - // If we didn't do this check, a scooper could include a UTXO from their wallet, and leave a *real* order un-processed, and steal those users funds. - expect is_script(order.address.payment_credential) - - // Find the datum that is associated with this order; we allow that datum to be either inline, or in the witness set, - // to aid in composibility with other protocols - // We also check that the datum is in the format we expect; - // Note: we don't actually check the order address anywhere!! As long as it's a script, and the datum is in the correct format, we're good. - // This lets us upgrade the order contract, or add other types of orders over time. - expect Some(datum) = datum_of(datums, order) - expect datum: OrderDatum = datum - - // And finally, process this one individual order and compute the next state - // Note that we get back next_orders here, which is needed if we process a donation that has no change UTXO - let order_compliance_verified = - process_order(tx_valid_range, datum, oracles) - - // And recursively process the rest of the orders - process_orders( - oracles, - tx_valid_range, - datums, - rest, - idx + 1, - // This is the "previous index" within the input list; TODO: I'm not actually sure why we add 1? 
- all_inputs, - // See the notes above about all_inputs vs remaining_inputs - rest_of_input_list, - next_uniqueness_flag, - order_compliance_verified && compliance_verified, - continuation, - ) - } - } -} diff --git a/validators/conditions/trading_hours.ak b/validators/conditions/trading_hours.ak index 22ae119..1fcc2a4 100644 --- a/validators/conditions/trading_hours.ak +++ b/validators/conditions/trading_hours.ak @@ -78,6 +78,8 @@ fn scoop_trading_hours( } } +// We are dealing with start and finish defined as time of day, so we need to account +// for the case where open is in the evening and close in the morning (close < open) fn time_between_start_finish(time: Int, start: Int, finish: Int) -> Bool { if start < finish { and { @@ -97,3 +99,66 @@ fn time_between_start_finish(time: Int, start: Int, finish: Int) -> Bool { } } } + +test scoop_normal_case() { + let tx_validity = + Interval { + lower_bound: IntervalBound { + // Wednesday 13. November 2024 10:00:00 + bound_type: Finite(1731492000000), + is_inclusive: True, + }, + upper_bound: IntervalBound { + // Wednesday 13. November 2024 10:10:00 + bound_type: Finite(1731492600000), + is_inclusive: True, + }, + } + // 9 am + let open = 32400000 + // 5 pm + let close = 61200000 + expect scoop_trading_hours(tx_validity, open, close) +} + +test scoop_normal_case_negative() { + let tx_validity = + Interval { + lower_bound: IntervalBound { + // Wednesday 13. November 2024 23:50:00 + bound_type: Finite(1731541800000), + is_inclusive: True, + }, + upper_bound: IntervalBound { + // Thursday 14. November 2024 00:10:00 + bound_type: Finite(1731543000000), + is_inclusive: True, + }, + } + // 5 pm + let open = 32400000 + // 9 am + let close = 61200000 + expect !scoop_trading_hours(tx_validity, open, close) +} + +test scoop_night_time_case_negative() { + let tx_validity = + Interval { + lower_bound: IntervalBound { + // Wednesday 13. 
November 2024 10:00:00 + bound_type: Finite(1731492000000), + is_inclusive: True, + }, + upper_bound: IntervalBound { + // Wednesday 13. November 2024 10:10:00 + bound_type: Finite(1731492600000), + is_inclusive: True, + }, + } + // 5 pm + let open = 61200000 + // 9 am + let close = 32400000 + expect !scoop_trading_hours(tx_validity, open, close) +} From f5fc607cda94661870d777958d6d6b53419458da Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Wed, 20 Nov 2024 12:59:26 +0100 Subject: [PATCH 6/8] resolved review comments --- aiken.lock | 2 +- lib/types/conditions/permissioned.ak | 2 +- validators/pool.ak | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aiken.lock b/aiken.lock index f231e48..6d06582 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1731506081, nanos_since_epoch = 289261445 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1732103573, nanos_since_epoch = 920277561 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] diff --git a/lib/types/conditions/permissioned.ak b/lib/types/conditions/permissioned.ak index 9197cea..363b36b 100644 --- a/lib/types/conditions/permissioned.ak +++ b/lib/types/conditions/permissioned.ak @@ -7,7 +7,7 @@ use types/order.{Destination} pub type ComplianceToken { token: TokenData, - //A signature from the compliance oracle for fields 1-6 + //A signature from the compliance oracle for serialized TokenData oracle_signature: Signature, } diff --git a/validators/pool.ak b/validators/pool.ak index 2249587..221c5f5 100644 --- a/validators/pool.ak +++ b/validators/pool.ak @@ -106,7 +106,7 @@ validator pool( ask_fees_per_10_thousand: actual_ask_fees_per_10_thousand, market_open: actual_market_open, condition: actual_condition, - condition_datum: actual_condition_datum + condition_datum: actual_condition_datum, .. 
}, ) = find_pool_output(outputs) @@ -260,7 +260,7 @@ validator pool( } expect - when datum.condition is { + when actual_condition is { Some(condition_script) -> if !withdrawal_only { expect Some(..) = From b27b8419848fc76c95d4c95e6b95c6d1e0c04cf9 Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Wed, 20 Nov 2024 16:36:00 +0100 Subject: [PATCH 7/8] fixed comment and added data signing test --- aiken.lock | 2 +- validators/conditions/permissioned.ak | 83 +++++++++++++++++++++++++-- 2 files changed, 79 insertions(+), 6 deletions(-) diff --git a/aiken.lock b/aiken.lock index 6d06582..7430b1d 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1732103573, nanos_since_epoch = 920277561 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1732114660, nanos_since_epoch = 971737449 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] diff --git a/validators/conditions/permissioned.ak b/validators/conditions/permissioned.ak index bb7b1fd..1af20a9 100644 --- a/validators/conditions/permissioned.ak +++ b/validators/conditions/permissioned.ak @@ -6,13 +6,16 @@ use aiken/crypto.{ } use aiken/interval.{Finite, Interval, IntervalBound} use calculation/process.{find_pool_output} -use cardano/address.{Credential} -use cardano/transaction.{InlineDatum, Input, Output, Transaction, ValidityRange} +use cardano/address.{Credential, from_verification_key} +use cardano/assets.{ada_asset_name, ada_policy_id} +use cardano/transaction.{ + InlineDatum, Input, NoDatum, Output, Transaction, ValidityRange, +} use shared.{datum_of, is_script} use sundae/multisig use types/conditions/permissioned.{ComplianceToken, - PermissionedDatum} as permissioned_types -use types/order.{OrderDatum} + PermissionedDatum, TokenData} as permissioned_types +use types/order.{Fixed, OrderDatum, Swap} use types/pool.{Manage, 
PoolDatum, PoolRedeemer, PoolScoop} validator permissioned { @@ -140,7 +143,7 @@ fn process_order( order.destination == compliance.token.destination, // The order details hash to the same hash from the token blake2b_256(serialise(order.details)) == compliance.token.order_hash, - // The valid range of the transaction entirely contains the valid range from the token + // The valid range of the token entirely contains the valid range of the transaction token_from < tx_from, token_to > tx_to, // The signature is valid @@ -151,3 +154,73 @@ fn process_order( ), } } + +// oracle secret key: 2da637b0ab533b63f32d7adc3e1de5c26be33f878bf2364db3d4813c5873aac5 +// oracle public key: 49e7cc40f180809b4d9e05030a09127883151517d4ed40948ad9ada2c81820ea + +test process_order_with_signed_data() { + let token_validity = + Interval { + lower_bound: IntervalBound { bound_type: Finite(0), is_inclusive: False }, + upper_bound: IntervalBound { + bound_type: Finite(10000), + is_inclusive: False, + }, + } + let tx_validity = + Interval { + lower_bound: IntervalBound { + bound_type: Finite(1000), + is_inclusive: False, + }, + upper_bound: IntervalBound { + bound_type: Finite(2000), + is_inclusive: False, + }, + } + let user_pub_hash = + #"0000000000000000000000000000000000000000000000000000000000000000" + let destination = + Fixed { address: from_verification_key(user_pub_hash), datum: NoDatum } + let order = + Swap { + offer: (ada_policy_id, ada_asset_name, 10), + min_received: (ada_policy_id, ada_asset_name, 10), + } + let oracle_key = + #"49e7cc40f180809b4d9e05030a09127883151517d4ed40948ad9ada2c81820ea" + let token_data = + TokenData { + did: #"0000", + user_key: user_pub_hash, + destination, + order_hash: blake2b_256(serialise(order)), + validity_range: token_validity, + oracle_key, + } + + // If the test case data changes use this trace to sign with the secret key commented above the test case + trace serialise(token_data) + + let signature = + 
#"93b0e5878a118198ff15cd82c6de676503b6d7c2158e4a148091033f583599d0ee63598d1f609a3953806103db193ddaf355ede27b34e7ca614251fdb183dd0d" + + let compliance_token = + ComplianceToken { token: token_data, oracle_signature: signature } + + let order_datum = + OrderDatum { + pool_ident: None, + owner: multisig.Signature(user_pub_hash), + max_protocol_fee: 0, + destination, + details: order, + extension: compliance_token, + } + + process_order( + validity_range: tx_validity, + order: order_datum, + oracles: [blake2b_256(oracle_key)], + ) +} From 958ffdaff4ce64dc5e6dfaedc41e4631b6d4fccd Mon Sep 17 00:00:00 2001 From: Robert Pieter van Leeuwen Date: Tue, 26 Nov 2024 12:44:05 +0100 Subject: [PATCH 8/8] some negative test cases --- aiken.lock | 2 +- validators/conditions/permissioned.ak | 43 +++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/aiken.lock b/aiken.lock index 7430b1d..da94bda 100644 --- a/aiken.lock +++ b/aiken.lock @@ -35,4 +35,4 @@ requirements = [] source = "github" [etags] -"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1732114660, nanos_since_epoch = 971737449 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] +"aiken-lang/stdlib@v2" = [{ secs_since_epoch = 1732619791, nanos_since_epoch = 638622185 }, "33dce3a6dbfc58a92cc372c4e15d802f079f4958af941386d18980eb98439bb4"] diff --git a/validators/conditions/permissioned.ak b/validators/conditions/permissioned.ak index 1af20a9..2b6cad1 100644 --- a/validators/conditions/permissioned.ak +++ b/validators/conditions/permissioned.ak @@ -218,9 +218,42 @@ test process_order_with_signed_data() { extension: compliance_token, } - process_order( - validity_range: tx_validity, - order: order_datum, - oracles: [blake2b_256(oracle_key)], - ) + let valid_oracles = [blake2b_256(oracle_key)] + + let valid_case = + process_order( + validity_range: tx_validity, + order: order_datum, + oracles: valid_oracles, + ) + + let wrong_details = + process_order( + validity_range: 
tx_validity, + order: OrderDatum { + ..order_datum, + details: Swap { + offer: (ada_policy_id, ada_asset_name, 20), + min_received: (ada_policy_id, ada_asset_name, 10), + }, + }, + oracles: valid_oracles, + ) + + let wrong_signature = + process_order( + validity_range: tx_validity, + order: OrderDatum { + ..order_datum, + extension: ComplianceToken { + ..compliance_token, + token: TokenData { ..token_data, did: #"0010" }, + }, + }, + oracles: valid_oracles, + ) + + expect valid_case + expect !wrong_details + expect !wrong_signature }