Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion contracts/contracts/ccip/types.tolk
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ fun TVM2AnyRampMessage.generateMessageId(self, metadataHash: uint256): uint256 {
// Router

struct TokenAmount {
amount: uint256;
amount: coins;
token: address;
}

Expand All @@ -133,6 +133,8 @@ struct (0x10000001) SetRamp {
onRamp: address;
}

// TODO should separate CCIPSend msg (with opcode) from CCIPSend data

struct (0x00000001) CCIPSend {
queryId: uint64;
destChainSelector: uint64;
Expand Down
51 changes: 51 additions & 0 deletions contracts/contracts/lib/pools/ipool.tolk
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import "./pool"
// import "./ipool/messages"

tolk 1.0

/// Generic pool wrapper: pairs caller-owned context state of type T with the
/// hook implementations that define this pool's lock/burn and release/mint behavior.
struct IPool<T> {
    context: T            // opaque caller-owned state, passed through to every hook call
    hooks: IPool_Hooks<T> // handler table invoked by onInternalMessage
}

/// Handler table for IPool messages. Each hook receives the pool context, the
/// message sender, the attached coins, and the decoded request struct, and must
/// return the matching response struct to be sent back to the sender.
struct IPool_Hooks<T> {
    lockOrBurn: ((T, address, coins, Pool_LockOrBurnInV1) -> Pool_LockOrBurnOutV1)
    releaseOrMint: ((T, address, coins, Pool_ReleaseOrMintInV1) -> Pool_ReleaseOrMintOutV1)
}

/// Union of all inbound message bodies understood by IPool.onInternalMessage;
/// the struct opcodes (declared at each struct) discriminate the variants.
type IPool_InMessage =
    | Pool_LockOrBurnInV1
    | Pool_ReleaseOrMintInV1

/// Union of all response bodies IPool.onInternalMessage can send back.
type IPool_Response =
    | Pool_LockOrBurnOutV1
    | Pool_ReleaseOrMintOutV1

/// @notice Handles incoming messages for IPool.
/// Decodes msgBody as one of the IPool_InMessage variants, dispatches to the
/// matching hook, and replies to the sender with the hook's response.
/// @param sender   address that sent the inbound internal message
/// @param msgValue coins attached to the inbound message
/// @param msgBody  raw message body; its leading opcode selects the variant
/// @return true if the message was recognized and handled, false otherwise
@inline
fun IPool<T>.onInternalMessage(
    mutate self,
    sender: address,
    msgValue: coins,
    msgBody: slice,
): bool {
    // `lazy` defers full deserialization until a match arm actually uses the value.
    val msg = lazy IPool_InMessage.fromSlice(msgBody);
    var response: IPool_Response? = null;

    match (msg) {
        Pool_LockOrBurnInV1 =>
            response = self.hooks.lockOrBurn(self.context, sender, msgValue, msg),
        Pool_ReleaseOrMintInV1 =>
            response = self.hooks.releaseOrMint(self.context, sender, msgValue, msg),
        // Unrecognized opcode: report "not handled" so the caller can fall through.
        else => return false
    }

    // Reply to the sender, forwarding all remaining message value. `response` is
    // necessarily non-null here: every arm that reaches this point assigned it.
    createMessage({
        bounce: false,
        value: 0,
        dest: sender,
        body: response,
    }).send(SEND_MODE_CARRY_ALL_REMAINING_MESSAGE_VALUE);
    return true
}

63 changes: 63 additions & 0 deletions contracts/contracts/lib/pools/pool.tolk
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
import "../../ccip/types"

tolk 1.0

// The tag used to signal support for the pool v1 standard.
// bytes4(keccak256("CCIP_POOL_V1"))
// const CCIP_POOL_V1 : bytes4 = 0xaff2afbf as bytes4 // TODO we don't know if we need this

// The number of bytes in the return data for a pool v1 releaseOrMint call.
// This should match the size of the ReleaseOrMintOutV1 struct.
const CCIP_POOL_V1_RET_BYTES : uint16 = 32

// The default max number of bytes in the return data for a pool v1 lockOrBurn call.
// This data can be used to send information to the destination chain token pool. Can be overwritten
// in the TokenTransferFeeConfig.destBytesOverhead if more data is required.
// NOTE(review): this constant is uint32 while CCIP_POOL_V1_RET_BYTES above is uint16 — confirm
// the differing widths are intentional.
const CCIP_LOCK_OR_BURN_V1_RET_BYTES : uint32 = 32

//crc32('Pool_LockOrBurnInV1')
/// Request to lock (or burn) source-chain tokens as part of a cross-chain transfer.
struct (0x179B4A8C)Pool_LockOrBurnInV1 {
    msg: Cell<CCIPSend> // the originating CCIPSend request this lock/burn belongs to
    receiver: CrossChainAddress // The recipient of the tokens on the destination chain, abi encoded.
    remoteChainSelector: uint64 // ─╮ The chain ID of the destination chain.
    originalSender: address // ─────╯ The original sender of the tx on the source chain.
    amount: coins // The amount of tokens to lock or burn, denominated in the source token's decimals.
    localToken: address // The address on this chain of the token to lock or burn.
}

//crc32('Pool_LockOrBurnOutV1')
/// Result of a lock/burn: names the destination-chain token and carries optional
/// extra pool data for the destination chain.
struct (0x56E7EB1A)Pool_LockOrBurnOutV1 {
    msg: Cell<CCIPSend> // echo of the originating CCIPSend request
    // The address of the destination token, abi encoded in the case of EVM chains.
    // This value is UNTRUSTED as any pool owner can return whatever value they want.
    destTokenAddress: CrossChainAddress
    // Optional pool data to be transferred to the destination chain. By default this is capped at
    // CCIP_LOCK_OR_BURN_V1_RET_BYTES bytes. If more data is required, the TokenTransferFeeConfig.destBytesOverhead
    // has to be set for the specific token.
    destPoolData: slice
}

//crc32('Pool_ReleaseOrMintInV1')
/// Request to release (or mint) tokens on this (destination) chain.
struct (0x1703AC0B)Pool_ReleaseOrMintInV1 {
    msg: Cell<CCIPSend> // the originating CCIPSend request
    originalSender: CrossChainAddress // The original sender of the tx on the source chain.
    remoteChainSelector: uint64 // ───╮ The chain ID of the source chain.
    receiver: address // ─────────────╯ The recipient of the tokens on the destination chain.
    sourceDenominatedAmount: coins // The amount of tokens to release or mint, denominated in the source token's decimals.
    localToken: address // The address on this chain of the token to release or mint.
    /// @dev WARNING: sourcePoolAddress should be checked prior to any processing of funds. Make sure it matches the
    /// expected pool address for the given remoteChainSelector.
    sourcePoolAddress: CrossChainAddress // The address of the source pool, abi encoded in the case of EVM chains.
    // NOTE(review): the two fields below carry opaque payloads rather than addresses — confirm
    // CrossChainAddress is the intended type here instead of a bytes/slice-like type.
    sourcePoolData: CrossChainAddress // The data received from the source pool to process the release or mint.
    /// @dev WARNING: offchainTokenData is untrusted data.
    offchainTokenData: CrossChainAddress // The offchain data to process the release or mint.
}

//crc32('Pool_ReleaseOrMintOutV1')
/// Result of a release/mint on the destination chain.
struct (0x0D347F43)Pool_ReleaseOrMintOutV1 {
    msg: Cell<CCIPSend> // echo of the originating CCIPSend request
    // The number of tokens released or minted on the destination chain, denominated in the local token's decimals.
    // This value is expected to be equal to Pool_ReleaseOrMintInV1.sourceDenominatedAmount in the case where the
    // source and destination chain have the same number of decimals.
    destinationAmount: coins
}
159 changes: 159 additions & 0 deletions contracts/contracts/lib/pools/rate_limiter.tolk
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
import "../utils"

tolk 1.0

// Error (exit) codes thrown by the rate limiter.
const RateLimiter_ERROR_BUCKET_OVERFILLED = 1001            // stored tokens exceed bucket capacity (invariant broken)
const RateLimiter_ERROR_TOKEN_MAX_CAPACITY_EXCEEDED = 1002  // single request larger than total capacity
const RateLimiter_ERROR_TOKEN_RATE_LIMIT_REACHED = 1003     // not enough tokens accumulated yet
const RateLimiter_ERROR_INVALID_RATE_LIMIT_RATE = 1004      // enabled config with rate > capacity
const RateLimiter_ERROR_DISABLED_NON_ZERO_RATE_LIMIT = 1005 // disabled config with non-zero rate or capacity

//crc32("RateLimiter_ConfigChanged")
const RATELIMITER_CONFIGCHANGED_TOPIC: int = stringCrc32("RateLimiter_ConfigChanged");

/// @notice Event emitted by _setTokenBucketConfig whenever the rate limiter config is replaced.
struct RateLimiter_ConfigChanged { config: RateLimiter_Config }

/// Token-bucket state used for rate limiting.
struct RateLimiter_TokenBucket {
    tokens: uint128 // ────╮ Current number of tokens that are in the bucket.
    lastUpdated: uint32 // │ Timestamp in seconds of the last token refill, good for 100+ years.
    isEnabled: bool // ────╯ Indication whether the rate limiting is enabled or not.
    capacity: uint128 // ──╮ Maximum number of tokens that can be in the bucket.
    rate: uint128 // ──────╯ Number of tokens per second that the bucket is refilled.
}

// Serialized bit-length of RateLimiter_Config: isEnabled (1) + capacity (128) + rate (128).
const RATE_LIMITER_CONFIG_LEN = 1 + 128 + 128

/// User-facing configuration for the rate limiter.
struct RateLimiter_Config {
    isEnabled: bool // Indication whether the rate limiting should be enabled.
    capacity: uint128 // ──╮ Specifies the capacity of the rate limiter.
    rate: uint128 // ──────╯ Specifies the rate of the rate limiter.
}

/// Persistent state wrapper holding the token bucket.
struct RateLimiter_Data {
    bucket: RateLimiter_TokenBucket // The token bucket that is used for rate limiting.
}

/// Receiver type on which the rate-limiter methods in this file are defined.
struct RateLimiter {
    data: RateLimiter_Data
}

/// @notice _consume removes the given tokens from the pool, lowering the rate tokens allowed to be
/// consumed for subsequent calls.
/// @param requestTokens The total tokens to be consumed from the bucket.
/// @param tokenAddress The token to consume capacity for, use 0x0 to indicate aggregate value capacity.
/// @dev Reverts when requestTokens exceeds bucket capacity or available tokens in the bucket.
/// @dev emits removal of requestTokens if requestTokens is > 0.
/// @dev NOTE(review): tokenAddress is currently only referenced by the commented-out revert
/// payloads below — confirm it should stay in the signature.
fun RateLimiter_TokenBucket._consume(mutate self, requestTokens: coins, tokenAddress: address) {
    // If there is no value to remove or rate limiting is turned off, skip this step to reduce gas usage.
    if (!self.isEnabled || requestTokens == 0) {
        return;
    }

    var tokens = self.tokens;
    var capacity = self.capacity;
    var timeDiff = blockchain.now() - self.lastUpdated;

    if (timeDiff != 0) {
        // Invariant: the stored token count must never exceed capacity.
        if (tokens > capacity) {
            throw RateLimiter_ERROR_BUCKET_OVERFILLED; // TODO this should be a return? revert BucketOverfilled();
        }

        // Refill tokens when arriving at a new block time.
        tokens = _calculateRefill(capacity, tokens, timeDiff, self.rate);

        self.lastUpdated = blockchain.now();
    }

    if (capacity < requestTokens) {
        throw RateLimiter_ERROR_TOKEN_MAX_CAPACITY_EXCEEDED; // TODO this should be a return? revert TokenMaxCapacityExceeded(capacity, requestTokens, tokenAddress);
    }
    if (tokens < requestTokens) {
        var rate = self.rate;
        // Wait required until the bucket is refilled enough to accept this value, round up to next higher second.
        // Consume is not guaranteed to succeed after wait time passes if there is competing traffic.
        // This acts as a lower bound of wait time.
        // NOTE(review): this divides by `rate`; _validateTokenBucketConfig only forbids
        // rate > capacity, so an enabled config with rate == 0 would trap with a
        // division error here instead of the intended error code — confirm rate > 0
        // is guaranteed upstream. Also, minWaitInSeconds is only used by the
        // commented-out revert payload below.
        var minWaitInSeconds = ((requestTokens - tokens) + (rate - 1)) / rate;

        throw RateLimiter_ERROR_TOKEN_RATE_LIMIT_REACHED; // TODO this should be a return? revert TokenRateLimitReached(minWaitInSeconds, tokens, tokenAddress);
    }
    tokens -= requestTokens;

    // Downcast is safe here, as tokens is not larger than capacity.
    // NOTE(review): `tokens` is already uint128 here, so no downcast actually occurs —
    // this comment looks inherited from the EVM original.
    self.tokens = tokens;
}

/// @notice Gets the token bucket with its values for the block it was requested at.
/// @dev Mutating: refreshes self.tokens via _calculateRefill and stamps self.lastUpdated
/// with the current time before returning the (updated) bucket.
/// @return The token bucket.
fun RateLimiter_TokenBucket._currentTokenBucketState(mutate self): RateLimiter_TokenBucket {
    // We update the bucket to reflect the status at the exact time of the call. This means we might need to refill a
    // part of the bucket based on the time that has passed since the last update.
    self.tokens = _calculateRefill(
        self.capacity,
        self.tokens,
        blockchain.now() - self.lastUpdated,
        self.rate
    );
    self.lastUpdated = blockchain.now();
    return self;
}

/// @notice Sets the rate limited config.
/// @param config The new config.
/// @dev Refills the bucket at the OLD rate up to now, clamps the stored tokens to the
/// new capacity, copies the new config fields into the bucket, and emits
/// RateLimiter_ConfigChanged. Does not validate config — callers should run
/// _validateTokenBucketConfig first.
fun RateLimiter._setTokenBucketConfig(mutate self, config: RateLimiter_Config) {
    // First update the bucket to make sure the proper rate is used for all the time up until the config change.
    var timeDiff: uint256 = blockchain.now() - self.data.bucket.lastUpdated;
    if (timeDiff != 0) {
        self.data.bucket.tokens = _calculateRefill(
            self.data.bucket.capacity,
            self.data.bucket.tokens,
            timeDiff,
            self.data.bucket.rate
        );

        self.data.bucket.lastUpdated = blockchain.now();
    }

    // Never carry over more tokens than the new capacity allows.
    self.data.bucket.tokens = _min(config.capacity, self.data.bucket.tokens);
    // TBD why doesn't RateLimiter have a `config: Config` attribute instead of repeating the fields?
    self.data.bucket.isEnabled = config.isEnabled;
    self.data.bucket.capacity = config.capacity;
    self.data.bucket.rate = config.rate;

    emit(RATELIMITER_CONFIGCHANGED_TOPIC, RateLimiter_ConfigChanged{ config });
}

/// @notice Validates the token bucket config.
/// A disabled limiter must carry zeroed parameters; an enabled limiter must not
/// refill faster than its own capacity.
fun RateLimiter._validateTokenBucketConfig(config: RateLimiter_Config) {
    if (!config.isEnabled) {
        if (config.rate != 0 || config.capacity != 0) {
            throw RateLimiter_ERROR_DISABLED_NON_ZERO_RATE_LIMIT; // TODO this should be a return? revert DisabledNonZeroRateLimit(config);
        }
        return;
    }
    if (config.rate > config.capacity) {
        throw RateLimiter_ERROR_INVALID_RATE_LIMIT_RATE; // TODO this should be a return? revert InvalidRateLimitRate(config);
    }
}

/// @notice Calculate refilled tokens.
/// @param capacity bucket capacity.
/// @param tokens current bucket tokens.
/// @param timeDiff block time difference since last refill.
/// @param rate bucket refill rate.
/// @return the value of tokens after refill, clamped to capacity.
fun _calculateRefill(capacity: uint128, tokens: uint128, timeDiff: uint256, rate: uint128): uint128 {
    // Accumulate in the wider type first, then clamp the result to the bucket capacity.
    var refilled = tokens as uint256 + timeDiff * rate as uint256;
    return _min(capacity, refilled as uint128);
}

/// @notice Return the smallest of two integers.
/// @param a first int.
/// @param b second int.
/// @return smallest.
fun _min(a: uint128, b: uint128): uint128 {
    if (b < a) {
        return b;
    }
    return a;
}
Loading
Loading