Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
93 changes: 91 additions & 2 deletions crates/database/src/trades.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ pub fn trades<'a>(
ex: &'a mut PgConnection,
owner_filter: Option<&'a Address>,
order_uid_filter: Option<&'a OrderUid>,
offset: i64,
limit: i64,
) -> instrument::Instrumented<BoxStream<'a, Result<TradesQueryRow, sqlx::Error>>> {
const COMMON_QUERY: &str = r#"
SELECT
Expand All @@ -49,27 +51,44 @@ LEFT OUTER JOIN LATERAL (
) AS settlement ON true"#;

const QUERY: &str = const_format::concatcp!(
"(",
COMMON_QUERY,
" JOIN orders o ON o.uid = t.order_uid",
" WHERE ($1 IS NULL OR o.owner = $1)",
" AND ($2 IS NULL OR o.uid = $2)",
" ORDER BY t.block_number DESC, t.log_index DESC",
" LIMIT $3 + $4",
Copy link
Contributor

@fafk fafk Dec 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This query is super confusing.

Can we do

SELECT
    t.block_number,
    t.log_index,
    t.order_uid,
    t.buy_amount,
    t.sell_amount,
    t.sell_amount - t.fee_amount AS sell_amount_before_fees,
    COALESCE(o.owner, jit_o.owner) AS owner,
    COALESCE(o.buy_token, jit_o.buy_token) AS buy_token,
    COALESCE(o.sell_token, jit_o.sell_token) AS sell_token,
    settlement.tx_hash,
    settlement.auction_id
FROM trades t
LEFT JOIN LATERAL (
    SELECT tx_hash, auction_id
    FROM settlements s
    WHERE s.block_number = t.block_number
      AND s.log_index > t.log_index
    ORDER BY s.log_index ASC
    LIMIT 1
) AS settlement ON true
LEFT JOIN orders o
    ON o.uid = t.order_uid
LEFT JOIN onchain_placed_orders onchain_o
    ON onchain_o.uid = t.order_uid
LEFT JOIN jit_orders jit_o
    ON jit_o.uid = t.order_uid
WHERE
    (
        $1 IS NULL
        OR o.owner = $1
        OR onchain_o.sender = $1
        OR jit_o.owner = $1
    )
  AND ($2 IS NULL OR t.order_uid = $2)
ORDER BY t.block_number DESC, t.log_index DESC
LIMIT $3 OFFSET $4;

instead of all the unions and the overfetching of $3 + $4?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd experiment with this in a separate PR, since your version confuses me much more 😅

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree that it would be best to be able to see the query as a whole, in order to reason about its correctness easier.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd experiment with this in a separate PR, since your version confuses me much more 😅

I found 2 things confusing:

  1. I had to write a test to output the final string to make things easier on me to review
  2. The $3 + $4 threw me off (as it did copilot hehe); it's an unusual pattern where each subquery fetches up to the offset plus the limit, and then everything up to the offset is discarded in the outer layer 😅

The query I propose gets rid of the concatenations and just fetches everything at once, so there is no need to "overfetch and discard". Anyway, I think your SQL works, so I am approving and leaving it to your discretion if you wanna follow up with some refactoring. 🙌

Copy link
Contributor Author

@squadgazzz squadgazzz Dec 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The $3 + $4 threw me off

We have the same approach in the get_user_orders query.

const QUERY: &str = const_format::concatcp!(
"(SELECT ", orders::SELECT,
" FROM ", orders::FROM,
" LEFT OUTER JOIN onchain_placed_orders onchain_o on onchain_o.uid = o.uid",
" WHERE o.owner = $1",
" ORDER BY creation_timestamp DESC LIMIT $2 + $3 ) ",
" UNION ",
" (SELECT ", orders::SELECT,
" FROM ", orders::FROM,
" LEFT OUTER JOIN onchain_placed_orders onchain_o on onchain_o.uid = o.uid",
" WHERE onchain_o.sender = $1 ",
" ORDER BY creation_timestamp DESC LIMIT $2 + $3 ) ",
" UNION ",
" (SELECT ", jit_orders::SELECT,
" FROM ", jit_orders::FROM,
" WHERE o.owner = $1 AND NOT EXISTS (SELECT 1 FROM orders ord WHERE o.uid = ord.uid)",
" ORDER BY creation_timestamp DESC LIMIT $2 + $3 ) ",
" ORDER BY creation_timestamp DESC ",
" LIMIT $2 ",
" OFFSET $3 ",
);

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think it legitimizes the pattern. 😅

")",
" UNION ",
"(",
COMMON_QUERY,
" JOIN orders o ON o.uid = t.order_uid",
" LEFT OUTER JOIN onchain_placed_orders onchain_o",
" ON onchain_o.uid = t.order_uid",
" WHERE onchain_o.sender = $1",
" AND ($2 IS NULL OR o.uid = $2)",
" ORDER BY t.block_number DESC, t.log_index DESC",
" LIMIT $3 + $4",
")",
" UNION ",
"(",
COMMON_QUERY,
" JOIN jit_orders o ON o.uid = t.order_uid",
" WHERE ($1 IS NULL OR o.owner = $1)",
" AND ($2 IS NULL OR o.uid = $2)",
" ORDER BY t.block_number DESC, t.log_index DESC",
" LIMIT $3 + $4",
")",
" ORDER BY block_number DESC, log_index DESC",
" LIMIT $3",
" OFFSET $4",
);

sqlx::query_as(QUERY)
.bind(owner_filter)
.bind(order_uid_filter)
.bind(limit)
.bind(offset)
.fetch(ex)
.instrument(info_span!("trades"))
}
Expand Down Expand Up @@ -213,7 +232,8 @@ mod tests {
order_uid_filter: Option<&OrderUid>,
expected: &[TradesQueryRow],
) {
let mut filtered = trades(db, owner_filter, order_uid_filter)
// Use large limit to get all trades
let mut filtered = trades(db, owner_filter, order_uid_filter, 0, 1000)
.into_inner()
.try_collect::<Vec<_>>()
.await
Expand Down Expand Up @@ -287,7 +307,7 @@ mod tests {
}

let now = std::time::Instant::now();
trades(&mut db, Some(&ByteArray([2u8; 20])), None)
trades(&mut db, Some(&ByteArray([2u8; 20])), None, 0, 100)
.into_inner()
.try_collect::<Vec<_>>()
.await
Expand Down Expand Up @@ -655,4 +675,73 @@ mod tests {
Some(123)
);
}

/// Exercises offset/limit pagination of the `trades` query against a live
/// Postgres instance: inserts five trades for one owner (one per block) and
/// checks page boundaries, an out-of-range offset, and an oversized limit.
#[tokio::test]
#[ignore]
async fn postgres_trades_pagination() {
    let mut db = PgConnection::connect("postgresql://").await.unwrap();
    let mut db = db.begin().await.unwrap();
    crate::clear_DANGER_(&mut db).await.unwrap();

    // Seed five trades that share a single owner, placing each in its own
    // block so the DESC ordering of the query is observable.
    let (owners, order_ids) = generate_owners_and_order_ids(1, 5).await;
    let owner = owners[0];

    let mut all_trades = Vec::with_capacity(order_ids.len());
    for (block, order_id) in order_ids.iter().enumerate() {
        let event = EventIndex {
            block_number: block.try_into().unwrap(),
            log_index: 0,
        };
        let trade = add_order_and_trade(&mut db, owner, *order_id, event, None, None).await;
        all_trades.push(trade);
    }

    // The query orders by block_number DESC, log_index DESC; mirror that here.
    all_trades.sort_by(|lhs, rhs| rhs.block_number.cmp(&lhs.block_number));

    // Page one (limit 2, offset 0): newest two trades, blocks 4 and 3.
    let page = trades(&mut db, Some(&owner), None, 0, 2)
        .into_inner()
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    assert_eq!(page, &all_trades[..2]);

    // Page two (limit 2, offset 2): blocks 2 and 1.
    let page = trades(&mut db, Some(&owner), None, 2, 2)
        .into_inner()
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    assert_eq!(page, &all_trades[2..4]);

    // An offset past the end of the result set yields an empty page.
    let page = trades(&mut db, Some(&owner), None, 10, 2)
        .into_inner()
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    assert!(page.is_empty());

    // A limit larger than the result set returns everything, newest first.
    let page = trades(&mut db, Some(&owner), None, 0, 100)
        .into_inner()
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    assert_eq!(page.len(), 5);
    assert_eq!(page, all_trades);
}
}
22 changes: 22 additions & 0 deletions crates/orderbook/openapi.yml
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,12 @@ paths:
summary: Get existing trades.
description: |
Exactly one of `owner` or `orderUid` must be set.

Results are paginated and sorted by block number and log index descending (newest trades first).

To enumerate all trades, start with `offset` 0 and keep increasing the
`offset` by the total number of returned results. When a response
contains fewer than `limit` results, the last page has been reached.
parameters:
- name: owner
in: query
Expand All @@ -224,6 +230,22 @@ paths:
schema:
$ref: "#/components/schemas/UID"
required: false
- name: offset
in: query
description: |
The pagination offset. Defaults to 0.
schema:
type: integer
required: false
- name: limit
in: query
description: |
The maximum number of trades to return. Defaults to 10. Must be between 1 and 1000.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is quite the breaking change, no?
Anyone using the existing endpoint will suddenly get a lot fewer orders. It should be pretty straight forward to introduce a v2 endpoint for the pagination while keeping the old one the same.
We could also immediately adjust the v1 description to mark it as deprecated.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok, makes sense.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Updated the PR.

schema:
type: integer
minimum: 1
maximum: 1000
required: false
responses:
"200":
description: |-
Expand Down
59 changes: 57 additions & 2 deletions crates/orderbook/src/api/get_trades.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,24 +19,43 @@ use {
// Raw query-string parameters of the `GET /v1/trades` endpoint — presumably
// deserialized by a derive above this view (TODO confirm). Validation and
// defaulting happen in `Query::validate`, not here.
struct Query {
// Filter trades by order UID; mutually exclusive with `owner` (enforced in `validate`).
pub order_uid: Option<OrderUid>,
// Filter trades by order owner; mutually exclusive with `order_uid` (enforced in `validate`).
pub owner: Option<Address>,
// Pagination offset; falls back to DEFAULT_OFFSET when absent.
pub offset: Option<u64>,
// Pagination page size; falls back to DEFAULT_LIMIT, bounds-checked against MIN_LIMIT/MAX_LIMIT.
pub limit: Option<u64>,
}

// Offset applied when the request omits `offset`.
const DEFAULT_OFFSET: u64 = 0;
// Page size applied when the request omits `limit`.
const DEFAULT_LIMIT: u64 = 10;
// Inclusive bounds enforced on a caller-supplied `limit` in `Query::validate`.
const MIN_LIMIT: u64 = 1;
const MAX_LIMIT: u64 = 1000;

// Validation failures for the trades query parameters; both variants are
// rendered as HTTP 400 responses by the route handler.
#[derive(Debug, Eq, PartialEq)]
enum TradeFilterError {
// Neither or both of `owner`/`orderUid` were provided; carries the user-facing message.
InvalidFilter(String),
// `limit` fell outside the allowed range; carries the (min, max) bounds for the error message.
InvalidLimit(u64, u64),
}

impl Query {
fn trade_filter(&self) -> TradeFilter {
fn trade_filter(&self, offset: u64, limit: u64) -> TradeFilter {
TradeFilter {
order_uid: self.order_uid,
owner: self.owner,
offset,
limit,
}
}

fn validate(&self) -> Result<TradeFilter, TradeFilterError> {
match (self.order_uid.as_ref(), self.owner.as_ref()) {
(Some(_), None) | (None, Some(_)) => Ok(self.trade_filter()),
(Some(_), None) | (None, Some(_)) => {
let offset = self.offset.unwrap_or(DEFAULT_OFFSET);
let limit = self.limit.unwrap_or(DEFAULT_LIMIT);

if !(MIN_LIMIT..=MAX_LIMIT).contains(&limit) {
return Err(TradeFilterError::InvalidLimit(MIN_LIMIT, MAX_LIMIT));
}

Ok(self.trade_filter(offset, limit))
}
_ => Err(TradeFilterError::InvalidFilter(
"Must specify exactly one of owner or orderUid.".to_owned(),
)),
Expand Down Expand Up @@ -71,6 +90,13 @@ pub fn get_trades(db: Postgres) -> impl Filter<Extract = (ApiReply,), Error = Re
let err = error("InvalidTradeFilter", msg);
with_status(err, StatusCode::BAD_REQUEST)
}
Err(TradeFilterError::InvalidLimit(min, max)) => {
let err = error(
"InvalidLimit",
format!("limit must be between {min} and {max}"),
);
with_status(err, StatusCode::BAD_REQUEST)
}
})
}
})
Expand Down Expand Up @@ -98,6 +124,8 @@ mod tests {
.unwrap();
assert_eq!(result.owner, Some(owner));
assert_eq!(result.order_uid, None);
assert_eq!(result.offset, DEFAULT_OFFSET);
assert_eq!(result.limit, DEFAULT_LIMIT);

let uid = OrderUid([1u8; 56]);
let order_uid_path = format!("/v1/trades?orderUid={uid}");
Expand All @@ -107,6 +135,18 @@ mod tests {
.unwrap();
assert_eq!(result.owner, None);
assert_eq!(result.order_uid, Some(uid));
assert_eq!(result.offset, DEFAULT_OFFSET);
assert_eq!(result.limit, DEFAULT_LIMIT);

// Test with custom offset and limit
let owner_path = format!("/v1/trades?owner=0x{owner:x}&offset=10&limit=50");
let result = trade_filter(request().path(owner_path.as_str()))
.await
.unwrap()
.unwrap();
assert_eq!(result.owner, Some(owner));
assert_eq!(result.offset, 10);
assert_eq!(result.limit, 50);
}

#[tokio::test]
Expand All @@ -126,5 +166,20 @@ mod tests {
let path = "/v1/trades";
let result = trade_filter(request().path(path)).await.unwrap();
assert!(result.is_err());

// Test limit validation
let path = format!("/v1/trades?owner=0x{owner:x}&limit=0");
let result = trade_filter(request().path(path.as_str())).await.unwrap();
assert!(matches!(
result,
Err(TradeFilterError::InvalidLimit(MIN_LIMIT, MAX_LIMIT))
));

let path = format!("/v1/trades?owner=0x{owner:x}&limit=1001");
let result = trade_filter(request().path(path.as_str())).await.unwrap();
assert!(matches!(
result,
Err(TradeFilterError::InvalidLimit(MIN_LIMIT, MAX_LIMIT))
));
}
}
10 changes: 10 additions & 0 deletions crates/orderbook/src/database/trades.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ pub trait TradeRetrieving: Send + Sync {
// Validated filter handed to the database layer's trades query.
pub struct TradeFilter {
// Restrict results to trades whose order belongs to this owner, if set.
pub owner: Option<Address>,
// Restrict results to trades of this specific order, if set.
pub order_uid: Option<OrderUid>,
// Pagination offset; converted to i64 (with a context error on overflow) before binding.
pub offset: u64,
// Maximum number of rows to return; converted to i64 before binding.
pub limit: u64,
}

#[async_trait::async_trait]
Expand All @@ -34,6 +36,14 @@ impl TradeRetrieving for Postgres {
&mut ex,
filter.owner.map(|owner| ByteArray(owner.0.0)).as_ref(),
filter.order_uid.map(|uid| ByteArray(uid.0)).as_ref(),
filter
.offset
.try_into()
.context("offset too large for database")?,
filter
.limit
.try_into()
.context("limit too large for database")?,
)
.into_inner()
.map_err(anyhow::Error::from)
Expand Down
2 changes: 2 additions & 0 deletions crates/orderbook/src/orderbook.rs
Original file line number Diff line number Diff line change
Expand Up @@ -571,6 +571,8 @@ impl Orderbook {
.trades(&TradeFilter {
owner: None,
order_uid: Some(*uid),
offset: 0,
limit: 1,
})
.await?;

Expand Down
Loading