Skip to content

Commit 51e40f4

Browse files
committed
Generated ipynb and md files with results for dlrm
1 parent 65e3cc4 commit 51e40f4

File tree

8 files changed

+2264
-669
lines changed

8 files changed

+2264
-669
lines changed

examples/keras_rs/dlrm.py

Lines changed: 21 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
11
"""
22
Title: Ranking with Deep Learning Recommendation Model
3-
Author: Harshith Kulkarni
3+
Author: [Harshith Kulkarni](https://github.com/kharshith-k)
44
Date created: 2025/06/02
5-
Last modified: 2025/09/01
6-
Description: Rank movies with DLRM using KerasRS
5+
Last modified: 2025/09/04
6+
Description: Rank movies with DLRM using KerasRS.
7+
Accelerator: GPU
78
"""
89

910
"""
@@ -12,13 +13,14 @@
1213
This tutorial demonstrates how to use the Deep Learning Recommendation Model (DLRM) to
1314
effectively learn the relationships between items and user preferences using a
1415
dot-product interaction mechanism. For more details, please refer to the
15-
[DLRM](https://arxiv.org/pdf/1906.00091) paper.
16+
[DLRM](https://arxiv.org/abs/1906.00091) paper.
1617
1718
DLRM is designed to excel at capturing explicit, bounded-degree feature interactions and
1819
is particularly effective at processing both categorical and continuous (sparse/dense)
1920
input features. The architecture consists of three main components: dedicated input
20-
layers to handle diverse features, a dot-product interaction layer to explicitly model
21-
feature interactions, and a Multi-Layer Perceptron (MLP) to capture implicit feature relationships.
21+
layers to handle diverse features (typically embedding layers for categorical features),
22+
a dot-product interaction layer to explicitly model feature interactions, and a
23+
Multi-Layer Perceptron (MLP) to capture implicit feature relationships.
2224
2325
The dot-product interaction layer lies at the heart of DLRM, efficiently computing
2426
pairwise interactions between different feature embeddings. This contrasts with models
@@ -31,6 +33,7 @@
3133
3234
![DLRM Architecture](https://raw.githubusercontent.com/kharshith-k/keras-io/refs/heads/keras-rs-examples/examples/keras_rs/img/dlrm/dlrm_architecture.gif)
3335
36+
3437
Now that we have a foundational understanding of DLRM's architecture and key
3538
characteristics, let's dive into the code. We will train a DLRM on a real-world dataset
3639
to demonstrate its capability to learn meaningful feature interactions. Let's begin by
@@ -43,7 +46,7 @@
4346

4447
import os
4548

46-
os.environ["KERAS_BACKEND"] = "jax" # `"tensorflow"`/`"torch"`
49+
os.environ["KERAS_BACKEND"] = "tensorflow" # `"tensorflow"`/`"torch"`
4750

4851
import keras
4952
import matplotlib.pyplot as plt
@@ -187,10 +190,7 @@ def print_stats(rmse_list, num_params, model_name):
187190
tutorial. Let's load the dataset, and keep only the useful columns.
188191
"""
189192

190-
ratings_ds = tfds.load(
191-
"movielens/100k-ratings",
192-
split="train"
193-
)
193+
ratings_ds = tfds.load("movielens/100k-ratings", split="train")
194194

195195

196196
def preprocess_features(x):
@@ -340,7 +340,6 @@ def __init__(
340340
):
341341
super().__init__(**kwargs)
342342

343-
# Layers for categorical features (unchanged).
344343
self.embedding_layers = {}
345344
for feature_name in (
346345
MOVIELENS_CONFIG["categorical_int_features"]
@@ -352,8 +351,7 @@ def __init__(
352351
output_dim=embedding_dim,
353352
)
354353

355-
# A single MLP for all continuous features.
356-
self.continuous_mlp = keras.Sequential(
354+
self.bottom_mlp = keras.Sequential(
357355
[
358356
keras.layers.Dense(mlp_dim, activation="relu"),
359357
keras.layers.Dense(embedding_dim), # Output must match embedding_dim
@@ -362,18 +360,16 @@ def __init__(
362360

363361
self.dot_layer = keras_rs.layers.DotInteraction()
364362

365-
self.dense_layers = []
363+
self.top_mlp = []
366364
for num_units in dense_num_units_lst:
367-
self.dense_layers.append(keras.layers.Dense(num_units, activation="relu"))
365+
self.top_mlp.append(keras.layers.Dense(num_units, activation="relu"))
368366

369367
self.output_layer = keras.layers.Dense(1)
370368

371-
# Attributes.
372369
self.dense_num_units_lst = dense_num_units_lst
373370
self.embedding_dim = embedding_dim
374371

375372
def call(self, inputs):
376-
# Process categorical features to get embeddings (unchanged).
377373
embeddings = []
378374
for feature_name in (
379375
MOVIELENS_CONFIG["categorical_int_features"]
@@ -394,17 +390,18 @@ def call(self, inputs):
394390
# Concatenate into a single tensor: (batch_size, num_continuous_features)
395391
concatenated_continuous = keras.ops.concatenate(continuous_inputs, axis=1)
396392

397-
# Pass through the single MLP to get one combined vector.
398-
processed_continuous = self.continuous_mlp(concatenated_continuous)
393+
# Pass through the Bottom MLP to get one combined vector.
394+
processed_continuous = self.bottom_mlp(concatenated_continuous)
399395

400-
# Combine with categorical embeddings. Note: we add a list containing the single tensor.
396+
# Combine with categorical embeddings. Note: we add a list containing the
397+
# single tensor.
401398
combined_features = embeddings + [processed_continuous]
402399

403400
# Pass the list of features to the DotInteraction layer.
404401
x = self.dot_layer(combined_features)
405402

406-
for dense_layer in self.dense_layers:
407-
x = dense_layer(x)
403+
for layer in self.top_mlp:
404+
x = layer(x)
408405

409406
x = self.output_layer(x)
410407

@@ -463,7 +460,7 @@ def get_dot_interaction_matrix(model, categorical_features, continuous_features)
463460
num_continuous_features = len(continuous_features)
464461
# Create a dummy input of zeros for the MLP
465462
dummy_continuous_input = keras.ops.zeros((1, num_continuous_features))
466-
processed_continuous = model.continuous_mlp(dummy_continuous_input)
463+
processed_continuous = model.bottom_mlp(dummy_continuous_input)
467464
all_feature_outputs.append(processed_continuous)
468465

469466
interaction_matrix = np.zeros((num_features, num_features))
34.1 KB
Loading
20.4 KB
Loading

0 commit comments

Comments (0)