"""
Test basic tensor operations and autograd functionality.
This tests the core of the FIT library.
"""

import numpy as np

from fit.core.tensor import Tensor
8+
def test_basic_operations():
    """Exercise elementwise add/mul and matrix multiplication on Tensor.

    Builds small gradient-enabled tensors, runs each operation, and
    checks the numeric results against hand-computed expected values.

    Returns:
        True if all checks pass (an assertion fires otherwise).
    """
    print("=== Testing Basic Tensor Operations ===")

    # Two 1-D gradient-enabled operands
    a = Tensor([1.0, 2.0, 3.0], requires_grad=True)
    b = Tensor([4.0, 5.0, 6.0], requires_grad=True)

    print(f"a = {a.data}")
    print(f"b = {b.data}")

    # Elementwise addition
    c = a + b
    print(f"a + b = {c.data}")
    assert np.allclose(c.data, [5.0, 7.0, 9.0]), "addition result is wrong"

    # Elementwise multiplication
    d = a * b
    print(f"a * b = {d.data}")
    assert np.allclose(d.data, [4.0, 10.0, 18.0]), "multiplication result is wrong"

    # 2x2 matrix operands
    x = Tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
    y = Tensor([[2.0, 1.0], [4.0, 3.0]], requires_grad=True)

    print(f"x = \n{x.data}")
    print(f"y = \n{y.data}")

    # Matrix multiplication via the @ operator
    z = x @ y
    print(f"x @ y = \n{z.data}")
    assert np.allclose(z.data, [[10.0, 7.0], [22.0, 15.0]]), "matmul result is wrong"

    return True

def test_autograd():
    """Check backward() gradients for f(x) = x^2 + 2x + 1 at x = 2.

    Also evaluates sum(a * b) for two small tensors; that sum node is
    wired up by hand because Tensor.sum() may not be implemented yet,
    and its backward pass is deliberately not invoked.

    Returns:
        True if all checks pass (an assertion fires otherwise).
    """
    print("\n=== Testing Automatic Differentiation ===")

    x = Tensor([2.0], requires_grad=True)

    # Forward pass: f(2) = 4 + 4 + 1 = 9
    y = x * x + 2 * x + 1
    print(f"f({x.data[0]}) = {y.data[0]}")
    assert np.allclose(y.data, [9.0]), "forward value is wrong"

    # Backward pass: f'(x) = 2x + 2, so f'(2) = 6
    y.backward()
    expected_grad = 2 * x.data[0] + 2
    print(f"f'({x.data[0]}) = {x.grad[0]} (expected: {expected_grad})")
    assert np.allclose(x.grad[0], expected_grad), "gradient is wrong"

    # More complex example: f(a, b) = sum(a * b)
    a = Tensor([1.0, 2.0], requires_grad=True)
    b = Tensor([3.0, 4.0], requires_grad=True)

    c = a * b
    # Fake a sum node by hand (touches private attrs on purpose, since
    # Tensor.sum() might not exist yet in the library).
    loss = Tensor([c.data.sum()], requires_grad=True)
    loss._backward = lambda: None
    loss._prev = {c}

    # Manual backward for the sum node: d(sum)/dc = ones_like(c)
    def _backward():
        if c.requires_grad:
            c.grad = np.ones_like(c.data) if c.grad is None else c.grad + np.ones_like(c.data)

    loss._backward = _backward

    # Not calling loss.backward() here: sum support is still pending.
    print(f"a = {a.data}, b = {b.data}")
    print(f"a * b = {c.data}")
    print(f"sum(a * b) = {c.data.sum()}")
    assert np.isclose(c.data.sum(), 11.0), "sum of products is wrong"

    return True

def test_reshape_and_slicing():
    """Probe optional reshape and slicing support on Tensor.

    Each feature is attempted inside try/except so the script keeps
    running (with a notice) on library versions that lack it; when a
    feature is present, its result is verified.

    Returns:
        True (missing features are reported, not treated as failures).
    """
    print("\n=== Testing Reshape and Slicing ===")

    # 12 consecutive floats: 0.0 .. 11.0
    x = Tensor(np.arange(12).astype(float), requires_grad=True)
    print(f"Original: {x.data}")

    # Reshape may not be implemented yet
    try:
        y = x.reshape((3, 4))
        print(f"Reshaped (3, 4): \n{y.data}")
        assert y.data.shape == (3, 4), "reshape produced wrong shape"
    except AttributeError:
        print("Reshape not implemented yet")

    # Slicing may not be implemented yet
    try:
        z = x[2:8]
        print(f"Sliced [2:8]: {z.data}")
        assert np.allclose(z.data, np.arange(2.0, 8.0)), "slice values are wrong"
    except (AttributeError, TypeError):
        print("Slicing not implemented yet")

    return True

if __name__ == "__main__":
    # Run all three test groups; any uncaught error is reported with a
    # full traceback instead of crashing silently.
    try:
        test_basic_operations()
        test_autograd()
        test_reshape_and_slicing()
        print("\n✅ Basic tensor tests completed!")
    except Exception as e:
        print(f"\n❌ Test failed with error: {e}")
        import traceback

        traceback.print_exc()