[dev]
# Set to "huggingface", for example, if you are a huggingface developer. Default is ""
partner_developer = ""
# Please only set it to true if you are preparing an EI related PR
# Do remember to revert it back to false before merging any PR (including EI dedicated PR)
ei_mode = false
# Please only set it to true if you are preparing a NEURON related PR
# Do remember to revert it back to false before merging any PR (including NEURON dedicated PR)
neuron_mode = false
# Please only set it to true if you are preparing a NEURONX related PR
# Do remember to revert it back to false before merging any PR (including NEURONX dedicated PR)
neuronx_mode = false
# Please only set it to true if you are preparing a GRAVITON related PR
# Do remember to revert it back to false before merging any PR (including GRAVITON dedicated PR)
graviton_mode = false
# Please only set it to true if you are preparing a ARM64 related PR
# Do remember to revert it back to false before merging any PR (including ARM64 dedicated PR)
arm64_mode = false
# Please only set it to true if you are preparing a HABANA related PR
# Do remember to revert it back to false before merging any PR (including HABANA dedicated PR)
habana_mode = false
# Please only set it to true if you are preparing a HUGGINGFACE TRCOMP related PR
# Do remember to revert it back to false before merging any PR (including HUGGINGFACE TRCOMP dedicated PR)
# This mode is used to build TF 2.6 and PT1.11 DLC
huggingface_trcomp_mode = false
# Please only set it to true if you are preparing a TRCOMP related PR
# Do remember to revert it back to false before merging any PR (including TRCOMP dedicated PR)
# This mode is used to build PT1.12 and above DLC
trcomp_mode = false
# Set deep_canary_mode to true to simulate Deep Canary Test conditions on PR for all frameworks in the
# build_frameworks list below. This will cause all image builds and non-deep-canary tests on the PR to be skipped,
# regardless of whether they are enabled or disabled below.
# Set graviton_mode/arm64_mode to true to run Deep Canaries on Graviton/ARM64 images.
# Do remember to revert it back to false before merging any PR.
deep_canary_mode = false

[build]
# Add in frameworks you would like to build. By default, builds are disabled unless you specify building an image.
# available frameworks - ["base", "vllm", "autogluon", "huggingface_tensorflow", "huggingface_pytorch", "huggingface_tensorflow_trcomp", "huggingface_pytorch_trcomp", "pytorch_trcomp", "tensorflow", "pytorch", "stabilityai_pytorch"]
build_frameworks = []


# By default we build both training and inference containers. Set true/false values to determine which to build.
build_training = true
build_inference = true

# Set do_build to "false" to skip builds and test the latest image built by this PR
# Note: at least one build is required to set do_build to "false"
do_build = true

[notify]
### Notify on test failures
### Off by default
notify_test_failures = false
# Valid values: medium or high
notification_severity = "medium"

[test]
### On by default
sanity_tests = true
security_tests = true
safety_check_test = false
ecr_scan_allowlist_feature = false
ecs_tests = true
eks_tests = true
ec2_tests = true
# Set it to true if you are preparing a Benchmark related PR
ec2_benchmark_tests = false

### Set ec2_tests_on_heavy_instances = true to be able to run any EC2 tests that use large/expensive instance types by
### default. If false, these types of tests will be skipped while other tests will run as usual.
### These tests are run in EC2 test jobs, so ec2_tests must be true if ec2_tests_on_heavy_instances is true.
### Off by default (set to false)
ec2_tests_on_heavy_instances = false
### SM specific tests
### On by default
sagemaker_local_tests = true
### Set enable_ipv6 = true to run tests with IPv6-enabled resources
### Off by default (set to false)
enable_ipv6 = false
### Set the VPC name to be used for IPv6 testing, this variable is empty by default
### To create an IPv6-enabled VPC and its related resources:
### 1. Follow this AWS doc: https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html#create-vpc-and-other-resources
### 2. After creating the VPC and related resources:
###    a. Set 'Auto-assign IPv6 address' option to 'No' in all public subnets within the VPC
###    b. Configure the default security group to allow SSH traffic using IPv4
###
### 3. Create an EFA-enabled security group:
###    a. Follow 'Step 1: Prepare an EFA-enabled security group' in:
###       https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html#efa-start-security
###    b. Configure this security group to also allow SSH traffic via IPv4
ipv6_vpc_name = ""

# run standard sagemaker remote tests from test/sagemaker_tests
sagemaker_remote_tests = true
# run efa sagemaker tests
sagemaker_efa_tests = false
# run release_candidate_integration tests
sagemaker_rc_tests = false
# run sagemaker benchmark tests
sagemaker_benchmark_tests = false

# SM remote EFA test instance type
sagemaker_remote_efa_instance_type = ""

# Run CI tests for nightly images
# false by default
nightly_pr_test_mode = false

use_scheduler = false

[buildspec_override]
# Assign the path to the required buildspec file from the deep-learning-containers folder
# For example:
# dlc-pr-tensorflow-2-habana-training = "habana/tensorflow/training/buildspec-2-10.yml"
# dlc-pr-pytorch-inference = "pytorch/inference/buildspec-1-12.yml"
# Setting the buildspec file path to "" allows the image builder to choose the default buildspec file.

### TRAINING PR JOBS ###

# Standard Framework Training
dlc-pr-pytorch-training = ""
dlc-pr-tensorflow-2-training = ""
dlc-pr-autogluon-training = ""

# ARM64 Training
dlc-pr-pytorch-arm64-training = ""

# HuggingFace Training
dlc-pr-huggingface-tensorflow-training = ""
dlc-pr-huggingface-pytorch-training = ""

# Training Compiler
dlc-pr-huggingface-pytorch-trcomp-training = ""
dlc-pr-huggingface-tensorflow-2-trcomp-training = ""
dlc-pr-pytorch-trcomp-training = ""

# Neuron Training
dlc-pr-pytorch-neuron-training = ""
dlc-pr-tensorflow-2-neuron-training = ""

# Stability AI Training
dlc-pr-stabilityai-pytorch-training = ""

# Habana Training
dlc-pr-pytorch-habana-training = ""
dlc-pr-tensorflow-2-habana-training = ""

### INFERENCE PR JOBS ###

# Standard Framework Inference
dlc-pr-pytorch-inference = ""
dlc-pr-tensorflow-2-inference = ""
dlc-pr-autogluon-inference = ""

# Graviton Inference
dlc-pr-pytorch-graviton-inference = ""
dlc-pr-tensorflow-2-graviton-inference = ""

# ARM64 Inference
dlc-pr-pytorch-arm64-inference = ""
dlc-pr-tensorflow-2-arm64-inference = ""

# Neuron Inference
dlc-pr-pytorch-neuron-inference = ""
dlc-pr-tensorflow-1-neuron-inference = ""
dlc-pr-tensorflow-2-neuron-inference = ""

# HuggingFace Inference
dlc-pr-huggingface-tensorflow-inference = ""
dlc-pr-huggingface-pytorch-inference = ""
dlc-pr-huggingface-pytorch-neuron-inference = ""

# Stability AI Inference
dlc-pr-stabilityai-pytorch-inference = ""

# EIA Inference
dlc-pr-pytorch-eia-inference = ""
dlc-pr-tensorflow-2-eia-inference = ""