Add the ability to fix and automatically create yaml pipeline tests. #39336

GitHub Actions / Test Results: failed Apr 7, 2025 in 0s

14 fail, 73 skipped, 399 pass in 3m 35s

2 files, 2 suites, ran in 3m 35s ⏱️
486 tests: 399 ✅ passed, 73 💤 skipped, 14 ❌ failed
490 runs:  399 ✅ passed, 77 💤 skipped, 14 ❌ failed

Results for commit fbe997c.

Annotations

Check warning on line 0 in apache_beam.ml.rag.embeddings.huggingface_test.HuggingfaceTextEmbeddingsTest

github-actions / Test Results

test_embedding_pipeline (apache_beam.ml.rag.embeddings.huggingface_test.HuggingfaceTextEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml.xml [took 2s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.rag.embeddings.huggingface_test.HuggingfaceTextEmbeddingsTest testMethod=test_embedding_pipeline>

    def test_embedding_pipeline(self):
      expected = [
          Chunk(
              id="1",
              embedding=Embedding(dense_embedding=[0.0] * 384),
              metadata={
                  "source": "test.txt", "language": "en"
              },
              content=Content(text="This is a test sentence.")),
          Chunk(
              id="2",
              embedding=Embedding(dense_embedding=[0.0] * 384),
              metadata={
                  "source": "test.txt", "language": "en"
              },
              content=Content(text="Another example."))
      ]
      embedder = HuggingfaceTextEmbeddings(
          model_name="sentence-transformers/all-MiniLM-L6-v2")
    
>     with TestPipeline() as p:

apache_beam/ml/rag/embeddings/huggingface_test.py:100: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/testing/test_pipeline.py:115: in run
    result = super().run(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:594: in run
    self._options).run(False)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
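
Note: the NameError above is raised inside transformers' from_pretrained path; init_empty_weights is normally provided by the accelerate package and imported conditionally in transformers/modeling_utils.py, so the failure appears to point at a transformers/accelerate mismatch in the py311-ml environment. A minimal, hypothetical local reproduction outside Beam (assuming the same sentence-transformers model and the same transformers build as the test environment) would be:

    # Hypothetical repro sketch (not part of the test suite): loading the same
    # SentenceTransformer model reaches AutoModel.from_pretrained and then
    # get_init_context() in transformers/modeling_utils.py, where the NameError
    # is raised when init_empty_weights is unavailable.
    from sentence_transformers import SentenceTransformer

    try:
        model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
    except NameError as exc:
        # Matches the failure above on the affected transformers build.
        print(f"reproduced: {exc}")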

Check warning on line 0 in apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest

github-actions / Test Results

test_huggingface_tokenizer_splitter (apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml.xml [took 3s]
Raw output
apache_beam.testing.util.BeamAssertException: Chunk has 12 tokens, expected <= 10 [while running 'assert_that/Match']
chunks = [Chunk(content=Content(text='This is a simple test document. It has multiple'), id='068c0759-5b35-43ab-bc69-627418d6af...c splitting.'), id='2ecbf141-b400-4e75-a490-a3d4e8d8ddf4', index=2, metadata={'source': 'simple.txt'}, embedding=None)]

    def check_token_lengths(chunks):
      for chunk in chunks:
        # Verify each chunk's token length is within limits
        num_tokens = len(tokenizer.encode(chunk.content.text))
        if not num_tokens <= 10:
>         raise BeamAssertException(
              f"Chunk has {num_tokens} tokens, expected <= 10")
E         apache_beam.testing.util.BeamAssertException: Chunk has 12 tokens, expected <= 10

apache_beam/ml/rag/chunking/langchain_test.py:172: BeamAssertException

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest testMethod=test_huggingface_tokenizer_splitter>

    @unittest.skipIf(not TRANSFORMERS_AVAILABLE, "transformers not available")
    def test_huggingface_tokenizer_splitter(self):
      """Test text splitter created from HuggingFace tokenizer."""
      tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
      splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
          tokenizer,
          chunk_size=10,  # tokens
          chunk_overlap=2  # tokens
      )
    
      provider = LangChainChunker(
          document_field='content',
          metadata_fields=['source'],
          text_splitter=splitter)
    
>     with TestPipeline() as p:

apache_beam/ml/rag/chunking/langchain_test.py:161: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/testing/test_pipeline.py:115: in run
    result = super().run(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:594: in run
    self._options).run(False)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:696: in process_bundle
    bundle_processor.process_bundle(instruction_id))
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1274: in process_bundle
    input_op_by_transform_id[element.transform_id].process_encoded(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:237: in process_encoded
    self.output(decoded_value)
apache_beam/runners/worker/operations.py:566: in apache_beam.runners.worker.operations.Operation.output
    def output(self, windowed_value, output_index=0):
apache_beam/runners/worker/operations.py:568: in apache_beam.runners.worker.operations.Operation.output
    _cast_to_receiver(self.receivers[output_index]).receive(windowed_value)
apache_beam/runners/worker/operations.py:259: in apache_beam.runners.worker.operations.SingletonElementConsumerSet.receive
    def receive(self, windowed_value):
apache_beam/runners/worker/operations.py:262: in apache_beam.runners.worker.operations.SingletonElementConsumerSet.receive
    self.consumer.process(windowed_value)
apache_beam/runners/worker/operations.py:949: in apache_beam.runners.worker.operations.DoOperation.process
    with self.scoped_process_state:
apache_beam/runners/worker/operations.py:950: in apache_beam.runners.worker.operations.DoOperation.process
    delayed_applications = self.dofn_runner.process(o)
apache_beam/runners/common.py:1503: in apache_beam.runners.common.DoFnRunner.process
    self._reraise_augmented(exn, windowed_value)
apache_beam/runners/common.py:1591: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise exn
apache_beam/runners/common.py:1501: in apache_beam.runners.common.DoFnRunner.process
    return self.do_fn_invoker.invoke_process(windowed_value)
apache_beam/runners/common.py:689: in apache_beam.runners.common.SimpleInvoker.invoke_process
    self.output_handler.handle_process_outputs(
apache_beam/runners/common.py:1686: in apache_beam.runners.common._OutputHandler.handle_process_outputs
    self._write_value_to_tag(tag, windowed_value, watermark_estimator)
apache_beam/runners/common.py:1799: in apache_beam.runners.common._OutputHandler._write_value_to_tag
    self.main_receivers.receive(windowed_value)
apache_beam/runners/worker/operations.py:262: in apache_beam.runners.worker.operations.SingletonElementConsumerSet.receive
    self.consumer.process(windowed_value)
apache_beam/runners/worker/operations.py:949: in apache_beam.runners.worker.operations.DoOperation.process
    with self.scoped_process_state:
apache_beam/runners/worker/operations.py:950: in apache_beam.runners.worker.operations.DoOperation.process
    delayed_applications = self.dofn_runner.process(o)
apache_beam/runners/common.py:1503: in apache_beam.runners.common.DoFnRunner.process
    self._reraise_augmented(exn, windowed_value)
apache_beam/runners/common.py:1591: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise exn
apache_beam/runners/common.py:1501: in apache_beam.runners.common.DoFnRunner.process
    return self.do_fn_invoker.invoke_process(windowed_value)
apache_beam/runners/common.py:689: in apache_beam.runners.common.SimpleInvoker.invoke_process
    self.output_handler.handle_process_outputs(
apache_beam/runners/common.py:1686: in apache_beam.runners.common._OutputHandler.handle_process_outputs
    self._write_value_to_tag(tag, windowed_value, watermark_estimator)
apache_beam/runners/common.py:1799: in apache_beam.runners.common._OutputHandler._write_value_to_tag
    self.main_receivers.receive(windowed_value)
apache_beam/runners/worker/operations.py:262: in apache_beam.runners.worker.operations.SingletonElementConsumerSet.receive
    self.consumer.process(windowed_value)
apache_beam/runners/worker/operations.py:949: in apache_beam.runners.worker.operations.DoOperation.process
    with self.scoped_process_state:
apache_beam/runners/worker/operations.py:950: in apache_beam.runners.worker.operations.DoOperation.process
    delayed_applications = self.dofn_runner.process(o)
apache_beam/runners/common.py:1503: in apache_beam.runners.common.DoFnRunner.process
    self._reraise_augmented(exn, windowed_value)
apache_beam/runners/common.py:1591: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise exn
apache_beam/runners/common.py:1501: in apache_beam.runners.common.DoFnRunner.process
    return self.do_fn_invoker.invoke_process(windowed_value)
apache_beam/runners/common.py:689: in apache_beam.runners.common.SimpleInvoker.invoke_process
    self.output_handler.handle_process_outputs(
apache_beam/runners/common.py:1686: in apache_beam.runners.common._OutputHandler.handle_process_outputs
    self._write_value_to_tag(tag, windowed_value, watermark_estimator)
apache_beam/runners/common.py:1799: in apache_beam.runners.common._OutputHandler._write_value_to_tag
    self.main_receivers.receive(windowed_value)
apache_beam/runners/worker/operations.py:262: in apache_beam.runners.worker.operations.SingletonElementConsumerSet.receive
    self.consumer.process(windowed_value)
apache_beam/runners/worker/operations.py:949: in apache_beam.runners.worker.operations.DoOperation.process
    with self.scoped_process_state:
apache_beam/runners/worker/operations.py:950: in apache_beam.runners.worker.operations.DoOperation.process
    delayed_applications = self.dofn_runner.process(o)
apache_beam/runners/common.py:1503: in apache_beam.runners.common.DoFnRunner.process
    self._reraise_augmented(exn, windowed_value)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1501: in apache_beam.runners.common.DoFnRunner.process
    return self.do_fn_invoker.invoke_process(windowed_value)
apache_beam/runners/common.py:690: in apache_beam.runners.common.SimpleInvoker.invoke_process
    windowed_value, self.process_method(windowed_value.value))
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/transforms/core.py:2086: in <lambda>
    wrapper = lambda x: [fn(x)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

chunks = [Chunk(content=Content(text='This is a simple test document. It has multiple'), id='068c0759-5b35-43ab-bc69-627418d6af...c splitting.'), id='2ecbf141-b400-4e75-a490-a3d4e8d8ddf4', index=2, metadata={'source': 'simple.txt'}, embedding=None)]

    def check_token_lengths(chunks):
      for chunk in chunks:
        # Verify each chunk's token length is within limits
        num_tokens = len(tokenizer.encode(chunk.content.text))
        if not num_tokens <= 10:
>         raise BeamAssertException(
              f"Chunk has {num_tokens} tokens, expected <= 10")
E         apache_beam.testing.util.BeamAssertException: Chunk has 12 tokens, expected <= 10 [while running 'assert_that/Match']

apache_beam/ml/rag/chunking/langchain_test.py:172: BeamAssertException
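
For context, the failing assertion counts tokens with tokenizer.encode, which by default includes the [CLS] and [SEP] special tokens for BERT tokenizers, so the reported count for a chunk is two higher than its content tokens. A small, hypothetical illustration of that count (the sample text is taken from the chunk repr above):

    # Hypothetical illustration (not part of the test): how the assertion above
    # measures chunk length with a BERT tokenizer.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    text = "This is a simple test document. It has multiple"
    with_special = tokenizer.encode(text)  # includes [CLS] and [SEP]
    content_only = tokenizer.encode(text, add_special_tokens=False)
    print(len(with_special), len(content_only))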

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

github-actions / Test Results

test_embeddings_with_inference_args (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 4s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_embeddings_with_inference_args>

    def test_embeddings_with_inference_args(self):
      model_name = DEFAULT_MODEL_NAME
    
      inference_args = {'convert_to_numpy': False}
      embedding_config = SentenceTransformerEmbeddings(
          model_name=model_name,
          columns=[test_query_column],
          inference_args=inference_args)
>     with beam.Pipeline() as pipeline:

apache_beam/ml/transforms/embeddings/huggingface_test.py:233: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

github-actions / Test Results

test_embeddings_with_read_artifact_location_0 (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 2s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

a = (<apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_embeddings_with_read_artifact_location_0>,)
kw = {}

    @wraps(func)
    def standalone_func(*a, **kw):
>       return func(*(a + p.args), **p.kwargs, **kw)

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/parameterized/parameterized.py:620: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/ml/transforms/embeddings/huggingface_test.py:155: in test_embeddings_with_read_artifact_location
    with beam.Pipeline() as p:
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

github-actions / Test Results

test_embeddings_with_read_artifact_location_1 (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 0s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

a = (<apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_embeddings_with_read_artifact_location_1>,)
kw = {}

    @wraps(func)
    def standalone_func(*a, **kw):
>       return func(*(a + p.args), **p.kwargs, **kw)

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/parameterized/parameterized.py:620: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/ml/transforms/embeddings/huggingface_test.py:155: in test_embeddings_with_read_artifact_location
    with beam.Pipeline() as p:
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
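
Note: the failure pattern is identical across these tests. transformers' get_init_context references init_empty_weights, a symbol it normally pulls in from the optional accelerate package, so the NameError most likely means accelerate is missing (or too old) in the py311-ml tox environment. A minimal sketch of that hypothesis, checking the environment directly (the helper name is illustrative, not part of the test suite):

    # Hedged sketch: assumes the NameError comes from transformers only binding
    # init_empty_weights when the optional `accelerate` package can be imported.
    import importlib.util

    def has_accelerate() -> bool:
        """Report whether the optional `accelerate` dependency is importable."""
        return importlib.util.find_spec("accelerate") is not None

    if __name__ == "__main__":
        if has_accelerate():
            from accelerate import init_empty_weights  # the symbol the traceback reports as undefined
            print("accelerate present:", init_empty_weights)
        else:
            print("accelerate missing: transformers model loading may raise "
                  "NameError: name 'init_empty_weights' is not defined")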

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest


@github-actions github-actions / Test Results

test_embeddings_with_read_artifact_location_2 (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 4s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

a = (<apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_embeddings_with_read_artifact_location_2>,)
kw = {}

    @wraps(func)
    def standalone_func(*a, **kw):
>       return func(*(a + p.args), **p.kwargs, **kw)

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/parameterized/parameterized.py:620: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/ml/transforms/embeddings/huggingface_test.py:155: in test_embeddings_with_read_artifact_location
    with beam.Pipeline() as p:
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
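
Note: the same load path can be exercised without a Beam pipeline, which is a quicker way to confirm whether the environment (rather than MLTransform itself) is at fault. A standalone sketch of the failing call chain (SentenceTransformer.__init__ -> AutoModel.from_pretrained); the checkpoint below is an assumption and not necessarily the DEFAULT_MODEL_NAME used by the test:

    # Standalone repro sketch, independent of Beam; any small
    # sentence-transformers checkpoint exercises the same code path.
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
    print(model.encode(["This is a test sentence."]).shape)  # e.g. (1, 384)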

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest


@github-actions github-actions / Test Results

test_embeddings_with_scale_to_0_1 (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 2s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_embeddings_with_scale_to_0_1>

    @unittest.skipIf(tft is None, 'Tensorflow Transform is not installed.')
    def test_embeddings_with_scale_to_0_1(self):
      model_name = DEFAULT_MODEL_NAME
      embedding_config = SentenceTransformerEmbeddings(
          model_name=model_name,
          columns=[test_query_column],
      )
>     with beam.Pipeline() as pipeline:

apache_beam/ml/transforms/embeddings/huggingface_test.py:133: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest


@github-actions github-actions / Test Results

test_sentence_transformer_embeddings (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 0s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_sentence_transformer_embeddings>

    def test_sentence_transformer_embeddings(self):
      model_name = DEFAULT_MODEL_NAME
      embedding_config = SentenceTransformerEmbeddings(
          model_name=model_name, columns=[test_query_column])
>     with beam.Pipeline() as pipeline:

apache_beam/ml/transforms/embeddings/huggingface_test.py:111: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
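
Note: until the environment ships whatever optional dependency transformers now expects, tests like these could in principle be gated the same way the suite already gates on tft and Pillow. A sketch of such a guard (names are illustrative; this is not what huggingface_test.py currently does):

    # Illustrative guard only, mirroring the suite's existing skipIf pattern;
    # it assumes the NameError is tied to the optional `accelerate` package.
    import importlib.util
    import unittest

    HAS_ACCELERATE = importlib.util.find_spec("accelerate") is not None

    @unittest.skipIf(
        not HAS_ACCELERATE,
        'accelerate is not installed; transformers model init raises NameError.')
    class GuardedEmbeddingsTest(unittest.TestCase):
        def test_environment_has_accelerate(self):
            self.assertTrue(HAS_ACCELERATE)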

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest


@github-actions github-actions / Test Results

test_sentence_transformer_image_embeddings (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 3s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1818: in _load_sbert_model
    module = module_class.load(module_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:98: in load
    return CLIPModel(model_name=input_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:18: in __init__
    self.model = transformers.CLIPModel.from_pretrained(model_name)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.clip.modeling_clip.CLIPModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_sentence_transformer_image_embeddings>

    @unittest.skipIf(Image is None, 'Pillow is not installed.')
    def test_sentence_transformer_image_embeddings(self):
      embedding_config = SentenceTransformerEmbeddings(
          model_name=IMAGE_MODEL_NAME,
          columns=[test_query_column],
          image_model=True)
      img = self.generateRandomImage(256)
>     with beam.Pipeline() as pipeline:

apache_beam/ml/transforms/embeddings/huggingface_test.py:299: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1818: in _load_sbert_model
    module = module_class.load(module_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:98: in load
    return CLIPModel(model_name=input_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:18: in __init__
    self.model = transformers.CLIPModel.from_pretrained(model_name)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.clip.modeling_clip.CLIPModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
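
Note: the image-model failures take a slightly different branch (CLIPModel.load rather than the Transformer module) but end at the same get_init_context call. A standalone sketch of that branch, assuming a public CLIP checkpoint stands in for IMAGE_MODEL_NAME:

    # Standalone sketch of the CLIP load path from the traceback above; the
    # checkpoint name and image size are assumptions for illustration only.
    import numpy as np
    from PIL import Image
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("sentence-transformers/clip-ViT-B-32")
    img = Image.fromarray(
        np.random.randint(0, 255, size=(256, 256, 3), dtype=np.uint8))
    print(model.encode([img]).shape)  # e.g. (1, 512)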

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest


@github-actions github-actions / Test Results

test_sentence_transformer_images_with_str_data_types (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 0s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1818: in _load_sbert_model
    module = module_class.load(module_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:98: in load
    return CLIPModel(model_name=input_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:18: in __init__
    self.model = transformers.CLIPModel.from_pretrained(model_name)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_sentence_transformer_images_with_str_data_types>

    def test_sentence_transformer_images_with_str_data_types(self):
      embedding_config = SentenceTransformerEmbeddings(
          model_name=IMAGE_MODEL_NAME,
          columns=[test_query_column],
          image_model=True)
      with self.assertRaises(TypeError):
>       with beam.Pipeline() as pipeline:

apache_beam/ml/transforms/embeddings/huggingface_test.py:320: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1818: in _load_sbert_model
    module = module_class.load(module_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:98: in load
    return CLIPModel(model_name=input_path)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/CLIPModel.py:18: in __init__
    self.model = transformers.CLIPModel.from_pretrained(model_name)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
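
This test, like the int-data-types test reported next, wraps pipeline construction in assertRaises(TypeError), expecting the element type check to fire. Because model loading now raises a NameError during DoFn setup, the expected TypeError never occurs and unittest records a failure instead of a pass. The self-contained illustration below shows that mechanism only; it is not the Beam test, and the message string is copied from the traceback purely for readability.

    import unittest

    class AssertRaisesMismatchDemo(unittest.TestCase):
        # Illustrative only: an unexpected NameError inside an
        # assertRaises(TypeError) block is not swallowed; it propagates
        # and the test is reported as failed rather than passing.
        def test_wrong_exception_type_propagates(self):
            with self.assertRaises(NameError):      # outer guard for this demo
                with self.assertRaises(TypeError):  # mirrors the Beam tests
                    raise NameError("name 'init_empty_weights' is not defined")

    if __name__ == "__main__":
        unittest.main()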

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

See this annotation in the file changed.

@github-actions github-actions / Test Results

test_sentence_transformer_with_int_data_types (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 0s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_sentence_transformer_with_int_data_types>

    def test_sentence_transformer_with_int_data_types(self):
      model_name = DEFAULT_MODEL_NAME
      embedding_config = SentenceTransformerEmbeddings(
          model_name=model_name, columns=[test_query_column])
      with self.assertRaises(TypeError):
>       with beam.Pipeline() as pipeline:

apache_beam/ml/transforms/embeddings/huggingface_test.py:185: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

See this annotation in the file changed.

@github-actions github-actions / Test Results

test_with_gcs_artifact_location_0 (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 1s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

a = (<apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_with_gcs_artifact_location_0>,)
kw = {}

    @wraps(func)
    def standalone_func(*a, **kw):
>       return func(*(a + p.args), **p.kwargs, **kw)

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/parameterized/parameterized.py:620: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/ml/transforms/embeddings/huggingface_test.py:200: in test_with_gcs_artifact_location
    with beam.Pipeline() as p:
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
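
The _0/_1/_2 suffixes on test_with_gcs_artifact_location come from parameterized.expand, which generates one test method per parameter tuple; the different model classes in the tracebacks (BertModel, MPNetModel) reflect the different model names supplied by those tuples. A hedged sketch of that expansion follows, using placeholder model names rather than the ones in the Beam test.

    from parameterized import parameterized
    import unittest

    class GcsArtifactLocationDemo(unittest.TestCase):
        # Placeholder parameters; the real test supplies its own model names
        # and artifact locations.
        @parameterized.expand([
            ("sentence-transformers/all-MiniLM-L6-v2",),
            ("sentence-transformers/all-mpnet-base-v2",),
            ("sentence-transformers/paraphrase-MiniLM-L3-v2",),
        ])
        def test_with_gcs_artifact_location(self, model_name):
            # Each tuple becomes test_with_gcs_artifact_location_0, _1, _2.
            self.assertTrue(model_name)

    if __name__ == "__main__":
        unittest.main()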

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

See this annotation in the file changed.

@github-actions github-actions / Test Results

test_with_gcs_artifact_location_1 (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 1s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

a = (<apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_with_gcs_artifact_location_1>,)
kw = {}

    @wraps(func)
    def standalone_func(*a, **kw):
>       return func(*(a + p.args), **p.kwargs, **kw)

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/parameterized/parameterized.py:620: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/ml/transforms/embeddings/huggingface_test.py:200: in test_with_gcs_artifact_location
    with beam.Pipeline() as p:
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.mpnet.modeling_mpnet.MPNetModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

See this annotation in the file changed.

@github-actions github-actions / Test Results

test_with_gcs_artifact_location_2 (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py311/build/srcs/sdks/python/pytest_py311-ml_no_xdist.xml [took 1s]
Raw output
NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1565: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError

During handling of the above exception, another exception occurred:

a = (<apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_with_gcs_artifact_location_2>,)
kw = {}

    @wraps(func)
    def standalone_func(*a, **kw):
>       return func(*(a + p.args), **p.kwargs, **kw)

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/parameterized/parameterized.py:620: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/ml/transforms/embeddings/huggingface_test.py:200: in test_with_gcs_artifact_location
    with beam.Pipeline() as p:
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:195: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:221: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:468: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:793: in _execute_bundle
    self._run_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1032: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1358: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1571: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1567: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1612: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:607: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:309: in __init__
    modules, self.module_kwargs = self._load_sbert_model(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/SentenceTransformer.py:1802: in _load_sbert_model
    module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:4333: in from_pretrained
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

cls = <class 'transformers.models.bert.modeling_bert.BertModel'>
is_quantized = False, _is_ds_init_called = False

    @classmethod
    def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
        # With deepspeed, we cannot initialize the model on meta device
        if is_deepspeed_zero3_enabled():
            init_contexts = [no_init_weights()]
            if not is_quantized and not _is_ds_init_called:
                logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
                init_contexts.extend(
                    [
                        deepspeed.zero.Init(config_dict_or_path=deepspeed_config()),
                        set_zero3_state(),
                    ]
                )
            elif is_quantized:
                init_contexts.append(set_quantized_state())
        else:
>           init_contexts = [no_init_weights(), init_empty_weights()]
E           NameError: name 'init_empty_weights' is not defined [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py311-ml/py311-ml/lib/python3.11/site-packages/transformers/modeling_utils.py:3736: NameError
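The NameError above originates in transformers' model-initialization path rather than in Beam itself: `init_empty_weights` is provided by the `accelerate` package, and transformers only imports that name when `is_accelerate_available()` returns True, so an environment with a recent transformers release but a missing or incompatible accelerate can reach `get_init_context` with the name undefined. The following is a minimal standalone sketch of that hypothesis, not part of the Beam test suite; the model name is an illustrative assumption. It exercises the same `AutoModel.from_pretrained` entry point that `SentenceTransformer` calls in the stack above.

# Minimal environment check for the NameError above (a sketch, not Beam code).
# Assumption: init_empty_weights comes from the `accelerate` package, so the
# failure points at a missing or incompatible accelerate install in the tox env.
import importlib.util


def has_accelerate() -> bool:
    # transformers only imports init_empty_weights when accelerate is importable.
    return importlib.util.find_spec("accelerate") is not None


def try_load(model_name: str = "sentence-transformers/all-MiniLM-L6-v2"):
    # Same entry point SentenceTransformer uses internally (see the trace above).
    # The model name here is a placeholder for whatever the failing test loads.
    from transformers import AutoModel
    return AutoModel.from_pretrained(model_name)


if __name__ == "__main__":
    print("accelerate importable:", has_accelerate())
    try:
        try_load()
        print("model loaded; meta-device init path is healthy")
    except NameError as e:
        # Expected in a broken environment:
        # NameError: name 'init_empty_weights' is not defined
        print("reproduced:", e)

If the check reproduces the error, pinning compatible transformers/accelerate versions in the py311-ml tox environment (or adding accelerate to it) is the likely fix; the exact constraint to use is not determined by this log.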

Check notice on line 0 in .github

github-actions / Test Results

73 skipped tests found

There are 73 skipped tests, see "Raw output" for the full list of skipped tests.
Raw output
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_deidentification
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_inspection
apache_beam.ml.gcp.naturallanguageml_test_it.NaturalLanguageMlTestIT ‑ test_analyzing_syntax
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_catalog_item
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_user_event
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_predict
apache_beam.ml.gcp.videointelligenceml_test_it.VideoIntelligenceMlTestIT ‑ test_label_detection_with_video_context
apache_beam.ml.gcp.visionml_test_it.VisionMlTestIT ‑ test_text_detection_with_language_hint
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_side_inputin_streaming
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling_large_model
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_pipeline
apache_beam.ml.inference.onnx_inference_it_test.OnnxInference ‑ test_onnx_run_inference_roberta_sentiment_classification
apache_beam.ml.inference.onnx_inference_test
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm_large_model
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn_v1_and_v2
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_imagenet_mobilenetv2
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification_large_model
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_regression
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_imagenet_image_segmentation
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification_large_model
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_with_weights_classification
apache_beam.ml.inference.tensorrt_inference_test
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_flower_image_classification
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_llm_text_classification
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch_large_model
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_single_batch
apache_beam.ml.inference.xgboost_inference_test
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_basic_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_batched_metadata_filter_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_condition_batching
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_empty_input
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_euclidean_distance_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_metadata_filter_leakage
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_no_metadata_restriction
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_ignore
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_update_fields
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_update
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_custom_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_id_embedding_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_defaults_with_args_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_language_connector
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_metadata_spec_and_conflicts
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_custom_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema_missing_embedding
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_streaming_default_schema
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_sentence_transformer_image_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_with_str_data_types

Check notice on line 0 in .github

github-actions / Test Results

486 tests found

There are 486 tests, see "Raw output" for the full list of tests.
Raw output
apache_beam.ml.anomaly.aggregations_test.AllVoteTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.AnyVoteTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.AverageScoreTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.LabelAggTestWithMissingOrError ‑ test_default
apache_beam.ml.anomaly.aggregations_test.MajorityVoteTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.MajorityVoteTest ‑ test_tie_breaker
apache_beam.ml.anomaly.aggregations_test.MaxScoreTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.ScoreAggTestWithMissingOrError ‑ test_default
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_0
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_1
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_2
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_3
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_0
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_1
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_2
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_3
apache_beam.ml.anomaly.detectors.iqr_test.IQRTest ‑ test_with_default_trackers
apache_beam.ml.anomaly.detectors.robust_zscore_test.RobustZScoreTest ‑ test_with_default_trackers
apache_beam.ml.anomaly.detectors.zscore_test.ZScoreTest ‑ test_with_custom_mean_tracker
apache_beam.ml.anomaly.detectors.zscore_test.ZScoreTest ‑ test_with_default_trackers
apache_beam.ml.anomaly.specifiable_test.TestClassAsArgument ‑ test_normal_class
apache_beam.ml.anomaly.specifiable_test.TestFunctionAsArgument ‑ test_lambda_function
apache_beam.ml.anomaly.specifiable_test.TestFunctionAsArgument ‑ test_normal_function
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_just_in_time_init
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_on_demand_and_just_in_time_init
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_on_demand_init
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_on_pickle
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_error_in_child
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_0
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_1
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_2
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_3
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_4
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_5
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_decorator_in_function_form
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_decorator_in_syntactic_sugar_form
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_0
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_1
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_2
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_3
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_on_unknown_spec_type
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_init_params_in_specifiable
apache_beam.ml.anomaly.specifiable_test.TestUncommonUsages ‑ test_double_specifiable
apache_beam.ml.anomaly.specifiable_test.TestUncommonUsages ‑ test_unspecifiable
apache_beam.ml.anomaly.thresholds_test.TestFixedThreshold ‑ test_apply_only
apache_beam.ml.anomaly.thresholds_test.TestQuantileThreshold ‑ test_apply_only
apache_beam.ml.anomaly.thresholds_test.TestQuantileThreshold ‑ test_quantile_tracker
apache_beam.ml.anomaly.transforms_test.TestAnomalyDetection ‑ test_multiple_detectors_without_aggregation
apache_beam.ml.anomaly.transforms_test.TestAnomalyDetection ‑ test_multiple_sub_detectors_with_aggregation
apache_beam.ml.anomaly.transforms_test.TestAnomalyDetection ‑ test_one_detector
apache_beam.ml.anomaly.transforms_test.TestOfflineDetector ‑ test_default_inference_fn
apache_beam.ml.anomaly.transforms_test.TestOfflineDetector ‑ test_run_inference_args
apache_beam.ml.anomaly.transforms_test.TestStatefulThresholdDoFn ‑ test_dofn_on_single_prediction
apache_beam.ml.anomaly.transforms_test.TestStatelessThresholdDoFn ‑ test_dofn_on_multiple_predictions
apache_beam.ml.anomaly.transforms_test.TestStatelessThresholdDoFn ‑ test_dofn_on_single_prediction
apache_beam.ml.anomaly.univariate.mad_test.MadTest ‑ test_default_tracker
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_with_float64_max
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_with_nan
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_without_nan
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_float64_max_0
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_float64_max_1
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_nan_0
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_nan_1
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_without_nan_0
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_without_nan_1
apache_beam.ml.anomaly.univariate.median_test.MedianTest ‑ test_custom_tracker
apache_beam.ml.anomaly.univariate.median_test.MedianTest ‑ test_default_tracker
apache_beam.ml.anomaly.univariate.median_test.MedianTest ‑ test_wrong_tracker
apache_beam.ml.anomaly.univariate.perf_test.PerfTest ‑ test_mean_perf
apache_beam.ml.anomaly.univariate.perf_test.PerfTest ‑ test_quantile_perf
apache_beam.ml.anomaly.univariate.perf_test.PerfTest ‑ test_stdev_perf
apache_beam.ml.anomaly.univariate.quantile_test.LandmarkQuantileTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.quantile_test.LandmarkQuantileTest ‑ test_with_nan
apache_beam.ml.anomaly.univariate.quantile_test.LandmarkQuantileTest ‑ test_without_nan
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_with_nan_0
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_with_nan_1
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_without_nan_0
apache_beam.ml.anomaly.univariate.stdev_test.LandmarkStdevTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.stdev_test.LandmarkStdevTest ‑ test_with_nan
apache_beam.ml.anomaly.univariate.stdev_test.LandmarkStdevTest ‑ test_without_nan
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_stdev_with_nan_0
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_stdev_with_nan_1
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_without_nan_0
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_without_nan_1
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_deidentification
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_inspection
apache_beam.ml.gcp.cloud_dlp_test.TestDeidentifyFn ‑ test_deidentify_called
apache_beam.ml.gcp.cloud_dlp_test.TestDeidentifyText ‑ test_exception_raised_when_no_config_is_provided
apache_beam.ml.gcp.cloud_dlp_test.TestInspectFn ‑ test_inspect_called
apache_beam.ml.gcp.cloud_dlp_test.TestInspectText ‑ test_exception_raised_then_no_config_provided
apache_beam.ml.gcp.naturallanguageml_test.NaturalLanguageMlTest ‑ test_document_source
apache_beam.ml.gcp.naturallanguageml_test_it.NaturalLanguageMlTestIT ‑ test_analyzing_syntax
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAICatalogItemTest ‑ test_CreateCatalogItem
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAICatalogItemTest ‑ test_ImportCatalogItems
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAIPredictTest ‑ test_Predict
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAIUserEventTest ‑ test_CreateUserEvent
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAIUserEventTest ‑ test_ImportUserEvents
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_catalog_item
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_user_event
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_predict
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideoWithContext_b64_content
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideoWithContext_bad_input
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_URIs
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_b64_content
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_bad_input
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_with_side_input_context
apache_beam.ml.gcp.videointelligenceml_test_it.VideoIntelligenceMlTestIT ‑ test_label_detection_with_video_context
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImageWithContext_URIs
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImageWithContext_bad_input
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_URI_with_side_input_context
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_URIs
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_URIs_large_batch
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_b64_content
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_bad_input
apache_beam.ml.gcp.visionml_test_it.VisionMlTestIT ‑ test_text_detection_with_language_hint
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_child_class_without_env_vars
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_counted_metrics
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_env_vars_set_correctly
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_failed_batches_counter_no_failures
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_forwards_batch_args
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_increment_failed_batches_counter
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_keyed_many_model_handlers_validation
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_keyed_model_handler_get_num_bytes
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_keyed_model_handler_multiple_models_get_num_bytes
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_handler_compatibility
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_evicts_correct_num_of_models_after_being_incremented
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_evicts_models
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_evicts_models_after_update
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_loads_shared_model
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_status_provides_valid_garbage_collection
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_status_provides_valid_tags
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_empty_side_input
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_inference_args
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_keyed_mps_incrementing_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_mps_nobatch_incrementing_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_multi_process_shared_incrementing_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_simple_examples
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_simple_examples_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_simple_examples_multi_process_shared_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_many_mhs_max_models_hint
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_many_model_handlers
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_many_model_handlers_metrics
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_maybe_keyed_examples
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_maybe_keyed_examples_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_keyed_examples_with_unkeyed_model_handler
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_keyed_pre_and_post_processing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_loads_different_models
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_loads_different_models_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_maybe_keyed_pre_and_post_processing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_metrics_with_custom_namespace
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_postprocessing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_postprocessing_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_postprocessing_multiple_fns
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_pre_and_post_processing_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_prebatched
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_prediction_result_with_model_id
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_preprocessing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_preprocessing_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_preprocessing_multiple_fns
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch_per_key_models
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch_per_key_models_split_cohort
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_does_garbage_collection
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_not_hit
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_on_inference_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_on_load_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_unkeyed_examples_with_keyed_model_handler
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_watch_file_pattern_keyword_arg_side_input_label
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_watch_file_pattern_side_input_label
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_iterable_side_input
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_iterable_side_input_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_side_inputin_streaming
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_runinference_loads_same_model_with_identifier_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_timing_metrics
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_unexpected_inference_args_passed
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_exception_on_load_model_override
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_exception_on_run_inference_override
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_normal_model_execution
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_repeated_requests_fail
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_works_on_retry
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling_large_model
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_pipeline
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_framework_detection_tensorflow
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_framework_detection_torch
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_predict_tensor
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_predict_tensor_with_inference_args
apache_beam.ml.inference.onnx_inference_it_test.OnnxInference ‑ test_onnx_run_inference_roberta_sentiment_classification
apache_beam.ml.inference.onnx_inference_test
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm_large_model
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn_v1_and_v2
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_imagenet_mobilenetv2
apache_beam.ml.inference.pytorch_inference_test.PytorchInferenceTestWithMocks ‑ test_load_model_args_keyed_tensor
apache_beam.ml.inference.pytorch_inference_test.PytorchInferenceTestWithMocks ‑ test_load_model_args_tensor
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_env_vars_set_correctly_keyed_tensor_handler
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_env_vars_set_correctly_tensor_handler
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_gpu_auto_convert_to_cpu
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_inference_torch_script_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_invalid_input_type
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_load_torch_script_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_gcs_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_gcs_model_control_batching
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_extra_inference_args
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_extra_inference_args_batching_args
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_extra_inference_args_large
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_large
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_simple
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_prediction_result_model_id_with_torch_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_prediction_result_model_id_with_torch_script_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_specify_torch_script_path_and_state_dict_path
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_torch_model_class_none
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_torch_model_state_dict_none
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_inference_runner_inference_args
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_namespace
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_num_bytes
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_custom
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_helper
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_keyed
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_keyed_dict_output
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_keyed_helper
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_multiple_tensor_features
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_multiple_tensor_features_dict_output
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_single_tensor_feature
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification_large_model
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_regression
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_bad_file_raises
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_bad_input_type_raises
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_custom_inference_fn
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_data_vectorized
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_env_vars_set_correctly_numpy
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_infer_too_many_rows_in_dataframe
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_num_bytes_numpy
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_joblib
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_custom_batching
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_custom_inference
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_dict_out
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_env_vars_set_correctly
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_joblib
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_large_model
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_with_keys
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pickled
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pickled_custom_batching
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pickled_large_model
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_predict_output
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_predict_output_dict
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_imagenet_image_segmentation
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification_large_model
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_with_weights_classification
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_load_model_exception
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_keyed_numpy
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_keyed_tensor
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_numpy
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_numpy_with_batch_size
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_numpy_with_large_model
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor_with_args
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor_with_batch_size
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor_with_large_model
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_env_vars_set_correctly_numpy
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_env_vars_set_correctly_tensor
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_load_model_args
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_load_model_with_args_and_custom_weights
apache_beam.ml.inference.tensorrt_inference_test
apache_beam.ml.inference.utils_test.WatchFilePatternTest ‑ test_emitting_singleton_output
apache_beam.ml.inference.utils_test.WatchFilePatternTest ‑ test_latest_file_by_timestamp_default_value
apache_beam.ml.inference.utils_test.WatchFilePatternTest ‑ test_latest_file_with_timestamp_after_pipeline_construction_time
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_flower_image_classification
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_llm_text_classification
apache_beam.ml.inference.vertex_ai_inference_test.ModelHandlerArgConditions ‑ test_exception_on_private_without_network
apache_beam.ml.inference.vertex_ai_inference_test.RetryOnClientErrorTest ‑ test_retry_on_client_error_negative
apache_beam.ml.inference.vertex_ai_inference_test.RetryOnClientErrorTest ‑ test_retry_on_client_error_positive
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch_large_model
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_single_batch
apache_beam.ml.inference.xgboost_inference_test
apache_beam.ml.rag.chunking.base_test.ChunkingTransformProviderTest ‑ test_chunking_transform
apache_beam.ml.rag.chunking.base_test.ChunkingTransformProviderTest ‑ test_custom_chunk_id_fn
apache_beam.ml.rag.chunking.base_test.ChunkingTransformProviderTest ‑ test_doesnt_override_get_text_splitter_transform
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_empty_document_field
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_empty_text
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_huggingface_tokenizer_splitter
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_invalid_document_field
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_invalid_text_splitter
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_multiple_metadata_fields
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_no_metadata_fields
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_recursive_splitter_no_overlap
apache_beam.ml.rag.embeddings.base_test.RAGBaseEmbeddingsTest ‑ test_adapter_input_conversion
apache_beam.ml.rag.embeddings.base_test.RAGBaseEmbeddingsTest ‑ test_adapter_input_conversion_missing_text_content
apache_beam.ml.rag.embeddings.base_test.RAGBaseEmbeddingsTest ‑ test_adapter_output_conversion
apache_beam.ml.rag.embeddings.huggingface_test.HuggingfaceTextEmbeddingsTest ‑ test_embedding_pipeline
apache_beam.ml.rag.embeddings.vertex_ai_test.VertexAITextEmbeddingsTest ‑ test_embedding_pipeline
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_basic_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_batched_metadata_filter_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_condition_batching
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_empty_input
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_euclidean_distance_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_invalid_query
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_metadata_filter_leakage
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_missing_embedding
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_no_metadata_restriction
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_ignore
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_update_fields
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_update
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_custom_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_id_embedding_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_defaults_with_args_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_language_connector
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_metadata_spec_and_conflicts
apache_beam.ml.rag.ingestion.base_test.VectorDatabaseBaseTest ‑ test_invalid_config
apache_beam.ml.rag.ingestion.base_test.VectorDatabaseBaseTest ‑ test_pipeline_integration
apache_beam.ml.rag.ingestion.base_test.VectorDatabaseBaseTest ‑ test_write_transform_creation
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_custom_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema_missing_embedding
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_streaming_default_schema
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_appends_transforms_to_process_handler_correctly
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_0
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_1
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_2
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_3
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_fail_for_non_global_windows_in_produce_mode
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_no_read_or_write_artifact_lcoation
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_on_dict
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_on_list_dict
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_on_multiple_columns_single_transform
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transforms_on_multiple_columns_multiple_transforms
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_mltransform_with_counter
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_non_ptransfrom_provider_class_to_mltransform
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_read_mode_with_transforms
apache_beam.ml.transforms.base_test.MLTransformDLQTest ‑ test_dlq_with_embeddings
apache_beam.ml.transforms.base_test.MLTransformDLQTest ‑ test_mltransform_with_dlq_and_extract_tranform_name
apache_beam.ml.transforms.base_test.TestImageEmbeddingHandler ‑ test_handler_with_dict_inputs
apache_beam.ml.transforms.base_test.TestImageEmbeddingHandler ‑ test_handler_with_non_dict_datatype
apache_beam.ml.transforms.base_test.TestImageEmbeddingHandler ‑ test_handler_with_non_image_datatype
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_mltransform_to_ptransform_wrapper
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_save_and_load_run_inference
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_save_run_inference
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_save_tft_process_handler
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_with_gcs_location_with_none_options
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_with_same_local_artifact_location
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_input_fn_normal
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_input_fn_on_list_inputs
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_output_fn_normal
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_output_fn_on_list_inputs
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_on_columns_not_exist_in_input_data
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_on_multiple_columns
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_batch_sizes
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_dict_inputs
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_incompatible_datatype
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_inconsistent_keys
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_list_data
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_no_columns_or_type_adapter
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_inference_args
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_read_artifact_location_0
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_read_artifact_location_1
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_read_artifact_location_2
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_mltransform_to_ptransform_with_sentence_transformer
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_embeddings
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_image_embeddings
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_images_with_str_data_types
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_with_int_data_types
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_with_gcs_artifact_location_0
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_with_gcs_artifact_location_1
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_with_gcs_artifact_location_2
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_sentence_transformer_image_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_with_str_data_types
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_mltransform_to_ptransform_with_vertex
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_vertex_ai_text_embeddings
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_with_gcs_artifact_location
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIImageEmbeddingsTest ‑ test_improper_dimension
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIImageEmbeddingsTest ‑ test_vertex_ai_image_embedding
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_consume_mode_with_extra_columns_in_the_input
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_handler_with_same_input_elements
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_named_tuple_pcoll_numpy
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_row_type_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_row_type_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_schema_named_tuple_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_schema_named_tuple_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_dtypes
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_numpy_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_primitive_native_container_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_primitive_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_primitive_types_in_containers
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_union_type_in_single_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_operation_preprocessing_fn_0
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_operation_preprocessing_fn_1
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_default_transform_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_transformed_data_schema
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_unused_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_verify_artifacts
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_consume_mode_with_extra_columns_in_the_input
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_handler_with_same_input_elements
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_named_tuple_pcoll_numpy
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_row_type_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_row_type_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_schema_named_tuple_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_schema_named_tuple_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_dtypes
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_numpy_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_primitive_native_container_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_primitive_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_primitive_types_in_containers
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_union_type_in_single_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_operation_preprocessing_fn_0
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_operation_preprocessing_fn_1
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_default_transform_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_transformed_data_schema
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_unused_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_verify_artifacts
apache_beam.ml.transforms.tft_test.ApplyBucketsTest ‑ test_apply_buckets_0
apache_beam.ml.transforms.tft_test.ApplyBucketsTest ‑ test_apply_buckets_1
apache_beam.ml.transforms.tft_test.ApplyBucketsWithInterpolationTest ‑ test_apply_buckets_0
apache_beam.ml.transforms.tft_test.ApplyBucketsWithInterpolationTest ‑ test_apply_buckets_1
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_by_splitting_input_text
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_list_seperated_words_custom_ngrams
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_list_seperated_words_default_ngrams
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_numpy_data
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_count_per_key_on_list
apache_beam.ml.transforms.tft_test.BucketizeTest ‑ test_bucketize
apache_beam.ml.transforms.tft_test.BucketizeTest ‑ test_bucketize_list
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_compute_and_apply_vocabulary
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_compute_and_apply_vocabulary_inputs
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_multiple_columns_with_default_vocab_name
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_multiple_columns_with_vocab_name
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_string_split_with_multiple_delimiters
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_string_split_with_single_delimiter
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_with_basic_example_list
apache_beam.ml.transforms.tft_test.DeduplicateTensorPerRowTest ‑ test_deduplicate
apache_beam.ml.transforms.tft_test.DeduplicateTensorPerRowTest ‑ test_deduplicate_different_output_sizes
apache_beam.ml.transforms.tft_test.DeduplicateTensorPerRowTest ‑ test_deduplicate_no_op
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_multi_bucket_one_string
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_multi_buckets_multi_string
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_one_bucket_multi_string
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_single_bucket
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_two_bucket_multi_string
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_ngrams_on_list_separated_words
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_ngrams_on_list_separated_words_default_args
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_with_multiple_string_delimiters
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_with_string_split_delimiter
apache_beam.ml.transforms.tft_test.ScaleTo01Test ‑ test_ScaleTo01
apache_beam.ml.transforms.tft_test.ScaleTo01Test ‑ test_ScaleTo01_list
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_list_skewed_distribution
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_list_uniform_distribution
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_skewed
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_uniform
apache_beam.ml.transforms.tft_test.ScaleToMinMaxTest ‑ test_fail_max_value_less_than_min
apache_beam.ml.transforms.tft_test.ScaleToMinMaxTest ‑ test_scale_to_min_max
apache_beam.ml.transforms.tft_test.ScaleZScoreTest ‑ test_z_score
apache_beam.ml.transforms.tft_test.ScaleZScoreTest ‑ test_z_score_list_data
apache_beam.ml.transforms.tft_test.TFIDIFTest ‑ test_tfidf_compute_vocab_size_during_runtime