Die analysis

Now we will run an analysis using the device data we uploaded in the previous notebook.

As before, make sure the following environment variables are set in your shell or in a .env file:

GDSFACTORY_HUB_API_URL="https://{org}.gdsfactoryhub.com"
GDSFACTORY_HUB_QUERY_URL="https://query.{org}.gdsfactoryhub.com"
GDSFACTORY_HUB_KEY="<your-gdsfactoryplus-api-key>"
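
If you keep these variables in a .env file, you can load and verify them before creating the client. The following is a minimal sketch using python-dotenv (an optional dependency; create_client_from_env may already read your environment directly):

import os

from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # copy variables from a local .env file into os.environ

required = ("GDSFACTORY_HUB_API_URL", "GDSFACTORY_HUB_QUERY_URL", "GDSFACTORY_HUB_KEY")
missing = [name for name in required if not os.environ.get(name)]
if missing:
    raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")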

import getpass

from tqdm.auto import tqdm

import gdsfactoryhub as gfh
from gdsfactoryhub import FunctionTargetModel
from gdsfactoryhub.schemas import AnalysisExecutionRequest

project_id = f"cutback-{getpass.getuser()}"
client = gfh.create_client_from_env(project_id=project_id)
api = client.api()  # REST client used to upload functions and start analyses
query = client.query()  # query client used to read project data

Die analysis

You can trigger an analysis automatically by defining it in the design manifest, through the UI, or with the Python DoData library.

from gdsfactoryhub.functions.die import cutback_loss

# Inspect the signature and docstring of the analysis function (IPython help syntax):
cutback_loss.run?

# Collect the primary keys of all dies in the project:
die_pks = [d["pk"] for d in query.dies().execute().data]

# Run the analysis locally on a single die to sanity-check it before uploading:
cutback_loss.run(die_pkey=die_pks[0])
# Replace any previously uploaded version of the function with the current one.
# test_target_model_pk points at a die used to validate the function on upload.
result = api.delete_function(function_id="cutback_loss")
result = api.upload_function(
    function_id="cutback_loss",
    target_model="die",
    file=gfh.get_module_path(cutback_loss),
    test_target_model_pk=die_pks[0],
    test_kwargs={},
)
# start_analysis only triggers the analysis task; it does not wait for the task
# to finish, so we collect the task ids to track completion later.
task_ids = []
for die_pk in (pb := tqdm(die_pks)):
    pb.set_postfix(die_pk=die_pk)
    task_id = api.start_analysis(
        analysis_id=f"die_cutback_{die_pk}",
        function_id="cutback_loss",
        target_model="die",
        target_model_pk=die_pk,
        kwargs={},
    )
    task_ids.append(task_id)
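
Because start_analysis returns immediately, the tasks run in the background. If you want to block until they all finish, you can poll their status with the collected task ids. Note that api.get_task in the sketch below is a hypothetical placeholder; substitute the actual task-status call exposed by your gdsfactoryhub client.

import time


def wait_for_tasks(task_ids: list, poll_interval: float = 5.0, timeout: float = 600.0) -> set:
    """Poll until all tasks finish; returns any task ids still pending at timeout."""
    pending = set(task_ids)
    deadline = time.time() + timeout
    while pending and time.time() < deadline:
        for task_id in list(pending):
            # Hypothetical status call: replace with the real task-status endpoint.
            status = api.get_task(task_id=task_id)["status"]
            if status in ("done", "failed"):
                pending.discard(task_id)
        time.sleep(poll_interval)
    return pending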
Instead of starting tasks one by one, you can build explicit AnalysisExecutionRequest objects and submit them in batches:

def make_analysis_request(die_pk: str) -> AnalysisExecutionRequest:
    return AnalysisExecutionRequest(
        analysis_id=f"die-cutback-{die_pk}",
        function_id="cutback_loss",
        target_model=FunctionTargetModel.DIE,
        target_model_pk=die_pk,
        parameters={},
    )


analyses_requests = [make_analysis_request(die_pk=die_pk) for die_pk in die_pks]
analyses_pks = client.utils().analyses().run_analyses_in_batches(
    analyses_requests=analyses_requests
)
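
Once the batch has completed, you can read the results back with the query client. The snippet below assumes the query client exposes an analyses() accessor analogous to the dies() accessor used above; adjust the call if the actual API differs.

# Assumption: query.analyses() mirrors the query.dies() accessor used earlier.
analyses = query.analyses().execute().data
print(f"Retrieved {len(analyses)} analyses")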