Sampling functions
Sampling functions for model inference.
Generation
get_batch_samples
def get_batch_samples(
samples:int, auto_batch_size:int=512
)->list:
batched_sampling
def batched_sampling(
pipeline:Pipeline, cond_kwargs:dict, samples:int, system_size:int, num_of_qubits:int, max_gates:int, g:float=1.0,
init_latents:Optional=None, no_bar:bool=True, unique:bool=False, auto_batch_size:int=512,
enable_params:bool=True, reduce_spatial:bool=True, return_predicted_x0:bool=False
):
e.g. cond_kwargs.keys = {"c", "micro_cond", "negative_c", "U"}
prepare_prompts
def prepare_prompts(
pipeline:Pipeline, prompt:Union, negative_prompt:Union=None
):
Task specific sampling
generate_tensors
def generate_tensors(
pipeline:Pipeline, prompt:Union, samples:int, system_size:int, num_of_qubits:int, max_gates:int, g:float=1.0,
init_latents:Optional=None, no_bar:bool=True, unique:bool=False, auto_batch_size:int=512,
enable_params:bool=False, reduce_spatial:bool=True, return_predicted_x0:bool=False, negative_prompt:Union=None,
micro_cond:Optional=None
)->Tensor:
generate_compilation_tensors
def generate_compilation_tensors(
pipeline:Pipeline, prompt:Union, U:Tensor, samples:int, system_size:int, num_of_qubits:int, max_gates:int,
g:float=1.0, tensor_prod_pad:bool=True, init_latents:Optional=None, no_bar:bool=True, unique:bool=False,
auto_batch_size:int=512, enable_params:bool=True, reduce_spatial:bool=True, return_predicted_x0:bool=False,
negative_prompt:Union=None, negative_u:Optional=None, micro_cond:Optional=None
)->Tensor:
Samples tensor encodings from the DM for the given sample parameters.
What kind of unitary padding we have depends on what we used for model training, so it depends on the concrete model weights.
Conversion
decode_tensors_to_backend
def decode_tensors_to_backend(
simulator:Simulator, tokenizer:BaseTokenizer, tensors:Tensor, params:Optional=None, silent:bool=True,
n_jobs:int=1, filter_errs:bool=True, return_tensors:bool=False
)->tuple[Sequence[Any], int] | tuple[Sequence[Any], int, Tensor]: