AutoQuant for a pretrained segmentation model #3726

Open
zain72ali opened this issue Jan 13, 2025 · 0 comments
I'm able to test the baseline accuracy of the pre-quantized model; however, when applying AutoQuant, I get the following error:

```
ValueError: in user code:

    File "/tmp/ipykernel_1850/3844288644.py", line 30, in eval_callback  *
        sess.run(tf.compat.v1.global_variables_initializer())

    ValueError: Argument `fetch` = name: "init"
    op: "NoOp"
    device: "/device:CPU:0"
     cannot be interpreted as a Tensor. (Operation name: "init"
    op: "NoOp"
    device: "/device:CPU:0"
     is not an element of this graph.)
```
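My suspicion, for what it's worth: the `@tf.function` decorator traces `eval_callback` into its own `FuncGraph`, so the `init` op created by `tf.compat.v1.global_variables_initializer()` is built in that traced graph rather than in `sess.graph`, which would explain the "is not an element of this graph" message. A minimal sketch of the change I'm considering, assuming the decorator is indeed the culprit (drop it, and build the initializer inside the session's graph):

```python
# Minimal sketch, assuming the @tf.function decorator is the problem:
# keep the callback a plain Python function so every tf.compat.v1 op
# is created in the graph the session actually owns.
def eval_callback(sess: tf.compat.v1.Session, num_samples=None) -> float:
    with sess.graph.as_default():
        # The "init" NoOp is now an element of sess.graph, so sess.run can fetch it.
        sess.run(tf.compat.v1.global_variables_initializer())
    # ... rest of the evaluation loop unchanged ...
```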

Below is the relevant code snippet. My current setup is WSL2 with an NVIDIA CUDA-enabled GPU.

```python
import numpy as np
import tensorflow as tf
from typing import Optional

from aimet_tensorflow.auto_quant import AutoQuant


def preprocess_image(image_path, pipeline):
    """Loads and preprocesses a single image."""
    image = tf.io.read_file(image_path)
    image = tf.image.decode_image(image, dtype=tf.float32)
    image /= pipeline.MAX_PIXEL_VALUE
    return image


def prepare_unlabeled_dataset(pipeline):
    """Prepare an unlabeled dataset from the image directory."""
    image_paths, _ = pipeline.load_data()
    dataset = tf.data.Dataset.from_tensor_slices(image_paths)
    # Map the paths to the actual preprocessed images
    dataset = dataset.map(lambda path: preprocess_image(path, pipeline),
                          num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.batch(pipeline.batch_size)
    return dataset


@tf.function
def eval_callback(sess: tf.compat.v1.Session, num_samples: Optional[int] = None) -> float:
    """Evaluate the segmentation model using a TensorFlow session."""
    if num_samples is None:
        num_samples = len(pipeline.load_data()[0])

    # Note: because the dataset is batched, take() counts batches here, not samples.
    sampled_dataset = iter(prepare_unlabeled_dataset(pipeline).take(num_samples))

    # This is the line the traceback above points at.
    sess.run(tf.compat.v1.global_variables_initializer())
    input_tensor = sess.graph.get_tensor_by_name(pipeline.model.input.name)
    output_tensor = sess.graph.get_tensor_by_name(pipeline.model.output.name)

    preds = []
    for images in sampled_dataset:
        # Run inference
        predictions = sess.run(output_tensor, feed_dict={input_tensor: images})
        preds.append(predictions)

    # Calculate evaluation score (concatenate handles a smaller final batch)
    preds = np.concatenate([p.ravel() for p in preds])
    truths = np.ones_like(preds)  # Placeholder since dataset is unlabeled
    accuracy = np.mean((preds > pipeline.TH_FIRE) == truths)
    return accuracy


# Prepare unlabeled dataset
unlabeled_dataset = prepare_unlabeled_dataset(pipeline)

# Create AutoQuant object
auto_quant = AutoQuant(
    allowed_accuracy_drop=0.01,  # Allowable accuracy drop
    unlabeled_dataset=unlabeled_dataset,
    eval_callback=eval_callback,
)

# Starting and output operation names (strip the ":0" tensor suffix)
starting_op_names = [pipeline.model.input.name.split(":")[0]]
output_op_names = [pipeline.model.output.name.split(":")[0]]

# Apply AutoQuant
sess, accuracy, encoding_path = auto_quant.apply(
    tf.compat.v1.keras.backend.get_session(),
    starting_op_names=starting_op_names,
    output_op_names=output_op_names,
)

print(f"Optimized Model Accuracy: {accuracy}")
print(f"Encoding Path: {encoding_path}")
```