{ "cells": [
 { "cell_type": "markdown", "id": "ec6d7909", "metadata": {}, "source": [ "# Get activations from a foveated model\n", "\n", "Here we will demonstrate two methods for getting activations. The first uses the trainer's built-in `compute_activations` method; the second, sketched at the end of the notebook, hooks into the model directly.\n", "\n", "Let's load a pre-trained model." ] },
 { "cell_type": "code", "execution_count": null, "id": "e5424f0f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Model with base_fn fovi-dinov3-splus_a-2.78_res-64_in1k not found in ../models\n", "Attempting to download fovi-dinov3-splus_a-2.78_res-64_in1k from HuggingFace Hub...\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "e4568a95faa74c9abce173223af7a53a", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Fetching 5 files:   0%|          | 0/5 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Logging in /home/nblauch/data/fovi/logs/None\n", "HydraConfig was not set\n", "skipping hydra directory copying\n", "Training backbone: True\n" ] } ], "source": [ "import torch\n", "\n", "from fovi import get_trainer_from_base_fn\n", "from fovi.paths import DATASETS_DIR\n", "\n", "# base_fn = 'fovi-alexnet_a-1_res-64_rfmult-2_in1k'\n", "base_fn = 'fovi-dinov3-splus_a-2.78_res-64_in1k'\n", "# edit these paths to point to your ImageNet-1K FFCV files\n", "# in general, any kwarg you pass in will be used to update the loaded config file\n", "kwargs = {\n", "    'data.train_dataset': f'{DATASETS_DIR}/ffcv/imagenet/train_compressed.ffcv',\n", "    'data.val_dataset': f'{DATASETS_DIR}/ffcv/imagenet/val_compressed.ffcv',\n", "}\n", "trainer = get_trainer_from_base_fn(base_fn, load=True, model_dirs=['../models'], **kwargs)\n" ] },
 { "cell_type": "code", "execution_count": 7, "id": "5d06ca4d", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "  1%|█▎        | 3/391 [00:10<22:36,  3.50s/it]\n" ] } ], "source": [ "outputs, activations, targets = trainer.compute_activations(trainer.val_loader, layer_names=['backbone.layers.3', 'backbone', 'projector'], max_batches=4, do_postproc=True)" ] },
 { "cell_type": "code", "execution_count": 8, "id": "db8c1b13", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'backbone.layers.3': (512, 20, 1, 384),\n", " 'backbone': (512, 20, 1, 384),\n", " 'projector': (512, 20, 1024)}" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "{k: v.shape for k, v in activations.items()}" ] },
 { "cell_type": "markdown", "id": "86367c8f", "metadata": {}, "source": [ "Note that we also now have the network outputs, which have been aggregated over fixations (since we passed `do_postproc=True`, which applies the fixation aggregator head). A simple manual way to aggregate the raw layer activations over fixations is sketched at the end of the notebook." ] },
 { "cell_type": "code", "execution_count": 9, "id": "d79b820f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(512, 1000)" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "outputs.shape" ] },
 { "cell_type": "markdown", "id": "a7a916a2", "metadata": {}, "source": [ "We can quickly check our top-1 accuracy (note: this is a noisy estimate, since we only used a small number of batches)." ] },
 { "cell_type": "code", "execution_count": 10, "id": "ed4a521c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor(0.7305, device='cuda:0')" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "trainer.val_meters['top_1_val'](torch.tensor(outputs), torch.tensor(targets))" ] }
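,
 { "cell_type": "markdown", "id": "f3a91c20", "metadata": {}, "source": [ "Equivalently, since the aggregated outputs are just per-image logits over the 1000 classes, we can compute top-1 accuracy by hand. This is a minimal sketch that assumes `targets` holds integer class indices, as the meter call above implies." ] },
 { "cell_type": "code", "execution_count": null, "id": "f3a91c21", "metadata": {}, "outputs": [], "source": [ "# top-1 accuracy by hand: fraction of argmax predictions that match the targets\n", "preds = torch.tensor(outputs).argmax(dim=1)\n", "targets_t = torch.tensor(targets).reshape(-1)  # flatten in case of a trailing singleton dim\n", "(preds == targets_t).float().mean()" ] }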
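,
 { "cell_type": "markdown", "id": "f3a91c22", "metadata": {}, "source": [ "The second method mentioned at the top is to hook into the model directly with standard PyTorch forward hooks, bypassing `compute_activations`. The cell below is only a sketch and makes several assumptions you may need to adapt: that the trainer exposes the underlying network as `trainer.model` (a hypothetical attribute name), that the layer names used above resolve via `get_submodule` and return a single tensor, that a validation batch is an `(images, targets, ...)` tuple already on the model's device, and that the model's forward accepts images alone." ] },
 { "cell_type": "code", "execution_count": null, "id": "f3a91c23", "metadata": {}, "outputs": [], "source": [ "# sketch of hook-based activation extraction; see the assumptions noted above\n", "layer_names = ['backbone.layers.3', 'projector']\n", "captured = {}\n", "\n", "def make_hook(name):\n", "    def hook(module, inputs, output):\n", "        # assumes the hooked module returns a single tensor; detach so no autograd graph is kept\n", "        captured[name] = output.detach().cpu()\n", "    return hook\n", "\n", "model = trainer.model  # assumed attribute name; check your trainer\n", "handles = [model.get_submodule(name).register_forward_hook(make_hook(name)) for name in layer_names]\n", "\n", "model.eval()\n", "with torch.no_grad():\n", "    images = next(iter(trainer.val_loader))[0]  # assumes (images, targets, ...) batches\n", "    _ = model(images)\n", "\n", "for h in handles:\n", "    h.remove()\n", "\n", "{k: v.shape for k, v in captured.items()}" ] }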
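,
 { "cell_type": "markdown", "id": "f3a91c24", "metadata": {}, "source": [ "Finally, whichever method you use, the raw layer activations come back per fixation. A simple (if cruder) alternative to the learned fixation aggregator is to average them over fixations yourself. The sketch below assumes the second dimension of the activation arrays indexes fixations, as the shapes above suggest." ] },
 { "cell_type": "code", "execution_count": null, "id": "f3a91c25", "metadata": {}, "outputs": [], "source": [ "# average backbone activations over the (assumed) fixation dimension to get\n", "# one feature vector per image: (images, fixations, 1, features) -> (images, features)\n", "backbone_acts = torch.as_tensor(activations['backbone'])\n", "per_image = backbone_acts.squeeze(2).mean(dim=1)\n", "per_image.shape" ] }
 ], "metadata": { "kernelspec": { "display_name": "Python (fovi)", "language": "python", "name": "fovi" }, "language_info": {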
"codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.25" } }, "nbformat": 4, "nbformat_minor": 5 }