Autoencoders are neural networks designed for unsupervised learning and dimensionality reduction. They learn to compress data into a lower-dimensional representation and then reconstruct it back to its original form, making them useful for feature learning, denoising, and anomaly detection.
Autoencoders are built on several key concepts that enable them to effectively learn data representations.
The structure of an Autoencoder consists of three parts: an encoder that maps the input to a lower-dimensional representation, a bottleneck (latent space) that holds the compressed code, and a decoder that reconstructs the original input from that code.
The main operations in Autoencoders include encoding (compressing the input into the latent representation), decoding (reconstructing the input from the latent representation), and training by minimizing a reconstruction loss between the input and its reconstruction.
import tensorflow as tf
from tensorflow.keras import layers, models
def create_autoencoder(input_shape, encoding_dim):
    """Build a dense autoencoder plus standalone encoder and decoder models.

    Args:
        input_shape: Tuple describing one (flat) input sample, e.g. ``(784,)``.
            Assumes a 1-D input — ``input_shape[0]`` is used as the output width.
        encoding_dim: Size of the latent (bottleneck) representation.

    Returns:
        A tuple ``(autoencoder, encoder, decoder)`` of ``keras.Model`` objects.
        The three models share weights: after training the autoencoder,
        ``decoder(encoder(x))`` reproduces ``autoencoder(x)``.
    """
    # Encoder: 128 -> 64 -> encoding_dim
    encoder_input = layers.Input(shape=input_shape)
    x = layers.Dense(128, activation='relu')(encoder_input)
    x = layers.Dense(64, activation='relu')(x)
    encoded = layers.Dense(encoding_dim, activation='relu')(x)

    # Decoder layers are instantiated ONCE and reused below, so the
    # standalone decoder shares (trained) weights with the autoencoder.
    # (Creating fresh Dense layers for the decoder model would give it
    # untrained random weights, making it useless after training.)
    decode_hidden_1 = layers.Dense(64, activation='relu')
    decode_hidden_2 = layers.Dense(128, activation='relu')
    # sigmoid output assumes inputs are scaled to [0, 1]
    decode_output = layers.Dense(input_shape[0], activation='sigmoid')

    decoded = decode_output(decode_hidden_2(decode_hidden_1(encoded)))

    # Full autoencoder: input -> latent -> reconstruction
    autoencoder = models.Model(encoder_input, decoded)

    # Encoder: input -> latent code
    encoder = models.Model(encoder_input, encoded)

    # Decoder: latent code -> reconstruction, reusing the same layers
    decoder_input = layers.Input(shape=(encoding_dim,))
    decoder_output = decode_output(decode_hidden_2(decode_hidden_1(decoder_input)))
    decoder = models.Model(decoder_input, decoder_output)

    return autoencoder, encoder, decoder
# Example usage: build and inspect an autoencoder for flattened 28x28 images.
input_shape = (784,) # For MNIST-like data
encoding_dim = 32 # Latent space dimension
autoencoder, encoder, decoder = create_autoencoder(input_shape, encoding_dim)
# binary_crossentropy pairs with the sigmoid output layer; it assumes
# pixel values are normalized to [0, 1].
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()