@inproceedings{9d3bdd2396dd455dacd4ed7c747b8d8b,
  author    = {Laakom, Firas and Raitoharju, Jenni and Iosifidis, Alexandros and Gabbouj, Moncef},
  title     = {Learning Distinct Features Helps, Provably},
  abstract  = {We study the diversity of the features learned by a two-layer neural network trained with the least squares loss. We measure the diversity by the average {$L_2$}-distance between the hidden-layer features and theoretically investigate how learning non-redundant distinct features affects the performance of the network. To do so, we derive novel generalization bounds depending on feature diversity based on Rademacher complexity for such networks. Our analysis proves that more distinct features at the network{\textquoteright}s units within the hidden layer lead to better generalization. We also show how to extend our results to deeper networks and different losses.},
  keywords  = {Feature Diversity, Generalization Theory, Neural Networks},
  editor    = {Koutra, Danai and Plant, Claudia and {Gomez Rodriguez}, Manuel and Baralis, Elena and Bonchi, Francesco},
  booktitle = {Machine Learning and Knowledge Discovery in Databases: Research Track},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  pages     = {206--222},
  year      = {2023},
  doi       = {10.1007/978-3-031-43415-0_13},
  isbn      = {978-3-031-43414-3},
  language  = {English},
}