[
	
		
{
	"event_id": "973453",
	"eventinstance_id": "3756072",
	"calendar": {
		"id": 3519,
		"title": "CS/CRCV Seminars",
		"slug": "cscrcv-seminars",
		"url": "https://events.ucf.edu/calendar/3519/cscrcv-seminars/"
	},
	"id": "3756072",
	"title": "Understanding Distribution Learning of Diffusion Models via Low\u002DDimensional Modeling",
	"subtitle": null,
	"description": "\u003Cp\u003E\u003Cstrong\u003ESpeaker\u003C/strong\u003E: Dr. Peng Wang\u003C/p\u003E\u000A\u003Cp\u003E\u003Cstrong\u003EFrom\u003C/strong\u003E: University of Michigan\u003C/p\u003E\u000A\u003Cp\u003E\u003Cstrong\u003EAbstract\u003C/strong\u003E\u003C/p\u003E\u000A\u003Cp\u003ERecent empirical studies have demonstrated that diffusion models can effectively learn the image distribution and generate new samples. Remarkably, these models can achieve this even with a small number of training samples despite a large image dimension, circumventing the curse of dimensionality. In this work, we provide theoretical insights into this phenomenon by leveraging key empirical observations: (i) the low intrinsic dimensionality of image datasets and (ii) the low\u002Drank property of the denoising autoencoder in trained diffusion models. These observations motivate us to assume the underlying data distribution as a mixture of low\u002Drank Gaussians and to parameterize the denoising autoencoder as a low\u002Drank model. With these setups, we rigorously show that optimizing the training loss of diffusion models is equivalent to solving the canonical subspace clustering problem over the training samples. This insight carries practical implications for training and controlling diffusion models. Specifically, it allows us to characterize precisely the minimal number of samples necessary for learning correctly the low\u002Drank data support, shedding light on the phase transition from memorization to generalization. Moreover, we empirically establish a correspondence between the subspaces and the semantic representations of image data, facilitating image editing. We validate these results with corroborated experimental results on both simulated distributions and image datasets.\u003Cstrong\u003E\u0026nbsp\u003B\u003C/strong\u003E\u003C/p\u003E\u000A\u003Cp\u003E\u003Cspan\u003EFor more info, please follow this\u003C/span\u003E\u003Cspan\u003E \u003Ca href\u003D\u0022https://ai.ucf.edu/wp\u002Dcontent/uploads/2025/03/Peng\u002DWang\u002DFlyer.pdf\u0022 target\u003D\u0022_blank\u0022\u003Elink\u003C/a\u003E.\u003C/span\u003E\u003C/p\u003E",
	"location": "TC2: 222",
	"location_url": "https://www.ucf.edu/location/technology\u002Dcommons\u002Dii/",
	"virtual_url": "https://ucf.zoom.us/j/91346028171?pwd\u003DBvx6dtQb8bHq6jTRIGRzyMahScl92k.1\u0026from\u003Daddon",
	"registration_link": null,
	"registration_info": null,
	"starts": "Thu, 13 Mar 2025 11:00:00 -0400",
	"ends": "Thu, 13 Mar 2025 12:00:00 -0400",
	"ongoing": false,
	"category": "Speaker/Lecture/Seminar",
	"tags": ["UCFCRCV"],
	"contact_name": "Cherry Place",
	"contact_phone": null,
	"contact_email": "cherry@crcv.ucf.edu",
	"url": "https://events.ucf.edu/event/3756072/understanding-distribution-learning-of-diffusion-models-via-low-dimensional-modeling/"
}

	
]
