dimred.bib

@comment{{This file has been generated by bib2bib 1.97}}
@comment{{Command line: bib2bib ../bibli.bib -c 'subject:"dimred" or keywords:"dimred"' -ob tmp.bib}}
@article{Balasubramanian2002isomap,
  author = {Balasubramanian, M. and Schwartz, E. L.},
  title = {The isomap algorithm and topological stability},
  journal = {Science},
  year = {2002},
  volume = {295},
  pages = {7},
  number = {5552},
  month = {Jan},
  doi = {10.1126/science.295.5552.7a},
  pdf = {../local/Balasubramanian2002isomap.pdf},
  file = {Balasubramanian2002isomap.pdf:local/Balasubramanian2002isomap.pdf:PDF},
  keywords = {dimred},
  pii = {295/5552/7a},
  url = {http://dx.doi.org/10.1126/science.295.5552.7a}
}
@article{Belkin2003Laplacian,
  author = {Belkin, M. and Niyogi, P.},
  title = {Laplacian {E}igenmaps for {D}imensionality {R}eduction and {D}ata
	{R}epresentation},
  journal = {Neural {C}omput.},
  year = {2003},
  volume = {15},
  pages = {1373-1396},
  number = {6},
  abstract = {One of the central problems in machine learning and pattern recognition
	is to develop appropriate representations for complex data. {W}e
	consider the problem of constructing a representation for data lying
	on a low-dimensional manifold embedded in a high-dimensional space.
	{D}rawing on the correspondence between the graph {L}aplacian, the
	{L}aplace {B}eltrami operator on the manifold, and the connections
	to the heat equation, we propose a geometrically motivated algorithm
	for representing the high-dimensional data. {T}he algorithm provides
	a computationally efficient approach to nonlinear dimensionality
	reduction that has locality-preserving properties and a natural connection
	to clustering. {S}ome potential applications and illustrative examples
	are discussed.},
  pdf = {../local/1373.pdf},
  file = {1373.pdf:http\://neco.mitpress.org/cgi/reprint/15/6/1373.pdf:PDF},
  keywords = {dimred},
  url = {http://neco.mitpress.org/cgi/content/abstract/15/6/1373}
}
@article{Bengio2004Learning,
  author = {Bengio, Y. and Delalleau, O. and Le Roux, N. and Paiement, J.-F.
	and Vincent, P. and Ouimet, M.},
  title = {Learning eigenfunctions links spectral embedding and kernel {PCA}.},
  journal = {Neural {C}omput.},
  year = {2004},
  volume = {16},
  pages = {2197-2219},
  number = {10},
  month = {Oct},
  abstract = {In this letter, we show a direct relation between spectral embedding
	methods and kernel principal components analysis and how both are
	special cases of a more general learning problem: learning the principal
	eigenfunctions of an operator defined from a kernel and the unknown
	data-generating density. {W}hereas spectral embedding methods provided
	only coordinates for the training points, the analysis justifies
	a simple extension to out-of-sample examples (the {N}yström formula)
	for multidimensional scaling ({MDS}), spectral clustering, {L}aplacian
	eigenmaps, locally linear embedding ({LLE}), and {I}somap. {T}he
	analysis provides, for all such spectral embedding methods, the definition
	of a loss function, whose empirical average is minimized by the traditional
	algorithms. {T}he asymptotic expected value of that loss defines
	a generalization performance and clarifies what these algorithms
	are trying to learn. {E}xperiments with {LLE}, {I}somap, spectral
	clustering, and {MDS} show that this out-of-sample embedding formula
	generalizes well, with a level of error comparable to the effect
	of small perturbations of the training set on the embedding.},
  doi = {10.1162/0899766041732396},
  pdf = {../local/Bengio2004Learning.pdf},
  file = {Bengio2004Learning.pdf:local/Bengio2004Learning.pdf:PDF},
  keywords = {dimred},
  url = {http://dx.doi.org/10.1162/0899766041732396}
}
@article{Donoho2003Hessian,
  author = {Donoho, D. L. and Grimes, C.},
  title = {Hessian eigenmaps: {L}ocally linear embedding techniques for high-dimensional
	data},
  journal = {Proc. {N}atl. {A}cad. {S}ci. {USA}},
  year = {2003},
  volume = {100},
  pages = {5591-5596},
  number = {10},
  doi = {10.1073/pnas.1031596100},
  pdf = {../local/5591.pdf},
  file = {5591.pdf:http\://www.pnas.org/cgi/reprint/100/10/5591.pdf:PDF},
  keywords = {dimred},
  url = {http://www.pnas.org/cgi/content/abstract/100/10/5591}
}
@article{LHeureux2004Locally,
  author = {L'Heureux, P. J. and Carreau, J. and Bengio, Y. and Delalleau, O.
	and Yue, S. Y.},
  title = {Locally linear embedding for dimensionality reduction in {QSAR}.},
  journal = {J. {C}omput. {A}ided {M}ol. {D}es.},
  year = {2004},
  volume = {18},
  pages = {475-482},
  number = {7-9},
  abstract = {Current practice in {Q}uantitative {S}tructure {A}ctivity {R}elationship
	({QSAR}) methods usually involves generating a great number of chemical
	descriptors and then cutting them back with variable selection techniques.
	{V}ariable selection is an effective method to reduce the dimensionality
	but may discard some valuable information. {T}his paper introduces
	{L}ocally {L}inear {E}mbedding ({LLE}), a local non-linear dimensionality
	reduction technique, that can statistically discover a low-dimensional
	representation of the chemical data. {LLE} is shown to create more
	stable representations than other non-linear dimensionality reduction
	algorithms, and to be capable of capturing non-linearity in chemical
	data.},
  doi = {10.1007/s10822-004-5319-9},
  pdf = {../local/LHeureux2004Locally.pdf},
  file = {LHeureux2004Locally.pdf:local/LHeureux2004Locally.pdf:PDF},
  keywords = {dimred},
  url = {http://dx.doi.org/10.1007/s10822-004-5319-9}
}
@article{Nilsson2004Approximate,
  author = {Nilsson, J. and Fioretos, T. and Höglund, M. and Fontes, M.},
  title = {Approximate geodesic distances reveal biologically relevant structures
	in microarray data.},
  journal = {Bioinformatics},
  year = {2004},
  volume = {20},
  pages = {874-880},
  number = {6},
  month = {Apr},
  abstract = {{MOTIVATION}: {G}enome-wide gene expression measurements, as currently
	determined by the microarray technology, can be represented mathematically
	as points in a high-dimensional gene expression space. {G}enes interact
	with each other in regulatory networks, restricting the cellular
	gene expression profiles to a certain manifold, or surface, in gene
	expression space. {T}o obtain knowledge about this manifold, various
	dimensionality reduction methods and distance metrics are used. {F}or
	data points distributed on curved manifolds, a sensible distance
	measure would be the geodesic distance along the manifold. {I}n this
	work, we examine whether an approximate geodesic distance measure
	captures biological similarities better than the traditionally used
	{E}uclidean distance. {RESULTS}: {W}e computed approximate geodesic
	distances, determined by the {I}somap algorithm, for one set of lymphoma
	and one set of lung cancer microarray samples. {C}ompared with the
	ordinary {E}uclidean distance metric, this distance measure produced
	more instructive, biologically relevant, visualizations when applying
	multidimensional scaling. {T}his suggests the {I}somap algorithm
	as a promising tool for the interpretation of microarray data. {F}urthermore,
	the results demonstrate the benefit and importance of taking nonlinearities
	in gene expression data into account.},
  doi = {10.1093/bioinformatics/btg496},
  pdf = {../local/Nilsson2004Approximate.pdf},
  file = {Nilsson2004Approximate.pdf:local/Nilsson2004Approximate.pdf:PDF},
  keywords = {dimred},
  pii = {btg496},
  url = {http://dx.doi.org/10.1093/bioinformatics/btg496}
}
@article{Roweis2000Nonlinear,
  author = {Roweis, S. T. and Saul, L. K.},
  title = {Nonlinear dimensionality reduction by locally linear embedding.},
  journal = {Science},
  year = {2000},
  volume = {290},
  pages = {2323-2326},
  number = {5500},
  month = {Dec},
  abstract = {Many areas of science depend on exploratory data analysis and visualization.
	{T}he need to analyze large amounts of multivariate data raises the
	fundamental problem of dimensionality reduction: how to discover
	compact representations of high-dimensional data. {H}ere, we introduce
	locally linear embedding ({LLE}), an unsupervised learning algorithm
	that computes low-dimensional, neighborhood-preserving embeddings
	of high-dimensional inputs. {U}nlike clustering methods for local
	dimensionality reduction, {LLE} maps its inputs into a single global
	coordinate system of lower dimensionality, and its optimizations
	do not involve local minima. {B}y exploiting the local symmetries
	of linear reconstructions, {LLE} is able to learn the global structure
	of nonlinear manifolds, such as those generated by images of faces
	or documents of text.},
  doi = {10.1126/science.290.5500.2323},
  pdf = {../local/Roweis2000Nonlinear.pdf},
  file = {Roweis2000Nonlinear.pdf:local/Roweis2000Nonlinear.pdf:PDF},
  keywords = {dimred},
  pii = {290/5500/2323},
  url = {http://dx.doi.org/10.1126/science.290.5500.2323}
}
@article{Saul2003Think,
  author = {Saul, L. K. and Roweis, S. T.},
  title = {Think {G}lobally, {F}it {L}ocally: {U}nsupervised {L}earning of {L}ow
	{D}imensional {M}anifolds},
  journal = {J. {M}ach. {L}earn. {R}es.},
  year = {2003},
  volume = {4},
  pages = {119-155},
  abstract = {The problem of dimensionality reduction arises in many fields of information
	processing, including machine learning, data compression, scientific
	visualization, pattern recognition, and neural computation. {H}ere
	we describe locally linear embedding ({LLE}), an unsupervised learning
	algorithm that computes low dimensional, neighborhood preserving
	embeddings of high dimensional data. {T}he data, assumed to be sampled
	from an underlying manifold, are mapped into a single global coordinate
	system of lower dimensionality. {T}he mapping is derived from the
	symmetries of locally linear reconstructions, and the actual computation
	of the embedding reduces to a sparse eigenvalue problem. {N}otably,
	the optimizations in {LLE}---though capable of generating highly
	nonlinear embeddings---are simple to implement, and they do not involve
	local minima. {I}n this paper, we describe the implementation of
	the algorithm in detail and discuss several extensions that enhance
	its performance. {W}e present results of the algorithm applied to
	data sampled from known manifolds, as well as to collections of images
	of faces, lips, and handwritten digits. {T}hese examples are used
	to provide extensive illustrations of the algorithm's performance---both
	successes and failures---and to relate the algorithm to previous
	and ongoing work in nonlinear dimensionality reduction.},
  pdf = {../local/saul03a.pdf},
  file = {saul03a.pdf:http\://www.jmlr.org/papers/volume4/saul03a/saul03a.pdf:PDF},
  keywords = {dimred},
  url = {http://www.jmlr.org/papers/v4/saul03a.html}
}
@article{Tenenbaum2000global,
  author = {Tenenbaum, J. B. and de Silva, V. and Langford, J. C.},
  title = {A global geometric framework for nonlinear dimensionality reduction},
  journal = {Science},
  year = {2000},
  volume = {290},
  pages = {2319-2323},
  number = {5500},
  month = {Dec},
  abstract = {Scientists working with large volumes of high-dimensional data, such
	as global climate patterns, stellar spectra, or human gene distributions,
	regularly confront the problem of dimensionality reduction: finding
	meaningful low-dimensional structures hidden in their high-dimensional
	observations. {T}he human brain confronts the same problem in everyday
	perception, extracting from its high-dimensional sensory inputs (30,000
	auditory nerve fibers or $10^6$ optic nerve fibers) a manageably small
	number of perceptually relevant features. {H}ere we describe an approach
	to solving dimensionality reduction problems that uses easily measured
	local metric information to learn the underlying global geometry
	of a data set. {U}nlike classical techniques such as principal component
	analysis ({PCA}) and multidimensional scaling ({MDS}), our approach
	is capable of discovering the nonlinear degrees of freedom that underlie
	complex natural observations, such as human handwriting or images
	of a face under different viewing conditions. {I}n contrast to previous
	algorithms for nonlinear dimensionality reduction, ours efficiently
	computes a globally optimal solution, and, for an important class
	of data manifolds, is guaranteed to converge asymptotically to the
	true structure.},
  doi = {10.1126/science.290.5500.2319},
  pdf = {../local/Tenenbaum2000global.pdf},
  file = {Tenenbaum2000global.pdf:local/Tenenbaum2000global.pdf:PDF},
  keywords = {dimred},
  pii = {290/5500/2319},
  url = {http://dx.doi.org/10.1126/science.290.5500.2319}
}
@inproceedings{Weinberger2005Nonlinear,
  author = {Weinberger, K. and Packer, B. and Saul, L.},
  title = {Nonlinear {D}imensionality {R}eduction by {S}emidefinite {P}rogramming
	and {K}ernel {M}atrix {F}actorization},
  booktitle = {Proceedings of the {T}enth {I}nternational {W}orkshop on {A}rtificial
	{I}ntelligence and {S}tatistics, {J}an 6-8, 2005, {S}avannah {H}otel,
	{B}arbados},
  year = {2005},
  editor = {Cowell, R. G. and Ghahramani, Z.},
  pages = {381-388},
  publisher = {Society for Artificial Intelligence and Statistics},
  abstract = {We describe an algorithm for nonlinear dimensionality reduction based
	on semidefinite programming and kernel matrix factorization. {T}he
	algorithm learns a kernel matrix for high dimensional data that lies
	on or near a low dimensional manifold. {I}n earlier work, the kernel
	matrix was learned by maximizing the variance in feature space while
	preserving the distances and angles between nearest neighbors. {I}n
	this paper, adapting recent ideas from semi-supervised learning on
	graphs, we show that the full kernel matrix can be very well approximated
	by a product of smaller matrices. {R}epresenting the kernel matrix
	in this way, we can reformulate the semidefinite program in terms
	of a much smaller submatrix of inner products between randomly chosen
	landmarks. {T}he new framework leads to order-of-magnitude reductions
	in computation time and makes it possible to study much larger problems
	in manifold learning.},
  keywords = {dimred}
}
@inproceedings{Weinberger2004Unsupervised,
  author = {Weinberger, K. Q. and Saul, L. K.},
  title = {Unsupervised {L}earning of {I}mage {M}anifolds by {S}emidefinite
	{P}rogramming},
  booktitle = {2004 {IEEE} {C}omputer {S}ociety {C}onference on {C}omputer {V}ision
	and {P}attern {R}ecognition ({CVPR}'04)},
  year = {2004},
  volume = {2},
  number = {2},
  pages = {988-995},
  abstract = {Can we detect low dimensional structure in high dimensional data sets
	of images and video? {T}he problem of dimensionality reduction arises
	often in computer vision and pattern recognition. {I}n this paper,
	we propose a new solution to this problem based on semidefinite programming.
	{O}ur algorithm can be used to analyze high dimensional data that
	lies on or near a low dimensional manifold. {I}t overcomes certain
	limitations of previous work in manifold learning, such as {I}somap
	and locally linear embedding. {W}e illustrate the algorithm on easily
	visualized examples of curves and surfaces, as well as on actual
	images of faces, handwritten digits, and solid objects.},
  url = {http://doi.ieeecomputersociety.org/10.1109/CVPR.2004.256},
  pdf = {../local/sdeFinal_cvpr04.pdf},
  file = {sdeFinal_cvpr04.pdf:http\://www.seas.upenn.edu/~kilianw/publications/PDFs/sdeFinal_cvpr04.pdf:PDF},
  keywords = {dimred}
}
@inproceedings{Weinberger2004Learning,
  author = {Weinberger, K. Q. and Sha, F. and Saul, L. K.},
  title = {Learning a kernel matrix for nonlinear dimensionality reduction},
  booktitle = {{ICML} '04: {T}wenty-first international conference on {M}achine
	learning},
  year = {2004},
  address = {New York, NY, USA},
  publisher = {ACM Press},
  abstract = {We investigate how to learn a kernel matrix for high dimensional data
	that lies on or near a low dimensional manifold. {N}oting that the
	kernel matrix implicitly maps the data into a nonlinear feature space,
	we show how to discover a mapping that "unfolds" the underlying manifold
	from which the data was sampled. {T}he kernel matrix is constructed
	by maximizing the variance in feature space subject to local constraints
	that preserve the angles and distances between nearest neighbors.
	{T}he main optimization involves an instance of semidefinite programming---a
	fundamentally different computation than previous algorithms for
	manifold learning, such as {I}somap and locally linear embedding.
	{T}he optimized kernels perform better than polynomial and {G}aussian
	kernels for problems in manifold learning, but worse for problems
	in large margin classification. {W}e explain these results in terms
	of the geometric properties of different kernels and comment on various
	interpretations of other manifold learning algorithms as kernel methods.},
  doi = {10.1145/1015330.1015345},
  pdf = {../local/kernel_icml04.pdf},
  file = {kernel_icml04.pdf:http\://www.seas.upenn.edu/~kilianw/publications/PDFs/kernel_icml04.pdf:PDF},
  keywords = {dimred}
}
@comment{{jabref-meta: selector_author:}}
@comment{{jabref-meta: selector_journal:Adv. Drug Deliv. Rev.;Am. J. Hu
m. Genet.;Am. J. Pathol.;Ann. Appl. Stat.;Ann. Math. Statist.;Ann. N. 
Y. Acad. Sci.;Ann. Probab.;Ann. Stat.;Artif. Intell. Med.;Bernoulli;Bi
ochim. Biophys. Acta;Bioinformatics;Biometrika;BMC Bioinformatics;Br. 
J. Pharmacol.;Breast Cancer Res.;Cell;Cell. Signal.;Chem. Res. Toxicol
.;Clin. Cancer Res.;Combinator. Probab. Comput.;Comm. Pure Appl. Math.
;Comput. Chem.;Comput. Comm. Rev.;Comput. Stat. Data An.;Curr. Genom.;
Curr. Opin. Chem. Biol.;Curr. Opin. Drug Discov. Devel.;Data Min. Know
l. Discov.;Electron. J. Statist.;Eur. J. Hum. Genet.;FEBS Lett.;Found.
 Comput. Math.;Genome Biol.;IEEE T. Neural Networ.;IEEE T. Pattern. An
al.;IEEE T. Signal. Proces.;IEEE Trans. Inform. Theory;IEEE Trans. Kno
wl. Data Eng.;IEEE/ACM Trans. Comput. Biol. Bioinf.;Int. J. Comput. Vi
sion;Int. J. Data Min. Bioinform.;Int. J. Quantum Chem.;J Biol Syst;J. 
ACM;J. Am. Soc. Inf. Sci. Technol.;J. Am. Stat. Assoc.;J. Bioinform. C
omput. Biol.;J. Biol. Chem.;J. Biomed. Inform.;J. Cell. Biochem.;J. Ch
em. Inf. Comput. Sci.;J. Chem. Inf. Model.;J. Clin. Oncol.;J. Comput. 
Biol.;J. Comput. Graph. Stat.;J. Eur. Math. Soc.;J. Intell. Inform. Sy
st.;J. Mach. Learn. Res.;J. Med. Chem.;J. Mol. BIol.;J. R. Stat. Soc. 
Ser. B;Journal of Statistical Planning and Inference;Mach. Learn.;Math
. Program.;Meth. Enzymol.;Mol. Biol. Cell;Mol. Biol. Evol.;Mol. Cell. 
Biol.;Mol. Syst. Biol.;N. Engl. J. Med.;Nat. Biotechnol.;Nat. Genet.;N
at. Med.;Nat. Methods;Nat. Rev. Cancer;Nat. Rev. Drug Discov.;Nat. Rev
. Genet.;Nature;Neural Comput.;Neural Network.;Neurocomputing;Nucleic 
Acids Res.;Pattern Anal. Appl.;Pattern Recognit.;Phys. Rev. E;Phys. Re
v. Lett.;PLoS Biology;PLoS Comput. Biol.;Probab. Theory Relat. Fields;
Proc. IEEE;Proc. Natl. Acad. Sci. USA;Protein Eng.;Protein Eng. Des. S
el.;Protein Sci.;Protein. Struct. Funct. Genet.;Random Struct. Algorit
hm.;Rev. Mod. Phys.;Science;Stat. Probab. Lett.;Statistica Sinica;Theo
r. Comput. Sci.;Trans. Am. Math. Soc.;Trends Genet.;}}
@comment{{jabref-meta: selector_keywords:biogm;biosvm;breastcancer;cgh;
chemogenomics;chemoinformatics;csbcbook;csbcbook-ch1;csbcbook-ch2;csbc
book-ch3;csbcbook-ch4;csbcbook-ch5;csbcbook-ch6;csbcbook-ch7;csbcbook-
ch8;csbcbook-ch9;csbcbook-mustread;dimred;featureselection;glycans;her
g;hic;highcontentscreening;image;immunoinformatics;kernel-theory;kerne
lbook;lasso;microarray;ngs;nlp;plasmodium;proteomics;PUlearning;rnaseq
;segmentation;sirna;}}
@comment{{jabref-meta: selector_booktitle:Adv. Neural. Inform. Process 
Syst.;}}

This file was generated by bibtex2html 1.97.