@comment{{This file has been generated by bib2bib 1.97}}
@comment{{Command line: bib2bib ../bibli.bib -c 'subject:"source-coding" or keywords:"source-coding"' -ob tmp.bib}}
@article{Feder1996Hierarchical,
  author   = {Feder, M. and Merhav, N.},
  title    = {Hierarchical universal coding},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {1996},
  volume   = {42},
  number   = {5},
  pages    = {1354--1364},
  month    = sep,
  abstract = {In an earlier paper, we proved a strong version of the redundancy-capacity converse theorem of universal coding, stating that for ``most'' sources in a given class, the universal coding redundancy is essentially lower-bounded by the capacity of the channel induced by this class. {S}ince this result holds for general classes of sources, it extends {R}issanen's (1986) strong converse theorem for parametric families. {W}hile our earlier result has established strong optimality only for mixture codes weighted by the capacity-achieving prior, our first result herein extends this finding to a general prior. {F}or some cases our technique also leads to a simplified proof of the above mentioned strong converse theorem. {T}he major interest in this paper, however, is in extending the theory of universal coding to hierarchical structures of classes, where each class may have a different capacity. {I}n this setting, one wishes to incur redundancy essentially as small as that corresponding to the active class, and not the union of classes. {O}ur main result is that the redundancy of a code based on a two-stage mixture (first, within each class, and then over the classes), is no worse than that of any other code for ``most'' sources of ``most'' classes. {I}f, in addition, the classes can be efficiently distinguished by a certain decision rule, then the best attainable redundancy is given explicitly by the capacity of the active class plus the normalized negative logarithm of the prior probability assigned to this class. {T}hese results suggest some interesting guidelines as for the choice of the prior. {W}e also discuss some examples with a natural hierarchical partition into classes},
  pdf      = {../local/Feder1996Hierarchical.pdf},
  file     = {Feder1996Hierarchical.pdf:local/Feder1996Hierarchical.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@article{Krichevskiy1998Laplace's,
  author   = {Krichevskiy, R. E.},
  title    = {{Laplace}'s law of succession and universal encoding},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {1998},
  volume   = {44},
  number   = {1},
  pages    = {296--303},
  month    = jan,
  pdf      = {../local/Krichevskiy1998Laplace's.pdf},
  file     = {Krichevskiy1998Laplace's.pdf:local/Krichevskiy1998Laplace's.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@article{Krichevsky1981performance,
  author   = {Krichevsky, R. and Trofimov, V.},
  title    = {The performance of universal encoding},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {1981},
  volume   = {27},
  number   = {2},
  pages    = {199--207},
  month    = mar,
  abstract = {Universal coding theory is surveyed from the viewpoint of the interplay between delay and redundancy. {T}he price for universality turns out to be acceptably small.},
  pdf      = {../local/Krichevsky1981performance.pdf},
  file     = {Krichevsky1981performance.pdf:local/Krichevsky1981performance.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@article{Merhav1993Universal,
  author   = {Merhav, N. and Feder, M.},
  title    = {Universal schemes for sequential decision from individual data sequences},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {1993},
  volume   = {39},
  number   = {4},
  pages    = {1280--1292},
  month    = jul,
  abstract = {Sequential decision algorithms are investigated in relation to a family of additive performance criteria for individual data sequences. {S}imple universal sequential schemes are known, under certain conditions, to approach optimality uniformly as fast as $n^{-1} \log n$, where $n$ is the sample size. {F}or the case of finite-alphabet observations, the class of schemes that can be implemented by finite-state machines ({FSM}s) is studied. {I}t is shown that {M}arkovian machines with sufficiently long memory exist, which are asymptotically nearly as good as any given deterministic or randomized {FSM} for the purpose of sequential decision. {F}or the continuous-valued observation case, a useful class of parametric schemes is discussed with special attention to the recursive least squares algorithm},
  pdf      = {../local/Merhav1993Universal.pdf},
  file     = {Merhav1993Universal.pdf:local/Merhav1993Universal.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@article{Merhav1993Some,
  author   = {Merhav, N. and Feder, M. and Gutman, M.},
  title    = {Some properties of sequential predictors for binary {Markov} sources},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {1993},
  volume   = {39},
  number   = {3},
  pages    = {887--892},
  month    = may,
  abstract = {Universal predictions of the next outcome of a binary sequence drawn from a {M}arkov source with unknown parameters is considered. {F}or a given source, the predictability is defined as the least attainable expected fraction of prediction errors. {A} lower bound is derived on the maximum rate at which the predictability is asymptotically approached uniformly over all sources in the {M}arkov class. {T}his bound is achieved by a simple majority predictor. {F}or {B}ernoulli sources, bounds on the large deviations performance are investigated. {A} lower bound is derived for the probability that the fraction of errors will exceed the predictability by a prescribed amount $\epsilon > 0$. {T}his bound is achieved by the same predictor if $\epsilon$ is sufficiently small},
  pdf      = {../local/Merhav1993Some.pdf},
  file     = {Merhav1993Some.pdf:local/Merhav1993Some.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@article{Meron2004Finite-memory,
  author   = {Meron, E. and Feder, M.},
  title    = {Finite-memory universal prediction of individual sequences},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {2004},
  volume   = {50},
  number   = {7},
  pages    = {1506--1523},
  month    = jul,
  abstract = {The problem of predicting the next outcome of an individual binary sequence under the constraint that the universal predictor has a finite memory, is explored. {I}n this analysis, the finite-memory universal predictors are either deterministic or random time-invariant finite-state ({FS}) machines with {K} states ({K}-state machines). {T}he paper provides bounds on the asymptotic achievable regret of these constrained universal predictors as a function of {K}, the number of their states, for long enough sequences. {T}he specific results are as follows. {W}hen the universal predictors are deterministic machines, the comparison class consists of constant predictors, and prediction is with respect to the 0-1 loss function ({H}amming distance), we get tight bounds indicating that the optimal asymptotic regret is $1/(2K)$. {I}n that case of {K}-state deterministic universal predictors, the constant predictors comparison class, but prediction is with respect to the self-information (code length) and the square-error loss functions, we show an upper bound on the regret (coding redundancy) of $O(K^{-2/3})$ and a lower bound of $\Theta(K^{-4/5})$. {F}or these loss functions, if the predictor is allowed to be a random {K}-state machine, i.e., a machine with random state transitions, we get a lower bound of $\Theta(1/K)$ on the regret, with a matching upper bound of $O(1/K)$ for the square-error loss, and an upper bound of $O(\log K/K)$ {T}hroughout the paper for the self-information loss. {I}n addition, we provide results for all these loss functions in the case where the comparison class consists of all predictors that are order-{L} {M}arkov machines.},
  pdf      = {../local/Meron2004Finite-memory.pdf},
  file     = {Meron2004Finite-memory.pdf:local/Meron2004Finite-memory.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@article{Rissanen1981Universal,
  author   = {Rissanen, J. and Langdon, Jr., G.},
  title    = {Universal modeling and coding},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {1981},
  volume   = {27},
  number   = {1},
  pages    = {12--23},
  month    = jan,
  pdf      = {../local/Rissanen1981Universal.pdf},
  file     = {Rissanen1981Universal.pdf:local/Rissanen1981Universal.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@article{Tjalkens1992universal,
  author   = {Tjalkens, T. J. and Willems, F. M. J.},
  title    = {A universal variable-to-fixed length source code based on {Lawrence}'s algorithm},
  journal  = {{IEEE} Trans. Inform. Theory},
  year     = {1992},
  volume   = {38},
  number   = {2},
  pages    = {247--253},
  month    = mar,
  abstract = {It is shown that the modified {L}awrence algorithm is universal over the class of binary memoryless sources and that the rate converges asymptotically optimally fast to the source entropy. {I}t is proven that no codes exist that have a better asymptotic performance. {T}he asymptotic bounds show that universal variable-to-fixed-length codes can have a significantly lower redundancy than universal fixed-to-variable-length codes with the same number of codewords},
  pdf      = {../local/Tjalkens1992universal.pdf},
  file     = {Tjalkens1992universal.pdf:local/Tjalkens1992universal.pdf:PDF},
  keywords = {information-theory source-coding},
  owner    = {vert}
}
@comment{{jabref-meta: selector_author:}}
@comment{{jabref-meta: selector_journal:Adv. Drug Deliv. Rev.;Am. J. Hu m. Genet.;Am. J. Pathol.;Ann. Appl. Stat.;Ann. Math. Statist.;Ann. N. Y. Acad. Sci.;Ann. Probab.;Ann. Stat.;Artif. Intell. Med.;Bernoulli;Bi ochim. Biophys. Acta;Bioinformatics;Biometrika;BMC Bioinformatics;Br. J. Pharmacol.;Breast Cancer Res.;Cell;Cell. Signal.;Chem. Res. Toxicol .;Clin. Cancer Res.;Combinator. Probab. Comput.;Comm. Pure Appl. Math. ;Comput. Chem.;Comput. Comm. Rev.;Comput. Stat. Data An.;Curr. Genom.; Curr. Opin. Chem. Biol.;Curr. Opin. Drug Discov. Devel.;Data Min. Know l. Discov.;Electron. J. Statist.;Eur. J. Hum. Genet.;FEBS Lett.;Found. Comput. Math.;Genome Biol.;IEEE T. Neural Networ.;IEEE T. Pattern. An al.;IEEE T. Signal. Proces.;IEEE Trans. Inform. Theory;IEEE Trans. Kno wl. Data Eng.;IEEE/ACM Trans. Comput. Biol. Bioinf.;Int. J. Comput. Vi sion;Int. J. Data Min. Bioinform.;Int. J. Qantum Chem.;J Biol Syst;J. ACM;J. Am. Soc. Inf. Sci. Technol.;J. Am. Stat. Assoc.;J. Bioinform. C omput. Biol.;J. Biol. Chem.;J. Biomed. Inform.;J. Cell. Biochem.;J. Ch em. Inf. Comput. Sci.;J. Chem. Inf. Model.;J. Clin. Oncol.;J. Comput. Biol.;J. Comput. Graph. Stat.;J. Eur. Math. Soc.;J. Intell. Inform. Sy st.;J. Mach. Learn. Res.;J. Med. Chem.;J. Mol. BIol.;J. R. Stat. Soc. Ser. B;Journal of Statistical Planning and Inference;Mach. Learn.;Math . Program.;Meth. Enzymol.;Mol. Biol. Cell;Mol. Biol. Evol.;Mol. Cell. Biol.;Mol. Syst. Biol.;N. Engl. J. Med.;Nat. Biotechnol.;Nat. Genet.;N at. Med.;Nat. Methods;Nat. Rev. Cancer;Nat. Rev. Drug Discov.;Nat. Rev . Genet.;Nature;Neural Comput.;Neural Network.;Neurocomputing;Nucleic Acids Res.;Pattern Anal. Appl.;Pattern Recognit.;Phys. Rev. E;Phys. Re v. Lett.;PLoS Biology;PLoS Comput. Biol.;Probab. Theory Relat. Fields; Proc. IEEE;Proc. Natl. Acad. Sci. USA;Protein Eng.;Protein Eng. Des. S el.;Protein Sci.;Protein. Struct. Funct. Genet.;Random Struct. Algorit hm.;Rev. Mod. Phys.;Science;Stat. Probab. Lett.;Statistica Sinica;Theo r. Comput. 
Sci.;Trans. Am. Math. Soc.;Trends Genet.;}}
@comment{{jabref-meta: selector_keywords:biogm;biosvm;breastcancer;cgh; chemogenomics;chemoinformatics;csbcbook;csbcbook-ch1;csbcbook-ch2;csbc book-ch3;csbcbook-ch4;csbcbook-ch5;csbcbook-ch6;csbcbook-ch7;csbcbook- ch8;csbcbook-ch9;csbcbook-mustread;dimred;featureselection;glycans;her g;hic;highcontentscreening;image;immunoinformatics;kernel-theory;kerne lbook;lasso;microarray;ngs;nlp;plasmodium;proteomics;PUlearning;rnaseq ;segmentation;sirna;}}
@comment{{jabref-meta: selector_booktitle:Adv. Neural. Inform. Process Syst.;}}
@comment{{This file was generated by bibtex2html 1.97.}}