Librarian View
Last updated in SearchWorks on December 3, 2023 9:36pm
LEADER 13644cam a2200805Ii 4500
001
a14271374
003
SIRSI
006
m o d
007
cr cn||||m|||a
008
180830t20182018sz a ob 000 0 eng d
035
a| (Sirsi) a14271374
040
a| CaBNVSL
b| eng
e| rda
e| pn
c| J2I
d| J2I
d| UIU
d| EBLCP
d| WAU
d| YDX
d| N$T
d| MERER
d| OCLCF
d| CEF
d| OTZ
d| OCLCQ
d| N$T
d| COO
d| VT2
d| CUT
d| MGCLP
d| OL$
d| OCLCQ
d| UKAHL
d| OCLCO
d| GW5XE
d| CSt
020
a| 9781681733036
q| (electronic book)
020
a| 168173303X
q| (electronic book)
020
a| 9781681733999
q| (electronic book)
020
a| 1681733994
q| (electronic book)
020
z| 9781681733043
q| (hardcover)
020
z| 1681733048
q| (hardcover)
020
z| 9781681733029
q| (paperback)
020
z| 1681733021
q| (paperback)
020
a| 9783031015816
q| (electronic bk.)
020
a| 3031015819
q| (electronic bk.)
024
7
a| 10.2200/S00832ED1V01Y201802AIM037
2| doi
024
7
a| 10.1007/978-3-031-01581-6
2| doi
035
a| (OCoLC)1050333957
z| (OCoLC)1048935606
z| (OCoLC)1049912070
z| (OCoLC)1053990139
050
4
a| Q325.5
b| .C445 2018
072
7
a| COM
x| 004000
2| bisacsh
082
0
4
a| 006.31
2| 23
049
a| MAIN
100
1
a| Chen, Zhiyuan
c| (Computer scientist),
e| author.
245
1
0
a| Lifelong machine learning /
c| Zhiyuan Chen, Bing Liu.
250
a| Second edition.
264
1
a| Cham, Switzerland :
b| Springer,
c| [2018]
264
4
c| ©2018
300
a| 1 online resource (xix, 187 pages) :
b| illustrations
336
a| text
b| txt
2| rdacontent
337
a| computer
b| c
2| rdamedia
338
a| online resource
b| cr
2| rdacarrier
490
1
a| Synthesis lectures on artificial intelligence and machine learning,
x| 1939-4616 ;
v| #38
588
0
a| Online resource; title from PDF title page (Morgan & Claypool, viewed on August 29, 2018).
504
a| Includes bibliographical references (pages 159-186).
505
0
a| 1. Introduction -- 1.1 Classic machine learning paradigm -- 1.2 Motivating examples -- 1.3 A brief history of lifelong learning -- 1.4 Definition of lifelong learning -- 1.5 Types of knowledge and key challenges -- 1.6 Evaluation methodology and role of big data -- 1.7 Outline of the book -- 2. Related learning paradigms -- 2.1 Transfer learning -- 2.1.1 Structural correspondence learning -- 2.1.2 Naïve Bayes transfer classifier -- 2.1.3 Deep learning in transfer learning -- 2.1.4 Difference from lifelong learning -- 2.2 Multi-task learning -- 2.2.1 Task relatedness in multi-task learning -- 2.2.2 GO-MTL: multi-task learning using latent basis -- 2.2.3 Deep learning in multi-task learning -- 2.2.4 Difference from lifelong learning -- 2.3 Online learning -- 2.3.1 Difference from lifelong learning -- 2.4 Reinforcement learning -- 2.4.1 Difference from lifelong learning -- 2.5 Meta learning -- 2.5.1 Difference from lifelong learning -- 2.6 Summary -- 3. Lifelong supervised learning -- 3.1 Definition and overview -- 3.2 Lifelong memory-based learning -- 3.2.1 Two memory-based learning methods -- 3.2.2 Learning a new representation for lifelong learning -- 3.3 Lifelong neural networks -- 3.3.1 MTL net -- 3.3.2 Lifelong EBNN -- 3.4 ELLA: an efficient lifelong learning algorithm -- 3.4.1 Problem setting -- 3.4.2 Objective function -- 3.4.3 Dealing with the first inefficiency -- 3.4.4 Dealing with the second inefficiency -- 3.4.5 Active task selection -- 3.5 Lifelong naive Bayesian classification -- 3.5.1 Naïve Bayesian text classification -- 3.5.2 Basic ideas of LSC -- 3.5.3 LSC technique -- 3.5.4 Discussions -- 3.6 Domain word embedding via meta-learning -- 3.7 Summary and evaluation datasets -- 4. 
Continual learning and catastrophic forgetting -- 4.1 Catastrophic forgetting -- 4.2 Continual learning in neural networks -- 4.3 Learning without forgetting -- 4.4 Progressive neural networks -- 4.5 Elastic weight consolidation -- 4.6 iCaRL: incremental classifier and representation learning -- 4.6.1 Incremental training -- 4.6.2 Updating representation -- 4.6.3 Constructing exemplar sets for new classes -- 4.6.4 Performing classification in iCaRL -- 4.7 Expert gate -- 4.7.1 Autoencoder gate -- 4.7.2 Measuring task relatedness for training -- 4.7.3 Selecting the most relevant expert for testing -- 4.7.4 Encoder-based lifelong learning -- 4.8 Continual learning with generative replay -- 4.8.1 Generative adversarial networks -- 4.8.2 Generative replay -- 4.9 Evaluating catastrophic forgetting -- 4.10 Summary and evaluation datasets -- 5. Open-world learning -- 5.1 Problem definition and applications -- 5.2 Center-based similarity space learning -- 5.2.1 Incrementally updating a CBS learning model -- 5.2.2 Testing a CBS learning model -- 5.2.3 CBS learning for unseen class detection -- 5.3 DOC: deep open classification -- 5.3.1 Feed-forward layers and the 1-vs.-rest layer -- 5.3.2 Reducing open-space risk -- 5.3.3 DOC for image classification -- 5.3.4 Unseen class discovery -- 5.4 Summary and evaluation datasets -- 6. Lifelong topic modeling -- 6.1 Main ideas of lifelong topic modeling -- 6.2 LTM: a lifelong topic model -- 6.2.1 LTM model -- 6.2.2 Topic knowledge mining -- 6.2.3 Incorporating past knowledge -- 6.2.4 Conditional distribution of Gibbs sampler -- 6.3 AMC: a lifelong topic model for small data -- 6.3.1 Overall algorithm of AMC -- 6.3.2 Mining must-link knowledge -- 6.3.3 Mining cannot-link knowledge -- 6.3.4 Extended Pólya Urn model -- 6.3.5 Sampling distributions in Gibbs sampler -- 6.4 Summary and evaluation datasets -- 7. 
Lifelong information extraction -- 7.1 NELL: a never-ending language learner -- 7.1.1 NELL architecture -- 7.1.2 Extractors and learning in NELL -- 7.1.3 Coupling constraints in NELL -- 7.2 Lifelong opinion target extraction -- 7.2.1 Lifelong learning through recommendation -- 7.2.2 AER algorithm -- 7.2.3 Knowledge learning -- 7.2.4 Recommendation using past knowledge -- 7.3 Learning on the job -- 7.3.1 Conditional random fields -- 7.3.2 General dependency feature -- 7.3.3 The L-CRF algorithm -- 7.4 Lifelong-RL: lifelong relaxation labeling -- 7.4.1 Relaxation labeling -- 7.4.2 Lifelong relaxation labeling -- 7.5 Summary and evaluation datasets -- 8. Continuous knowledge learning in chatbots -- 8.1 LiLi: lifelong interactive learning and inference -- 8.2 Basic ideas of LiLi -- 8.3 Components of LiLi -- 8.4 A running example -- 8.5 Summary and evaluation datasets -- 9. Lifelong reinforcement learning -- 9.1 Lifelong reinforcement learning through multiple environments -- 9.1.1 Acquiring and incorporating bias -- 9.2 Hierarchical Bayesian lifelong reinforcement learning -- 9.2.1 Motivation -- 9.2.2 Hierarchical Bayesian approach -- 9.2.3 MTRL algorithm -- 9.2.4 Updating hierarchical model parameters -- 9.2.5 Sampling an MDP -- 9.3 PG-ELLA: lifelong policy gradient reinforcement learning -- 9.3.1 Policy gradient reinforcement learning -- 9.3.2 Policy gradient lifelong learning setting -- 9.3.3 Objective function and optimization -- 9.3.4 Safe policy search for lifelong learning -- 9.3.5 Cross-domain lifelong reinforcement learning -- 9.4 Summary and evaluation datasets -- 10. Conclusion and future directions -- Bibliography -- Authors' biographies.
520
3
a| "This is an introduction to an advanced machine learning paradigm that continuously learns by accumulating past knowledge that it then uses in future learning and problem solving. In contrast, the current dominant machine learning paradigm learns in isolation: given a training dataset, it runs a machine learning algorithm on the dataset to produce a model that is then used in its intended application. It makes no attempt to retain the learned knowledge and use it in subsequent learning. Unlike this isolated system, humans learn effectively with only a few examples precisely because our learning is very knowledge-driven: the knowledge learned in the past helps us learn new things with little data or effort. Lifelong learning aims to emulate this capability, because without it, an AI system cannot be considered truly intelligent. Research in lifelong learning has developed significantly in the relatively short time since the first edition of this book was published. The purpose of this second edition is to expand the definition of lifelong learning, update the content of several chapters, and add a new chapter about continual learning in deep neural networks--which has been actively researched over the past two or three years. A few chapters have also been reorganized to make each of them more coherent for the reader. Moreover, the authors want to propose a unified framework for the research area. Currently, there are several research topics in machine learning that are closely related to lifelong learning--most notably, multi-task learning, transfer learning, and metalearning--because they also employ the idea of knowledge sharing and transfer. This book brings all these topics under one roof and discusses their similarities and differences. Its goal is to introduce this emerging machine learning paradigm and present a comprehensive survey and review of the important research results and latest ideas in the area. 
This book is thus suitable for students, researchers, and practitioners who are interested in machine learning, data mining, natural language processing, or pattern recognition. Lecturers can readily use the book for courses in any of these related fields."--Provided by publisher.
650
0
a| Machine learning.
650
6
a| Apprentissage automatique.
650
7
a| COMPUTERS
x| Intelligence (AI) & Semantics.
2| bisacsh
650
7
a| Machine learning.
2| fast
0| (OCoLC)fst01004795
653
a| lifelong machine learning
653
a| lifelong learning
653
a| continuous learning
653
a| continual learning
653
a| meta-learning
653
a| never-ending learning
653
a| multi-task learning
653
a| transfer learning
700
1
a| Liu, Bing,
d| 1963-
e| author.
776
0
8
i| Print version:
a| Chen, Zhiyuan (Computer scientist).
t| Lifelong machine learning.
b| Second edition.
d| [San Rafael, California] : Morgan & Claypool Publishers, [2018]
z| 9781681733043
w| (OCoLC)1080906732
830
0
a| Synthesis lectures on artificial intelligence and machine learning ;
v| #38.
x| 1939-4608
856
4
0
z| Available to Stanford-affiliated users.
u| https://link.springer.com/10.1007/978-3-031-01575-5
x| WMS
y| SpringerLink
x| Provider: Springer
x| purchased
x| eLoaderURL
x| sp4
x| spon1050333957
856
4
0
z| Available to Stanford-affiliated users.
u| https://link.springer.com/10.1007/978-3-031-01581-6
x| WMS
y| SpringerLink
x| Provider: Springer
x| purchased
x| eLoaderURL
x| sp4
x| spon1050333957
994
a| 92
b| STF
905
0
a| Preface Acknowledgments Introduction Related Learning Paradigms Lifelong Supervised Learning Continual Learning and Catastrophic Forgetting Open-World Learning Lifelong Topic Modeling Lifelong Information Extraction Continuous Knowledge Learning in Chatbots Lifelong Reinforcement Learning Conclusion and Future Directions Bibliography Authors' Biographies.
1| Nielsen
x| 9781681733043
x| 20220815
920
b| Lifelong Machine Learning, Second Edition is an introduction to an advanced machine learning paradigm that continuously learns by accumulating past knowledge that it then uses in future learning and problem solving. In contrast, the current dominant machine learning paradigm learns in isolation: given a training dataset, it runs a machine learning algorithm on the dataset to produce a model that is then used in its intended application. It makes no attempt to retain the learned knowledge and use it in subsequent learning. Unlike this isolated system, humans learn effectively with only a few examples precisely because our learning is very knowledge-driven: the knowledge learned in the past helps us learn new things with little data or effort. Lifelong learning aims to emulate this capability, because without it, an AI system cannot be considered truly intelligent. Research in lifelong learning has developed significantly in the relatively short time since the first edition of this book was published. The purpose of this second edition is to expand the definition of lifelong learning, update the content of several chapters, and add a new chapter about continual learning in deep neural networks-which has been actively researched over the past two or three years. A few chapters have also been reorganized to make each of them more coherent for the reader. Moreover, the authors want to propose a unified framework for the research area. Currently, there are several research topics in machine learning that are closely related to lifelong learning-most notably, multi-task learning, transfer learning, and meta-learning-because they also employ the idea of knowledge sharing and transfer. This book brings all these topics under one roof and discusses their similarities and differences. Its goal is to introduce this emerging machine learning paradigm and present a comprehensive survey and review of the important research results and latest ideas in the area. 
This book is thus suitable for students, researchers, and practitioners who are interested in machine learning, data mining, natural language processing, or pattern recognition. Lecturers can readily use the book for courses in any of these related fields.
1| Nielsen
x| 9781681733043
x| 20220815
596
a| 22
035
a| (Sirsi) spon1050333957
999
f
f
i| 4a341d19-4626-5774-8d7d-eb4a4fdaff5f
s| d596554b-107d-5369-b294-4ca25d452c2a
Holdings JSON
{ "holdings": [ { "id": "b4d5bae6-1d40-580b-b228-2b2bde0110b4", "hrid": "ah14271374_1", "notes": [ ], "_version": 1, "metadata": { "createdDate": "2023-08-21T21:26:12.356Z", "updatedDate": "2023-08-21T21:26:12.356Z", "createdByUserId": "58d0aaf6-dcda-4d5e-92da-012e6b7dd766", "updatedByUserId": "58d0aaf6-dcda-4d5e-92da-012e6b7dd766" }, "sourceId": "f32d531e-df79-46b3-8932-cdd35f7a2264", "boundWith": null, "formerIds": [ ], "illPolicy": null, "instanceId": "4a341d19-4626-5774-8d7d-eb4a4fdaff5f", "holdingsType": { "id": "996f93e2-5b5e-4cf2-9168-33ced1f95eed", "name": "Electronic", "source": "folio" }, "holdingsItems": [ ], "callNumberType": null, "holdingsTypeId": "996f93e2-5b5e-4cf2-9168-33ced1f95eed", "electronicAccess": [ ], "bareHoldingsItems": [ ], "discoverySuppress": false, "holdingsStatements": [ ], "statisticalCodeIds": [ ], "administrativeNotes": [ ], "effectiveLocationId": "b0a1a8c3-cc9a-487c-a2ed-308fc3a49a91", "permanentLocationId": "b0a1a8c3-cc9a-487c-a2ed-308fc3a49a91", "suppressFromDiscovery": false, "holdingsStatementsForIndexes": [ ], "holdingsStatementsForSupplements": [ ], "location": { "effectiveLocation": { "id": "b0a1a8c3-cc9a-487c-a2ed-308fc3a49a91", "code": "SUL-ELECTRONIC", "name": "online resource", "campus": { "id": "c365047a-51f2-45ce-8601-e421ca3615c5", "code": "SUL", "name": "Stanford Libraries" }, "details": { }, "library": { "id": "c1a86906-ced0-46cb-8f5b-8cef542bdd00", "code": "SUL", "name": "SUL" }, "isActive": true, "institution": { "id": "8d433cdd-4e8f-4dc1-aa24-8a4ddb7dc929", "code": "SU", "name": "Stanford University" } }, "permanentLocation": { "id": "b0a1a8c3-cc9a-487c-a2ed-308fc3a49a91", "code": "SUL-ELECTRONIC", "name": "online resource", "campus": { "id": "c365047a-51f2-45ce-8601-e421ca3615c5", "code": "SUL", "name": "Stanford Libraries" }, "details": { }, "library": { "id": "c1a86906-ced0-46cb-8f5b-8cef542bdd00", "code": "SUL", "name": "SUL" }, "isActive": true, "institution": { "id": "8d433cdd-4e8f-4dc1-aa24-8a4ddb7dc929", 
"code": "SU", "name": "Stanford University" } } } } ], "items": [ ] }
FOLIO JSON
{ "pieces": [ null ], "instance": { "id": "4a341d19-4626-5774-8d7d-eb4a4fdaff5f", "hrid": "a14271374", "notes": [ { "note": "Online resource; title from PDF title page (Morgan & Claypool, viewed on August 29, 2018)", "staffOnly": false, "instanceNoteTypeId": "66ea8f28-d5da-426a-a7c9-739a5d676347" }, { "note": "Includes bibliographical references (pages 159-186)", "staffOnly": false, "instanceNoteTypeId": "86b6e817-e1bc-42fb-bab0-70e7547de6c1" }, { "note": "1. Introduction -- 1.1 Classic machine learning paradigm -- 1.2 Motivating examples -- 1.3 A brief history of lifelong learning -- 1.4 Definition of lifelong learning -- 1.5 Types of knowledge and key challenges -- 1.6 Evaluation methodology and role of big data -- 1.7 Outline of the book -- 2. Related learning paradigms -- 2.1 Transfer learning -- 2.1.1 Structural correspondence learning -- 2.1.2 Naïve Bayes transfer classifier -- 2.1.3 Deep learning in transfer learning -- 2.1.4 Difference from lifelong learning -- 2.2 Multi-task learning -- 2.2.1 Task relatedness in multi-task learning -- 2.2.2 GO-MTL: multi-task learning using latent basis -- 2.2.3 Deep learning in multi-task learning -- 2.2.4 Difference from lifelong learning -- 2.3 Online learning -- 2.3.1 Difference from lifelong learning -- 2.4 Reinforcement learning -- 2.4.1 Difference from lifelong learning -- 2.5 Meta learning -- 2.5.1 Difference from lifelong learning -- 2.6 Summary -- 3. 
Lifelong supervised learning -- 3.1 Definition and overview -- 3.2 Lifelong memory-based learning -- 3.2.1 Two memory-based learning methods -- 3.2.2 Learning a new representation for lifelong learning -- 3.3 Lifelong neural networks -- 3.3.1 MTL net -- 3.3.2 Lifelong EBNN -- 3.4 ELLA: an efficient lifelong learning algorithm -- 3.4.1 Problem setting -- 3.4.2 Objective function -- 3.4.3 Dealing with the first inefficiency -- 3.4.4 Dealing with the second inefficiency -- 3.4.5 Active task selection -- 3.5 Lifelong naive Bayesian classification -- 3.5.1 Naïve Bayesian text classification -- 3.5.2 Basic ideas of LSC -- 3.5.3 LSC technique -- 3.5.4 Discussions -- 3.6 Domain word embedding via meta-learning -- 3.7 Summary and evaluation datasets -- 4. Continual learning and catastrophic forgetting -- 4.1 Catastrophic forgetting -- 4.2 Continual learning in neural networks -- 4.3 Learning without forgetting -- 4.4 Progressive neural networks -- 4.5 Elastic weight consolidation -- 4.6 iCaRL: incremental classifier and representation learning -- 4.6.1 Incremental training -- 4.6.2 Updating representation -- 4.6.3 Constructing exemplar sets for new classes -- 4.6.4 Performing classification in iCaRL -- 4.7 Expert gate -- 4.7.1 Autoencoder gate -- 4.7.2 Measuring task relatedness for training -- 4.7.3 Selecting the most relevant expert for testing -- 4.7.4 Encoder-based lifelong learning -- 4.8 Continual learning with generative replay -- 4.8.1 Generative adversarial networks -- 4.8.2 Generative replay -- 4.9 Evaluating catastrophic forgetting -- 4.10 Summary and evaluation datasets -- 5. 
Open-world learning -- 5.1 Problem definition and applications -- 5.2 Center-based similarity space learning -- 5.2.1 Incrementally updating a CBS learning model -- 5.2.2 Testing a CBS learning model -- 5.2.3 CBS learning for unseen class detection -- 5.3 DOC: deep open classification -- 5.3.1 Feed-forward layers and the 1-vs.-rest layer -- 5.3.2 Reducing open-space risk -- 5.3.3 DOC for image classification -- 5.3.4 Unseen class discovery -- 5.4 Summary and evaluation datasets -- 6. Lifelong topic modeling -- 6.1 Main ideas of lifelong topic modeling -- 6.2 LTM: a lifelong topic model -- 6.2.1 LTM model -- 6.2.2 Topic knowledge mining -- 6.2.3 Incorporating past knowledge -- 6.2.4 Conditional distribution of Gibbs sampler -- 6.3 AMC: a lifelong topic model for small data -- 6.3.1 Overall algorithm of AMC -- 6.3.2 Mining must-link knowledge -- 6.3.3 Mining cannot-link knowledge -- 6.3.4 Extended Pólya Urn model -- 6.3.5 Sampling distributions in Gibbs sampler -- 6.4 Summary and evaluation datasets -- 7. Lifelong information extraction -- 7.1 NELL: a never-ending language learner -- 7.1.1 NELL architecture -- 7.1.2 Extractors and learning in NELL -- 7.1.3 Coupling constraints in NELL -- 7.2 Lifelong opinion target extraction -- 7.2.1 Lifelong learning through recommendation -- 7.2.2 AER algorithm -- 7.2.3 Knowledge learning -- 7.2.4 Recommendation using past knowledge -- 7.3 Learning on the job -- 7.3.1 Conditional random fields -- 7.3.2 General dependency feature -- 7.3.3 The L-CRF algorithm -- 7.4 Lifelong-RL: lifelong relaxation labeling -- 7.4.1 Relaxation labeling -- 7.4.2 Lifelong relaxation labeling -- 7.5 Summary and evaluation datasets -- 8. Continuous knowledge learning in chatbots -- 8.1 LiLi: lifelong interactive learning and inference -- 8.2 Basic ideas of LiLi -- 8.3 Components of LiLi -- 8.4 A running example -- 8.5 Summary and evaluation datasets -- 9. 
Lifelong reinforcement learning -- 9.1 Lifelong reinforcement learning through multiple environments -- 9.1.1 Acquiring and incorporating bias -- 9.2 Hierarchical Bayesian lifelong reinforcement learning -- 9.2.1 Motivation -- 9.2.2 Hierarchical Bayesian approach -- 9.2.3 MTRL algorithm -- 9.2.4 Updating hierarchical model parameters -- 9.2.5 Sampling an MDP -- 9.3 PG-ELLA: lifelong policy gradient reinforcement learning -- 9.3.1 Policy gradient reinforcement learning -- 9.3.2 Policy gradient lifelong learning setting -- 9.3.3 Objective function and optimization -- 9.3.4 Safe policy search for lifelong learning -- 9.3.5 Cross-domain lifelong reinforcement learning -- 9.4 Summary and evaluation datasets -- 10. Conclusion and future directions -- Bibliography -- Authors' biographies", "staffOnly": false, "instanceNoteTypeId": "5ba8e385-0e27-462e-a571-ffa1fa34ea54" }, { "note": "\"This is an introduction to an advanced machine learning paradigm that continuously learns by accumulating past knowledge that it then uses in future learning and problem solving. In contrast, the current dominant machine learning paradigm learns in isolation: given a training dataset, it runs a machine learning algorithm on the dataset to produce a model that is then used in its intended application. It makes no attempt to retain the learned knowledge and use it in subsequent learning. Unlike this isolated system, humans learn effectively with only a few examples precisely because our learning is very knowledge-driven: the knowledge learned in the past helps us learn new things with little data or effort. Lifelong learning aims to emulate this capability, because without it, an AI system cannot be considered truly intelligent. Research in lifelong learning has developed significantly in the relatively short time since the first edition of this book was published. 
The purpose of this second edition is to expand the definition of lifelong learning, update the content of several chapters, and add a new chapter about continual learning in deep neural networks--which has been actively researched over the past two or three years. A few chapters have also been reorganized to make each of them more coherent for the reader. Moreover, the authors want to propose a unified framework for the research area. Currently, there are several research topics in machine learning that are closely related to lifelong learning--most notably, multi-task learning, transfer learning, and metalearning--because they also employ the idea of knowledge sharing and transfer. This book brings all these topics under one roof and discusses their similarities and differences. Its goal is to introduce this emerging machine learning paradigm and present a comprehensive survey and review of the important research results and latest ideas in the area. This book is thus suitable for students, researchers, and practitioners who are interested in machine learning, data mining, natural language processing, or pattern recognition. Lecturers can readily use the book for courses in any of these related fields.\"--Provided by publisher", "staffOnly": false, "instanceNoteTypeId": "10e2e11b-450f-45c8-b09b-0f819999966e" } ], "title": "Lifelong machine learning / Zhiyuan Chen, Bing Liu.", "series": [ "Synthesis lectures on artificial intelligence and machine learning, 1939-4616 ; #38", "Synthesis lectures on artificial intelligence and machine learning ; #38. 
1939-4608" ], "source": "MARC", "_version": 1, "editions": [ "Second edition" ], "metadata": { "createdDate": "2023-08-21T21:23:16.892Z", "updatedDate": "2023-08-21T21:23:16.892Z", "createdByUserId": "58d0aaf6-dcda-4d5e-92da-012e6b7dd766", "updatedByUserId": "58d0aaf6-dcda-4d5e-92da-012e6b7dd766" }, "statusId": "9634a5ab-9228-4703-baf2-4d12ebc77d56", "subjects": [ "Machine learning", "Apprentissage automatique", "COMPUTERS Intelligence (AI) & Semantics" ], "languages": [ "eng" ], "indexTitle": "Lifelong machine learning", "identifiers": [ { "value": "(Sirsi) a14271374", "identifierTypeId": "7e591197-f335-4afb-bc6d-a6d76ca3bace" }, { "value": "9781681733036 (electronic book)", "identifierTypeId": "8261054f-be78-422d-bd51-4ed9f33c3422" }, { "value": "168173303X (electronic book)", "identifierTypeId": "8261054f-be78-422d-bd51-4ed9f33c3422" }, { "value": "9781681733999 (electronic book)", "identifierTypeId": "8261054f-be78-422d-bd51-4ed9f33c3422" }, { "value": "1681733994 (electronic book)", "identifierTypeId": "8261054f-be78-422d-bd51-4ed9f33c3422" }, { "value": "9781681733043 (hardcover)", "identifierTypeId": "fcca2643-406a-482a-b760-7a7f8aec640e" }, { "value": "1681733048 (hardcover)", "identifierTypeId": "fcca2643-406a-482a-b760-7a7f8aec640e" }, { "value": "9781681733029 (paperback)", "identifierTypeId": "fcca2643-406a-482a-b760-7a7f8aec640e" }, { "value": "1681733021 (paperback)", "identifierTypeId": "fcca2643-406a-482a-b760-7a7f8aec640e" }, { "value": "9783031015816 (electronic bk.)", "identifierTypeId": "8261054f-be78-422d-bd51-4ed9f33c3422" }, { "value": "3031015819 (electronic bk.)", "identifierTypeId": "8261054f-be78-422d-bd51-4ed9f33c3422" }, { "value": "10.2200/S00832ED1V01Y201802AIM037 doi", "identifierTypeId": "2e8b3b6c-0e7d-4e48-bca2-b0b23b376af5" }, { "value": "10.2200/S00832ED1V01Y201802AIM037", "identifierTypeId": "ebfd00b6-61d3-4d87-a6d8-810c941176d5" }, { "value": "10.2200/S00832ED1V01Y201802AIM037", "identifierTypeId": 
"1795ea23-6856-48a5-a772-f356e16a8a6c" }, { "value": "10.1007/978-3-031-01581-6 doi", "identifierTypeId": "2e8b3b6c-0e7d-4e48-bca2-b0b23b376af5" }, { "value": "10.1007/978-3-031-01581-6", "identifierTypeId": "ebfd00b6-61d3-4d87-a6d8-810c941176d5" }, { "value": "10.1007/978-3-031-01581-6", "identifierTypeId": "1795ea23-6856-48a5-a772-f356e16a8a6c" }, { "value": "(OCoLC)1050333957", "identifierTypeId": "439bfbae-75bc-4f74-9fc7-b2a2d47ce3ef" }, { "value": "(OCoLC)1048935606", "identifierTypeId": "fc4e3f2a-887a-46e5-8057-aeeb271a4e56" }, { "value": "(OCoLC)1049912070", "identifierTypeId": "fc4e3f2a-887a-46e5-8057-aeeb271a4e56" }, { "value": "(OCoLC)1053990139", "identifierTypeId": "fc4e3f2a-887a-46e5-8057-aeeb271a4e56" }, { "value": "(Sirsi) spon1050333957", "identifierTypeId": "7e591197-f335-4afb-bc6d-a6d76ca3bace" } ], "publication": [ { "role": "Publication", "place": "Cham, Switzerland", "publisher": "Springer", "dateOfPublication": "[2018]" }, { "role": "Copyright notice date", "place": "", "publisher": "", "dateOfPublication": "©2018" } ], "contributors": [ { "name": "Chen, Zhiyuan (Computer scientist)", "primary": true, "contributorTypeId": "6e09d47d-95e2-4d8a-831b-f777b8ef6d81", "contributorTypeText": "author.", "contributorNameTypeId": "2b94c631-fca9-4892-a730-03ee529ffe2a" }, { "name": "Liu, Bing, 1963-", "primary": false, "contributorTypeId": "6e09d47d-95e2-4d8a-831b-f777b8ef6d81", "contributorTypeText": "author.", "contributorNameTypeId": "2b94c631-fca9-4892-a730-03ee529ffe2a" } ], "catalogedDate": "2022-08-06", "staffSuppress": false, "instanceTypeId": "6312d172-f0cf-40f6-b27d-9fa8feaf332f", "previouslyHeld": false, "classifications": [ { "classificationNumber": "Q325.5 .C445 2018", "classificationTypeId": "ce176ace-a53e-4b4d-aa89-725ed7b2edac" }, { "classificationNumber": "006.31", "classificationTypeId": "42471af9-7d25-4f3a-bf78-60d29dcf463b" } ], "instanceFormats": [ ], "electronicAccess": [ { "uri": 
"https://link.springer.com/10.1007/978-3-031-01575-5", "name": "Resource", "linkText": "SpringerLink", "publicNote": "Available to Stanford-affiliated users", "relationshipId": "f5d0068e-6272-458e-8a81-b85e7b9a14aa" }, { "uri": "https://link.springer.com/10.1007/978-3-031-01581-6", "name": "Resource", "linkText": "SpringerLink", "publicNote": "Available to Stanford-affiliated users", "relationshipId": "f5d0068e-6272-458e-8a81-b85e7b9a14aa" } ], "holdingsRecords2": [ ], "modeOfIssuanceId": "9d18a02f-5897-4c31-9106-c9abb5c7ae8b", "publicationRange": [ ], "statisticalCodes": [ ], "alternativeTitles": [ ], "discoverySuppress": false, "instanceFormatIds": [ "f5e8210f-7640-459b-a71f-552567f92369" ], "publicationPeriod": { "start": 2018 }, "statusUpdatedDate": "2023-08-21T21:23:16.107+0000", "statisticalCodeIds": [ ], "administrativeNotes": [ ], "physicalDescriptions": [ "1 online resource (xix, 187 pages) : illustrations" ], "publicationFrequency": [ ], "suppressFromDiscovery": false, "natureOfContentTermIds": [ ] }, "holdingSummaries": [ { "poLineId": null, "orderType": null, "orderStatus": null, "poLineNumber": null, "orderSentDate": null, "orderCloseReason": null, "polReceiptStatus": null } ] }