{"created":"2023-05-15T10:27:02.198687+00:00","id":22327,"links":{},"metadata":{"_buckets":{"deposit":"dfb67f55-c41d-43ff-832d-61899803286a"},"_deposit":{"created_by":1,"id":"22327","owners":[1],"pid":{"revision_id":0,"type":"depid","value":"22327"},"status":"published"},"_oai":{"id":"oai:u-fukui.repo.nii.ac.jp:00022327","sets":["2403:2404"]},"author_link":["64383","64381","64384","64382"],"item_10001_biblio_info_7":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2009-10-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"5","bibliographicPageEnd":"733","bibliographicPageStart":"722","bibliographicVolumeNumber":"21","bibliographic_titles":[{"bibliographic_title":"日本知能情報ファジィ学会誌"}]}]},"item_10001_description_5":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"一般に,自律エージェントや自律移動ロボットに効率的な行動学習をさせるためには動物の学習メカニズムから工学的応用を行なうことは有効な手法であることが知られている。中でも,動物行動学,行動分析学や動物のトレーニング(調教)などで広く用いられている「Shaping」という概念が最近注目されている. Shapingは学習者が容易に実行できる行動から複雑な行動へと段階的,誘導的に強化信号を与え,次第に希望の行動系列を形成する概念である。本研究では繰り返し探索により自律的に目標行動を獲得できる強化学習にShapingの概念を取り入れたShaping強化学習を提案する。有効なShaping効果を検証するために強化学習の代表的なQ-Learning, Profit Sharing, Actor-Criticの3手法を用いた異なるShaping強化学習を提案し,グリッド探索問題のシミュレータを用いて比較実験を行なった。さらに,実際の動物などの調教の場などで知られている段階を追って行動を強化する「分化強化」という概念をShaping強化学習に取り入れた分化強化型ShapingQ-Learning (DR-SQL)を提案し,シミュレーション実験により手法の有効性が確認された。Generally, it is known that the engineering application simulated from the learning mechanism of animals is useful to make learn behaviors of the autonomous agents or mobile robots efficiently. Above all, a general idea of \"shaping\" used by ethology, behavior analysis or animal training is a remarkable method recently. \"Shaping\" is a general idea that the learner is given a reinforcement signal step by step gradually and inductively forward the behavior from easy tasks to complicated tasks. In this research, we propose a shaping reinforcement learning method took in a general idea of \"shaping\" to the reinforcement learning that can acquire a desired behavior by the repeated search autonomously. Three different shaping reinforcement learning methods used Q-Learning, Profit Sharing, and Actor-Critic to check the efficiency of the shaping were proposed and the experiment by the simulator of grid search was performed. 
Furthermore, we proposed the Differential Reinforcement-type Shaping Q-Learning (DR-SQL) applied a general idea of differential reinforcement to reinforce a special behavior step by step such as real animal training, and confirmed the effectiveness of this method by the simulation experiment.","subitem_description_type":"Abstract"}]},"item_10001_publisher_8":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"日本知能情報ファジィ学会"}]},"item_10001_relation_11":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_relation_type_id":{"subitem_relation_type_id_text":"TD00006774","subitem_relation_type_select":"NCID"}}]},"item_10001_source_id_9":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"13477986","subitem_source_identifier_type":"ISSN"}]},"item_10001_version_type_20":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_b1a7d7d4d402bcce","subitem_version_type":"AO"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"前田, 陽一郎"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"花香, 敏"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"MAEDA, Yoichiro","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"HANAKA, Satoshi","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2020-08-04"}],"displaytype":"detail","filename":"BD00006774_001.pdf","filesize":[{"value":"1.5 MB"}],"format":"application/pdf","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"BD00006774_001.pdf","url":"https://u-fukui.repo.nii.ac.jp/record/22327/files/BD00006774_001.pdf"},"version_id":"154b1442-d25f-4c7c-9def-e9662c234e0f"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Shaping強化学習","subitem_subject_scheme":"Other"},{"subitem_subject":"分化強化","subitem_subject_scheme":"Other"},{"subitem_subject":"調教","subitem_subject_scheme":"Other"},{"subitem_subject":"自律エージェント","subitem_subject_scheme":"Other"},{"subitem_subject":"移動ロボット","subitem_subject_scheme":"Other"},{"subitem_subject":"Shaping Reinforcement Learning","subitem_subject_scheme":"Other"},{"subitem_subject":"Differential Reinforcement","subitem_subject_scheme":"Other"},{"subitem_subject":"Animal Training","subitem_subject_scheme":"Other"},{"subitem_subject":"Autonomous Agent","subitem_subject_scheme":"Other"},{"subitem_subject":"Mobile Robot","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"other","resourceuri":"http://purl.org/coar/resource_type/c_1843"}]},"item_title":"Shaping強化学習を用いた自律エージェントの行動獲得支援手法","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Shaping強化学習を用いた自律エージェントの行動獲得支援手法"},{"subitem_title":"Behavior Acquisition Supporting Method Used Shaping Reinforcement Learning for Autonomous 
Agent","subitem_title_language":"en"}]},"item_type_id":"10001","owner":"1","path":["2404"],"pubdate":{"attribute_name":"公開日","attribute_value":"2010-01-19"},"publish_date":"2010-01-19","publish_status":"0","recid":"22327","relation_version_is_last":true,"title":["Shaping強化学習を用いた自律エージェントの行動獲得支援手法"],"weko_creator_id":"1","weko_shared_id":-1},"updated":"2023-05-15T13:03:01.918633+00:00"}