@article{oai:u-fukui.repo.nii.ac.jp:00023056,
  author   = {Wang, Mao and Maeda, Yoichiro and Takahashi, Yasutake},
  title    = {Visual Attention Region Prediction Based on Eye Tracking Using Fuzzy Inference},
  journal  = {Journal of Advanced Computational Intelligence and Intelligent Informatics},
  volume   = {18},
  number   = {4},
  pages    = {499--510},
  month    = jul,
  year     = {2014},
  abstract = {Visual attention region prediction has attracted the attention of intelligent systems researchers because it makes the interaction between human beings and intelligent non-human agents more intelligent. Visual attention region prediction can draw on multiple input factors such as gestures, face images, and eye gaze position, but physically disabled persons may find it difficult to make some of these movements. In this paper, we propose using gaze position estimation as the input to a prediction system based on extracted image features. Our approach is divided into two parts: user gaze estimation and visual attention region inference. In user gaze estimation, a neural network serves as the decision-making unit, after which the user's gaze position on the computer screen is estimated. In visual attention region inference, the attention region is inferred by fuzzy inference after image feature maps and saliency maps have been extracted and computed. User experiments were conducted to evaluate the prediction accuracy of the proposed method. The results indicate that the proposed method predicts the position of attention regions more accurately, depending on the image.}
}