How to cite item

Reinforcement learning in clinical medicine: a method to optimize dynamic treatment regime over time

  
@article{ATM27136,
	author   = {Zhang, Zhongheng and {written on behalf of AME Big-Data Clinical Trial Collaborative Group}},
	title    = {Reinforcement learning in clinical medicine: a method to optimize dynamic treatment regime over time},
	journal  = {Annals of Translational Medicine},
	year     = {2019},
	volume   = {7},
	number   = {14},
	issn     = {2305-5847},
	url      = {https://atm.amegroups.org/article/view/27136},
	abstract = {Precision medicine requires individualized treatment regime for subjects with different clinical characteristics. Machine learning methods have witnessed rapid progress in recent years, which can be employed to make individualized treatment regime in clinical practice. The idea of reinforcement learning method is to take action in response to the changing environment. In clinical medicine, this idea can be used to assign optimal regime to patients with distinct characteristics. In the field of statistics, reinforcement learning has been widely investigated, aiming to identify an optimal dynamic treatment regime (DTR). Q-learning is among the earliest methods to identify optimal DTR, which fits linear outcome models in a recursive manner. The advantage is its easy interpretation and can be performed in most statistical software. However, it suffers from the risk of misspecification of the linear model. More recently, some other methods not so heavily depend on model specification have been developed such as inverse probability weighted estimator and augmented inverse probability weighted estimator. This review introduces the basic ideas of these methods and shows how to perform the learning algorithm within R environment.},
}