@article{220,
  title    = {The effect of model misspecification on classification decisions made using a computerized test},
  journal  = {Journal of Educational Measurement},
  volume   = {36},
  number   = {1},
  year     = {1999},
  note     = {National Council on Measurement in Education, US},
  pages    = {47-59},
  abstract = {Many computerized testing algorithms require the fitting of some item response theory (IRT) model to examinees' responses to facilitate item selection, the determination of test stopping rules, and classification decisions. Some IRT models are thought to be particularly useful for small volume certification programs that wish to make the transition to computerized adaptive testing (CAT). The 1-parameter logistic model (1-PLM) is usually assumed to require a smaller sample size than the 3-parameter logistic model (3-PLM) for item parameter calibrations. This study examined the effects of model misspecification on the precision of the decisions made using the sequential probability ratio test. For this comparison, the 1-PLM was used to estimate item parameters, even though the items' characteristics were represented by a 3-PLM. Results demonstrate that the 1-PLM produced considerably more decision errors under simulation conditions similar to a real testing environment, compared to the true model and to a fixed-form standard reference set of items. (PsycINFO Database Record (c) 2003 APA, all rights reserved).},
  keywords = {computerized adaptive testing},
  author   = {Kalohn, J. C. and Spray, J. A.}
}