Linear Regression and group by in R


Here's an approach using the plyr package:

d <- data.frame(state = rep(c('NY', 'CA'), 10),
                year = rep(1:10, 2),
                response = rnorm(20))

library(plyr)

# Break up d by state, then fit the specified model to each piece and
# return a list
models <- dlply(d, "state", function(df)
  lm(response ~ year, data = df))

# Apply coef to each model and return a data frame
ldply(models, coef)

# Print the summary of each model
l_ply(models, summary, .print = TRUE)
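If you also want per-group model-level statistics such as R-squared, a minimal sketch along the same plyr lines (assuming the models list created above) is to pull them out of each fitted model's summary:

# Assumes the `models` list produced by dlply() above
ldply(models, function(m) {
  s <- summary(m)
  data.frame(r.squared = s$r.squared, sigma = s$sigma)
})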


Since 2009, the dplyr package has been released, which provides a very nice way to do this kind of grouping, closely resembling what SAS does.

library(dplyr)

d <- data.frame(state = rep(c('NY', 'CA'), c(10, 10)),
                year = rep(1:10, 2),
                response = c(rnorm(10), rnorm(10)))

fitted_models = d %>% group_by(state) %>% do(model = lm(response ~ year, data = .))
# Source: local data frame [2 x 2]
# Groups: <by row>
#
#    state   model
#   (fctr)   (chr)
# 1     CA <S3:lm>
# 2     NY <S3:lm>

fitted_models$model
# [[1]]
#
# Call:
# lm(formula = response ~ year, data = .)
#
# Coefficients:
# (Intercept)         year
#    -0.06354      0.02677
#
#
# [[2]]
#
# Call:
# lm(formula = response ~ year, data = .)
#
# Coefficients:
# (Intercept)         year
#    -0.35136      0.09385

To retrieve the coefficients and the R-squared/p-values, one can use the broom package. This package provides:

three S3 generics: tidy, which summarizes a model's statistical findings such as coefficients of a regression; augment, which adds columns to the original data such as predictions, residuals and cluster assignments; and glance, which provides a one-row summary of model-level statistics.

library(broom)

fitted_models %>% tidy(model)
# Source: local data frame [4 x 6]
# Groups: state [2]
#
#    state        term    estimate  std.error  statistic   p.value
#   (fctr)       (chr)       (dbl)      (dbl)      (dbl)     (dbl)
# 1     CA (Intercept) -0.06354035 0.83863054 -0.0757668 0.9414651
# 2     CA        year  0.02677048 0.13515755  0.1980687 0.8479318
# 3     NY (Intercept) -0.35135766 0.60100314 -0.5846187 0.5749166
# 4     NY        year  0.09385309 0.09686043  0.9689519 0.3609470

fitted_models %>% glance(model)
# Source: local data frame [2 x 12]
# Groups: state [2]
#
#    state   r.squared adj.r.squared     sigma statistic   p.value    df
#   (fctr)       (dbl)         (dbl)     (dbl)     (dbl)     (dbl) (int)
# 1     CA 0.004879969  -0.119510035 1.2276294 0.0392312 0.8479318     2
# 2     NY 0.105032068  -0.006838924 0.8797785 0.9388678 0.3609470     2
# Variables not shown: logLik (dbl), AIC (dbl), BIC (dbl), deviance (dbl),
#   df.residual (int)

fitted_models %>% augment(model)
# Source: local data frame [20 x 10]
# Groups: state [2]
#
#     state   response  year      .fitted   .se.fit     .resid      .hat
#    (fctr)      (dbl) (int)        (dbl)     (dbl)      (dbl)     (dbl)
# 1      CA  0.4547765     1 -0.036769875 0.7215439  0.4915464 0.3454545
# 2      CA  0.1217003     2 -0.009999399 0.6119518  0.1316997 0.2484848
# 3      CA -0.6153836     3  0.016771076 0.5146646 -0.6321546 0.1757576
# 4      CA -0.9978060     4  0.043541551 0.4379605 -1.0413476 0.1272727
# 5      CA  2.1385614     5  0.070312027 0.3940486  2.0682494 0.1030303
# 6      CA -0.3924598     6  0.097082502 0.3940486 -0.4895423 0.1030303
# 7      CA -0.5918738     7  0.123852977 0.4379605 -0.7157268 0.1272727
# 8      CA  0.4671346     8  0.150623453 0.5146646  0.3165112 0.1757576
# 9      CA -1.4958726     9  0.177393928 0.6119518 -1.6732666 0.2484848
# 10     CA  1.7481956    10  0.204164404 0.7215439  1.5440312 0.3454545
# 11     NY -0.6285230     1 -0.257504572 0.5170932 -0.3710185 0.3454545
# 12     NY  1.0566099     2 -0.163651479 0.4385542  1.2202614 0.2484848
# 13     NY -0.5274693     3 -0.069798386 0.3688335 -0.4576709 0.1757576
# 14     NY  0.6097983     4  0.024054706 0.3138637  0.5857436 0.1272727
# 15     NY -1.5511940     5  0.117907799 0.2823942 -1.6691018 0.1030303
# 16     NY  0.7440243     6  0.211760892 0.2823942  0.5322634 0.1030303
# 17     NY  0.1054719     7  0.305613984 0.3138637 -0.2001421 0.1272727
# 18     NY  0.7513057     8  0.399467077 0.3688335  0.3518387 0.1757576
# 19     NY -0.1271655     9  0.493320170 0.4385542 -0.6204857 0.2484848
# 20     NY  1.2154852    10  0.587173262 0.5170932  0.6283119 0.3454545
# Variables not shown: .sigma (dbl), .cooksd (dbl), .std.resid (dbl)
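As an aside, newer dplyr releases have superseded do(); a rough sketch of the equivalent nest/map pattern (assuming the tidyr and purrr packages are available, and the d data frame defined above) could look like this:

library(dplyr)
library(tidyr)
library(purrr)
library(broom)

# Nest everything except `state` into a list-column, fit lm() per state,
# then tidy each fit and unnest the coefficient tables
d %>%
  nest(data = -state) %>%
  mutate(fit    = map(data, ~ lm(response ~ year, data = .x)),
         tidied = map(fit, tidy)) %>%
  unnest(tidied)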


Here's one way using the lme4 package.

library(lme4)
library(lattice)  # xyplot() comes from the lattice package

d <- data.frame(state = rep(c('NY', 'CA'), c(10, 10)),
                year = rep(1:10, 2),
                response = c(rnorm(10), rnorm(10)))

xyplot(response ~ year, groups = state, data = d, type = 'l')

fits <- lmList(response ~ year | state, data = d)
fits
#------------
# Call: lmList(formula = response ~ year | state, data = d)
# Coefficients:
#    (Intercept)        year
# CA -1.34420990  0.17139963
# NY  0.00196176 -0.01852429
#
# Degrees of freedom: 20 total; 16 residual
# Residual standard error: 0.8201316
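As a small follow-up, assuming the fits object above: lmList fits come with coef() and summary() methods, so the per-state estimates can be pulled out directly.

# Per-state intercepts and slopes as a data frame
coef(fits)

# Per-coefficient standard errors and t-values (pooled residual variance by default)
summary(fits)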