First load the data

library(tictoc)
library(caret)

df <- read.csv("../reports/probing_results_1200_per_class/task1_predict_task_performance.csv")
dim(df)
[1] 25 98
all_glue_tasks = c("rte", "cola", "mrpc", "sst2", "qnli", "qqp")
all_probe_tasks = c("bigram_shift", "coordination_inversion", "obj_number", "odd_man_out", "past_present", "subj_number", "tree_depth")

1. Probing from all layers in one task

# Fit a 5-fold CV linear model predicting `glue_task` performance from the
# probing accuracies of all 12 layers of one probing task, and compare it to
# a control model trained on Gaussian-noise features of the same shape.
#
# Args:
#   glue_task:  name of the response column in the global `df`
#   probe_task: probing task whose "<task>_layer_1".."_layer_12" columns are used
# Returns a list: RMSE, ctrl_RMSE, percent RMSE reduction vs the control,
# and percent of variance explained.
all_layers_from_one_task <- function(glue_task, probe_task) {
  layers=1:12
  # Predictor column names, e.g. "bigram_shift_layer_1" .. "_layer_12"
  features = paste(paste(probe_task, "_layer_", sep=""), layers, sep="")
  x_y_features = c(glue_task, features)
  formula = as.formula(paste(glue_task, "~ ."))
  trcontrol <- trainControl(method="cv", number=5)
  model <- train(formula, data=df[x_y_features], method="lm", trControl=trcontrol)
  # Training RMSE of the final fitted lm (not the CV resampling estimate).
  rmse <- sqrt(mean(summary(model)$residuals^2))
  
  # Control: noise features with the same dimensions as the real predictors;
  # whatever "signal" this model finds is the random baseline.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1), 
                         nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  ctrl_model <- train(
    as.formula(sprintf("%s ~ .", glue_task)), data=Z, method="lm", trControl=trcontrol)
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))
  
  # NOTE(review): length(df) counts columns (98), not observations (25);
  # nrow(df) looks intended here, so explained_var may be misscaled -- verify.
  SST <- var(df[glue_task]) * (length(df)-1)
  SSE <- deviance(model)
  return(list("RMSE"=rmse,
              "ctrl_RMSE"=ctrl_rmse,
              "RMSE_reduction"=(ctrl_rmse-rmse)/ctrl_rmse*100,
              "explained_var"=(SST-SSE) / SST * 100 ))
}

# Run the per-probing-task regression for every GLUE task and print RMSEs.
set.seed(1234)  # fixes the random control features for reproducibility
for (gt in all_glue_tasks) {
  print(sprintf("Predict %s", gt))
  for (pt in all_probe_tasks) {
    ret = all_layers_from_one_task(gt, pt)
    print(sprintf("probing task %s. RMSE %.4f. ctrl_RMSE %.4f RMSE_reduction %.2f", pt, ret$RMSE, ret$ctrl_RMSE, ret$RMSE_reduction))
  }
}
[1] "Predict rte"
[1] "probing task bigram_shift. RMSE 0.0353. ctrl_RMSE 0.0377 RMSE_reduction 6.25"
[1] "probing task coordination_inversion. RMSE 0.0336. ctrl_RMSE 0.0343 RMSE_reduction 2.19"
[1] "probing task obj_number. RMSE 0.0427. ctrl_RMSE 0.0460 RMSE_reduction 7.04"
[1] "probing task odd_man_out. RMSE 0.0274. ctrl_RMSE 0.0396 RMSE_reduction 30.90"
[1] "probing task past_present. RMSE 0.0430. ctrl_RMSE 0.0444 RMSE_reduction 3.07"
[1] "probing task subj_number. RMSE 0.0489. ctrl_RMSE 0.0408 RMSE_reduction -19.66"
[1] "probing task tree_depth. RMSE 0.0378. ctrl_RMSE 0.0395 RMSE_reduction 4.37"
[1] "Predict cola"
[1] "probing task bigram_shift. RMSE 0.0091. ctrl_RMSE 0.0194 RMSE_reduction 52.80"
[1] "probing task coordination_inversion. RMSE 0.0054. ctrl_RMSE 0.0162 RMSE_reduction 66.59"
[1] "probing task obj_number. RMSE 0.0069. ctrl_RMSE 0.0125 RMSE_reduction 44.20"
[1] "probing task odd_man_out. RMSE 0.0074. ctrl_RMSE 0.0134 RMSE_reduction 44.75"
[1] "probing task past_present. RMSE 0.0092. ctrl_RMSE 0.0178 RMSE_reduction 48.42"
[1] "probing task subj_number. RMSE 0.0035. ctrl_RMSE 0.0162 RMSE_reduction 78.56"
[1] "probing task tree_depth. RMSE 0.0073. ctrl_RMSE 0.0155 RMSE_reduction 53.03"
[1] "Predict mrpc"
[1] "probing task bigram_shift. RMSE 0.0160. ctrl_RMSE 0.0247 RMSE_reduction 35.18"
[1] "probing task coordination_inversion. RMSE 0.0187. ctrl_RMSE 0.0228 RMSE_reduction 18.18"
[1] "probing task obj_number. RMSE 0.0174. ctrl_RMSE 0.0241 RMSE_reduction 28.02"
[1] "probing task odd_man_out. RMSE 0.0173. ctrl_RMSE 0.0246 RMSE_reduction 29.39"
[1] "probing task past_present. RMSE 0.0176. ctrl_RMSE 0.0269 RMSE_reduction 34.65"
[1] "probing task subj_number. RMSE 0.0176. ctrl_RMSE 0.0269 RMSE_reduction 34.48"
[1] "probing task tree_depth. RMSE 0.0207. ctrl_RMSE 0.0229 RMSE_reduction 9.54"
[1] "Predict sst2"
[1] "probing task bigram_shift. RMSE 0.0050. ctrl_RMSE 0.0072 RMSE_reduction 29.78"
[1] "probing task coordination_inversion. RMSE 0.0044. ctrl_RMSE 0.0080 RMSE_reduction 44.24"
[1] "probing task obj_number. RMSE 0.0036. ctrl_RMSE 0.0077 RMSE_reduction 53.15"
[1] "probing task odd_man_out. RMSE 0.0054. ctrl_RMSE 0.0077 RMSE_reduction 29.28"
[1] "probing task past_present. RMSE 0.0055. ctrl_RMSE 0.0071 RMSE_reduction 22.29"
[1] "probing task subj_number. RMSE 0.0044. ctrl_RMSE 0.0084 RMSE_reduction 47.75"
[1] "probing task tree_depth. RMSE 0.0039. ctrl_RMSE 0.0074 RMSE_reduction 46.98"
[1] "Predict qnli"
[1] "probing task bigram_shift. RMSE 0.0040. ctrl_RMSE 0.0089 RMSE_reduction 55.29"
[1] "probing task coordination_inversion. RMSE 0.0024. ctrl_RMSE 0.0054 RMSE_reduction 56.35"
[1] "probing task obj_number. RMSE 0.0034. ctrl_RMSE 0.0086 RMSE_reduction 60.04"
[1] "probing task odd_man_out. RMSE 0.0040. ctrl_RMSE 0.0066 RMSE_reduction 38.65"
[1] "probing task past_present. RMSE 0.0038. ctrl_RMSE 0.0064 RMSE_reduction 41.37"
[1] "probing task subj_number. RMSE 0.0026. ctrl_RMSE 0.0074 RMSE_reduction 64.74"
[1] "probing task tree_depth. RMSE 0.0031. ctrl_RMSE 0.0083 RMSE_reduction 62.79"
[1] "Predict qqp"
[1] "probing task bigram_shift. RMSE 0.0227. ctrl_RMSE 0.0469 RMSE_reduction 51.64"
[1] "probing task coordination_inversion. RMSE 0.0218. ctrl_RMSE 0.0502 RMSE_reduction 56.57"
[1] "probing task obj_number. RMSE 0.0141. ctrl_RMSE 0.0509 RMSE_reduction 72.38"
[1] "probing task odd_man_out. RMSE 0.0195. ctrl_RMSE 0.0441 RMSE_reduction 55.68"
[1] "probing task past_present. RMSE 0.0100. ctrl_RMSE 0.0409 RMSE_reduction 75.58"
[1] "probing task subj_number. RMSE 0.0180. ctrl_RMSE 0.0371 RMSE_reduction 51.50"
[1] "probing task tree_depth. RMSE 0.0204. ctrl_RMSE 0.0450 RMSE_reduction 54.67"

2. Which features are significant?

# Plain-lm variant of all_layers_from_one_task that additionally runs an
# ANOVA to identify which layer features are significant (p < 0.05).
# Returns the anova table, the significant feature names, RMSE, percent RMSE
# reduction vs a random-feature control, and percent explained variance.
probing_from_one_task <- function(glue_task, probe_task) {
  layers=1:12
  features = paste(paste(probe_task, "_layer_", sep=""), layers, sep="")
  x_y_features = c(glue_task, features)
  formula = paste(glue_task, "~ .")
  model <- lm(formula, data=df[x_y_features])
  anova_result <- anova(model)
  rmse <- sqrt(mean(summary(model)$residuals^2))
  # NOTE(review): anova() has one row per feature plus a final Residuals row
  # whose p-value is NA, so this logical index is one longer than `features`
  # and appends an NA -- that is the trailing "NA" in the printed results.
  sig_features <- features[anova_result[,5]<0.05]
  
  # Random-feature control with the same dimensions as the real predictors.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1), 
                         nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  ctrl_model <- lm(sprintf("%s ~ .", glue_task), data=Z)
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))
  
  # NOTE(review): length(df) is the column count; nrow(df) looks intended.
  SST <- var(df[glue_task]) * (length(df)-1)
  SSE <- deviance(model)
  return(list("anova_result"=anova_result, 
              "sig_features"=sig_features,
              "RMSE"=rmse,
              "RMSE_reduction"=(ctrl_rmse-rmse)/ctrl_rmse*100,
              "explained_var"=(SST-SSE) / SST * 100 ))
}

# Report the significant layer features for each (GLUE, probing) task pair.
set.seed(1234)
for (gt in all_glue_tasks) {
  print(sprintf("Predict %s", gt))
  for (pt in all_probe_tasks) {
    ret = probing_from_one_task(gt, pt)
    anova_result = ret$anova_result
    sig_features = ret$sig_features
    print(sprintf("probing task %s", pt))
    # NOTE(review): sprintf() is handed the vector as its format string;
    # print(sig_features) is almost certainly what was meant.
    print(sprintf(sig_features))
  }
}
[1] "Predict rte"
[1] "probing task bigram_shift"
[1] "bigram_shift_layer_4" "bigram_shift_layer_9" "NA"                  
[1] "probing task coordination_inversion"
[1] "coordination_inversion_layer_5" "NA"                            
[1] "probing task obj_number"
[1] "obj_number_layer_1" "obj_number_layer_3" "obj_number_layer_5"
[4] "NA"                
[1] "probing task odd_man_out"
[1] "odd_man_out_layer_6" "NA"                 
[1] "probing task past_present"
[1] "past_present_layer_1" "NA"                  
[1] "probing task subj_number"
[1] "subj_number_layer_3" "NA"                 
[1] "probing task tree_depth"
[1] "NA"
[1] "Predict cola"
[1] "probing task bigram_shift"
[1] "bigram_shift_layer_2"  "bigram_shift_layer_4"  "bigram_shift_layer_5" 
[4] "bigram_shift_layer_12" "NA"                   
[1] "probing task coordination_inversion"
[1] "coordination_inversion_layer_1"  "coordination_inversion_layer_7" 
[3] "coordination_inversion_layer_11" "NA"                             
[1] "probing task obj_number"
[1] "obj_number_layer_1"  "obj_number_layer_2"  "obj_number_layer_3" 
[4] "obj_number_layer_8"  "obj_number_layer_9"  "obj_number_layer_11"
[7] "obj_number_layer_12" "NA"                 
[1] "probing task odd_man_out"
[1] "odd_man_out_layer_5"  "odd_man_out_layer_12" "NA"                  
[1] "probing task past_present"
[1] "past_present_layer_1" "past_present_layer_4" "past_present_layer_5"
[4] "past_present_layer_8" "past_present_layer_9" "NA"                  
[1] "probing task subj_number"
[1] "subj_number_layer_1"  "subj_number_layer_4"  "subj_number_layer_6" 
[4] "subj_number_layer_10" "subj_number_layer_11" "NA"                  
[1] "probing task tree_depth"
[1] "tree_depth_layer_1"  "tree_depth_layer_2"  "tree_depth_layer_4" 
[4] "tree_depth_layer_6"  "tree_depth_layer_8"  "tree_depth_layer_12"
[7] "NA"                 
[1] "Predict mrpc"
[1] "probing task bigram_shift"
[1] "bigram_shift_layer_4" "NA"                  
[1] "probing task coordination_inversion"
[1] "coordination_inversion_layer_5" "coordination_inversion_layer_7"
[3] "NA"                            
[1] "probing task obj_number"
[1] "obj_number_layer_1" "obj_number_layer_3" "obj_number_layer_4"
[4] "NA"                
[1] "probing task odd_man_out"
[1] "odd_man_out_layer_6" "NA"                 
[1] "probing task past_present"
[1] "past_present_layer_1" "past_present_layer_7" "past_present_layer_8"
[4] "NA"                  
[1] "probing task subj_number"
[1] "subj_number_layer_1" "subj_number_layer_3" "subj_number_layer_4"
[4] "NA"                 
[1] "probing task tree_depth"
[1] "tree_depth_layer_1" "tree_depth_layer_7" "tree_depth_layer_8"
[4] "NA"                
[1] "Predict sst2"
[1] "probing task bigram_shift"
[1] "bigram_shift_layer_2"  "bigram_shift_layer_4"  "bigram_shift_layer_5" 
[4] "bigram_shift_layer_12" "NA"                   
[1] "probing task coordination_inversion"
[1] "coordination_inversion_layer_1"  "coordination_inversion_layer_7" 
[3] "coordination_inversion_layer_8"  "coordination_inversion_layer_11"
[5] "NA"                             
[1] "probing task obj_number"
[1] "obj_number_layer_1"  "obj_number_layer_3"  "obj_number_layer_4" 
[4] "obj_number_layer_5"  "obj_number_layer_8"  "obj_number_layer_9" 
[7] "obj_number_layer_11" "obj_number_layer_12" "NA"                 
[1] "probing task odd_man_out"
[1] "odd_man_out_layer_12" "NA"                  
[1] "probing task past_present"
[1] "past_present_layer_1" "past_present_layer_4" "past_present_layer_8"
[4] "NA"                  
[1] "probing task subj_number"
[1] "subj_number_layer_1" "subj_number_layer_4" "NA"                 
[1] "probing task tree_depth"
[1] "tree_depth_layer_1" "tree_depth_layer_6" "NA"                
[1] "Predict qnli"
[1] "probing task bigram_shift"
[1] "bigram_shift_layer_2" "bigram_shift_layer_4" "bigram_shift_layer_5"
[4] "NA"                  
[1] "probing task coordination_inversion"
[1] "coordination_inversion_layer_1"  "coordination_inversion_layer_11"
[3] "NA"                             
[1] "probing task obj_number"
[1] "obj_number_layer_1"  "obj_number_layer_3"  "obj_number_layer_5" 
[4] "obj_number_layer_8"  "obj_number_layer_9"  "obj_number_layer_11"
[7] "obj_number_layer_12" "NA"                 
[1] "probing task odd_man_out"
[1] "NA"
[1] "probing task past_present"
[1] "past_present_layer_1" "past_present_layer_4" "past_present_layer_7"
[4] "past_present_layer_8" "past_present_layer_9" "NA"                  
[1] "probing task subj_number"
[1] "subj_number_layer_1" "NA"                 
[1] "probing task tree_depth"
[1] "tree_depth_layer_1" "tree_depth_layer_2" "NA"                
[1] "Predict qqp"
[1] "probing task bigram_shift"
[1] "bigram_shift_layer_2" "bigram_shift_layer_4" "bigram_shift_layer_5"
[4] "bigram_shift_layer_8" "NA"                  
[1] "probing task coordination_inversion"
[1] "coordination_inversion_layer_8"  "coordination_inversion_layer_11"
[3] "NA"                             
[1] "probing task obj_number"
[1] "obj_number_layer_2"  "obj_number_layer_3"  "obj_number_layer_5" 
[4] "obj_number_layer_6"  "obj_number_layer_12" "NA"                 
[1] "probing task odd_man_out"
[1] "odd_man_out_layer_1"  "odd_man_out_layer_5"  "odd_man_out_layer_6" 
[4] "odd_man_out_layer_8"  "odd_man_out_layer_10" "NA"                  
[1] "probing task past_present"
[1] "past_present_layer_1"  "past_present_layer_2"  "past_present_layer_3" 
[4] "past_present_layer_7"  "past_present_layer_8"  "past_present_layer_10"
[7] "past_present_layer_11" "NA"                   
[1] "probing task subj_number"
[1] "subj_number_layer_1" "subj_number_layer_2" "subj_number_layer_3"
[4] "subj_number_layer_4" "subj_number_layer_5" "subj_number_layer_9"
[7] "NA"                 
[1] "probing task tree_depth"
[1] "tree_depth_layer_2" "tree_depth_layer_3" "tree_depth_layer_7"
[4] "NA"                

3. Probing from some layers from some tasks

Just use one layer for each probing task.


# Cross-validated (5-fold) linear regression of one GLUE task on an arbitrary
# set of probing-feature columns, compared against a random-noise control
# model of the same shape.
#
# Args:
#   glue_task: response column name in the global `df`
#   features:  character vector of predictor column names in `df`
# Returns a list: summary_result, RMSE, RMSE_reduction (percent).
probing_some_layers_some_ptasks <- function(glue_task, features) {
  x_y_features = c(glue_task, features)
  formula = as.formula(paste(glue_task, "~ ."))
  # Need to convert to formula; otherwise caret throws error
  
  trctrl <- trainControl(method="cv", number=5)
  model <- train(formula, 
                 data=df[x_y_features], 
                 trControl=trctrl, 
                 method="lm")
  
  summary_result <- summary(model)
  rmse <- sqrt(mean(summary_result$residuals^2))
  
  # Control: Gaussian-noise features with the same dimensions.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1), 
                         nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  ctrl_model <- train(
    as.formula(sprintf("%s ~ .", glue_task)), 
    data=Z, method="lm", 
    trControl=trainControl(method="cv", number=5))
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))
  # Guard against dividing by a zero control RMSE.
  if (ctrl_rmse == 0) {
    reduction = 0
  } else {
    reduction = (ctrl_rmse-rmse)/ctrl_rmse*100
  }

  return(list(
    "summary_result"=summary_result, 
    "RMSE"=rmse,
    "RMSE_reduction"=reduction ))
}

# Evaluate a hand-picked one-layer-per-probing-task feature set (7 features)
# on every GLUE task.
for (gt in all_glue_tasks) {
  features = c(
    "bigram_shift_layer_5",
    "coordination_inversion_layer_6",
    "obj_number_layer_1", 
    "odd_man_out_layer_5",  
    "past_present_layer_1",
    "subj_number_layer_1",
    "tree_depth_layer_1"  
  )
  ret <- probing_some_layers_some_ptasks(gt, features)
  print(sprintf("GLUE task %s, RMSE %.5f, RMSE_reduction %.2f", 
                gt, ret$RMSE, ret$RMSE_reduction))
}
[1] "GLUE task rte, RMSE 0.03796, RMSE_reduction 34.44"
[1] "GLUE task cola, RMSE 0.00682, RMSE_reduction 62.22"
[1] "GLUE task mrpc, RMSE 0.02012, RMSE_reduction 21.57"
[1] "GLUE task sst2, RMSE 0.00479, RMSE_reduction 53.28"
[1] "GLUE task qnli, RMSE 0.00288, RMSE_reduction 63.80"
[1] "GLUE task qqp, RMSE 0.03833, RMSE_reduction 23.52"

4. Predict from just 3 features

Feature elimination:
- Try a brute force iteration approach: This will take \(\binom{84}{3} = 95284\) runs; without 5-fold CV this takes around 10 mins per GLUE task. With CV it doesn't finish within 2 hrs; too long. Optimize a bit: just use lm to select features. When reporting, report CV results.
- Use the RFE by caret? The RMSE values are not as good as those from 12 features one ptask.

# Plain-lm (no CV) version of probing_some_layers_some_ptasks: fast enough to
# be called tens of thousands of times by the brute-force search below.
# Same return contract: summary_result, RMSE, RMSE_reduction (percent).
probing_some_layers_some_ptasks_fast <- function(glue_task, features) {
  x_y_features = c(glue_task, features)
  formula = as.formula(paste(glue_task, "~ ."))

  model <- lm(formula,data=df[x_y_features])
  
  summary_result <- summary(model)
  rmse <- sqrt(mean(summary_result$residuals^2))
  
  # Random-noise control features with the same shape as the real predictors.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1), 
                         nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  ctrl_model <- lm(sprintf("%s ~ .", glue_task), data=Z)
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))
  # Guard against a zero control RMSE before dividing.
  if (ctrl_rmse == 0) {
    reduction = 0
  } else {
    reduction = (ctrl_rmse-rmse)/ctrl_rmse*100
  }

  return(list(
    "summary_result"=summary_result, 
    "RMSE"=rmse,
    "RMSE_reduction"=reduction ))
}

# All 84 candidate feature names: every probing task crossed with layers 1..12
# (a 7x12 character matrix, indexed linearly by the search below).
all_probe_features <- outer(all_probe_tasks, paste0("_layer_", 1:12), FUN="paste0")

# Exhaustively try every 3-element subset (i < j < k) of the 84 probing
# features, keeping the subset with the smallest training-set lm RMSE.
# The winning subset is then re-evaluated with 5-fold CV for reporting.
find_best_features <- function(glue_task) {
  best_features = NA
  smallest_rmse = 10000
  for (i in 1:(length(all_probe_features)-2)) {
    for (j in (i+1):(length(all_probe_features)-1)) {
      for (k in (j+1):length(all_probe_features)) {
        feats <- c(all_probe_features[i], all_probe_features[j], all_probe_features[k])
        ret <- probing_some_layers_some_ptasks_fast(glue_task, feats)
        if (ret$RMSE < smallest_rmse) {
          smallest_rmse = ret$RMSE
          best_features = feats
        }
      }
    }
  }
  # Re-score the selected subset with the cross-validated variant.
  ret <- probing_some_layers_some_ptasks(glue_task, best_features)
  return(list(
    "max_rmse_reduction"=ret$RMSE_reduction,
    "best_features"=best_features,
    "rmse"=ret$RMSE
  ))
}

# Search and report the best 3-feature subset per GLUE task, timing each run.
for (gt in all_glue_tasks) {
  tic("find_best_features")
  retval = find_best_features(gt)
  toc()
  print(sprintf("Glue task %s, RMSE %.4f, max rmse reduction %.2f, achieved using %s",
                gt, retval$rmse, retval$max_rmse_reduction, retval$best_features))
}
find_best_features: 129.576 sec elapsed
[1] "Glue task rte, RMSE 0.0331, max rmse reduction 43.42, achieved using coordination_inversion_layer_1"
[2] "Glue task rte, RMSE 0.0331, max rmse reduction 43.42, achieved using tree_depth_layer_1"            
[3] "Glue task rte, RMSE 0.0331, max rmse reduction 43.42, achieved using bigram_shift_layer_12"         
find_best_features: 130.529 sec elapsed
[1] "Glue task cola, RMSE 0.0053, max rmse reduction 75.31, achieved using obj_number_layer_2" 
[2] "Glue task cola, RMSE 0.0053, max rmse reduction 75.31, achieved using subj_number_layer_2"
[3] "Glue task cola, RMSE 0.0053, max rmse reduction 75.31, achieved using tree_depth_layer_12"
find_best_features: 130.039 sec elapsed
[1] "Glue task mrpc, RMSE 0.0149, max rmse reduction 44.80, achieved using tree_depth_layer_1" 
[2] "Glue task mrpc, RMSE 0.0149, max rmse reduction 44.80, achieved using odd_man_out_layer_4"
[3] "Glue task mrpc, RMSE 0.0149, max rmse reduction 44.80, achieved using obj_number_layer_7" 
find_best_features: 129.992 sec elapsed
[1] "Glue task sst2, RMSE 0.0028, max rmse reduction 71.24, achieved using subj_number_layer_1"            
[2] "Glue task sst2, RMSE 0.0028, max rmse reduction 71.24, achieved using bigram_shift_layer_3"           
[3] "Glue task sst2, RMSE 0.0028, max rmse reduction 71.24, achieved using coordination_inversion_layer_10"
find_best_features: 131.172 sec elapsed
[1] "Glue task qnli, RMSE 0.0019, max rmse reduction 81.58, achieved using subj_number_layer_2"           
[2] "Glue task qnli, RMSE 0.0019, max rmse reduction 81.58, achieved using past_present_layer_8"          
[3] "Glue task qnli, RMSE 0.0019, max rmse reduction 81.58, achieved using coordination_inversion_layer_9"
find_best_features: 128.734 sec elapsed
[1] "Glue task qqp, RMSE 0.0125, max rmse reduction 78.52, achieved using tree_depth_layer_6"   
[2] "Glue task qqp, RMSE 0.0125, max rmse reduction 78.52, achieved using past_present_layer_8" 
[3] "Glue task qqp, RMSE 0.0125, max rmse reduction 78.52, achieved using past_present_layer_12"
---
title: "Predict GLUE performance"
output: html_notebook
---

First load the data
```{r}
library(tictoc)
library(caret)

df <- read.csv("../reports/probing_results_1200_per_class/task1_predict_task_performance.csv")
dim(df)

all_glue_tasks = c("rte", "cola", "mrpc", "sst2", "qnli", "qqp")
all_probe_tasks = c("bigram_shift", "coordination_inversion", "obj_number", "odd_man_out", "past_present", "subj_number", "tree_depth")
```

## 1. Probing from all layers in one task

```{r}
# Predict one GLUE task's performance from the probing accuracies of all 12
# layers of a single probing task, using a 5-fold cross-validated linear
# model. A control model trained on Gaussian-noise features of the same shape
# provides the baseline RMSE.
#
# Args:
#   glue_task:  name of the response column in the global `df`
#   probe_task: probing task whose "<task>_layer_1".."_layer_12" columns are used
# Returns: list(RMSE, ctrl_RMSE, RMSE_reduction [percent], explained_var [percent])
all_layers_from_one_task <- function(glue_task, probe_task) {
  layers <- 1:12
  features <- paste0(probe_task, "_layer_", layers)
  x_y_features <- c(glue_task, features)
  formula <- as.formula(paste(glue_task, "~ ."))
  trcontrol <- trainControl(method="cv", number=5)
  model <- train(formula, data=df[x_y_features], method="lm", trControl=trcontrol)
  # Training RMSE of the final fitted lm (not the CV resampling estimate).
  rmse <- sqrt(mean(summary(model)$residuals^2))

  # Control: noise features with the same dimensions as the real predictors.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1),
                          nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  ctrl_model <- train(
    as.formula(sprintf("%s ~ .", glue_task)), data=Z, method="lm", trControl=trcontrol)
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))

  # Total sum of squares scales by the number of observations (rows).
  # The original used length(df) - the number of COLUMNS - and var() on a
  # one-column data.frame (a 1x1 matrix); both are fixed here.
  SST <- var(df[[glue_task]]) * (nrow(df) - 1)
  SSE <- deviance(model)
  return(list("RMSE"=rmse,
              "ctrl_RMSE"=ctrl_rmse,
              "RMSE_reduction"=(ctrl_rmse-rmse)/ctrl_rmse*100,
              "explained_var"=(SST-SSE) / SST * 100 ))
}

# For every GLUE task, regress its score on each probing task's 12 layer
# columns and print the RMSE diagnostics.
set.seed(1234)  # makes the random control features reproducible
for (glue_task in all_glue_tasks) {
  print(sprintf("Predict %s", glue_task))
  for (probe_task in all_probe_tasks) {
    result <- all_layers_from_one_task(glue_task, probe_task)
    print(sprintf("probing task %s. RMSE %.4f. ctrl_RMSE %.4f RMSE_reduction %.2f",
                  probe_task, result$RMSE, result$ctrl_RMSE, result$RMSE_reduction))
  }
}
```

## 2. Which features are significant?

```{r}
# Fit a plain lm of one GLUE task on all 12 layer columns of one probing task
# and use the ANOVA table to find which layers are significant predictors
# (p < 0.05). A same-shape random-noise control model gives the baseline RMSE.
#
# Args:
#   glue_task:  name of the response column in the global `df`
#   probe_task: probing task whose layer columns serve as predictors
# Returns: list(anova_result, sig_features, RMSE, RMSE_reduction [percent],
#               explained_var [percent])
probing_from_one_task <- function(glue_task, probe_task) {
  layers <- 1:12
  features <- paste0(probe_task, "_layer_", layers)
  x_y_features <- c(glue_task, features)
  formula <- as.formula(paste(glue_task, "~ ."))
  model <- lm(formula, data=df[x_y_features])
  anova_result <- anova(model)
  rmse <- sqrt(mean(summary(model)$residuals^2))
  # The ANOVA table has one row per feature plus a trailing "Residuals" row
  # whose p-value is NA. The original indexed `features` with the full
  # 13-long logical vector, which appended a spurious NA feature; dropping
  # the Residuals row and using which() avoids that.
  p_values <- head(anova_result[, 5], length(features))
  sig_features <- features[which(p_values < 0.05)]

  # Random-feature control with the same dimensions as the real predictors.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1),
                          nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  ctrl_model <- lm(as.formula(sprintf("%s ~ .", glue_task)), data=Z)
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))

  # SST scales by the observation count (rows); the original mistakenly used
  # length(df), the column count.
  SST <- var(df[[glue_task]]) * (nrow(df) - 1)
  SSE <- deviance(model)
  return(list("anova_result"=anova_result,
              "sig_features"=sig_features,
              "RMSE"=rmse,
              "RMSE_reduction"=(ctrl_rmse-rmse)/ctrl_rmse*100,
              "explained_var"=(SST-SSE) / SST * 100 ))
}

# Print the significant layer features for each (GLUE, probing) task pair.
set.seed(1234)
for (gt in all_glue_tasks) {
  print(sprintf("Predict %s", gt))
  for (pt in all_probe_tasks) {
    ret <- probing_from_one_task(gt, pt)
    sig_features <- ret$sig_features
    print(sprintf("probing task %s", pt))
    # The original called sprintf(sig_features), which treats the feature
    # names as format strings; print the character vector directly.
    print(sig_features)
  }
}
```

## 3. Probing from some layers from some tasks
Just use one layer for each probing task.

```{r}

# Cross-validated (5-fold) linear regression of one GLUE task on an arbitrary
# set of probing-feature columns, compared against a random-noise control
# model with the same dimensions.
#
# Args:
#   glue_task: response column name in the global `df`
#   features:  character vector of predictor column names in `df`
# Returns: list(summary_result, RMSE, RMSE_reduction [percent])
probing_some_layers_some_ptasks <- function(glue_task, features) {
  x_y_features <- c(glue_task, features)
  # Need to convert to formula; otherwise caret throws error
  formula <- as.formula(paste(glue_task, "~ ."))

  trctrl <- trainControl(method="cv", number=5)
  model <- train(formula,
                 data=df[x_y_features],
                 trControl=trctrl,
                 method="lm")

  summary_result <- summary(model)
  rmse <- sqrt(mean(summary_result$residuals^2))

  # Control: Gaussian-noise features with the same shape.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1),
                          nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  # Reuse the same CV setup as the real model rather than building a second,
  # identical trainControl object.
  ctrl_model <- train(
    as.formula(sprintf("%s ~ .", glue_task)),
    data=Z, method="lm",
    trControl=trctrl)
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))
  # Avoid dividing by a (near-)zero control RMSE; exact float equality is
  # unreliable, so compare against machine epsilon.
  if (ctrl_rmse < .Machine$double.eps) {
    reduction <- 0
  } else {
    reduction <- (ctrl_rmse - rmse) / ctrl_rmse * 100
  }

  return(list(
    "summary_result"=summary_result,
    "RMSE"=rmse,
    "RMSE_reduction"=reduction ))
}

# One hand-picked layer per probing task. The set is loop-invariant, so build
# it once instead of rebuilding it on every iteration as the original did.
features <- c(
  "bigram_shift_layer_5",
  "coordination_inversion_layer_6",
  "obj_number_layer_1",
  "odd_man_out_layer_5",
  "past_present_layer_1",
  "subj_number_layer_1",
  "tree_depth_layer_1"
)
for (gt in all_glue_tasks) {
  ret <- probing_some_layers_some_ptasks(gt, features)
  print(sprintf("GLUE task %s, RMSE %.5f, RMSE_reduction %.2f", 
                gt, ret$RMSE, ret$RMSE_reduction))
}
```

## 4. Predict from just 3 features

Feature elimination:   
- Try a brute force iteration approach: This will take $\binom{84}{3} = 95284$ runs; without 5-fold CV this takes around 10 mins per GLUE task. With CV it doesn't finish within 2 hrs; too long. Optimize a bit: just use lm to select features. When reporting, report CV results.  
- Use the RFE by `caret`? The RMSE values are not as good as those from 12 features one ptask.   

```{r}
# Same contract as probing_some_layers_some_ptasks, but fits a plain lm with
# no cross-validation, making it fast enough for the brute-force feature
# search below.
#
# Args:
#   glue_task: response column name in the global `df`
#   features:  character vector of predictor column names in `df`
# Returns: list(summary_result, RMSE, RMSE_reduction [percent])
probing_some_layers_some_ptasks_fast <- function(glue_task, features) {
  cols <- c(glue_task, features)
  model_formula <- as.formula(paste(glue_task, "~ ."))
  model <- lm(model_formula, data=df[cols])
  summary_result <- summary(model)
  rmse <- sqrt(mean(summary_result$residuals^2))

  # Control model: Gaussian-noise predictors with the same shape.
  ctrl_features <- matrix(rnorm(length(features) * nrow(df), 0, 0.1),
                          nrow=nrow(df), ncol=length(features))
  ctrl_label <- df[glue_task]
  Z <- as.data.frame(cbind(ctrl_label, ctrl_features))
  ctrl_model <- lm(sprintf("%s ~ .", glue_task), data=Z)
  ctrl_rmse <- sqrt(mean(summary(ctrl_model)$residuals^2))

  # A zero control RMSE would make the reduction undefined; report 0 instead.
  reduction <- if (ctrl_rmse == 0) 0 else (ctrl_rmse - rmse) / ctrl_rmse * 100

  list(
    "summary_result"=summary_result,
    "RMSE"=rmse,
    "RMSE_reduction"=reduction)
}

# All 84 candidate feature names: every probing task crossed with layers 1..12
# (a 7x12 character matrix, indexed linearly by the search below).
all_probe_features <- outer(all_probe_tasks, paste0("_layer_", 1:12), FUN="paste0")

# Brute-force search over every 3-element subset (i < j < k) of the 84
# probing features, keeping the subset with the smallest training-set lm RMSE
# (via the fast, non-CV fitter). The winner is then re-scored with 5-fold CV
# for reporting.
#
# Args:
#   glue_task: response column name in the global `df`
# Returns: list(max_rmse_reduction, best_features, rmse)
find_best_features <- function(glue_task) {
  best_features <- NULL    # NULL, not NA: "nothing found yet"
  smallest_rmse <- Inf     # Inf instead of an arbitrary large constant
  n <- length(all_probe_features)  # hoisted loop invariant
  for (i in seq_len(n - 2)) {
    for (j in seq(i + 1, n - 1)) {
      for (k in seq(j + 1, n)) {
        feats <- c(all_probe_features[i], all_probe_features[j], all_probe_features[k])
        ret <- probing_some_layers_some_ptasks_fast(glue_task, feats)
        if (ret$RMSE < smallest_rmse) {
          smallest_rmse <- ret$RMSE
          best_features <- feats
        }
      }
    }
  }
  # Report cross-validated numbers for the selected subset.
  ret <- probing_some_layers_some_ptasks(glue_task, best_features)
  return(list(
    "max_rmse_reduction"=ret$RMSE_reduction,
    "best_features"=best_features,
    "rmse"=ret$RMSE
  ))
}

# Time the exhaustive 3-feature search for each GLUE task and report the
# best triple found.
for (glue_task in all_glue_tasks) {
  tic("find_best_features")
  best <- find_best_features(glue_task)
  toc()
  print(sprintf("Glue task %s, RMSE %.4f, max rmse reduction %.2f, achieved using %s",
                glue_task, best$rmse, best$max_rmse_reduction, best$best_features))
}
```

