Learning R with Generative AI in a Metagenomics Data Science Course - Survey Results

R code
library(tidyverse) 
library(janitor)
library(scales)
library(patchwork)   
library(sessioninfo)
library(knitr) # kable() is used to render the summary table

Overview

These graphs, tables, and open responses come from an anonymous Google Forms survey designed to gather students’ personal experiences with the effectiveness of using generative AI in this course. The CSV file of survey responses is in the course GitHub repository. The results below are generated from that CSV file; the underlying R code is also in the same GitHub repository and is rendered on this survey page. The R code for producing many of these graphs was developed interactively using the UMass Amherst GenAI platform.

Prior Coding Experience

R code
# 5 color likert palette for single stacked bar plots
cols5 <- c("#d73027","#fc8d59","#fee090","#91bfdb","#1a9850")
R code
# Read CSV for single stacked bar plots
df_survey <- readr::read_csv("survey/bio478_survey_responses.csv") |>
  clean_names() 
R code
experience_levels <- c(
"Extensive (multiple courses or projects)",
"Moderate (regular use in another language)",
"Some (one course or self-study)",
"Very limited (a few tutorials or labs)",
"None"
)

df_survey |>
  count(prior_coding_experience, name = "n") |>
  complete(prior_coding_experience = experience_levels, fill = list(n = 0)) |>
  mutate(prior_coding_experience = factor(prior_coding_experience, levels = experience_levels)) |>
  mutate(pct = n / sum(n)) |>
  ggplot(aes(x = 1, y = n, fill = prior_coding_experience)) +
  geom_col(position = "fill") +          
  scale_fill_manual(values = cols5) +
  coord_flip() +                                                    
  scale_y_continuous(labels = percent_format(accuracy = 1),
                     expand = expansion(mult = c(0, 0.05))) +
  labs(title = "Prior coding experience",
       x = NULL, y = "Percentage of respondents", fill = NULL) +
  theme_minimal(base_size = 12) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
        panel.grid.major.y = element_blank())

Frequency of AI Use

R code
experience_levels <- c(
"I did not use AI",
"I tried it once or twice",
"About once per week",
"A few times per week",
"Daily"
)

df_survey |>
  count(frequency_of_ai_use_in_this_course, name = "n") |>
  complete(frequency_of_ai_use_in_this_course = experience_levels, fill = list(n = 0))  |>
  mutate(frequency_of_ai_use_in_this_course = factor(frequency_of_ai_use_in_this_course, levels = experience_levels))  |>
  mutate(pct = n / sum(n)) |>
  ggplot(aes(x = 1, y = n, fill = frequency_of_ai_use_in_this_course)) +
  geom_col(position = "fill") +          
  scale_fill_manual(values = cols5) +
  coord_flip() +                                                   
  scale_y_continuous(labels = percent_format(accuracy = 1),
                     expand = expansion(mult = c(0, 0.05))) +
  labs(title = "Frequency of AI Use",
       x = NULL, y = "Percentage of respondents", fill = NULL) +
  theme_minimal(base_size = 12) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
        panel.grid.major.y = element_blank())

How much time did using AI save me?

R code
experience_levels <- c(
"< 0.5 hr total",
"0.5-1 hrs",
"1-2 hrs",
"2-3 hrs",
"3+ hrs"
)

df_survey |>
  count(how_much_time_did_using_ai_save_me, name = "n") |>
  complete(how_much_time_did_using_ai_save_me = experience_levels, fill = list(n = 0))  |>
  mutate(how_much_time_did_using_ai_save_me = factor(how_much_time_did_using_ai_save_me, levels = experience_levels))  |>
  mutate(pct = n / sum(n)) |>
  ggplot(aes(x = 1, y = n, fill = how_much_time_did_using_ai_save_me)) +
  geom_col(position = "fill") +          # stacked to 100%
  scale_fill_manual(values = cols5) +
  coord_flip() +                         # horizontal bar
  scale_y_continuous(labels = percent_format(accuracy = 1),
                     expand = expansion(mult = c(0, 0.05))) +
  labs(title = "How much time did using AI save me?",
       x = NULL, y = "Percentage of respondents", fill = NULL) +
  theme_minimal(base_size = 12) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
        panel.grid.major.y = element_blank())

Frequency of incorrect AI output

R code
experience_levels <- c(
"Never",
"Rarely",
"Sometimes",
"Often",
"Very Often"
)

df_survey |>
  count(frequency_of_incorrect_ai_output, name = "n") |>
  complete(frequency_of_incorrect_ai_output = experience_levels, fill = list(n = 0))  |>
  mutate(frequency_of_incorrect_ai_output = factor(frequency_of_incorrect_ai_output, levels = experience_levels))  |>
  mutate(pct = n / sum(n)) |>
  ggplot(aes(x = 1, y = n, fill = frequency_of_incorrect_ai_output)) +
  geom_col(position = "fill") +          # stacked to 100%
  scale_fill_manual(values = cols5) +
  coord_flip() +                         # horizontal bar
  scale_y_continuous(labels = percent_format(accuracy = 1),
                     expand = expansion(mult = c(0, 0.05))) +
  labs(title = "Frequency of incorrect AI output",
       x = NULL, y = "Percentage of respondents", fill = NULL) +
  theme_minimal(base_size = 12) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
        panel.grid.major.y = element_blank())

Did you become more engaged or interactive in solving problems using AI?

R code
experience_levels <- c(
"Much less engaged",
"Slightly less engaged",
"No change",
"Slightly more engaged",
"Much more engaged"
)

df_survey |>
  count(did_you_become_more_engaged_or_interactive_in_solving_problems_using_ai, name = "n") |>
  complete(did_you_become_more_engaged_or_interactive_in_solving_problems_using_ai = experience_levels, fill = list(n = 0))  |>
  mutate(did_you_become_more_engaged_or_interactive_in_solving_problems_using_ai = factor(did_you_become_more_engaged_or_interactive_in_solving_problems_using_ai, levels = experience_levels))  |>
  mutate(pct = n / sum(n)) |>
  ggplot(aes(x = 1, y = n, fill = did_you_become_more_engaged_or_interactive_in_solving_problems_using_ai)) +
  geom_col(position = "fill") +          # stacked to 100%
  scale_fill_manual(values = cols5) +
  coord_flip() +                         # horizontal bar
  scale_y_continuous(labels = percent_format(accuracy = 1),
                     expand = expansion(mult = c(0, 0.05))) +
  labs(title = "Did you become more engaged or interactive in solving problems using AI?",
       x = NULL, y = "Percentage of respondents", fill = NULL) +
  theme_minimal(base_size = 12) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
        panel.grid.major.y = element_blank())

Confidence change in programming in R

R code
experience_levels <- c(
"Much lower",
"Lower",
"About the same",
"Higher",
"Much higher"
)

df_survey |>
  count(confidence_change_in_programming_in_r, name = "n") |>
  complete(confidence_change_in_programming_in_r = experience_levels, fill = list(n = 0))  |>
  mutate(confidence_change_in_programming_in_r = factor(confidence_change_in_programming_in_r, levels = experience_levels))  |>
  mutate(pct = n / sum(n)) |>
  ggplot(aes(x = 1, y = n, fill = confidence_change_in_programming_in_r)) +
  geom_col(position = "fill") +          # stacked to 100%
  scale_fill_manual(values = cols5) +
  coord_flip() +                         # horizontal bar
  scale_y_continuous(labels = percent_format(accuracy = 1),
                     expand = expansion(mult = c(0, 0.05))) +
  labs(title = "Confidence change in programming in R",
       x = NULL, y = "Percentage of respondents", fill = NULL) +
  theme_minimal(base_size = 12) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
        panel.grid.major.y = element_blank())

Clarity of instructions on AI use

R code
experience_levels <- c(
"Very unclear",
"Unclear",
"Neutral",
"Clear",
"Very clear"
)

df_survey |>
  count(clarity_of_instructions_on_ai_use, name = "n") |>
  complete(clarity_of_instructions_on_ai_use = experience_levels, fill = list(n = 0))  |>
  mutate(clarity_of_instructions_on_ai_use = factor(clarity_of_instructions_on_ai_use, levels = experience_levels))  |>
  mutate(pct = n / sum(n)) |>
  ggplot(aes(x = 1, y = n, fill = clarity_of_instructions_on_ai_use)) +
  geom_col(position = "fill") +          # stacked to 100%
  scale_fill_manual(values = cols5) +
  coord_flip() +                         # horizontal bar
  scale_y_continuous(labels = percent_format(accuracy = 1),
                     expand = expansion(mult = c(0, 0.05))) +
  labs(title = "Clarity of instructions on AI use",
       x = NULL, y = "Percentage of respondents", fill = NULL) +
  theme_minimal(base_size = 12) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
        panel.grid.major.y = element_blank())
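
The six plots above share the same structure and differ only in the survey column, response levels, and title. If this page grows, a small helper could replace the repetition; the sketch below is hypothetical (the name plot_likert_bar and its arguments are not used elsewhere on this page) and assumes df_survey and cols5 as defined above.

R code
# Hypothetical helper (not run above): one single stacked bar per Likert question.
# Unexpected response levels are counted but shown as NA; levels with no
# responses are kept at zero via .drop = FALSE.
plot_likert_bar <- function(data, col, levels, title, palette = cols5) {
  data |>
    mutate({{ col }} := factor({{ col }}, levels = levels)) |>
    count({{ col }}, name = "n", .drop = FALSE) |>
    ggplot(aes(x = 1, y = n, fill = {{ col }})) +
    geom_col(position = "fill") +          # stacked to 100%
    scale_fill_manual(values = palette) +
    coord_flip() +                         # horizontal bar
    scale_y_continuous(labels = percent_format(accuracy = 1),
                       expand = expansion(mult = c(0, 0.05))) +
    labs(title = title, x = NULL, y = "Percentage of respondents", fill = NULL) +
    theme_minimal(base_size = 12) +
    theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
          panel.grid.major.y = element_blank())
}

# Example call, equivalent to the "Prior coding experience" chunk
# (pass the level vector that matches the question):
# plot_likert_bar(df_survey, prior_coding_experience, experience_levels,
#                 "Prior coding experience")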

Combined Likert graph of multiple-choice grids

R code
# Read CSV
df <- readr::read_csv("survey/bio478_survey_responses.csv")

# Domain order (used to control facet/plot sequence)
domains <- c(
  "Helpfulness of generative AI",
  "Inquiry & exploration",
  "Impact on learning",
  "Code quality outcomes using AI",
  "\"Vibe code\" outcomes",
  "Impact on metagenomics learning",
  "Trust calibration",
  "Ethical alignment"
)

# find columns that match any domain substring
col_matches <- names(df)[sapply(names(df), function(nm) any(sapply(domains, function(d) str_detect(nm, fixed(d)))))]
if (length(col_matches) == 0) stop("No matching columns found. Check column names or adjust domain patterns.")
raw_help <- df |> select(all_of(col_matches))
R code
# helper to detect domain and extract item text inside brackets if present
detect_domain <- function(colname, domain_vec) {
  idx <- which(sapply(domain_vec, function(d) str_detect(colname, fixed(d))))
  if (length(idx) == 0) return(NA_character_)
  domain_vec[idx[1]]
}

col_info <- tibble(orig_name = names(raw_help)) |>
  mutate(domain = map_chr(orig_name, ~ detect_domain(.x, domains)),
         item = if_else(str_detect(orig_name, "\\[.*\\]"),
                        str_extract(orig_name, "(?<=\\[).+?(?=\\])"),
                        str_remove(orig_name, fixed(domain))),
         item = str_trim(item),
         item = if_else(item == "" | is.na(item), orig_name, item))
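
As a quick illustration of the bracket-extraction pattern above (the column name here is made up, not taken from the survey):

R code
# Hypothetical grid-question column name of the form "Domain [Item]";
# the lookaround pattern keeps only the item text between the brackets.
str_extract("Helpfulness of generative AI [Debugging R code]",
            "(?<=\\[).+?(?=\\])")
# returns "Debugging R code"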
R code
# Agreement levels and robust mapping function
agree_levels <- c("Strongly disagree", "Disagree", "Neutral", "Agree", "Strongly agree")

map_to_agree <- function(x) {
  x <- stringr::str_squish(as.character(x))
  x_lower <- tolower(x)
  case_when(
    # helpfulness -> agreement mapping
    x_lower %in% c("not at all helpful", "not helpful", "not at all helpful to not helpful") ~ "Strongly disagree",
    x_lower %in% c("slightly helpful") ~ "Disagree",
    x_lower %in% c("moderately helpful") ~ "Neutral",
    x_lower %in% c("very helpful") ~ "Agree",
    x_lower %in% c("extremely helpful") ~ "Strongly agree",
    # direct agreement variants
    x_lower %in% c("strongly disagree", "strong disagree") ~ "Strongly disagree",
    x_lower %in% c("disagree") ~ "Disagree",
    x_lower %in% c("neutral", "neither agree nor disagree", "neither") ~ "Neutral",
    x_lower %in% c("agree") ~ "Agree",
    x_lower %in% c("strongly agree", "strong agree") ~ "Strongly agree",
    # already canonical
    x %in% agree_levels ~ x,
    TRUE ~ NA_character_
  )
}

# pivot to long, join col_info, map responses, compute counts and percentages
long <- raw_help %>%
  pivot_longer(cols = everything(), names_to = "orig_name", values_to = "response_raw") %>%
  left_join(col_info, by = "orig_name") %>%
  mutate(response_mapped = map_to_agree(response_raw)) %>%
  group_by(domain, item, response_mapped) %>%
  summarise(n = n(), .groups = "drop_last") %>%
  group_by(domain, item) %>%
  # add rows for any missing response levels per domain/item
  complete(response_mapped = agree_levels, fill = list(n = 0)) %>%
  # make sure response_mapped is an ordered factor with the exact levels
  mutate(response_mapped = factor(response_mapped, levels = agree_levels, ordered = TRUE),
         total_n = sum(n),
         pct = ifelse(total_n > 0, n / total_n, 0)) %>%
  ungroup()
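
A quick sanity check of the mapping, using made-up inputs rather than survey responses:

R code
# Illustrative inputs only: helpfulness wording collapses onto the agreement
# scale, and unrecognized text becomes NA.
map_to_agree(c("Extremely helpful", "moderately helpful", "Strongly agree", "Maybe"))
# returns: "Strongly agree" "Neutral" "Strongly agree" NA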
R code
# compute per-item positive proportion (Agree + Strongly agree) per domain
item_summary <- long |>
  filter(!is.na(response_mapped)) |>
  group_by(domain, item) |>
  summarize(total_n = sum(n, na.rm = TRUE),
            positive_n = sum(n[response_mapped %in% c("Agree", "Strongly agree")], na.rm = TRUE),
            positive_pct = if_else(total_n > 0, positive_n / total_n, 0),
            .groups = "drop")
R code
# Prepare per-domain data frames (only domains present)
domain_dfs <- long |>
  filter(!is.na(response_mapped)) |>
  group_split(domain)
names(domain_dfs) <- map_chr(domain_dfs, ~ unique(.x$domain))

# keep only requested domains that actually exist in data
present_domains <- intersect(domains, names(domain_dfs))
domain_dfs <- domain_dfs[present_domains]

# count items per domain for relative heights
item_counts <- map_int(domain_dfs, ~ length(unique(.x$item)))

# color palette and common y-limits
cols <- c("Strongly disagree" = "#d73027",
          "Disagree" = "#fc8d59",
          "Neutral" = "#fee090",
          "Agree" = "#91bfdb",
          "Strongly agree" = "#1a9850")
y_limits <- c(0, 1)

# Build one ggplot per domain. Ensure scale_fill includes all five categories and uses a one-row legend.
plot_list <- imap(domain_dfs, function(df_dom, dom_name) {
  # order items by positive_pct (ascending, so the most positive item ends up
  # at the top after coord_flip), with ties broken by total_n (descending)
  item_order <- item_summary |>
    filter(domain == dom_name) |>
    arrange(positive_pct, desc(total_n)) |>
    pull(item)
  if (length(item_order) == 0) item_order <- unique(df_dom$item)

  df_dom2 <- df_dom |>
    mutate(item = factor(item, levels = item_order, ordered = TRUE))

  ggplot(df_dom2, aes(x = item, y = pct, fill = response_mapped)) +
    geom_col(width = 0.75, color = NA) +
    coord_flip() +
    scale_y_continuous(labels = percent_format(accuracy = 1), limits = y_limits, expand = c(0,0)) +
    scale_fill_manual(
        values = cols,
        breaks = agree_levels,
        limits = agree_levels,    # ensure every plot's scale has the same keys
        drop = FALSE,             # don't drop unused factor levels
        guide = guide_legend(nrow = 1, byrow = TRUE),
        name = "Response"
    ) +
    labs(title = dom_name, x = NULL, y = NULL) +
    theme_minimal(base_size = 12) +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          axis.text.y = element_text(size = 10),
          panel.grid.major.y = element_blank(),
          plot.margin = margin(6,6,6,6),
          text = element_text(color = "black"),
          legend.position = "bottom")
})
R code
# Combine vertically with a single collected legend at bottom
combined <- wrap_plots(plot_list, ncol = 1) +
  plot_layout(heights = item_counts, guides = "collect") &
  theme(legend.position = "bottom")

combined
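
The heights argument is what keeps each domain's panel height proportional to its number of items. A minimal standalone illustration of that idea with toy plots (built-in mtcars data, not survey data):

R code
# Toy example only: the first panel is drawn three times as tall as the second.
p_a <- ggplot(mtcars, aes(wt, mpg)) + geom_point()
p_b <- ggplot(mtcars, aes(factor(cyl))) + geom_bar()
wrap_plots(p_a, p_b, ncol = 1) + plot_layout(heights = c(3, 1))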

When AI was wrong, I typically… - multiple response

R code
# remove all objects from the environment - ha!
rm(list = ls())
R code
# Choose whether percentages should be relative to respondents who answered this question ("answerers") or relative to all survey respondents ("all"):
percent_base <- "answerers" # "answerers" or "all"

## Read data and detect the column
df <- read_csv("survey/bio478_survey_responses.csv")

# detect the multi-response column (flexible by partial name)
resp_col <- names(df)[str_detect(names(df), regex("When AI was wrong", ignore_case = TRUE))][1]
if(is.na(resp_col)) stop("Could not find a column matching 'When AI was wrong' in the CSV.")
# resp_col
R code
# Prepare answers (split multi-response entries)
# ensure respondent id exists
if(!"respondent_id" %in% names(df)) df <- df |> mutate(respondent_id = row_number())

df_answers <- df |>
  select(respondent_id, all_of(resp_col)) |>
  rename(raw = all_of(resp_col)) |>
  filter(!is.na(raw) & str_trim(raw) != "") |>               # only respondents who answered this question
  mutate(respondent_id = as.character(respondent_id)) |>
  # split by comma or semicolon (adjust sep if your data uses a different delimiter)
  separate_rows(raw, sep = "\\s*[,;]\\s*") |>
  # treat "Other: ..." as "Other" (if you want to inspect free text, modify this)
  mutate(answer = str_remove(raw, regex("^Other[:\\-]\\s*", ignore_case = TRUE)) |> str_trim()) |>
  mutate(answer = if_else(str_detect(raw, regex("^Other[:\\-]", ignore_case = TRUE)), "Other", answer)) |>
  distinct(respondent_id, answer) |>   # avoid double-count within same respondent
  filter(answer != "" & !is.na(answer))

# quick look at distinct answers found
# df_answers |> distinct(answer) |> arrange(answer)
R code
# If custom order not provided, infer from observed answers:
options_order <- if(exists("options_order")) options_order else df_answers |> distinct(answer) |> pull(answer)
R code
# Count and compute percentages
counts <- df_answers |>
  count(answer, name = "n") |>
  complete(answer = options_order, fill = list(n = 0)) |>
  mutate(pct = n / sum(n),
         answer = forcats::fct_reorder(answer, -n))  # order by frequency

if(tolower(percent_base) == "all"){
  counts <- counts |> mutate(pct = n / nrow(df))
  y_label <- "Percent of all respondents"
} else {
  counts <- counts |> mutate(pct = n / n_distinct(df_answers$respondent_id))
  y_label <- "Percent of respondents who answered this question"
}
R code
p <- ggplot(counts, aes(x = fct_rev(answer), y = pct)) +
  geom_col(fill = "#881c1c", width = 0.7) +
  coord_flip() +
  scale_y_continuous(labels = percent_format(accuracy = 1), expand = expansion(mult = c(0, 0.12))) +
  geom_text(aes(label = paste0(n, " (", scales::percent(pct, accuracy = 1), ")")),
            hjust = -0.02, size = 3.2) +
  labs(title = "When AI was wrong, I typically…",
       x = NULL,
       y = y_label) +
  theme_minimal(base_size = 12) +
  theme(panel.grid.major.y = element_blank())

p
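
The next two sections repeat this pipeline with a different column pattern and title. A helper along these lines could consolidate them; this is a hypothetical sketch (plot_multi_response is not defined or used elsewhere on this page) that assumes the same CSV path and the answerers-based percentages used above.

R code
# Hypothetical helper (not run above) wrapping the multi-response pipeline:
# read the CSV, find the column by partial name, split answers, count, and plot.
plot_multi_response <- function(pattern, title,
                                path = "survey/bio478_survey_responses.csv") {
  df <- read_csv(path, show_col_types = FALSE) |>
    mutate(respondent_id = as.character(row_number()))
  resp_col <- names(df)[str_detect(names(df), regex(pattern, ignore_case = TRUE))][1]
  if (is.na(resp_col)) stop("No column matching: ", pattern)

  answers <- df |>
    select(respondent_id, raw = all_of(resp_col)) |>
    filter(!is.na(raw), str_trim(raw) != "") |>
    separate_rows(raw, sep = "\\s*[,;]\\s*") |>
    mutate(answer = if_else(str_detect(raw, regex("^Other[:\\-]", ignore_case = TRUE)),
                            "Other", str_trim(raw))) |>
    distinct(respondent_id, answer) |>
    filter(answer != "")

  answers |>
    count(answer, name = "n") |>
    mutate(pct = n / n_distinct(answers$respondent_id)) |>
    ggplot(aes(x = fct_reorder(answer, n), y = pct)) +
    geom_col(fill = "#881c1c", width = 0.7) +
    coord_flip() +
    scale_y_continuous(labels = percent_format(accuracy = 1),
                       expand = expansion(mult = c(0, 0.12))) +
    geom_text(aes(label = paste0(n, " (", percent(pct, accuracy = 1), ")")),
              hjust = -0.02, size = 3.2) +
    labs(title = title, x = NULL,
         y = "Percent of respondents who answered this question") +
    theme_minimal(base_size = 12) +
    theme(panel.grid.major.y = element_blank())
}

# Example call, equivalent to the plot above:
# plot_multi_response("When AI was wrong", "When AI was wrong, I typically…")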

Common issues encountered - multiple response

R code
# remove all objects from the environment
rm(list = ls())
R code
# Choose whether percentages should be relative to respondents who answered this question ("answerers") or relative to all survey respondents ("all"):
percent_base <- "answerers" # "answerers" or "all"

## Read data and detect the column
df <- read_csv("survey/bio478_survey_responses.csv")

resp_col <- names(df)[str_detect(names(df), regex("Common issues encountered", ignore_case = TRUE))][1]
if(is.na(resp_col)) stop("Could not find a column matching 'Common issues encountered' in the CSV.")
# resp_col
R code
# Prepare answers (split multi-response entries)
# ensure respondent id exists
if(!"respondent_id" %in% names(df)) df <- df |> mutate(respondent_id = row_number())

df_answers <- df |>
  select(respondent_id, all_of(resp_col)) |>
  rename(raw = all_of(resp_col)) |>
  filter(!is.na(raw) & str_trim(raw) != "") |>               # only respondents who answered this question
  mutate(respondent_id = as.character(respondent_id)) |>
  # split by comma or semicolon (adjust sep if your data uses a different delimiter)
  separate_rows(raw, sep = "\\s*[,;]\\s*") |>
  # treat "Other: ..." as "Other" (if you want to inspect free text, modify this)
  mutate(answer = str_remove(raw, regex("^Other[:\\-]\\s*", ignore_case = TRUE)) |> str_trim()) |>
  mutate(answer = if_else(str_detect(raw, regex("^Other[:\\-]", ignore_case = TRUE)), "Other", answer)) |>
  distinct(respondent_id, answer) |>   # avoid double-count within same respondent
  filter(answer != "" & !is.na(answer))

# quick look at distinct answers found
# df_answers |> distinct(answer) |> arrange(answer)
R code
# If custom order not provided, infer from observed answers:
options_order <- if(exists("options_order")) options_order else df_answers |> distinct(answer) |> pull(answer)
R code
# Count and compute percentages
counts <- df_answers |>
  count(answer, name = "n") |>
  complete(answer = options_order, fill = list(n = 0)) |>
  mutate(pct = n / sum(n),
         answer = forcats::fct_reorder(answer, -n))  # order by frequency

if(tolower(percent_base) == "all"){
  counts <- counts |> mutate(pct = n / nrow(df))
  y_label <- "Percent of all respondents"
} else {
  counts <- counts |> mutate(pct = n / n_distinct(df_answers$respondent_id))
  y_label <- "Percent of respondents who answered this question"
}
R code
p <- ggplot(counts, aes(x = fct_rev(answer), y = pct)) +
  geom_col(fill = "#881c1c", width = 0.7) +
  coord_flip() +
  scale_y_continuous(labels = percent_format(accuracy = 1), expand = expansion(mult = c(0, 0.12))) +
  geom_text(aes(label = paste0(n, " (", scales::percent(pct, accuracy = 1), ")")),
            hjust = -0.02, size = 3.2) +
  labs(title = "Common issues encountered",
       x = NULL,
       y = y_label) +
  theme_minimal(base_size = 12) +
  theme(panel.grid.major.y = element_blank())

p

Where more support would help - multiple response

R code
# remove all objects from the environment
rm(list = ls())
R code
# Choose whether percentages should be relative to respondents who answered this question ("answerers") or relative to all survey respondents ("all"):
percent_base <- "answerers" # "answerers" or "all"

## Read data and detect the column
df <- read_csv("survey/bio478_survey_responses.csv")

resp_col <- names(df)[str_detect(names(df), regex("Where more support would help", ignore_case = TRUE))][1]
if(is.na(resp_col)) stop("Could not find a column matching 'Where more support would help' in the CSV.")
# resp_col
R code
# Prepare answers (split multi-response entries)
# ensure respondent id exists
if(!"respondent_id" %in% names(df)) df <- df |> mutate(respondent_id = row_number())

df_answers <- df |>
  select(respondent_id, all_of(resp_col)) |>
  rename(raw = all_of(resp_col)) |>
  filter(!is.na(raw) & str_trim(raw) != "") |>               # only respondents who answered this question
  mutate(respondent_id = as.character(respondent_id)) |>
  # split by comma or semicolon (adjust sep if your data uses a different delimiter)
  separate_rows(raw, sep = "\\s*[,;]\\s*") |>
  # treat "Other: ..." as "Other" (if you want to inspect free text, modify this)
  mutate(answer = str_remove(raw, regex("^Other[:\\-]\\s*", ignore_case = TRUE)) |> str_trim()) |>
  mutate(answer = if_else(str_detect(raw, regex("^Other[:\\-]", ignore_case = TRUE)), "Other", answer)) |>
  distinct(respondent_id, answer) |>   # avoid double-count within same respondent
  filter(answer != "" & !is.na(answer))

# quick look at distinct answers found
# df_answers |> distinct(answer) |> arrange(answer)
R code
# If custom order not provided, infer from observed answers:
options_order <- if(exists("options_order")) options_order else df_answers |> distinct(answer) |> pull(answer)
R code
# Count and compute percentages
counts <- df_answers |>
  count(answer, name = "n") |>
  complete(answer = options_order, fill = list(n = 0)) |>
  mutate(pct = n / sum(n),
         answer = forcats::fct_reorder(answer, -n))  # order by frequency

if(tolower(percent_base) == "all"){
  counts <- counts |> mutate(pct = n / nrow(df))
  y_label <- "Percent of all respondents"
} else {
  counts <- counts |> mutate(pct = n / n_distinct(df_answers$respondent_id))
  y_label <- "Percent of respondents who answered this question"
}
R code
p <- ggplot(counts, aes(x = fct_rev(answer), y = pct)) +
  geom_col(fill = "#881c1c", width = 0.7) +
  coord_flip() +
  scale_y_continuous(labels = percent_format(accuracy = 1), expand = expansion(mult = c(0, 0.12))) +
  geom_text(aes(label = paste0(n, " (", scales::percent(pct, accuracy = 1), ")")),
            hjust = -0.02, size = 3.2) +
  labs(title = "Where more support would help",
       x = NULL,
       y = y_label) +
  theme_minimal(base_size = 12) +
  theme(panel.grid.major.y = element_blank())

p

Examples of helpful AI responses - Open response

R code
# Read CSV for open_response
df_survey <- readr::read_csv("survey/bio478_survey_responses.csv") |>
  clean_names() 
R code
wrapped_text <- str_wrap(df_survey$could_you_give_short_examples_of_a_helpful_ai_response, width = 80, indent = 0, exdent = 0, whitespace_only = TRUE)
cat(wrapped_text, sep = "\n\n")
It telling me how to create statistical analysis formulas to compare data
points, and how to map graphs

Sometime when I didn't know how to attack a problem it would take me through the
steps I needed to do before I wrote a code to answer the question like cleaning
up or merging data

Before we learned the definitions of regex patterns and I tried to ask AI
to create patterns for effective character finding, I used AI to give an
explanation of the reasoning and meaning of the regex patterns it generated
Even before we formally learned the skill, AI helped me think of advanced regex
patterns

Writing code blocks utilizing functions/libraries I hadn't used before

clarity on homework / studying

Giving me different types of analysis I could do to see spatial autocorrelation
for the MAG and metadata dataset

Making sense of the outputs generated by the code Or clarifying what table
categories that have slightly different names from the instructions are most
likely to be what the instructions are trying to direct you to

It really helped a lot during the lab exercises and it also explains it too

When working with the NEON Mags dataset, the AI gave very good suggestions on
how to go about formatting my data and what errors were preventing my code from
running

NA

A lot of the time I asked AI to help explain certain coding functions or I would
ask them why I am getting the error message I am getting when rendering my lab
and that's helped me learn from why I can't write the code in certain ways So
AI has definitely helped me improve my coding syntax so that less errors come up
when I render my labs

AI was very good at telling me what an error message meant, provided that I gave
them the exact error message R was giving me AI would often explain what the
error message meant and then provide multiple common ways to solve this problem

When an error occurs it will explain my error to me and give me code to correct
it

-producing helpful explanations when codes do not run, providing simple code
fixes

AI was able to come up analyses and graphs for the data that I never would have
thought of It also provided the code for each analysis which was very helpful
when trying to reproduce the graph in RStudio

I frequently would give copilot what I said and the error code and it would then
bold what it changes and why

Once I was supposed to run a code to make graphs in a certain way but I wasn't
really sure on how to do that AI helped me brainstorm ways to make my code run
the way I needed it to

I asked it how to create a pie chart and it described it for me Another thing
was to make a graph that was interactive so when your mouse hovers over a point
you see what it is

One that integrated the suggested code in its response, and followed that with
an explanation of why the code worked Another common one was when I reached an
issue with one of my code chunks it would correct the issue and explain why my
code wasn't working

Giving an example of the code and then explainging what each part does, making
it easy to know what use/add to get the product i needed

AI has been helpful in providing responses to fix my coding errors For example,
I would tell CoPilot the code I was trying to run, and say it was not working
and enter the specific "Error:" I was getting Copilot would then send back the
correct code (most of the time), and explain where I went wrong which was really
helpful so that I would not make the same mistake again

I was having trouble finding code to help me build a PCA plot for the MAG data
set I upload the code that I already had as well as the MAG csv file and AI was
able to give me a better code that also gave me instructions on what to change
That was very helpful and allowed me to see what I was doing wrong

I usually used AI to correct errors within the code I created, so a helpful
response would be something along the lines of "I see the problem is ______
which can be solved by _______"

it would give options on how to improve or fix the code and when taking a
screenshot of my code & the errors, it was almost always helpful with helping me
fix the issues

When I'd get an error, I'd give it to AI and often, it could tell me what I did
wrong and how to fix it

Often times I will just input my error messages into AI (specifically ChatGPT)
and even without seeing my original code, it generates a successful fix for my
code Chat has been very successful in providing code fixes and formats on how to
code certain things

I thought the most helpful Ai responses were the ones that gave me step by
step instructions for the type of code to generate depending on the prompt By
breaking it down, I could understand the point of each chunk

Putting in error code from R would 85% of the time be resolvable through AI
directly Otherwise I could ask for alternative methods to alter my code

I would ask it to help me with something, and then it would output the code and
the reasoning behind the code and how it works

It helped me be able to make graphs and complete exercises It helped me
understand what was going on in general And it always showed different ways to
write the code

Having an idea on how you want data to be analyzed but not being sure of what
kind of graphic would be best suited for that AI can suggest a few different
methods and write the bulk of the code required to set the graphic up in RStudio

Troubleshooting issues was when I found AI most effective, rendering problems
and trying to understand coding terms

All of the assignments like Making a graph for neon image data

When I have difficulty running a code due to an error in it and I could not find
a way to make the code work so I will put the code and the feedback that R gave
me on copilot and ask the AI to help me understand how to fix the code

Anything I don’t know ai can help

Not an exact example but the answers that included step by step explanations of
the code were helpful

AI can help clean out a code chunk by replacing verbose patterns with
simpler, more readable alternatives — for example, instead of writing
filter(!isna(site_id), !isna(phylum)) %>% count(site_id, phylum), AI might
suggest drop_na(site_id, phylum) %>% count(site_id, phylum), which is shorter
and easier to understand

Here’s a specific example: my question to AI was: “what's the difference
between your solution and what I wrote?” AI gave me a step by step explanation
of (1) what my code does and (2) problems my code causes (failing to address
the problem) The specific problem I was working on was How to display a table
that counts the number of MAGs by phylum at each site AI was very helpful
in all circumstances, especially in *Suggesting R code to solve problems*
and *Explaining R code syntax and logic* Those were my two top uses of AI Of
course, AI helped with all the other categories too, but for the most part, the
two I just mentioned were the categories that I most often consulted AI for I
feel like the category *Data wrangling with MAG taxonomy + genome properties
+ soil chemistry* can be included underneath either of these two, because it
was related to problem-solving The reason I put *Extremely helpful* for all
categories because of the ability to consult AI for really almost everything
For the two categories I mentioned above, these are the prompts I would ask
“Here’s my code It seems to me that it works fine, but can you verify that it
really does?” Or, “What does this function do? What’s the difference between
this one and this one?” Or, “Here’s the problem I’m trying to solve, and here’s
my code so far Can you help me with my code (either troubleshoot this error
I’m getting or show me whether this is the best way to approach the problem)”
I took advantage of making the most out of AI by asking it to explain functions
it used that I wasn’t familiar with I also always tried to understand AI logic
by walking myself through each line, one by one The next two categories that
I used AI for mostly in this course was *Brainstorming research questions with
NEON metagenomic data* *‘Vibe code’: generating statistical, network, or other
advanced analyses* kind of follows or falls under this This is especially
helpful because I don’t have a research background and while I can ask simple
questions like “What phyla are enriched in these soil pH ranges?”, I still
would need to expand on this question My problem is that I’m not creative
(maybe not curious enough?) with my questions This is why using AI to jumpstart
brainstorming is especially helpful, especially because it can quickly provide
background information that might spark my curiosity

What's wrong here? - “You are getting this error because joinID does not exist
in the right table Check colnames() and rename before left_join()”

AI fixing errors in my code

Providing multiple approaches

Most useful with chunks of code for statistical analysis and plots readibility

NA

Examples of unhelpful or incorrect AI responses - Open response

R code
wrapped_text <- str_wrap(df_survey$could_you_give_short_examples_of_unhelpful_or_incorrect_ai_responses, width = 80, indent = 0, exdent = 0, whitespace_only = TRUE)
cat(wrapped_text, sep = "\n\n")
When it would tell me to use formulas that needed only numerical values but I
had N/A in my values It would tell me to use random names or column types that
didn't exist

Sometimes if I had an error it would keep giving me incorrect codes to the point
where nothing worked and I did'nt know how to tell fix this issue

When reading the soil mags + soil chem csv file to create Vibe code, AI would
either reveal that the file is too big to be read or it would pretend to read
it Either way, it would generate code that assumed variables and column names,
which were often incorrect

Many AI responses generate code that contain errors

using AI for therapy

The codes it would give me would have a lot of errors, but this was fine because
you learn a lot by seeing where those mistakes are and how to fix them yourself

When you're trying to fix a path directory error or use code it's giving you and
you don't realize the proper names of the files are wrong and thats the secret
reason why nothing is working

It takes a couple of tries for copilot to understand what is being asked which
can be frustrating

When it came to working with GitHub or other external R coding, AI was not
able to give very helpful tips and was just reiterating the same response in
different wording

NA

Sometimes when prompting AI with my questions, I've learned that you have to
craft the question very specifically in order for them to give generally what
you're looking for The times that I have found AI giving unhelpful responses is
when I don't prompt it correctly and sometimes it will give me an answer/code
that uses a library or code function that we have not learned or not part of the
lesson we are working on

When combining data frames I found that using AI wasn't as helpful as just
referring back to the module AI would overcomplicate things and sometimes I
would lose some of my data

AI isnt the best at helping with MAG data or anything in terms of
troubleshooting for working directory

AI responses that don't provide a code or give super complicated unrunnable
codes

AI is very prompt-specific, so if a prompt does not reflect exactly what the
person wants, it takes a lot of back and forth between the AI and the person to
get a good result

sometimes it gives wrong names for column

Sometimes when I would get an error in my code I was ask AI what I did wrong and
how I can fix It and most of the time it did help me but sometimes it would act
up and not really give me proper guidance in what needed to be done

AI telling me to install and load new libraries when I don't even need them

It would occasionally give me a code that wouldn't run or it wouldn't explain
why a certain code wasn't working

giving just example code with no explanation

There have been times where Copilot was wrong I would have to work with it back
and forth, and say things like "No, that is not correct The error: is coming
up", etc However, after I explained more, it would show me the correct code
(most of the time)

In the beginning of the class, using AI was a challenge because I did not know
the proper formatting or syntax I would as AI to give me code to transform a
data set but not really understand what was being done An example of this was me
using the "merge" function instead "left join"

If I used AI to generate new code, it would often assume file/column names When
I input my edited code (with corrected file and column names), it would solve
problems in the code but still using the file names it originally created Not a
terrible issue, just annoying

sometimes it would make the code too long & complicated when it could really be
more concise & it became really challenging when trying to use Al to help with
uploading thing like soilChem and definitely NEONmag

Sometimes, AI would suggest code that included words/phrases that weren't in the
data set, so I struggled to know what I should enter instead

I was trying to join two data sets but the IDs in the data sets had slightly
different suffixes The suggestion that AI was giving me wasn't working in
removing the suffix but it kept going around in the same circle Eventually, AI
was able to make the join work but I am not sure how

I found that some of the unhelpful/incorrect Ai responses typically had to do
with sometimes I felt like it did not listen to my requests or it would suggest
troubleshooting techniques that would not work

When AI would try to overcomplicate analysis (that would not run) and make me
download a bunch of new libraries for one prompt By the end of lab 13 I have
library(ggfortify), library(vegan), library(plotly), library(respirometry),
library(ggplot2), and library(corrplot)

Sometimes it would output code that did not do what I wanted it to do; then
I would have to refine it and tell it to redo it until it got it right Also,
sometimes the graphs or figures it would make would be cramped together and not
legible

AI made some graphs that were not accurate or did not have a good set up Also
certain commands it was not good at using

For some of the more complicated data analysis there will be some lines where
you get an error and try to use AI to solve it However it can't solve it (maybe
due to package/R version incompatibility) and you end up going down a rabbit
hole and wasting time

Would over-complicate simple exercises, adding unnecessary steps

In lab S3 Connecting a Github repo site with a new RStudio project It kept
giving me wrong code to link and due to that the Lab was not loading

When AI will give me different variations of the same code and it will still
not work even after going back and forth with it for 30 minutes It happened
to me multiple times that I asked my classmates for help Another bad short
examples will be AI using outdated syntax, complex code for simple tax and
misinterpreting data types

some wrong solution making everything complicated, ai love to Change environment
for some reasones

Not an exact example but the answers that misinterpreted the prompts or didn’t
have the full data sets weren’t helpful

An unhelpful AI response might be overly vague, like ‘Just fix the code
somehow’, which gives no actionable guidance Another incorrect response could
confidently suggest a function or method that doesn’t exist, leaving the user
confused and misled

For me, examples would be when the code simply doesn’t work Sometimes it’s due
to base R issues or packages and version incompatibility—things like that I
don’t know any technical terms to describe what the issues are more specifically
I also don’t have a good understanding of how computers work One specific
example is when I wanted to do an analysis of MAG data using a heat map based
on soil moisture My input was, “Here’s my code so far where I got the average
soil temperature for each phyla How do I make a heat map from here?” AI gave
me a solution where I had to convert my data frame to a matrix first The code
didn’t work from there due to some issues with creating the matrix I believe I
could have troubleshooted some more, as well as learn more about matrices on my
own, but in that moment I really just wanted a quick visual and wasn’t willing
to spend time to learn about matrices I think the only issue with AI feedback
here is that my code is nuanced to the data I’m working with and AI doesn’t
get to have the full picture I admit I could have been less lazy here and try
troubleshooting it myself!

“Here is an answer” with facts that are wrong or made up (hallucinating)

AI going in the wrong direction about my code

Misunderstanding the prompt, hallucinations

Hardest to solve problems If something did not work or wrong answer, needed to
figure it out mostly on my own, AI hardly could fix it

NA

Suggested changes for future offerings - Open response

R code
wrapped_text <- str_wrap(df_survey$suggested_changes_for_future_offerings, width = 80, indent = 0, exdent = 0, whitespace_only = TRUE)
cat(wrapped_text, sep = "\n\n")
More help on github pushing and just using github as a whole

I think AI helped my learning expirence I would just add more to debugging and
varifying AI like how to maybe do a monday class on this instead of a lab

I definitely think that lab reports should require students to report the prompt
that was used to generate any code they used, preceding an exercise I think this
is important data for this class and can be used to think of effective prompting
strategies One idea I had, is to use Monday discussions to do either class-wide
or team/table debriefing in which we talk about what sort of code we are getting
from AI I think a collaborative aspect in this class would be helpful for
keeping students on track together

The problems often felt formatted/designed in a way where AI use could easily
solve them without requiring critical thinking from the student I think problems
that focus more on having students determine what steps (or generated code
chunks) would need to be taken could help the course encourage greater learning

NA

Covering more on ethics and AI use

NA

I think because there is a Monday and Wednesday portion, The Monday portion
should be spent explaining what the lab on Wednesday will be I found myself
using AI a lot for things that I felt could’ve been discussed in class

Some assignments that focus more on the content of the course (ie assignments
that teach about NEON and soil chemistry/make up and how the research is
advancing) and where AI is not used

NA

I think when starting with AI sometimes it's easy to just copy and paste the
question we're asked, but I think it would be helpful to start off with examples
of how we can prompt AI to guide us in answering our problems

NA

n/a

I would not say change anything but maybe more AI use towards the end of a
specific subject so students have time to actually learn about the topic (ex No
AI use allowed monday(introduxe topic), AI use allowed in wednesday labs

Incorporating learning R code without AI first would help give students a
baseline of what R code should look and function like without having to rely on
AI too much

maybe not allow AI until a little further into the course and making the
assignments not so full so that we could focus on learning rather than just
getting all the assignments and examples and then exercises done

Using AI and giving it some background before asking questions so it has the
knowledge to help instead of pulling information from places that are irreverent
to the person using it

Nothing right now

The class relied too heavily on AI I think it would have been helpful to have
the first couple of weeks without AI so we could learn the basics of R before
delving into more complicated analysis with AI Also I felt that I didn't learn
how to properly cite AI sources with respect to a coding assignment

NA

I would say there is not much to change, I think AI definitely brought my
abilities of working with R to the next level The only thing I'd recommend is
going over how to verify the AI outputs because sometimes I would get the wrong
code or have to install packages that we do not need for this class

I would think it would be great if we could get to the MAG data earlier in the
class and learn more about different metagenomic analyses

I think it was sometimes confusing to know exactly how to cite AI in the course,
so more clarity on when/how it should be used in the earlier labs would be
helpful

I know students will use Al to help them in this course in one way or another
but if there's a way to help improve learning I know many of us had no
background use of coding at all and we kind of jumped in quickly so I would
depend on Al a lot to simply just get my work done

NA

AI skyrocketed my coding capabilities However, I don't feel very confident in
coding without AI This class would benefit from spending more time on ingraining
R/coding basics before moving to advanced R coding and metagenomic analysis
Maybe the first half of the course could be ingraining both the basics in code
and metagenomic analysis and the second half could be the explosive, exciting,
AI based coding and analysis Using AI was a world-opening experience for me in
coding

N/A

Ask people to discuss there code, or allow for people to do independent projects
I really enjoyed the last lab that was open ended If we did the end of year
project I think that I would have been able to utilize all the skills that I
learned

I would suggest more instruction for how to ask AI questions to get the results
you want Also, more help to verify if the results that the AI produced is
actually what you want / answers the question how you wanted it answered Also,
how to make AI create more legible graphs where the labels or axes are not all
clumped together so much

More help with learning how to write code on your own before working AI into the
course I feel like maybe the first few classes should be without AI and trying
to do things on your own and the bring AI in to help make more advanced code

Using some measure to prevent over reliance on AI to write all the code without
understanding it Maybe having fewer exercises per lab, but requiring students to
annotate their code and explain what was accomplished by the finished code

NA

More activities in the beginning of the course

I had hoped that the class would be more focused on the NEON Lab data and allow
more time to work with it We only engaged with the dataset for the last 2 weeks
of classes And while the previous R sessions did build foundational skills
that definitely helped in analyzing the NEON Lab data, it would have been more
beneficial to introduce and work with the data earlier during the course Because
for me, doing so could have helped with better understanding the value of using
coding tools for biological data

NA

Have a class specifically for how to ask copilot for help with R coding,
including how to write a helpful prompt and dissect the answers so that you are
able to use them in your actual code

NA

I feel like *Debugging with AI* is the most common issue I experience when
using AI for code because what we are working on can be so nuanced Because of
the nuance, I’m not sure how feasible in-class support with AI debugging would
be (in the context of a whole class period), especially if everyone is working
on different analyses / project? I would say the same thing for *Verifying AI
output* I think AI is a useful tool used for assistance, but I also found that
what we did in the course has been very valuable— namely doing the chapter
outlines They can take quite a while to get through, but I agree that it’s
important for us to know the basics of R ourselves For Advanced analyses,
maybe it would be helpful to have a brief overview in class of things like
alpha-diversity and beta-diversity I learned about this before in other classes
(and don’t remember what they are), so maybe formally going over it in class,
if only briefly, would be good Maybe taking an additional lecture to explain
more of what MAGs are and why they’re useful in research, since I didn’t get it
the first time Eventually I came to understand what they are Also, personally
I don’t know anything about analyzing data, so if we received some advice about
things to pay attention to (for example, make sure you take a larger sample
size Or any research principles we aren’t aware of? And what is ANOVA test, if
we happened to need to use it) from class, just briefly, that would help As for
using AI to help generate code for Advanced analyses, it helped so much Perhaps
one thing I think would be good for this course is to require or at least
highly encourage students to explain what their code chunk does, and put that
explanation in the Quarto document above the code chunk Or to put comments in
code in cases where the code looks suspiciously flawless We sometimes do these
things by putting the header “AI note” and “Code chunk”, but I wonder whether it
would be good for students to go further than minimally stating what prompt they
asked AI Either way, I suppose a student’s ability to gain some useful knowledge
is up to how disciplined they are able to be when they have powerful resources
like AI

N/A

NA

NA

I think it's like a calculator, I would not use it for anything else It works
less well than google for research, but it works as expected when given complex
prompts You may need to amen/check the equation yourself for sure, but it will
be able to give you a decent starting point/do the easy stuff faster than you

NA

Final comments and suggestions - Open response

R code
wrapped_text <- str_wrap(df_survey$final_comments_and_suggestions, width = 80, indent = 0, exdent = 0, whitespace_only = TRUE)
cat(wrapped_text, sep = "\n\n")
Ai really did help and I feel like it is needed for advanced code based on the
level of most students in this class, it just made it so coding felt robotic and
I learned a little less

I liked being able to use AI but I wish I knew what to do if I fall down a
rabbit hole of incorrect codes with CoPilot

Your suggestions of introducing the "Vibe" code mini labs earlier in the
semester is interesting to me I think it would be helpful in one way, as it sort
of shows students our final goals for this class and can give ideas on what AI
can be used to achieve On the other hand, it might introduce advanced code too
quickly and can make students feel lost or left behind

NA

NA

Very well structured, I learned a lot

NA

NA

NA

NA

I think the integration of AI is helpful in beginner courses as sometimes AI
has a good way to "dumb" things down almost to explain why the code produces the
output that it does Sometimes having a conversation with AI helps the learner
understand a lot more than just reading the textbook because sometimes we need
more examples than just the ones provided to see what the concept is trying to
show us

NA

helped me feel like i could confidently code if asked to

Loved this class, Dr Blanchard was super helpful and made coding seem fun even
though I have never liked to code Love AI, makes sense for coding courses do not
get rid of it completely

I enjoyed this class and performed analyses I never would have thought of
without AI suggestions I would have liked to learn coding without AI first to
establish a foundation, but ultimately, using AI allowed me to complete a lot
more work and understand more functions in R code

I would not have been able to complete this class without the assistance of AI
and if you want to continue to have a large workload AI is 100% necessary and is
helpful It is more on the individual student themselves to actually make sure we
learn the content rather than just copy and pasting

With zero knowledge of coding when coming into this class, AI helped me learn
things along the way and made my life so much easier

No comments

Good course to introduce someone to the use of AI in R, but would have been
helpful/interesting to learn some of the basics beforehand

Without the help of AI, I don't think I would've been able to complete this
course at all, honestly Coding is just extremely hard for me, and using Copilot
really helped to break it down into manageable chunks to learn/use

This has been a great semester! This class was super interesting!

Thanks for a great semester!

I feel like the effectiveness of AI in this course varies between students, so
this survey is very interesting! I could see AI inhibiting learning if students
used it heavily in the beginning of the course when we focused on R basics,
because then that work is just busy work I feel like I learned the most from the
beginning of the course just using AI as an occasional troubleshooting tool, but
depending on AI from the start would have been detrimental

NA

AI helped me understand and learn a lot more in this course than I could have
otherwise without it!

See suggested changes for future offerings

This was a very cool class!

Using on AI was appropriate in this course, but I don't think that it would be
effective in every course I attached my mycology class analysis in the comments
if you are interested in the analysis I did I presented the information to my
class :) I really enjoyed the skills that I learned in this class and will be
adding to my resume! Thank you for an amazing semester

I wish there was less of a need to use AI in this course, but once things
started getting complicated it was very hard to do anything without the use of
AI, especially considering most of us in the class had no experience with coding
of any sort For example, I did not use AI for the first couple of labs, and that
was on purpose because I did not want to have to rely on AI for everything I did
The first couple of labs were manageable to do without the use of AI, and they
were just challenging enough where I was actually learning a lot of the basics
of coding However, after a certain point, I felt I needed to use AI to get the
work done because the level of difficulty increased too much for me to figure
out the coding on my own After that point, I relied on AI too much, and I wasn't
learning anymore

I really did enjoy the course overall, I like the room and set up of working
with a group I think Dr Blanchard was helpful when I went to office hours or
needed help in class I wish we learned a few more basic coding steps because
I think I relied on AI too much I still think it should be a big part of the
course but just that a little more coding without it at the start

I think AI has its place in the modern world but that place is not everywhere I
understand the potential AI has for making coding more accessible for newcomers
but it should not eliminate the learning process A balance needs to be found
between AI use and organic learning

Without AI for this course, I doubt I would have been able to do the labs on my
own I think the course name should be changed to "Coding with R" or something of
a similar nature, because that is what the assignments were, going through the
textbook While it was beneficial to get some exposure to R, it was not what I
expected at all If I knew what it would be like beforehand, I would never have
taken this course The time spent trying to resolve issues with no luck from AI
and other sources added way too many hours into completing assignments I don't
enjoy using AI and try to avoid it, but unfortunately with the way the class was
formatted, I felt that there was no way I could have figured out the issues I
had or solved the exercises without it

AI really helped understanding the coding

Using AI made me more comfortable with coding, and I am actually applying what I
learned to one of my classes for a project

NA

None

NA

Using AI is extremely helpful in a coding class especially for students like
me without much familiarity with code I wish more coding classes were offered
more frequently in the Biology Department R programming is such a useful skill,
even if I’m a beginner, and I will certainly be using it to make visuals in the
future I even saw my classmate from here using R to make graphs for project in
a different course Since this is a research-based course and we have limited
time this semester (the semester is very short, only 3-4 months), I loved
that we gain hands-on, first-hand experience of working with real MAG data and
doing analyses with it Doing the chapter outlines from the textbook was very
helpful in providing us with foundational knowledge about code syntax and using
dplyr and ggplot for visualization and analysis I like learning foundational
principles of coding from a concrete source like a textbook, compared to
self-teaching myself through google At the same time, I still think having
resources like internet and AI are such a big help because they introduce me
to so much I would’t have explored (like other functions, more example uses,
graph aesthetics, research ideas) And I’m sure that self-teaching is still
an important part of learning to code and research, too (though I don’t like
thinking of the idea so much) I found that AI saved so much time when it came to
problem-solving, troubleshooting errors, and me memorizing functions That can be
a pain when working on coding assignments, which I’ve experienced before I think
it’s important to responsibly use AI as a tool to help with my understanding,
but not just use it to get the assignment done That’s why I don’t just use AI to
ask for code suggestions, but also for explanation for every function use I also
take a look at how the code was structured to understand the problem-solving
logic I think that goes back to how we emphasized vibe coding— you should be
able to know how to approach the problem, even if you can’t write the code just
yet I have one comment about why students asked less questions to the instructor
about the actual code itself and problem-solving (although maybe there were
still questions about debugging?) I am sure the reason is obviously having the
assistance of AI, which is a big help with coding But I think the underlying
reason could be that most students hope to avoid asking a question and then
being told to look into it themselves and check their resources they already
have I hear instructors say this so often, even in my other research-based
class I’m taking this semester Personally, this is a main reason I refrain from
asking questions, because given enough time and (sometimes hard) effort on my
own, I can answer my own question I just have to look for the answers myself on
google or AI Most of the questions I want to ask are ones that I could answer
given time and effort on my own I feel like instructors may think I am lazy if
I don’t try to first figure it out Also, another reason is that students are
confused and want to ask questions, but don’t know what exactly to ask or how
to phrase it I’ve heard a classmate say this in another course, and I feel the
same way sometimes It’s harder when there are technical terms involved because
I don’t know how to describe what I’m confused about Overall, I personally
don’t find either of these things to be huge issues in this course, as you were
always willing to help us and were a very encouraging instructor I never felt
pressured in a negative way Given our resources, it made sense for me to try to
at least try to problem solve myself, which often resulted in me figuring out
an answer…thanks to my resources, like AI! Overall, I’m not sure what changes I
would suggest for this course, especially given our short semester The important
thing was that I learned useful programming skills that I know I will apply in
the future Thank you Dr Blanchard!

Thank you :)

AI removes the need to spend hours learning coding and allows me to focus on
more important things

NA

I genuinely think the point of AI is like computers and calculators, make
complex work easier I just think we should not confuse a glorified calculator
with a friend, God's word, or an actual intelligent person (Cogito, ergo sum)
Even if it was an intelligent person, I would check their words and answers if
I suspected it sounded too good to be true (that's probably because by nature,
I also have a tendency to say things with a lot of confidence even if they are
not true—simply because I believe them) If someone came to me with the total
confidence that there are three types of T cells, I would know better and tell
them "that's not true" If they told me "a new type of IgG was discovered in
a paper by Matt et al," I'd have a tendency to say "yes? Can you show me?" If
I was told "Did you know there are over 300 types of turtles?" I may have a
small doubt, but usually say "oh nice!" and accept it as a doubtful, but actual
fact (and mess up and repeat it with total confidence to other people as a
truth) Asking any question to AI comes with the understanding that it is only a
computer and just does math But even if it could think and was human, wouldn't
we also double-check their statement and thoughts? Or correct them, or ask for
proofs?

NA

Table - Overall effectiveness / Likelihood to recommend AI

R code
cols <- c(
  "Overall effectiveness of AI in this course",
  "Likelihood to recommend AI use in similar courses"
)

res <- df |>
  select(any_of(cols)) |>
  pivot_longer(everything(), names_to = "column", values_to = "value") |>
  mutate(value = as.numeric(value)) |>                # convert to numeric
  group_by(column) |>
  summarise(
    mean = mean(value, na.rm = TRUE),
    sd   = sd(value, na.rm = TRUE),
    n    = sum(!is.na(value)),
    .groups = "drop"
  ) |>
  mutate(mean = round(mean, 2), sd = round(sd, 2))

kable(res)
column                                               mean    sd    n
Likelihood to recommend AI use in similar courses    7.86  1.98   43
Overall effectiveness of AI in this course           8.60  1.22   43