# Convert a Blogdown/Wowchemy website to Quarto
The website of our research group INN4ALL was built using blogdown and Wowchemy. Lately we ran into a lot of rendering errors, mainly because each page relied on many plugins and parameters, so we decided to move to Quarto for a cleaner solution that is easier to maintain and to extend with new content. Here are the main steps we followed for the conversion, inspired by this blog entry.
## List of publications
As I did with my personal website, we decided to switch from manually generated pages for each publication to a BibTeX database plus an R script that generates a qmd file for each publication.
With this system, adding a new publication to the website is just a matter of adding a new entry to the BibTeX database. Journals usually provide BibTeX citations; otherwise we use [doi2bib](http://doi2bib.org/), adding the abstract manually to the generated BibTeX.
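The publication script itself is not reproduced here, but as a rough sketch of the idea — assuming a `publications.bib` file, the bib2df package, and an existing `publications/` folder, all of which are illustrative — it could look like this:

```r
# Sketch only: generate one qmd page per BibTeX entry.
# Assumes publications.bib exists and each entry has an abstract field.
library(bib2df)

bib <- bib2df("publications.bib")  # one row per BibTeX entry

for (i in seq_len(nrow(bib))) {
  entry <- bib[i, ]
  qmd <- c(
    "---",
    paste0("title: \"", entry$TITLE, "\""),
    paste0("author: \"", paste(entry$AUTHOR[[1]], collapse = ", "), "\""),
    paste0("date: ", entry$YEAR),
    "---",
    "",
    entry$ABSTRACT
  )
  # The file name reuses the BibTeX citation key
  writeLines(qmd, paste0("publications/", entry$BIBTEXKEY, ".qmd"))
}
```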
In the future, we might add a list of publications for each member, scanning the BibTeX author lists to account for name variations, or something similar.
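A possible starting point for that, reusing the `bib` data frame from the sketch above and a hypothetical surname:

```r
# Sketch only: keep entries where any author string contains the
# (made-up) surname "Doe"; real name matching would need more care.
member_pubs <- bib[vapply(bib$AUTHOR,
                          function(a) any(grepl("Doe", a)),
                          logical(1)), ]
```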
## Individual profiles
We developed an R script that reads each profile in the blogdown source and transforms it into a qmd file. The list of profiles is then rendered using Quarto's document listings feature.
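As a minimal sketch, the front matter of a listing page (the `type` and `fields` values here are illustrative; `role` comes from the profile YAML generated by the script below) could look like:

```yaml
---
title: "People"
listing:
  contents: people
  type: grid
  fields: [image, title, role]
---
```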
In blogdown, the file structure of each profile is the following:
```
authors/
└── userid/
    ├── avatar.jpg
    └── _index.md
```
In Quarto, instead, all the qmd files live in the same folder (called `people`, but it could be anything), and we created a subfolder `images` where we store all the profile pictures.
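With hypothetical usernames, the resulting Quarto structure looks like:

```
people/
├── images/
│   ├── jane-doe.jpg
│   └── john-smith.jpg
├── jane-doe.qmd
└── john-smith.qmd
```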
The R script therefore reads each `_index.md` to extract the YAML metadata and write it into the corresponding qmd file, and copies each `avatar.jpg` to `images`, renaming it after the profile name. In order not to copy every profile, we use a CSV listing the profiles we want to convert, with three columns:
- `user`: the name of the author's folder in blogdown
- `username`: the new name in quarto
- `orcid`: the ORCID iD, to add to each profile in quarto.
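For illustration, a hypothetical `profile.list.csv` (all values below are made-up examples) could look like this:

```
user,username,orcid
jdoe,jane-doe,0000-0002-1825-0097
jsmith,john-smith,0000-0001-2345-6789
```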
Here is the script:
```r
library(magick)

old.dir <- "/path/to/blogdown/files/content/authors/"
new.dir <- "/path/to/quarto/files/people"

profiles <- read.csv(paste0(old.dir, "profile.list.csv"))

for (i in seq_len(nrow(profiles))) {
  src.user <- profiles[i, ]$user
  orcid_id <- profiles[i, ]$orcid
  username <- profiles[i, ]$username

  # Crop the profile image to a square (on the shorter dimension) and save it
  img <- image_read(paste0(old.dir, src.user, "/avatar.jpg"))
  width <- image_info(img)$width
  height <- image_info(img)$height
  if (width < height) {
    img <- image_crop(img, paste0(width, "x", width))
    image_write(img, path = paste0(new.dir, "/images/", username, ".jpg"), format = "jpg")
  } else if (height < width) {
    img <- image_crop(img, paste0(height, "x", height))
    image_write(img, path = paste0(new.dir, "/images/", username, ".jpg"), format = "jpg")
  } else {
    # Already square: just copy it
    file.copy(paste0(old.dir, src.user, "/avatar.jpg"),
              paste0(new.dir, "/images/", username, ".jpg"))
  }

  # Source and destination files
  source_path <- paste0(old.dir, src.user, "/_index.md")
  dest_path <- paste0(new.dir, "/", username, ".qmd")

  # Read all lines
  lines <- readLines(source_path)

  # Extract the YAML section (between the first two "---" delimiters)
  yaml_start <- which(lines == "---")[1]
  yaml_end <- which(lines == "---")[2]
  yaml_lines <- lines[(yaml_start + 1):(yaml_end - 1)]

  # Helper to extract a single key-value line
  extract_field <- function(lines, key) {
    line <- grep(paste0("^", key, ":"), lines, value = TRUE)
    sub(paste0("^", key, ":\\s*"), "", line)
  }

  # Extract organizations
  line_index <- grep("organizations:", yaml_lines)
  org_name <- trimws(sub("- name:\\s*", "", yaml_lines[line_index + 1]))
  org_link <- trimws(sub("url:\\s*", "", yaml_lines[line_index + 2]))

  # Extract user_groups
  line_index <- grep("user_groups:", yaml_lines)
  user_group <- trimws(sub("-\\s*", "", yaml_lines[line_index + 1]))

  # Extract Google Scholar URL
  line_index <- grep("google-scholar", yaml_lines)
  url_line <- yaml_lines[line_index + 2]
  scholar_url <- trimws(sub("link:\\s*", "", url_line))

  # Extract Twitter URL (first match only; NA if absent)
  line_index <- grep("twitter", yaml_lines)[1]
  url_line <- yaml_lines[line_index + 2]
  twitter_url <- trimws(sub("link:\\s*", "", url_line))

  # Extract bio paragraph
  content_start <- grep("<!--StartFragment-->", lines)
  content_end <- grep("<!--EndFragment-->", lines)
  bio <- trimws(lines[(content_start + 1):(content_end - 1)])

  # Compose the new YAML front matter
  new_yaml <- c(
    "---",
    paste0("title: \"", extract_field(yaml_lines, "title"), "\""),
    paste0("role: \"", extract_field(yaml_lines, "role"), "\""),
    paste0("group: \"", user_group, "\""),
    paste0("image: images/", username, ".jpg"),
    "about:",
    "  template: trestles",
    "  image-shape: rounded",
    "  image-width: 15rem",
    "  links:",
    "    - icon: envelope",
    "      text: Email",
    paste0("      href: \"mailto:", extract_field(yaml_lines, "email"), "\"")
  )
  if (length(scholar_url) > 0) {
    new_yaml <- c(
      new_yaml,
      "    - text: \"{{< ai google-scholar size=1.1em title='Google Scholar' >}} G. Scholar\"",
      paste0("      href: \"", scholar_url, "\"")
    )
  }
  if (length(twitter_url) > 0 && !is.na(twitter_url)) {
    new_yaml <- c(
      new_yaml,
      "    - text: \"{{< fa brands twitter size=1.1em title='Twitter' >}} Twitter\"",
      paste0("      href: \"", twitter_url, "\"")
    )
  }
  if (!is.na(orcid_id) && nzchar(orcid_id)) {
    new_yaml <- c(
      new_yaml,
      "    - text: \"{{< ai orcid size=1.1em title='ORCID' >}} ORCID\"",
      paste0("      href: \"https://orcid.org/", orcid_id, "\"")
    )
  }

  # Close the YAML block
  new_yaml <- c(new_yaml, "---")

  # Combine front matter and bio into the new Quarto content
  new_content <- c(new_yaml, bio)

  # Write the result
  writeLines(new_content, dest_path)
  cat(src.user, " ✓\n")
}
```
## Projects and events
We followed the same approach for projects and events: read the YAML from the original blogdown files, convert it to qmd files, and copy the images. We then did some manual checks to fix image paths and verify the conversion.
## Other sections
We built the remaining parts of the website from scratch using Quarto templates.