stuff including batch_upsert function

dull b 2023-12-16 05:15:08 +00:00
parent ea6f3910eb
commit fdebc85753
3 changed files with 50 additions and 8 deletions

View file

@@ -57,21 +57,37 @@ impl Comment {
comment_form: &CommentInsertForm,
parent_path: Option<&Ltree>,
) -> Result<Comment, Error> {
Comment::create_batch(pool, &[(comment_form, parent_path)])
  .await?
  .into_iter()
  .next()
  .ok_or(Error::NotFound)
}
pub async fn create_batch(
pool: &mut DbPool<'_>,
items: &[(&CommentInsertForm, Option<&Ltree>)],
) -> Result<Vec<Comment>, Error> {
let conn = &mut get_conn(pool).await?;
conn
.build_transaction()
.run(|conn| {
Box::pin(async move {
// Insert, to get the id
let inserted_comment = insert_into(comment)
.values(comment_form)
.on_conflict(ap_id)
let forms = items
.iter()
.map(|&(form, _)| form)
.collect::<Vec<_>>();
// Insert, to get the ids
let inserted_comments = insert_into(comment)
.values(&forms)
/*.on_conflict(ap_id)
.do_update()
.set(comment_form)*/
.get_results::<Self>(conn)
.await?;
// `ap_id` unique constraint violation is handled individually for each row
// because batch upsert requires having the same `set` argument for all rows
let comment_id = inserted_comment.id;
// You need to update the ltree column
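Because a batch `ON CONFLICT ... DO UPDATE` must use the same `set` argument for every row, rows that hit the `ap_id` unique constraint can fall back to a single-row upsert shaped like the code this commit replaces. A minimal sketch, not part of this commit, reusing the `comment` table, `ap_id` column, and form types already in scope in this file:

// Hypothetical per-row fallback (sketch, not from this commit): upsert one
// comment, updating the existing row when `ap_id` already exists.
async fn upsert_one_comment(
  conn: &mut AsyncPgConnection,
  comment_form: &CommentInsertForm,
) -> Result<Comment, Error> {
  insert_into(comment)
    .values(comment_form)
    .on_conflict(ap_id)
    .do_update()
    .set(comment_form)
    .get_result::<Comment>(conn)
    .await
}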

View file

@@ -19,7 +19,7 @@ use diesel::{
sql_types::{Text, Timestamptz},
IntoSql,
PgConnection,
RunQueryDsl,
RunQueryDsl,
Insertable,
Table,
Column,
AsChangeset,
Expression,
SelectableExpression,
expression::NonAggregate,
query_builder::QueryFragment,
};
use diesel_async::{
pg::AsyncPgConnection,
@@ -153,6 +153,19 @@ macro_rules! try_join_with_pool {
}};
}
pub async fn batch_upsert<T, U, Target, R>(
  conn: &mut AsyncPgConnection,
  target: T,
  records: U,
  conflict_target: Target,
) -> Result<Vec<R>, DieselError>
where
  T: Table,
  T::AllColumns: Expression + SelectableExpression<T> + NonAggregate + QueryFragment<Pg>,
  U: IntoIterator + Clone,
  Vec<U::Item>: Insertable<T>,
  U::Item: Insertable<T> + AsChangeset<Target = T>,
  Target: Column<Table = T>,
{
  // Conflict handling on `conflict_target` is not wired up yet: a batch
  // `ON CONFLICT DO UPDATE` needs the same `set` argument for all rows.
  let _ = conflict_target;
  // Insert all records in one statement and return the inserted rows.
  diesel::insert_into(target)
    .values(records.clone().into_iter().collect::<Vec<_>>())
    .load::<R>(conn)
    .await
}
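A hypothetical call site (the `comment::table`, `comment::ap_id`, and `forms` names are assumptions for illustration, e.g. `forms: Vec<CommentInsertForm>`):

// Hypothetical usage sketch: upsert a batch of comment forms, treating
// `ap_id` as the conflict target.
let inserted: Vec<Comment> =
  batch_upsert(conn, comment::table, forms, comment::ap_id).await?;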
pub fn fuzzy_search(q: &str) -> String {
let replaced = q.replace('%', "\\%").replace('_', "\\_").replace(' ', "%");
format!("%{replaced}%")

View file

@@ -18,10 +18,23 @@ then
fi
# Create cluster
initdb --username=postgres --auth=trust --no-instructions
# Options (shell comments can't sit between line continuations, so they are
# listed here):
#   listen_addresses=, unix_socket_directories=$PWD: only listen to a socket
#     in the current directory
#   logging_collector=on: write logs to a file in $PGDATA/log
#   session_preload_libraries=auto_explain, auto_explain.log_min_duration=0:
#     log all query plans by default
#   auto_explain.log_analyze=on: include actual row counts and run times for
#     query plan nodes
#   enable_seqscan=off: avoid sequential scans so query plans show what index
#     scans can be done (an index scan is normally avoided in some cases, such
#     as when the table is small enough)
#   auto_explain.log_parameter_max_length=0: don't log parameter values
initdb --username=postgres --auth=trust --no-instructions \
  -c listen_addresses= -c unix_socket_directories=$PWD \
  -c logging_collector=on \
  -c session_preload_libraries=auto_explain -c auto_explain.log_min_duration=0 \
  -c auto_explain.log_analyze=on \
  -c enable_seqscan=off \
  -c auto_explain.log_parameter_max_length=0
# Start server that only listens to socket in current directory
pg_ctl start --options="-c listen_addresses= -c unix_socket_directories=$PWD -c logging_collector=on -c session_preload_libraries=auto_explain -c auto_explain.log_min_duration=0 -c auto_explain.log_parameter_max_length=0 -c auto_explain.log_analyze=on -c enable_seqscan=off" > /dev/null
pg_ctl start
# Setup database
psql -c "CREATE USER lemmy WITH PASSWORD 'password' SUPERUSER;" -U postgres