Compare commits

...

1 Commits

Author SHA1 Message Date
23de8a6fc6 [All Platforms] 2.1.0 - Sync Optimizations
All checks were successful
Ascently - Sync Deploy / build-and-push (push) Successful in 2m31s
Ascently - Docs Deploy / build-and-push (push) Successful in 3m30s
2025-10-15 18:17:19 -06:00
32 changed files with 1538 additions and 409 deletions

View File

@@ -16,8 +16,8 @@ android {
applicationId = "com.atridad.ascently"
minSdk = 31
targetSdk = 36
versionCode = 41
versionName = "2.0.1"
versionCode = 42
versionName = "2.1.0"
testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner"
}

View File

@@ -32,13 +32,12 @@ data class BackupGym(
val supportedClimbTypes: List<ClimbType>,
val difficultySystems: List<DifficultySystem>,
@kotlinx.serialization.SerialName("customDifficultyGrades")
val customDifficultyGrades: List<String> = emptyList(),
val customDifficultyGrades: List<String>? = null,
val notes: String? = null,
val createdAt: String,
val updatedAt: String
) {
companion object {
/** Create BackupGym from native Android Gym model */
fun fromGym(gym: Gym): BackupGym {
return BackupGym(
id = gym.id,
@@ -46,7 +45,7 @@ data class BackupGym(
location = gym.location,
supportedClimbTypes = gym.supportedClimbTypes,
difficultySystems = gym.difficultySystems,
customDifficultyGrades = gym.customDifficultyGrades,
customDifficultyGrades = gym.customDifficultyGrades.ifEmpty { null },
notes = gym.notes,
createdAt = gym.createdAt,
updatedAt = gym.updatedAt
@@ -54,7 +53,6 @@ data class BackupGym(
}
}
/** Convert to native Android Gym model */
fun toGym(): Gym {
return Gym(
id = id,
@@ -62,7 +60,7 @@ data class BackupGym(
location = location,
supportedClimbTypes = supportedClimbTypes,
difficultySystems = difficultySystems,
customDifficultyGrades = customDifficultyGrades,
customDifficultyGrades = customDifficultyGrades ?: emptyList(),
notes = notes,
createdAt = createdAt,
updatedAt = updatedAt
@@ -79,7 +77,7 @@ data class BackupProblem(
val description: String? = null,
val climbType: ClimbType,
val difficulty: DifficultyGrade,
val tags: List<String> = emptyList(),
val tags: List<String>? = null,
val location: String? = null,
val imagePaths: List<String>? = null,
val isActive: Boolean = true,
@@ -89,7 +87,6 @@ data class BackupProblem(
val updatedAt: String
) {
companion object {
/** Create BackupProblem from native Android Problem model */
fun fromProblem(problem: Problem): BackupProblem {
return BackupProblem(
id = problem.id,
@@ -112,7 +109,6 @@ data class BackupProblem(
}
}
/** Convert to native Android Problem model */
fun toProblem(): Problem {
return Problem(
id = id,
@@ -121,7 +117,7 @@ data class BackupProblem(
description = description,
climbType = climbType,
difficulty = difficulty,
tags = tags,
tags = tags ?: emptyList(),
location = location,
imagePaths = imagePaths ?: emptyList(),
isActive = isActive,
@@ -132,7 +128,6 @@ data class BackupProblem(
)
}
/** Create a copy with updated image paths for import processing */
fun withUpdatedImagePaths(newImagePaths: List<String>): BackupProblem {
return copy(imagePaths = newImagePaths.ifEmpty { null })
}
@@ -153,7 +148,6 @@ data class BackupClimbSession(
val updatedAt: String
) {
companion object {
/** Create BackupClimbSession from native Android ClimbSession model */
fun fromClimbSession(session: ClimbSession): BackupClimbSession {
return BackupClimbSession(
id = session.id,
@@ -170,7 +164,6 @@ data class BackupClimbSession(
}
}
/** Convert to native Android ClimbSession model */
fun toClimbSession(): ClimbSession {
return ClimbSession(
id = id,
@@ -203,7 +196,6 @@ data class BackupAttempt(
val updatedAt: String? = null
) {
companion object {
/** Create BackupAttempt from native Android Attempt model */
fun fromAttempt(attempt: Attempt): BackupAttempt {
return BackupAttempt(
id = attempt.id,
@@ -221,7 +213,6 @@ data class BackupAttempt(
}
}
/** Convert to native Android Attempt model */
fun toAttempt(): Attempt {
return Attempt(
id = id,

View File

@@ -66,7 +66,6 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Check if Health Connect is available on this device */
fun isHealthConnectAvailable(): Flow<Boolean> = flow {
try {
if (!_isCompatible.value) {
@@ -82,10 +81,6 @@ class HealthConnectManager(private val context: Context) {
}
}
/**
* Enable or disable Health Connect integration and automatically request permissions if
* enabling
*/
suspend fun setEnabled(enabled: Boolean) {
preferences.edit().putBoolean("enabled", enabled).apply()
_isEnabled.value = enabled
@@ -105,13 +100,11 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Update the permissions granted state */
/**
 * Persists the permissions-granted flag and publishes it to observers.
 *
 * @param granted true when all required Health Connect permissions are held.
 */
fun setPermissionsGranted(granted: Boolean) {
    // Persist first, then update the in-memory state flow.
    with(preferences.edit()) {
        putBoolean("permissions", granted)
        apply()
    }
    _hasPermissions.value = granted
}
/** Check if all required permissions are granted */
suspend fun hasAllPermissions(): Boolean {
return try {
if (!_isCompatible.value || healthConnectClient == null) {
@@ -132,7 +125,6 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Check if Health Connect is ready for use */
suspend fun isReady(): Boolean {
return try {
if (!_isEnabled.value || !_isCompatible.value || healthConnectClient == null)
@@ -148,12 +140,10 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Get permission request contract */
/**
 * Returns the activity-result contract used to launch the Health Connect
 * permission request flow. Input and output are permission-string sets.
 */
fun getPermissionRequestContract(): ActivityResultContract<Set<String>, Set<String>> =
    PermissionController.createRequestPermissionResultContract()
/** Get required permissions as strings */
fun getRequiredPermissions(): Set<String> {
return try {
REQUIRED_PERMISSIONS.map { it }.toSet()
@@ -163,7 +153,6 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Sync a completed climbing session to Health Connect (only when auto-sync is enabled) */
@SuppressLint("RestrictedApi")
suspend fun syncCompletedSession(
session: ClimbSession,
@@ -271,7 +260,6 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Auto-sync a completed session if enabled - this is the only way to sync sessions */
suspend fun autoSyncCompletedSession(
session: ClimbSession,
gymName: String,
@@ -293,7 +281,6 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Estimate calories burned during climbing */
private fun estimateCaloriesForClimbing(durationMinutes: Long, attemptCount: Int): Double {
val baseCaloriesPerMinute = 8.0
val intensityMultiplier =
@@ -305,7 +292,6 @@ class HealthConnectManager(private val context: Context) {
return durationMinutes * baseCaloriesPerMinute * intensityMultiplier
}
/** Create heart rate data */
@SuppressLint("RestrictedApi")
private fun createHeartRateRecord(
startTime: Instant,
@@ -347,9 +333,7 @@ class HealthConnectManager(private val context: Context) {
}
}
/** Check if ready for use */
/**
 * Non-suspending readiness check: true when the integration is enabled and
 * permissions have been granted. Does not consult the Health Connect client.
 */
fun isReadySync(): Boolean = _isEnabled.value && _hasPermissions.value
}

View File

@@ -251,23 +251,15 @@ class ClimbRepository(database: AscentlyDatabase, private val context: Context)
}
}
/**
* Sets the callback for auto-sync functionality. This should be called by the SyncService to
* register itself for auto-sync triggers.
*/
/**
 * Registers the callback invoked after local data modifications to trigger
 * auto-sync; pass null to unregister. Intended to be called by the sync
 * service to hook itself into the repository.
 */
fun setAutoSyncCallback(callback: (() -> Unit)?) {
    autoSyncCallback = callback
}
/**
* Triggers auto-sync if enabled. This is called after any data modification to keep data
* synchronized across devices automatically.
*/
/** Invokes the registered auto-sync callback, if any; no-op when unregistered. */
private fun triggerAutoSync() {
    autoSyncCallback?.invoke()
}
private fun trackDeletion(itemId: String, itemType: String) {
fun trackDeletion(itemId: String, itemType: String) {
val currentDeletions = getDeletedItems().toMutableList()
val newDeletion =
DeletedItem(id = itemId, type = itemType, deletedAt = DateFormatUtils.nowISO8601())

View File

@@ -0,0 +1,30 @@
package com.atridad.ascently.data.sync
import com.atridad.ascently.data.format.BackupAttempt
import com.atridad.ascently.data.format.BackupClimbSession
import com.atridad.ascently.data.format.BackupGym
import com.atridad.ascently.data.format.BackupProblem
import com.atridad.ascently.data.format.DeletedItem
import kotlinx.serialization.Serializable
/** Request structure for delta sync - sends only changes since last sync */
@Serializable
data class DeltaSyncRequest(
    // ISO-8601 timestamp of this client's last completed sync; the server
    // is expected to return only changes made after this instant.
    val lastSyncTime: String,
    // Records created or modified locally since lastSyncTime.
    val gyms: List<BackupGym>,
    val problems: List<BackupProblem>,
    val sessions: List<BackupClimbSession>,
    val attempts: List<BackupAttempt>,
    // Tombstones for records deleted locally since lastSyncTime.
    val deletedItems: List<DeletedItem>
)
/** Response structure for delta sync - receives only changes from server */
@Serializable
data class DeltaSyncResponse(
    // Server-side timestamp of this response; presumably stored by the client
    // as the next lastSyncTime — confirm against the caller.
    val serverTime: String,
    // Records created or modified on the server since the client's lastSyncTime.
    val gyms: List<BackupGym>,
    val problems: List<BackupProblem>,
    val sessions: List<BackupClimbSession>,
    val attempts: List<BackupAttempt>,
    // Tombstones for records deleted on the server since the client's lastSyncTime.
    val deletedItems: List<DeletedItem>
)

View File

@@ -19,6 +19,9 @@ import com.atridad.ascently.utils.ImageNamingUtils
import com.atridad.ascently.utils.ImageUtils
import java.io.IOException
import java.io.Serializable
import java.text.SimpleDateFormat
import java.util.Date
import java.util.Locale
import java.util.concurrent.TimeUnit
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
@@ -63,6 +66,7 @@ class SyncService(private val context: Context, private val repository: ClimbRep
prettyPrint = true
ignoreUnknownKeys = true
explicitNulls = false
coerceInputValues = true
}
// State
@@ -195,26 +199,33 @@ class SyncService(private val context: Context, private val repository: ClimbRep
serverBackup.sessions.isNotEmpty() ||
serverBackup.attempts.isNotEmpty()
when {
!hasLocalData && hasServerData -> {
Log.d(TAG, "No local data found, performing full restore from server")
val imagePathMapping = syncImagesFromServer(serverBackup)
importBackupToRepository(serverBackup, imagePathMapping)
Log.d(TAG, "Full restore completed")
}
hasLocalData && !hasServerData -> {
Log.d(TAG, "No server data found, uploading local data to server")
uploadData(localBackup)
syncImagesForBackup(localBackup)
Log.d(TAG, "Initial upload completed")
}
hasLocalData && hasServerData -> {
Log.d(TAG, "Both local and server data exist, merging (server wins)")
mergeDataSafely(serverBackup)
Log.d(TAG, "Merge completed")
}
else -> {
Log.d(TAG, "No data to sync")
// If both client and server have been synced before, use delta sync
val lastSyncTimeStr = sharedPreferences.getString(Keys.LAST_SYNC_TIME, null)
if (hasLocalData && hasServerData && lastSyncTimeStr != null) {
Log.d(TAG, "Using delta sync for incremental updates")
performDeltaSync(lastSyncTimeStr)
} else {
when {
!hasLocalData && hasServerData -> {
Log.d(TAG, "No local data found, performing full restore from server")
val imagePathMapping = syncImagesFromServer(serverBackup)
importBackupToRepository(serverBackup, imagePathMapping)
Log.d(TAG, "Full restore completed")
}
hasLocalData && !hasServerData -> {
Log.d(TAG, "No server data found, uploading local data to server")
uploadData(localBackup)
syncImagesForBackup(localBackup)
Log.d(TAG, "Initial upload completed")
}
hasLocalData && hasServerData -> {
Log.d(TAG, "Both local and server data exist, merging (server wins)")
mergeDataSafely(serverBackup)
Log.d(TAG, "Merge completed")
}
else -> {
Log.d(TAG, "No data to sync")
}
}
}
@@ -230,6 +241,265 @@ class SyncService(private val context: Context, private val repository: ClimbRep
}
}
/**
 * Performs an incremental sync: uploads only records modified since
 * [lastSyncTimeStr], then applies only the server's changes since that time.
 *
 * NOTE(review): all comparisons rely on client-recorded timestamps, so clock
 * skew between devices can cause items to be missed or re-sent — confirm
 * acceptable for this merge policy.
 */
private suspend fun performDeltaSync(lastSyncTimeStr: String) {
    Log.d(TAG, "Starting delta sync with lastSyncTime=$lastSyncTimeStr")
    // Parse last sync time to filter modified items
    // An unparseable timestamp falls back to the epoch, which degrades this
    // delta sync into a full upload of all local data.
    val lastSyncDate = parseISO8601(lastSyncTimeStr) ?: Date(0)
    // Collect items modified since last sync
    // A null parse result makes "?.after(...) == true" false, so items with
    // unparseable timestamps are silently excluded from the delta.
    val allGyms = repository.getAllGyms().first()
    val modifiedGyms =
        allGyms
            .filter { gym -> parseISO8601(gym.updatedAt)?.after(lastSyncDate) == true }
            .map { BackupGym.fromGym(it) }
    val allProblems = repository.getAllProblems().first()
    val modifiedProblems =
        allProblems
            .filter { problem ->
                parseISO8601(problem.updatedAt)?.after(lastSyncDate) == true
            }
            .map { problem ->
                val backupProblem = BackupProblem.fromProblem(problem)
                // Replace device-local image paths with deterministic
                // problem-scoped filenames so client and server agree on names.
                val normalizedImagePaths =
                    problem.imagePaths.mapIndexed { index, _ ->
                        ImageNamingUtils.generateImageFilename(problem.id, index)
                    }
                if (normalizedImagePaths.isNotEmpty()) {
                    backupProblem.copy(imagePaths = normalizedImagePaths)
                } else {
                    backupProblem
                }
            }
    val allSessions = repository.getAllSessions().first()
    val modifiedSessions =
        allSessions
            .filter { session ->
                parseISO8601(session.updatedAt)?.after(lastSyncDate) == true
            }
            .map { BackupClimbSession.fromClimbSession(it) }
    // NOTE(review): attempts are filtered by createdAt, not updatedAt —
    // presumably attempts are immutable after creation; confirm, since any
    // later edit would otherwise never be synced.
    val allAttempts = repository.getAllAttempts().first()
    val modifiedAttempts =
        allAttempts
            .filter { attempt ->
                parseISO8601(attempt.createdAt)?.after(lastSyncDate) == true
            }
            .map { BackupAttempt.fromAttempt(it) }
    val allDeletions = repository.getDeletedItems()
    val modifiedDeletions =
        allDeletions.filter { item ->
            parseISO8601(item.deletedAt)?.after(lastSyncDate) == true
        }
    Log.d(
        TAG,
        "Delta sync sending: gyms=${modifiedGyms.size}, problems=${modifiedProblems.size}, sessions=${modifiedSessions.size}, attempts=${modifiedAttempts.size}, deletions=${modifiedDeletions.size}"
    )
    // Create delta request
    val deltaRequest =
        DeltaSyncRequest(
            lastSyncTime = lastSyncTimeStr,
            gyms = modifiedGyms,
            problems = modifiedProblems,
            sessions = modifiedSessions,
            attempts = modifiedAttempts,
            deletedItems = modifiedDeletions
        )
    val requestBody =
        json.encodeToString(DeltaSyncRequest.serializer(), deltaRequest)
            .toRequestBody("application/json".toMediaType())
    val request =
        Request.Builder()
            .url("$serverUrl/sync/delta")
            .header("Authorization", "Bearer $authToken")
            .post(requestBody)
            .build()
    // Execute on the IO dispatcher; use {} guarantees the response body is closed.
    val deltaResponse =
        withContext(Dispatchers.IO) {
            try {
                httpClient.newCall(request).execute().use { response ->
                    if (response.isSuccessful) {
                        val body = response.body?.string()
                        if (!body.isNullOrEmpty()) {
                            json.decodeFromString(DeltaSyncResponse.serializer(), body)
                        } else {
                            throw SyncException.InvalidResponse("Empty response body")
                        }
                    } else {
                        // handleHttpError presumably throws (the branch must
                        // type-check as Nothing) — confirm against its definition.
                        handleHttpError(response.code)
                    }
                }
            } catch (e: IOException) {
                throw SyncException.NetworkError(e.message ?: "Network error")
            }
        }
    Log.d(
        TAG,
        "Delta sync received: gyms=${deltaResponse.gyms.size}, problems=${deltaResponse.problems.size}, sessions=${deltaResponse.sessions.size}, attempts=${deltaResponse.attempts.size}, deletions=${deltaResponse.deletedItems.size}"
    )
    // Apply server changes to local data
    applyDeltaResponse(deltaResponse)
    // Sync only modified problem images
    syncModifiedImages(modifiedProblems)
}
/**
 * Parses an ISO-8601 timestamp into a [Date].
 *
 * The previous single pattern ("yyyy-MM-dd'T'HH:mm:ss") silently ignored any
 * trailing zone suffix — SimpleDateFormat.parse matches only a prefix — so a
 * UTC/offset timestamp like "2025-10-15T18:17:19Z" was anchored in the
 * device's local time zone, skewing delta-sync comparisons across devices.
 * We now try an offset-aware pattern first ("XXX" accepts both "Z" and
 * "±hh:mm"); offset-less strings fall through to the original pattern and
 * keep their previous local-time interpretation, so existing data is
 * unaffected.
 *
 * @param dateString the timestamp to parse.
 * @return the parsed [Date], or null when no pattern matches.
 */
private fun parseISO8601(dateString: String): Date? {
    // Most specific first: the offset-aware pattern rejects offset-less
    // strings with a ParseException, so ordering is safe.
    val patterns = listOf("yyyy-MM-dd'T'HH:mm:ssXXX", "yyyy-MM-dd'T'HH:mm:ss")
    for (pattern in patterns) {
        try {
            // SimpleDateFormat is not thread-safe; a fresh instance per call
            // keeps this method safe from any context.
            val parsed = SimpleDateFormat(pattern, Locale.US).parse(dateString)
            if (parsed != null) return parsed
        } catch (e: Exception) {
            // Fall through to the next pattern.
        }
    }
    return null
}
/**
 * Applies a server delta to the local database: downloads images for changed
 * problems, merges upserts with a last-write-wins rule (server wins ties),
 * applies deletions, and rebuilds the local tombstone list.
 *
 * Auto-sync is suspended for the duration so repository writes made here do
 * not recursively trigger another sync.
 */
private suspend fun applyDeltaResponse(response: DeltaSyncResponse) {
    // Temporarily disable auto-sync to prevent recursive sync triggers
    repository.setAutoSyncCallback(null)
    try {
        // Download images for new/modified problems from server
        // Maps the server-side path to the local path it was saved under.
        val imagePathMapping = mutableMapOf<String, String>()
        for (problem in response.problems) {
            problem.imagePaths?.forEach { imagePath ->
                val serverFilename = imagePath.substringAfterLast('/')
                try {
                    val localImagePath = downloadImage(serverFilename)
                    if (localImagePath != null) {
                        imagePathMapping[imagePath] = localImagePath
                    }
                } catch (e: Exception) {
                    // Best effort: a failed image download must not abort the merge.
                    Log.w(TAG, "Failed to download image $imagePath: ${e.message}")
                }
            }
        }
        // Merge gyms - check if exists and compare timestamps
        // ">=" lets the server copy win on equal timestamps, matching the
        // overall "server wins" policy. NOTE(review): timestamps are compared
        // as raw strings — correct only while both sides emit the same
        // fixed-width ISO-8601 format; confirm.
        val existingGyms = repository.getAllGyms().first()
        for (backupGym in response.gyms) {
            val existing = existingGyms.find { it.id == backupGym.id }
            if (existing == null || backupGym.updatedAt >= existing.updatedAt) {
                val gym = backupGym.toGym()
                if (existing != null) {
                    repository.updateGym(gym)
                } else {
                    repository.insertGym(gym)
                }
            }
        }
        // Merge problems
        val existingProblems = repository.getAllProblems().first()
        for (backupProblem in response.problems) {
            // Rewrite server image paths to the local paths downloaded above;
            // paths that failed to download are kept as-is.
            val updatedImagePaths =
                backupProblem.imagePaths?.map { oldPath ->
                    imagePathMapping[oldPath] ?: oldPath
                }
            val problemToMerge = backupProblem.copy(imagePaths = updatedImagePaths)
            val problem = problemToMerge.toProblem()
            val existing = existingProblems.find { it.id == backupProblem.id }
            if (existing == null || backupProblem.updatedAt >= existing.updatedAt) {
                if (existing != null) {
                    repository.updateProblem(problem)
                } else {
                    repository.insertProblem(problem)
                }
            }
        }
        // Merge sessions
        val existingSessions = repository.getAllSessions().first()
        for (backupSession in response.sessions) {
            val session = backupSession.toClimbSession()
            val existing = existingSessions.find { it.id == backupSession.id }
            if (existing == null || backupSession.updatedAt >= existing.updatedAt) {
                if (existing != null) {
                    repository.updateSession(session)
                } else {
                    repository.insertSession(session)
                }
            }
        }
        // Merge attempts
        // Attempts compare on createdAt — presumably immutable after creation;
        // confirm against the Attempt model.
        val existingAttempts = repository.getAllAttempts().first()
        for (backupAttempt in response.attempts) {
            val attempt = backupAttempt.toAttempt()
            val existing = existingAttempts.find { it.id == backupAttempt.id }
            if (existing == null || backupAttempt.createdAt >= existing.createdAt) {
                if (existing != null) {
                    repository.updateAttempt(attempt)
                } else {
                    repository.insertAttempt(attempt)
                }
            }
        }
        // Apply deletions
        // NOTE(review): deletions run after the upserts above, so a tombstone
        // in this response overrides an upsert from the same response
        // regardless of which event happened later — confirm intended.
        applyDeletions(response.deletedItems)
        // Update deletion records
        // Re-register the deduplicated union of local and server tombstones.
        // NOTE(review): trackDeletion stamps deletedAt with "now", so original
        // deletion times are lost here — verify the delta filter tolerates
        // tombstone timestamps moving forward.
        val allDeletions = repository.getDeletedItems() + response.deletedItems
        repository.clearDeletedItems()
        allDeletions.distinctBy { "${it.type}:${it.id}" }.forEach {
            repository.trackDeletion(it.id, it.type)
        }
    } finally {
        // Re-enable auto-sync
        repository.setAutoSyncCallback { serviceScope.launch { triggerAutoSync() } }
    }
}
/**
 * Removes locally any records that were deleted on another device.
 *
 * Each tombstone carries the record's type and id; tombstones whose record
 * no longer exists locally are silently skipped.
 */
private suspend fun applyDeletions(
    deletions: List<com.atridad.ascently.data.format.DeletedItem>
) {
    // Snapshot the current data once up front rather than per tombstone.
    val currentGyms = repository.getAllGyms().first()
    val currentProblems = repository.getAllProblems().first()
    val currentSessions = repository.getAllSessions().first()
    val currentAttempts = repository.getAllAttempts().first()
    deletions.forEach { tombstone ->
        when (tombstone.type) {
            "gym" ->
                currentGyms.firstOrNull { it.id == tombstone.id }?.let {
                    repository.deleteGym(it)
                }
            "problem" ->
                currentProblems.firstOrNull { it.id == tombstone.id }?.let {
                    repository.deleteProblem(it)
                }
            "session" ->
                currentSessions.firstOrNull { it.id == tombstone.id }?.let {
                    repository.deleteSession(it)
                }
            "attempt" ->
                currentAttempts.firstOrNull { it.id == tombstone.id }?.let {
                    repository.deleteAttempt(it)
                }
        }
    }
}
/**
 * Uploads the images belonging to problems changed since the last sync.
 * The server-side filename is the last path segment of each image path.
 */
private suspend fun syncModifiedImages(modifiedProblems: List<BackupProblem>) {
    if (modifiedProblems.isEmpty()) return
    Log.d(TAG, "Syncing images for ${modifiedProblems.size} modified problems")
    // Flatten all image paths (imagePaths may be null) and upload each one.
    modifiedProblems
        .flatMap { it.imagePaths.orEmpty() }
        .forEach { path -> uploadImage(path, path.substringAfterLast('/')) }
}
private suspend fun downloadData(): ClimbDataBackup {
val request =
Request.Builder()

View File

@@ -5,10 +5,6 @@ import android.content.SharedPreferences
import android.util.Log
import androidx.core.content.edit
/**
* Handles migration of data from OpenClimb to Ascently This includes SharedPreferences, database
* names, and other local storage
*/
class MigrationManager(private val context: Context) {
companion object {

View File

@@ -457,10 +457,6 @@ class SyncMergeLogicTest {
@Test
fun `test active sessions excluded from sync`() {
// Test scenario: Active sessions should not be included in sync data
// This tests the new behavior where active sessions are excluded from sync
// until they are completed
val allLocalSessions =
listOf(
BackupClimbSession(

View File

@@ -40,6 +40,7 @@ export default defineConfig({
items: [
{ label: "Overview", slug: "sync/overview" },
{ label: "Quick Start", slug: "sync/quick-start" },
{ label: "API Reference", slug: "sync/api-reference" },
],
},
{

View File

@@ -26,7 +26,7 @@
},
"dependencies": {
"@astrojs/node": "^9.5.0",
"@astrojs/starlight": "^0.36.0",
"@astrojs/starlight": "^0.36.1",
"astro": "^5.14.5",
"sharp": "^0.34.4"
}

258
docs/pnpm-lock.yaml generated
View File

@@ -12,8 +12,8 @@ importers:
specifier: ^9.5.0
version: 9.5.0(astro@5.14.5(@types/node@24.7.2)(rollup@4.52.4)(typescript@5.9.3))
'@astrojs/starlight':
specifier: ^0.36.0
version: 0.36.0(astro@5.14.5(@types/node@24.7.2)(rollup@4.52.4)(typescript@5.9.3))
specifier: ^0.36.1
version: 0.36.1(astro@5.14.5(@types/node@24.7.2)(rollup@4.52.4)(typescript@5.9.3))
astro:
specifier: ^5.14.5
version: 5.14.5(@types/node@24.7.2)(rollup@4.52.4)(typescript@5.9.3)
@@ -50,8 +50,8 @@ packages:
'@astrojs/sitemap@3.6.0':
resolution: {integrity: sha512-4aHkvcOZBWJigRmMIAJwRQXBS+ayoP5z40OklTXYXhUDhwusz+DyDl+nSshY6y9DvkVEavwNcFO8FD81iGhXjg==}
'@astrojs/starlight@0.36.0':
resolution: {integrity: sha512-aVJVBfvFuE2avsMDhmRzn6I5GjDhUwIQFlu3qH9a1C0fNsPYDw2asxHQODAD7EfGiKGvvHCJgHb+9jbJ8lCfNQ==}
'@astrojs/starlight@0.36.1':
resolution: {integrity: sha512-Fmt8mIsAIZN18Y4YQDI6p521GsYGe4hYxh9jWmz0pHBXnS5J7Na3TSXNya4eyIymCcKkuiKFbs7b/knsdGVYPg==}
peerDependencies:
astro: ^5.5.0
@@ -91,158 +91,158 @@ packages:
'@emnapi/runtime@1.5.0':
resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==}
'@esbuild/aix-ppc64@0.25.10':
resolution: {integrity: sha512-0NFWnA+7l41irNuaSVlLfgNT12caWJVLzp5eAVhZ0z1qpxbockccEt3s+149rE64VUI3Ml2zt8Nv5JVc4QXTsw==}
'@esbuild/aix-ppc64@0.25.11':
resolution: {integrity: sha512-Xt1dOL13m8u0WE8iplx9Ibbm+hFAO0GsU2P34UNoDGvZYkY8ifSiy6Zuc1lYxfG7svWE2fzqCUmFp5HCn51gJg==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [aix]
'@esbuild/android-arm64@0.25.10':
resolution: {integrity: sha512-LSQa7eDahypv/VO6WKohZGPSJDq5OVOo3UoFR1E4t4Gj1W7zEQMUhI+lo81H+DtB+kP+tDgBp+M4oNCwp6kffg==}
'@esbuild/android-arm64@0.25.11':
resolution: {integrity: sha512-9slpyFBc4FPPz48+f6jyiXOx/Y4v34TUeDDXJpZqAWQn/08lKGeD8aDp9TMn9jDz2CiEuHwfhRmGBvpnd/PWIQ==}
engines: {node: '>=18'}
cpu: [arm64]
os: [android]
'@esbuild/android-arm@0.25.10':
resolution: {integrity: sha512-dQAxF1dW1C3zpeCDc5KqIYuZ1tgAdRXNoZP7vkBIRtKZPYe2xVr/d3SkirklCHudW1B45tGiUlz2pUWDfbDD4w==}
'@esbuild/android-arm@0.25.11':
resolution: {integrity: sha512-uoa7dU+Dt3HYsethkJ1k6Z9YdcHjTrSb5NUy66ZfZaSV8hEYGD5ZHbEMXnqLFlbBflLsl89Zke7CAdDJ4JI+Gg==}
engines: {node: '>=18'}
cpu: [arm]
os: [android]
'@esbuild/android-x64@0.25.10':
resolution: {integrity: sha512-MiC9CWdPrfhibcXwr39p9ha1x0lZJ9KaVfvzA0Wxwz9ETX4v5CHfF09bx935nHlhi+MxhA63dKRRQLiVgSUtEg==}
'@esbuild/android-x64@0.25.11':
resolution: {integrity: sha512-Sgiab4xBjPU1QoPEIqS3Xx+R2lezu0LKIEcYe6pftr56PqPygbB7+szVnzoShbx64MUupqoE0KyRlN7gezbl8g==}
engines: {node: '>=18'}
cpu: [x64]
os: [android]
'@esbuild/darwin-arm64@0.25.10':
resolution: {integrity: sha512-JC74bdXcQEpW9KkV326WpZZjLguSZ3DfS8wrrvPMHgQOIEIG/sPXEN/V8IssoJhbefLRcRqw6RQH2NnpdprtMA==}
'@esbuild/darwin-arm64@0.25.11':
resolution: {integrity: sha512-VekY0PBCukppoQrycFxUqkCojnTQhdec0vevUL/EDOCnXd9LKWqD/bHwMPzigIJXPhC59Vd1WFIL57SKs2mg4w==}
engines: {node: '>=18'}
cpu: [arm64]
os: [darwin]
'@esbuild/darwin-x64@0.25.10':
resolution: {integrity: sha512-tguWg1olF6DGqzws97pKZ8G2L7Ig1vjDmGTwcTuYHbuU6TTjJe5FXbgs5C1BBzHbJ2bo1m3WkQDbWO2PvamRcg==}
'@esbuild/darwin-x64@0.25.11':
resolution: {integrity: sha512-+hfp3yfBalNEpTGp9loYgbknjR695HkqtY3d3/JjSRUyPg/xd6q+mQqIb5qdywnDxRZykIHs3axEqU6l1+oWEQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [darwin]
'@esbuild/freebsd-arm64@0.25.10':
resolution: {integrity: sha512-3ZioSQSg1HT2N05YxeJWYR+Libe3bREVSdWhEEgExWaDtyFbbXWb49QgPvFH8u03vUPX10JhJPcz7s9t9+boWg==}
'@esbuild/freebsd-arm64@0.25.11':
resolution: {integrity: sha512-CmKjrnayyTJF2eVuO//uSjl/K3KsMIeYeyN7FyDBjsR3lnSJHaXlVoAK8DZa7lXWChbuOk7NjAc7ygAwrnPBhA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [freebsd]
'@esbuild/freebsd-x64@0.25.10':
resolution: {integrity: sha512-LLgJfHJk014Aa4anGDbh8bmI5Lk+QidDmGzuC2D+vP7mv/GeSN+H39zOf7pN5N8p059FcOfs2bVlrRr4SK9WxA==}
'@esbuild/freebsd-x64@0.25.11':
resolution: {integrity: sha512-Dyq+5oscTJvMaYPvW3x3FLpi2+gSZTCE/1ffdwuM6G1ARang/mb3jvjxs0mw6n3Lsw84ocfo9CrNMqc5lTfGOw==}
engines: {node: '>=18'}
cpu: [x64]
os: [freebsd]
'@esbuild/linux-arm64@0.25.10':
resolution: {integrity: sha512-5luJWN6YKBsawd5f9i4+c+geYiVEw20FVW5x0v1kEMWNq8UctFjDiMATBxLvmmHA4bf7F6hTRaJgtghFr9iziQ==}
'@esbuild/linux-arm64@0.25.11':
resolution: {integrity: sha512-Qr8AzcplUhGvdyUF08A1kHU3Vr2O88xxP0Tm8GcdVOUm25XYcMPp2YqSVHbLuXzYQMf9Bh/iKx7YPqECs6ffLA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [linux]
'@esbuild/linux-arm@0.25.10':
resolution: {integrity: sha512-oR31GtBTFYCqEBALI9r6WxoU/ZofZl962pouZRTEYECvNF/dtXKku8YXcJkhgK/beU+zedXfIzHijSRapJY3vg==}
'@esbuild/linux-arm@0.25.11':
resolution: {integrity: sha512-TBMv6B4kCfrGJ8cUPo7vd6NECZH/8hPpBHHlYI3qzoYFvWu2AdTvZNuU/7hsbKWqu/COU7NIK12dHAAqBLLXgw==}
engines: {node: '>=18'}
cpu: [arm]
os: [linux]
'@esbuild/linux-ia32@0.25.10':
resolution: {integrity: sha512-NrSCx2Kim3EnnWgS4Txn0QGt0Xipoumb6z6sUtl5bOEZIVKhzfyp/Lyw4C1DIYvzeW/5mWYPBFJU3a/8Yr75DQ==}
'@esbuild/linux-ia32@0.25.11':
resolution: {integrity: sha512-TmnJg8BMGPehs5JKrCLqyWTVAvielc615jbkOirATQvWWB1NMXY77oLMzsUjRLa0+ngecEmDGqt5jiDC6bfvOw==}
engines: {node: '>=18'}
cpu: [ia32]
os: [linux]
'@esbuild/linux-loong64@0.25.10':
resolution: {integrity: sha512-xoSphrd4AZda8+rUDDfD9J6FUMjrkTz8itpTITM4/xgerAZZcFW7Dv+sun7333IfKxGG8gAq+3NbfEMJfiY+Eg==}
'@esbuild/linux-loong64@0.25.11':
resolution: {integrity: sha512-DIGXL2+gvDaXlaq8xruNXUJdT5tF+SBbJQKbWy/0J7OhU8gOHOzKmGIlfTTl6nHaCOoipxQbuJi7O++ldrxgMw==}
engines: {node: '>=18'}
cpu: [loong64]
os: [linux]
'@esbuild/linux-mips64el@0.25.10':
resolution: {integrity: sha512-ab6eiuCwoMmYDyTnyptoKkVS3k8fy/1Uvq7Dj5czXI6DF2GqD2ToInBI0SHOp5/X1BdZ26RKc5+qjQNGRBelRA==}
'@esbuild/linux-mips64el@0.25.11':
resolution: {integrity: sha512-Osx1nALUJu4pU43o9OyjSCXokFkFbyzjXb6VhGIJZQ5JZi8ylCQ9/LFagolPsHtgw6himDSyb5ETSfmp4rpiKQ==}
engines: {node: '>=18'}
cpu: [mips64el]
os: [linux]
'@esbuild/linux-ppc64@0.25.10':
resolution: {integrity: sha512-NLinzzOgZQsGpsTkEbdJTCanwA5/wozN9dSgEl12haXJBzMTpssebuXR42bthOF3z7zXFWH1AmvWunUCkBE4EA==}
'@esbuild/linux-ppc64@0.25.11':
resolution: {integrity: sha512-nbLFgsQQEsBa8XSgSTSlrnBSrpoWh7ioFDUmwo158gIm5NNP+17IYmNWzaIzWmgCxq56vfr34xGkOcZ7jX6CPw==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [linux]
'@esbuild/linux-riscv64@0.25.10':
resolution: {integrity: sha512-FE557XdZDrtX8NMIeA8LBJX3dC2M8VGXwfrQWU7LB5SLOajfJIxmSdyL/gU1m64Zs9CBKvm4UAuBp5aJ8OgnrA==}
'@esbuild/linux-riscv64@0.25.11':
resolution: {integrity: sha512-HfyAmqZi9uBAbgKYP1yGuI7tSREXwIb438q0nqvlpxAOs3XnZ8RsisRfmVsgV486NdjD7Mw2UrFSw51lzUk1ww==}
engines: {node: '>=18'}
cpu: [riscv64]
os: [linux]
'@esbuild/linux-s390x@0.25.10':
resolution: {integrity: sha512-3BBSbgzuB9ajLoVZk0mGu+EHlBwkusRmeNYdqmznmMc9zGASFjSsxgkNsqmXugpPk00gJ0JNKh/97nxmjctdew==}
'@esbuild/linux-s390x@0.25.11':
resolution: {integrity: sha512-HjLqVgSSYnVXRisyfmzsH6mXqyvj0SA7pG5g+9W7ESgwA70AXYNpfKBqh1KbTxmQVaYxpzA/SvlB9oclGPbApw==}
engines: {node: '>=18'}
cpu: [s390x]
os: [linux]
'@esbuild/linux-x64@0.25.10':
resolution: {integrity: sha512-QSX81KhFoZGwenVyPoberggdW1nrQZSvfVDAIUXr3WqLRZGZqWk/P4T8p2SP+de2Sr5HPcvjhcJzEiulKgnxtA==}
'@esbuild/linux-x64@0.25.11':
resolution: {integrity: sha512-HSFAT4+WYjIhrHxKBwGmOOSpphjYkcswF449j6EjsjbinTZbp8PJtjsVK1XFJStdzXdy/jaddAep2FGY+wyFAQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [linux]
'@esbuild/netbsd-arm64@0.25.10':
resolution: {integrity: sha512-AKQM3gfYfSW8XRk8DdMCzaLUFB15dTrZfnX8WXQoOUpUBQ+NaAFCP1kPS/ykbbGYz7rxn0WS48/81l9hFl3u4A==}
'@esbuild/netbsd-arm64@0.25.11':
resolution: {integrity: sha512-hr9Oxj1Fa4r04dNpWr3P8QKVVsjQhqrMSUzZzf+LZcYjZNqhA3IAfPQdEh1FLVUJSiu6sgAwp3OmwBfbFgG2Xg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [netbsd]
'@esbuild/netbsd-x64@0.25.10':
resolution: {integrity: sha512-7RTytDPGU6fek/hWuN9qQpeGPBZFfB4zZgcz2VK2Z5VpdUxEI8JKYsg3JfO0n/Z1E/6l05n0unDCNc4HnhQGig==}
'@esbuild/netbsd-x64@0.25.11':
resolution: {integrity: sha512-u7tKA+qbzBydyj0vgpu+5h5AeudxOAGncb8N6C9Kh1N4n7wU1Xw1JDApsRjpShRpXRQlJLb9wY28ELpwdPcZ7A==}
engines: {node: '>=18'}
cpu: [x64]
os: [netbsd]
'@esbuild/openbsd-arm64@0.25.10':
resolution: {integrity: sha512-5Se0VM9Wtq797YFn+dLimf2Zx6McttsH2olUBsDml+lm0GOCRVebRWUvDtkY4BWYv/3NgzS8b/UM3jQNh5hYyw==}
'@esbuild/openbsd-arm64@0.25.11':
resolution: {integrity: sha512-Qq6YHhayieor3DxFOoYM1q0q1uMFYb7cSpLD2qzDSvK1NAvqFi8Xgivv0cFC6J+hWVw2teCYltyy9/m/14ryHg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openbsd]
'@esbuild/openbsd-x64@0.25.10':
resolution: {integrity: sha512-XkA4frq1TLj4bEMB+2HnI0+4RnjbuGZfet2gs/LNs5Hc7D89ZQBHQ0gL2ND6Lzu1+QVkjp3x1gIcPKzRNP8bXw==}
'@esbuild/openbsd-x64@0.25.11':
resolution: {integrity: sha512-CN+7c++kkbrckTOz5hrehxWN7uIhFFlmS/hqziSFVWpAzpWrQoAG4chH+nN3Be+Kzv/uuo7zhX716x3Sn2Jduw==}
engines: {node: '>=18'}
cpu: [x64]
os: [openbsd]
'@esbuild/openharmony-arm64@0.25.10':
resolution: {integrity: sha512-AVTSBhTX8Y/Fz6OmIVBip9tJzZEUcY8WLh7I59+upa5/GPhh2/aM6bvOMQySspnCCHvFi79kMtdJS1w0DXAeag==}
'@esbuild/openharmony-arm64@0.25.11':
resolution: {integrity: sha512-rOREuNIQgaiR+9QuNkbkxubbp8MSO9rONmwP5nKncnWJ9v5jQ4JxFnLu4zDSRPf3x4u+2VN4pM4RdyIzDty/wQ==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openharmony]
'@esbuild/sunos-x64@0.25.10':
resolution: {integrity: sha512-fswk3XT0Uf2pGJmOpDB7yknqhVkJQkAQOcW/ccVOtfx05LkbWOaRAtn5SaqXypeKQra1QaEa841PgrSL9ubSPQ==}
'@esbuild/sunos-x64@0.25.11':
resolution: {integrity: sha512-nq2xdYaWxyg9DcIyXkZhcYulC6pQ2FuCgem3LI92IwMgIZ69KHeY8T4Y88pcwoLIjbed8n36CyKoYRDygNSGhA==}
engines: {node: '>=18'}
cpu: [x64]
os: [sunos]
'@esbuild/win32-arm64@0.25.10':
resolution: {integrity: sha512-ah+9b59KDTSfpaCg6VdJoOQvKjI33nTaQr4UluQwW7aEwZQsbMCfTmfEO4VyewOxx4RaDT/xCy9ra2GPWmO7Kw==}
'@esbuild/win32-arm64@0.25.11':
resolution: {integrity: sha512-3XxECOWJq1qMZ3MN8srCJ/QfoLpL+VaxD/WfNRm1O3B4+AZ/BnLVgFbUV3eiRYDMXetciH16dwPbbHqwe1uU0Q==}
engines: {node: '>=18'}
cpu: [arm64]
os: [win32]
'@esbuild/win32-ia32@0.25.10':
resolution: {integrity: sha512-QHPDbKkrGO8/cz9LKVnJU22HOi4pxZnZhhA2HYHez5Pz4JeffhDjf85E57Oyco163GnzNCVkZK0b/n4Y0UHcSw==}
'@esbuild/win32-ia32@0.25.11':
resolution: {integrity: sha512-3ukss6gb9XZ8TlRyJlgLn17ecsK4NSQTmdIXRASVsiS2sQ6zPPZklNJT5GR5tE/MUarymmy8kCEf5xPCNCqVOA==}
engines: {node: '>=18'}
cpu: [ia32]
os: [win32]
'@esbuild/win32-x64@0.25.10':
resolution: {integrity: sha512-9KpxSVFCu0iK1owoez6aC/s/EdUQLDN3adTxGCqxMVhrPDj6bt5dbrHDXUuq+Bs2vATFBBrQS5vdQ/Ed2P+nbw==}
'@esbuild/win32-x64@0.25.11':
resolution: {integrity: sha512-D7Hpz6A2L4hzsRpPaCYkQnGOotdUpDzSGRIv9I+1ITdHROSFUWW95ZPZWQmGka1Fg7W3zFJowyn9WGwMJ0+KPA==}
engines: {node: '>=18'}
cpu: [x64]
os: [win32]
@@ -811,8 +811,8 @@ packages:
resolution: {integrity: sha512-KxektNH63SrbfUyDiwXqRb1rLwKt33AmMv+5Nhsw1kqZ13SJBRTgZHtGbE+hH3a1mVW1cz+4pqSWVPAtLVXTzQ==}
engines: {node: '>=18'}
devalue@5.3.2:
resolution: {integrity: sha512-UDsjUbpQn9kvm68slnrs+mfxwFkIflOhkanmyabZ8zOYk8SMEIbJ3TK+88g70hSIeytu4y18f0z/hYHMTrXIWw==}
devalue@5.4.1:
resolution: {integrity: sha512-YtoaOfsqjbZQKGIMRYDWKjUmSB4VJ/RElB+bXZawQAQYAo4xu08GKTMVlsZDTF6R2MbAgjcAQRPI5eIyRAT2OQ==}
devlop@1.1.0:
resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==}
@@ -861,8 +861,8 @@ packages:
esast-util-from-js@2.0.1:
resolution: {integrity: sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==}
esbuild@0.25.10:
resolution: {integrity: sha512-9RiGKvCwaqxO2owP61uQ4BgNborAQskMR6QusfWzQqv7AZOg5oGehdY2pRJMTKuwxd1IDBP4rSbI5lHzU7SMsQ==}
esbuild@0.25.11:
resolution: {integrity: sha512-KohQwyzrKTQmhXDW1PjCv3Tyspn9n5GcY2RTDqeORIdIJY8yKIF7sTSopFmn/wpMPW4rdPXI0UE5LJLuq3bx0Q==}
engines: {node: '>=18'}
hasBin: true
@@ -1344,8 +1344,8 @@ packages:
resolution: {integrity: sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg==}
engines: {node: '>=14.16'}
package-manager-detector@1.4.0:
resolution: {integrity: sha512-rRZ+pR1Usc+ND9M2NkmCvE/LYJS+8ORVV9X0KuNSY/gFsp7RBHJM/ADh9LYq4Vvfq6QkKrW6/weuh8SMEtN5gw==}
package-manager-detector@1.4.1:
resolution: {integrity: sha512-dSMiVLBEA4XaNJ0PRb4N5cV/SEP4BWrWZKBmfF+OUm2pQTiZ6DDkKeWaltwu3JRhLoy59ayIkJ00cx9K9CaYTg==}
pagefind@1.4.0:
resolution: {integrity: sha512-z2kY1mQlL4J8q5EIsQkLzQjilovKzfNVhX8De6oyE6uHpfFtyBaqUpcl/XzJC/4fjD8vBDyh1zolimIcVrCn9g==}
@@ -1753,8 +1753,8 @@ packages:
vfile@6.0.3:
resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==}
vite@6.3.7:
resolution: {integrity: sha512-mQYaKepA0NGMBsz8Xktt3tJUG5ELE2iT7IJ+ssXI6nxVdE2sFc/d/6w/JByqMLvWg8hNKHpPgzjgOkrhpKFnrA==}
vite@6.4.0:
resolution: {integrity: sha512-oLnWs9Hak/LOlKjeSpOwD6JMks8BeICEdYMJBf6P4Lac/pO9tKiv/XhXnAM7nNfSkZahjlCZu9sS50zL8fSnsw==}
engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0}
hasBin: true
peerDependencies:
@@ -1922,7 +1922,7 @@ snapshots:
stream-replace-string: 2.0.0
zod: 3.25.76
'@astrojs/starlight@0.36.0(astro@5.14.5(@types/node@24.7.2)(rollup@4.52.4)(typescript@5.9.3))':
'@astrojs/starlight@0.36.1(astro@5.14.5(@types/node@24.7.2)(rollup@4.52.4)(typescript@5.9.3))':
dependencies:
'@astrojs/markdown-remark': 6.3.8
'@astrojs/mdx': 4.3.7(astro@5.14.5(@types/node@24.7.2)(rollup@4.52.4)(typescript@5.9.3))
@@ -1993,82 +1993,82 @@ snapshots:
tslib: 2.8.1
optional: true
'@esbuild/aix-ppc64@0.25.10':
'@esbuild/aix-ppc64@0.25.11':
optional: true
'@esbuild/android-arm64@0.25.10':
'@esbuild/android-arm64@0.25.11':
optional: true
'@esbuild/android-arm@0.25.10':
'@esbuild/android-arm@0.25.11':
optional: true
'@esbuild/android-x64@0.25.10':
'@esbuild/android-x64@0.25.11':
optional: true
'@esbuild/darwin-arm64@0.25.10':
'@esbuild/darwin-arm64@0.25.11':
optional: true
'@esbuild/darwin-x64@0.25.10':
'@esbuild/darwin-x64@0.25.11':
optional: true
'@esbuild/freebsd-arm64@0.25.10':
'@esbuild/freebsd-arm64@0.25.11':
optional: true
'@esbuild/freebsd-x64@0.25.10':
'@esbuild/freebsd-x64@0.25.11':
optional: true
'@esbuild/linux-arm64@0.25.10':
'@esbuild/linux-arm64@0.25.11':
optional: true
'@esbuild/linux-arm@0.25.10':
'@esbuild/linux-arm@0.25.11':
optional: true
'@esbuild/linux-ia32@0.25.10':
'@esbuild/linux-ia32@0.25.11':
optional: true
'@esbuild/linux-loong64@0.25.10':
'@esbuild/linux-loong64@0.25.11':
optional: true
'@esbuild/linux-mips64el@0.25.10':
'@esbuild/linux-mips64el@0.25.11':
optional: true
'@esbuild/linux-ppc64@0.25.10':
'@esbuild/linux-ppc64@0.25.11':
optional: true
'@esbuild/linux-riscv64@0.25.10':
'@esbuild/linux-riscv64@0.25.11':
optional: true
'@esbuild/linux-s390x@0.25.10':
'@esbuild/linux-s390x@0.25.11':
optional: true
'@esbuild/linux-x64@0.25.10':
'@esbuild/linux-x64@0.25.11':
optional: true
'@esbuild/netbsd-arm64@0.25.10':
'@esbuild/netbsd-arm64@0.25.11':
optional: true
'@esbuild/netbsd-x64@0.25.10':
'@esbuild/netbsd-x64@0.25.11':
optional: true
'@esbuild/openbsd-arm64@0.25.10':
'@esbuild/openbsd-arm64@0.25.11':
optional: true
'@esbuild/openbsd-x64@0.25.10':
'@esbuild/openbsd-x64@0.25.11':
optional: true
'@esbuild/openharmony-arm64@0.25.10':
'@esbuild/openharmony-arm64@0.25.11':
optional: true
'@esbuild/sunos-x64@0.25.10':
'@esbuild/sunos-x64@0.25.11':
optional: true
'@esbuild/win32-arm64@0.25.10':
'@esbuild/win32-arm64@0.25.11':
optional: true
'@esbuild/win32-ia32@0.25.10':
'@esbuild/win32-ia32@0.25.11':
optional: true
'@esbuild/win32-x64@0.25.10':
'@esbuild/win32-x64@0.25.11':
optional: true
'@expressive-code/core@0.41.3':
@@ -2453,12 +2453,12 @@ snapshots:
cssesc: 3.0.0
debug: 4.4.3
deterministic-object-hash: 2.0.2
devalue: 5.3.2
devalue: 5.4.1
diff: 5.2.0
dlv: 1.1.3
dset: 3.1.4
es-module-lexer: 1.7.0
esbuild: 0.25.10
esbuild: 0.25.11
estree-walker: 3.0.3
flattie: 1.1.1
fontace: 0.3.1
@@ -2474,7 +2474,7 @@ snapshots:
neotraverse: 0.6.18
p-limit: 6.2.0
p-queue: 8.1.1
package-manager-detector: 1.4.0
package-manager-detector: 1.4.1
picomatch: 4.0.3
prompts: 2.4.2
rehype: 13.0.2
@@ -2489,8 +2489,8 @@ snapshots:
unist-util-visit: 5.0.0
unstorage: 1.17.1
vfile: 6.0.3
vite: 6.3.7(@types/node@24.7.2)
vitefu: 1.1.1(vite@6.3.7(@types/node@24.7.2))
vite: 6.4.0(@types/node@24.7.2)
vitefu: 1.1.1(vite@6.4.0(@types/node@24.7.2))
xxhash-wasm: 1.1.0
yargs-parser: 21.1.1
yocto-spinner: 0.2.3
@@ -2638,7 +2638,7 @@ snapshots:
dependencies:
base-64: 1.0.0
devalue@5.3.2: {}
devalue@5.4.1: {}
devlop@1.1.0:
dependencies:
@@ -2680,34 +2680,34 @@ snapshots:
esast-util-from-estree: 2.0.0
vfile-message: 4.0.3
esbuild@0.25.10:
esbuild@0.25.11:
optionalDependencies:
'@esbuild/aix-ppc64': 0.25.10
'@esbuild/android-arm': 0.25.10
'@esbuild/android-arm64': 0.25.10
'@esbuild/android-x64': 0.25.10
'@esbuild/darwin-arm64': 0.25.10
'@esbuild/darwin-x64': 0.25.10
'@esbuild/freebsd-arm64': 0.25.10
'@esbuild/freebsd-x64': 0.25.10
'@esbuild/linux-arm': 0.25.10
'@esbuild/linux-arm64': 0.25.10
'@esbuild/linux-ia32': 0.25.10
'@esbuild/linux-loong64': 0.25.10
'@esbuild/linux-mips64el': 0.25.10
'@esbuild/linux-ppc64': 0.25.10
'@esbuild/linux-riscv64': 0.25.10
'@esbuild/linux-s390x': 0.25.10
'@esbuild/linux-x64': 0.25.10
'@esbuild/netbsd-arm64': 0.25.10
'@esbuild/netbsd-x64': 0.25.10
'@esbuild/openbsd-arm64': 0.25.10
'@esbuild/openbsd-x64': 0.25.10
'@esbuild/openharmony-arm64': 0.25.10
'@esbuild/sunos-x64': 0.25.10
'@esbuild/win32-arm64': 0.25.10
'@esbuild/win32-ia32': 0.25.10
'@esbuild/win32-x64': 0.25.10
'@esbuild/aix-ppc64': 0.25.11
'@esbuild/android-arm': 0.25.11
'@esbuild/android-arm64': 0.25.11
'@esbuild/android-x64': 0.25.11
'@esbuild/darwin-arm64': 0.25.11
'@esbuild/darwin-x64': 0.25.11
'@esbuild/freebsd-arm64': 0.25.11
'@esbuild/freebsd-x64': 0.25.11
'@esbuild/linux-arm': 0.25.11
'@esbuild/linux-arm64': 0.25.11
'@esbuild/linux-ia32': 0.25.11
'@esbuild/linux-loong64': 0.25.11
'@esbuild/linux-mips64el': 0.25.11
'@esbuild/linux-ppc64': 0.25.11
'@esbuild/linux-riscv64': 0.25.11
'@esbuild/linux-s390x': 0.25.11
'@esbuild/linux-x64': 0.25.11
'@esbuild/netbsd-arm64': 0.25.11
'@esbuild/netbsd-x64': 0.25.11
'@esbuild/openbsd-arm64': 0.25.11
'@esbuild/openbsd-x64': 0.25.11
'@esbuild/openharmony-arm64': 0.25.11
'@esbuild/sunos-x64': 0.25.11
'@esbuild/win32-arm64': 0.25.11
'@esbuild/win32-ia32': 0.25.11
'@esbuild/win32-x64': 0.25.11
escape-html@1.0.3: {}
@@ -3595,7 +3595,7 @@ snapshots:
p-timeout@6.1.4: {}
package-manager-detector@1.4.0: {}
package-manager-detector@1.4.1: {}
pagefind@1.4.0:
optionalDependencies:
@@ -4116,9 +4116,9 @@ snapshots:
'@types/unist': 3.0.3
vfile-message: 4.0.3
vite@6.3.7(@types/node@24.7.2):
vite@6.4.0(@types/node@24.7.2):
dependencies:
esbuild: 0.25.10
esbuild: 0.25.11
fdir: 6.5.0(picomatch@4.0.3)
picomatch: 4.0.3
postcss: 8.5.6
@@ -4128,9 +4128,9 @@ snapshots:
'@types/node': 24.7.2
fsevents: 2.3.3
vitefu@1.1.1(vite@6.3.7(@types/node@24.7.2)):
vitefu@1.1.1(vite@6.4.0(@types/node@24.7.2)):
optionalDependencies:
vite: 6.3.7(@types/node@24.7.2)
vite: 6.4.0(@types/node@24.7.2)
web-namespaces@2.0.1: {}

View File

@@ -1,3 +0,0 @@
onlyBuiltDependencies:
- esbuild
- sharp

View File

@@ -1,51 +0,0 @@
---
title: Sync Server API
description: API endpoints for the Ascently sync server
---
The sync server provides a minimal REST API for data synchronization.
## Authentication
All endpoints require an `Authorization: Bearer <your-auth-token>` header.
## Endpoints
### Data Sync
**GET /sync**
- Download `ascently.json` file
- Returns: JSON data file or 404 if no data exists
**POST /sync**
- Upload `ascently.json` file
- Body: JSON data
- Returns: Success confirmation
### Images
**GET /images/{imageName}**
- Download an image file
- Returns: Image file or 404 if not found
**POST /images/{imageName}**
- Upload an image file
- Body: Image data
- Returns: Success confirmation
## Example Usage
```bash
# Download data
curl -H "Authorization: Bearer your-token" \
http://localhost:8080/sync
# Upload data
curl -X POST \
-H "Authorization: Bearer your-token" \
-H "Content-Type: application/json" \
-d @ascently.json \
http://localhost:8080/sync
```
See `main.go` in the sync directory for implementation details.

View File

@@ -0,0 +1,152 @@
---
title: API Reference
description: Complete API documentation for the Ascently sync server
---
Complete reference for all sync server endpoints.
## Authentication
All endpoints require a bearer token in the `Authorization` header:
```
Authorization: Bearer your-auth-token
```
Unauthorized requests return `401 Unauthorized`.
## Endpoints
### Health Check
**`GET /health`**
Check if the server is running.
**Response:**
```json
{
"status": "ok",
"version": "2.0.0"
}
```
### Full Sync - Download
**`GET /sync`**
Download the entire dataset from the server.
**Response:**
```json
{
"exportedAt": "2024-01-15T10:30:00.000Z",
"version": "2.0",
"formatVersion": "2.0",
"gyms": [...],
"problems": [...],
"sessions": [...],
"attempts": [...],
"deletedItems": [...]
}
```
Returns `200 OK` with the backup data, or `404 Not Found` if no data exists.
### Full Sync - Upload
**`POST /sync`**
Upload your entire dataset to the server. This overwrites all server data.
**Request Body:**
```json
{
"exportedAt": "2024-01-15T10:30:00.000Z",
"version": "2.0",
"formatVersion": "2.0",
"gyms": [...],
"problems": [...],
"sessions": [...],
"attempts": [...],
"deletedItems": [...]
}
```
**Response:**
```
200 OK
```
### Delta Sync
**`POST /sync/delta`**
Sync only changed data since your last sync. Much faster than full sync.
**Request Body:**
```json
{
"lastSyncTime": "2024-01-15T10:00:00.000Z",
"gyms": [...],
"problems": [...],
"sessions": [...],
"attempts": [...],
"deletedItems": [...]
}
```
Include only items modified after `lastSyncTime`. The server merges your changes with its data using last-write-wins based on `updatedAt` timestamps.
**Response:**
```json
{
"serverTime": "2024-01-15T10:30:00.000Z",
"gyms": [...],
"problems": [...],
"sessions": [...],
"attempts": [...],
"deletedItems": [...]
}
```
Returns only server items modified after your `lastSyncTime`. Save `serverTime` as your new `lastSyncTime` for the next delta sync.
### Image Upload
**`POST /images/upload?filename={name}`**
Upload an image file.
**Query Parameters:**
- `filename`: Image filename (e.g., `problem_abc123_0.jpg`)
**Request Body:**
Binary image data (JPEG, PNG, GIF, or WebP)
**Response:**
```
200 OK
```
### Image Download
**`GET /images/download?filename={name}`**
Download an image file.
**Query Parameters:**
- `filename`: Image filename
**Response:**
Binary image data with appropriate `Content-Type` header.
Returns `404 Not Found` if the image doesn't exist.
## Notes
- All timestamps are ISO 8601 format with milliseconds
- Active sessions (status `active`) are excluded from sync
- Images are stored separately and referenced by filename
- The server stores everything in a single `ascently.json` file
- No versioning or history - last write wins

View File

@@ -3,28 +3,49 @@ title: Self-Hosted Sync Overview
description: Learn about Ascently's optional sync server for cross-device data synchronization
---
You can run your own sync server to keep your data in sync across devices. The server is lightweight and easy to set up using Docker.
Run your own sync server to keep your data in sync across devices. The server is lightweight and easy to set up with Docker.
## How It Works
This server uses a single `ascently.json` file for your data and a directory for images. The last client to upload wins, overwriting the old data. Authentication is just a static bearer token.
The server stores your data in a single `ascently.json` file and images in a directory. It's simple: last write wins. Authentication is a static bearer token you set.
## API
## Features
All endpoints require an `Authorization: Bearer <your-auth-token>` header.
- **Delta sync**: Only syncs changed data
- **Image sync**: Automatically syncs problem images
- **Conflict resolution**: Last-write-wins based on timestamps
- **Cross-platform**: Works with iOS and Android clients
- **Privacy**: Your data, your server, no analytics
- `GET /sync`: Download `ascently.json`
- `POST /sync`: Upload `ascently.json`
- `GET /images/{imageName}`: Download an image
- `POST /images/{imageName}`: Upload an image
## API Endpoints
- `GET /health` - Health check
- `GET /sync` - Download full dataset
- `POST /sync` - Upload full dataset
- `POST /sync/delta` - Sync only changes (recommended)
- `POST /images/upload?filename={name}` - Upload image
- `GET /images/download?filename={name}` - Download image
All endpoints require `Authorization: Bearer <your-token>` header.
See the [API Reference](/sync/api-reference/) for complete documentation.
## Getting Started
The easiest way to get started is with the [Quick Start guide](/sync/quick-start/) using Docker Compose.
Check out the [Quick Start guide](/sync/quick-start/) to get your server running with Docker Compose.
You'll need:
- Docker and Docker Compose
- A secure authentication token
- A place to store your data
The server will be available at `http://localhost:8080` by default. Configure your clients with your server URL and auth token to start syncing.
The server will be available at `http://localhost:8080` by default. Configure your Ascently apps with your server URL and auth token to start syncing.
## How Sync Works
1. **First sync**: Client uploads or downloads full dataset
2. **Subsequent syncs**: Client uses delta sync to only transfer changed data
3. **Conflicts**: Resolved automatically using timestamps (newer wins)
4. **Images**: Synced automatically with problem data
Active sessions are excluded from sync until completed.

View File

@@ -3,7 +3,7 @@ title: Quick Start
description: Get your Ascently sync server running with Docker Compose
---
Get your Ascently sync server up and running using Docker Compose.
Get your sync server running in minutes with Docker Compose.
## Prerequisites
@@ -12,50 +12,158 @@ Get your Ascently sync server up and running using Docker Compose.
## Setup
1. Create a `.env` file with your configuration:
1. Create a `docker-compose.yml` file:
```env
IMAGE=git.atri.dad/atridad/ascently-sync:latest
APP_PORT=8080
AUTH_TOKEN=your-super-secret-token
DATA_FILE=/data/ascently.json
IMAGES_DIR=/data/images
ROOT_DIR=./ascently-data
```yaml
version: '3.8'
services:
ascently-sync:
image: git.atri.dad/atridad/ascently-sync:latest
ports:
- "8080:8080"
environment:
- AUTH_TOKEN=${AUTH_TOKEN}
- DATA_FILE=/data/ascently.json
- IMAGES_DIR=/data/images
volumes:
- ./ascently-data:/data
restart: unless-stopped
```
Set `AUTH_TOKEN` to a long, random string. `ROOT_DIR` is where the server will store its data on your machine.
2. Create a `.env` file in the same directory:
2. Use the provided `docker-compose.yml` in the `sync/` directory:
```env
AUTH_TOKEN=your-super-secret-token-here
```
Replace `your-super-secret-token-here` with a secure random token (see below).
3. Start the server:
```bash
cd sync/
docker-compose up -d
```
The server will be available at `http://localhost:8080`.
## Configure Your Clients
## Generate a Secure Token
Configure your Ascently apps with:
- **Server URL**: `http://your-server-ip:8080` (or your domain)
- **Auth Token**: The token from your `.env` file
Enable sync and perform your first sync to start synchronizing data across devices.
## Generating a Secure Token
Generate a secure authentication token:
Use this command to generate a secure authentication token:
```bash
# On Linux/macOS
openssl rand -base64 32
```
Keep this token secure and don't share it publicly.
Copy the output and paste it into your `.env` file as the `AUTH_TOKEN`.
## Accessing Remotely
Keep this token secret and don't commit it to version control.
For remote access, you'll need to:
- Set up port forwarding on your router (port 8080)
- Use your public IP address or set up a domain name
- Consider using HTTPS with a reverse proxy for security
## Configure Your Apps
Open Ascently on your iOS or Android device:
1. Go to **Settings**
2. Scroll to **Sync Configuration**
3. Enter your **Server URL**: `http://your-server-ip:8080`
4. Enter your **Auth Token**: (the token from your `.env` file)
5. Tap **Test Connection** to verify it works
6. Enable **Auto Sync**
7. Tap **Sync Now** to perform your first sync
Repeat this on all your devices to keep them in sync.
## Verify It's Working
Check the server logs:
```bash
docker-compose logs -f ascently-sync
```
You should see logs like:
```
Delta sync from 192.168.1.100: lastSyncTime=2024-01-15T10:00:00.000Z, gyms=1, problems=5, sessions=2, attempts=10, deletedItems=0
```
## Remote Access
To access your server remotely:
### Option 1: Port Forwarding
1. Forward port 8080 on your router to your server
2. Find your public IP address
3. Use `http://your-public-ip:8080` as the server URL
### Option 2: Domain Name (Recommended)
1. Get a domain name and point it to your server
2. Set up a reverse proxy (nginx, Caddy, Traefik)
3. Enable HTTPS with Let's Encrypt
4. Use `https://sync.yourdomain.com` as the server URL
Example nginx config with HTTPS:
```nginx
server {
listen 443 ssl http2;
server_name sync.yourdomain.com;
ssl_certificate /etc/letsencrypt/live/sync.yourdomain.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/sync.yourdomain.com/privkey.pem;
location / {
proxy_pass http://localhost:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
```
## Updating
Pull the latest image and restart:
```bash
docker-compose pull
docker-compose up -d
```
Your data is stored in `./ascently-data` and persists across updates.
## Troubleshooting
### Connection Failed
- Check the server is running: `docker-compose ps`
- Verify the auth token matches on server and client
- Check firewall settings and port forwarding
- Test locally first with `http://localhost:8080`
### Sync Errors
- Check server logs: `docker-compose logs ascently-sync`
- Verify your device has internet connection
- Try disabling and re-enabling sync
- Perform a manual sync from Settings
### Data Location
All data is stored in `./ascently-data/`:
```
ascently-data/
├── ascently.json # Your climb data
└── images/ # Problem images
```
You can back this up or move it to another server.
## Next Steps
- Read the [API Reference](/sync/api-reference/) for advanced usage
- Set up automated backups of your `ascently-data` directory
- Configure HTTPS for secure remote access
- Monitor server logs for sync activity

View File

@@ -465,7 +465,7 @@
CODE_SIGN_ENTITLEMENTS = Ascently/Ascently.entitlements;
CODE_SIGN_IDENTITY = "Apple Development";
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 26;
CURRENT_PROJECT_VERSION = 27;
DEVELOPMENT_TEAM = 4BC9Y2LL4B;
DRIVERKIT_DEPLOYMENT_TARGET = 24.6;
ENABLE_PREVIEWS = YES;
@@ -487,7 +487,7 @@
"@executable_path/Frameworks",
);
MACOSX_DEPLOYMENT_TARGET = 15.6;
MARKETING_VERSION = 2.0.0;
MARKETING_VERSION = 2.1.0;
PRODUCT_BUNDLE_IDENTIFIER = com.atridad.Ascently;
PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE_SPECIFIER = "";
@@ -513,7 +513,7 @@
CODE_SIGN_ENTITLEMENTS = Ascently/Ascently.entitlements;
CODE_SIGN_IDENTITY = "Apple Development";
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 26;
CURRENT_PROJECT_VERSION = 27;
DEVELOPMENT_TEAM = 4BC9Y2LL4B;
DRIVERKIT_DEPLOYMENT_TARGET = 24.6;
ENABLE_PREVIEWS = YES;
@@ -535,7 +535,7 @@
"@executable_path/Frameworks",
);
MACOSX_DEPLOYMENT_TARGET = 15.6;
MARKETING_VERSION = 2.0.0;
MARKETING_VERSION = 2.1.0;
PRODUCT_BUNDLE_IDENTIFIER = com.atridad.Ascently;
PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE_SPECIFIER = "";
@@ -602,7 +602,7 @@
ASSETCATALOG_COMPILER_WIDGET_BACKGROUND_COLOR_NAME = WidgetBackground;
CODE_SIGN_ENTITLEMENTS = SessionStatusLiveExtension.entitlements;
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 26;
CURRENT_PROJECT_VERSION = 27;
DEVELOPMENT_TEAM = 4BC9Y2LL4B;
GENERATE_INFOPLIST_FILE = YES;
INFOPLIST_FILE = SessionStatusLive/Info.plist;
@@ -613,7 +613,7 @@
"@executable_path/Frameworks",
"@executable_path/../../Frameworks",
);
MARKETING_VERSION = 2.0.0;
MARKETING_VERSION = 2.1.0;
PRODUCT_BUNDLE_IDENTIFIER = com.atridad.Ascently.SessionStatusLive;
PRODUCT_NAME = "$(TARGET_NAME)";
SKIP_INSTALL = YES;
@@ -632,7 +632,7 @@
ASSETCATALOG_COMPILER_WIDGET_BACKGROUND_COLOR_NAME = WidgetBackground;
CODE_SIGN_ENTITLEMENTS = SessionStatusLiveExtension.entitlements;
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 26;
CURRENT_PROJECT_VERSION = 27;
DEVELOPMENT_TEAM = 4BC9Y2LL4B;
GENERATE_INFOPLIST_FILE = YES;
INFOPLIST_FILE = SessionStatusLive/Info.plist;
@@ -643,7 +643,7 @@
"@executable_path/Frameworks",
"@executable_path/../../Frameworks",
);
MARKETING_VERSION = 2.0.0;
MARKETING_VERSION = 2.1.0;
PRODUCT_BUNDLE_IDENTIFIER = com.atridad.Ascently.SessionStatusLive;
PRODUCT_NAME = "$(TARGET_NAME)";
SKIP_INSTALL = YES;

View File

@@ -111,7 +111,6 @@ struct ContentView: View {
Task {
try? await Task.sleep(nanoseconds: 300_000_000) // 0.3 seconds
await dataManager.onAppBecomeActive()
// Ensure health integration is verified
await dataManager.healthKitService.verifyAndRestoreIntegration()
}
}

View File

@@ -55,7 +55,6 @@ struct BackupGym: Codable {
let createdAt: String
let updatedAt: String
/// Initialize from native iOS Gym model
init(from gym: Gym) {
self.id = gym.id.uuidString
self.name = gym.name
@@ -71,7 +70,6 @@ struct BackupGym: Codable {
self.updatedAt = formatter.string(from: gym.updatedAt)
}
/// Initialize with explicit parameters for import
init(
id: String,
name: String,
@@ -94,7 +92,6 @@ struct BackupGym: Codable {
self.updatedAt = updatedAt
}
/// Convert to native iOS Gym model
func toGym() throws -> Gym {
let formatter = ISO8601DateFormatter()
formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]
@@ -137,7 +134,6 @@ struct BackupProblem: Codable {
let createdAt: String
let updatedAt: String
/// Initialize from native iOS Problem model
init(from problem: Problem) {
self.id = problem.id.uuidString
self.gymId = problem.gymId.uuidString
@@ -158,7 +154,6 @@ struct BackupProblem: Codable {
self.updatedAt = formatter.string(from: problem.updatedAt)
}
/// Initialize with explicit parameters for import
init(
id: String,
gymId: String,
@@ -191,7 +186,6 @@ struct BackupProblem: Codable {
self.updatedAt = updatedAt
}
/// Convert to native iOS Problem model
func toProblem() throws -> Problem {
let formatter = ISO8601DateFormatter()
formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]
@@ -224,7 +218,6 @@ struct BackupProblem: Codable {
)
}
/// Create a copy with updated image paths for import processing
func withUpdatedImagePaths(_ newImagePaths: [String]) -> BackupProblem {
return BackupProblem(
id: self.id,
@@ -258,7 +251,6 @@ struct BackupClimbSession: Codable {
let createdAt: String
let updatedAt: String
/// Initialize from native iOS ClimbSession model
init(from session: ClimbSession) {
self.id = session.id.uuidString
self.gymId = session.gymId.uuidString
@@ -275,7 +267,6 @@ struct BackupClimbSession: Codable {
self.updatedAt = formatter.string(from: session.updatedAt)
}
/// Initialize with explicit parameters for import
init(
id: String,
gymId: String,
@@ -300,7 +291,6 @@ struct BackupClimbSession: Codable {
self.updatedAt = updatedAt
}
/// Convert to native iOS ClimbSession model
func toClimbSession() throws -> ClimbSession {
let formatter = ISO8601DateFormatter()
formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]
@@ -347,7 +337,6 @@ struct BackupAttempt: Codable {
let createdAt: String
let updatedAt: String?
/// Initialize from native iOS Attempt model
init(from attempt: Attempt) {
self.id = attempt.id.uuidString
self.sessionId = attempt.sessionId.uuidString
@@ -365,7 +354,6 @@ struct BackupAttempt: Codable {
self.updatedAt = formatter.string(from: attempt.updatedAt)
}
/// Initialize with explicit parameters for import
init(
id: String,
sessionId: String,
@@ -392,7 +380,6 @@ struct BackupAttempt: Codable {
self.updatedAt = updatedAt
}
/// Convert to native iOS Attempt model
func toAttempt() throws -> Attempt {
let formatter = ISO8601DateFormatter()
formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]

View File

@@ -0,0 +1,26 @@
//
// DeltaSyncFormat.swift
// Ascently
//
// Delta sync structures for incremental data synchronization
//
import Foundation
/// Client → server payload for `POST /sync/delta`.
///
/// Carries only the items the client created or modified after
/// `lastSyncTime`, plus deletion tombstones, so a sync transfers a
/// minimal delta instead of the full dataset. Field names mirror the
/// JSON keys expected by the sync server.
struct DeltaSyncRequest: Codable {
    /// ISO 8601 timestamp of the client's last successful sync; the
    /// server returns only items it modified after this instant.
    let lastSyncTime: String
    let gyms: [BackupGym]
    let problems: [BackupProblem]
    let sessions: [BackupClimbSession]
    let attempts: [BackupAttempt]
    /// Tombstones for items deleted locally since `lastSyncTime`.
    let deletedItems: [DeletedItem]
}
/// Server → client payload answering `POST /sync/delta`.
///
/// Contains only the items the server modified after the request's
/// `lastSyncTime`. The client merges these into local data and stores
/// `serverTime` as the new `lastSyncTime` for the next delta sync.
struct DeltaSyncResponse: Codable {
    /// Server clock at the time of the response (ISO 8601); used as the
    /// next sync watermark to avoid client/server clock skew.
    let serverTime: String
    let gyms: [BackupGym]
    let problems: [BackupProblem]
    let sessions: [BackupClimbSession]
    let attempts: [BackupAttempt]
    /// Tombstones for items deleted on the server side since `lastSyncTime`.
    let deletedItems: [DeletedItem]
}

View File

@@ -31,7 +31,6 @@ class HealthKitService: ObservableObject {
}
}
/// Restore active workout state
private func restoreActiveWorkout() {
if let startDate = userDefaults.object(forKey: workoutStartDateKey) as? Date,
let sessionIdString = userDefaults.string(forKey: workoutSessionIdKey),
@@ -43,7 +42,6 @@ class HealthKitService: ObservableObject {
}
}
/// Persist active workout state
private func persistActiveWorkout() {
if let startDate = currentWorkoutStartDate, let sessionId = currentWorkoutSessionId {
userDefaults.set(startDate, forKey: workoutStartDateKey)
@@ -54,7 +52,6 @@ class HealthKitService: ObservableObject {
}
}
/// Verify and restore health integration
func verifyAndRestoreIntegration() async {
guard isEnabled else { return }

View File

@@ -136,6 +136,314 @@ class SyncService: ObservableObject {
}
}
/// Performs an incremental sync against `POST /sync/delta`.
///
/// Collects local items modified since the last successful sync, posts them
/// to the server, applies the server's returned changes to `dataManager`,
/// syncs images for the problems that were uploaded, and finally advances
/// `lastSyncTime` to the server's clock.
///
/// - Parameter dataManager: Source of truth for local gyms, problems,
///   sessions, attempts, and deletion tombstones.
/// - Throws: `SyncError.notConfigured`, `.invalidURL`, `.invalidResponse`,
///   `.unauthorized`, `.serverError`, plus any encoding/decoding or
///   transport errors.
func performDeltaSync(dataManager: ClimbingDataManager) async throws {
    guard isConfigured else {
        throw SyncError.notConfigured
    }

    guard let url = URL(string: "\(serverURL)/sync/delta") else {
        throw SyncError.invalidURL
    }

    // Last successful sync, or the epoch on a first-ever delta sync
    // (which effectively uploads everything).
    let lastSync = lastSyncTime ?? Date(timeIntervalSince1970: 0)

    // BUGFIX: sync timestamps carry milliseconds (e.g. "2024-01-15T10:30:00.000Z"),
    // matching the backup format written with `.withFractionalSeconds`. A default
    // ISO8601DateFormatter cannot parse fractional seconds, so deletion timestamps
    // would never parse (deletions silently dropped from the request) and the
    // server time would never parse (lastSyncTime never advancing, so every
    // "delta" re-sent the full history). Keep a plain formatter as a fallback
    // for timestamps written without milliseconds.
    let formatter = ISO8601DateFormatter()
    formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]
    let plainFormatter = ISO8601DateFormatter()

    let lastSyncString = formatter.string(from: lastSync)

    // Collect items modified since the last sync.
    let modifiedGyms = dataManager.gyms.filter { gym in
        gym.updatedAt > lastSync
    }.map { BackupGym(from: $0) }

    let modifiedProblems = dataManager.problems.filter { problem in
        problem.updatedAt > lastSync
    }.map { problem -> BackupProblem in
        let backupProblem = BackupProblem(from: problem)
        // Rewrite image paths to deterministic, problem-id-based filenames so
        // every platform agrees on what each image is called on the server.
        if !problem.imagePaths.isEmpty {
            let normalizedPaths = problem.imagePaths.enumerated().map { index, _ in
                ImageNamingUtils.generateImageFilename(
                    problemId: problem.id.uuidString, imageIndex: index)
            }
            return BackupProblem(
                id: backupProblem.id,
                gymId: backupProblem.gymId,
                name: backupProblem.name,
                description: backupProblem.description,
                climbType: backupProblem.climbType,
                difficulty: backupProblem.difficulty,
                tags: backupProblem.tags,
                location: backupProblem.location,
                imagePaths: normalizedPaths,
                isActive: backupProblem.isActive,
                dateSet: backupProblem.dateSet,
                notes: backupProblem.notes,
                createdAt: backupProblem.createdAt,
                updatedAt: backupProblem.updatedAt
            )
        }
        return backupProblem
    }

    // Active sessions (and their attempts) are excluded until completed.
    let modifiedSessions = dataManager.sessions.filter { session in
        session.status != .active && session.updatedAt > lastSync
    }.map { BackupClimbSession(from: $0) }

    let activeSessionIds = Set(
        dataManager.sessions.filter { $0.status == .active }.map { $0.id })

    let modifiedAttempts = dataManager.attempts.filter { attempt in
        !activeSessionIds.contains(attempt.sessionId) && attempt.createdAt > lastSync
    }.map { BackupAttempt(from: $0) }

    // Only send tombstones newer than the last sync; parse with the
    // fractional-seconds formatter first, then fall back to the plain one.
    let modifiedDeletions = dataManager.getDeletedItems().filter { item in
        guard
            let deletedDate = formatter.date(from: item.deletedAt)
                ?? plainFormatter.date(from: item.deletedAt)
        else { return false }
        return deletedDate > lastSync
    }

    print(
        "iOS DELTA SYNC: Sending gyms=\(modifiedGyms.count), problems=\(modifiedProblems.count), sessions=\(modifiedSessions.count), attempts=\(modifiedAttempts.count), deletions=\(modifiedDeletions.count)"
    )

    // Create delta request
    let deltaRequest = DeltaSyncRequest(
        lastSyncTime: lastSyncString,
        gyms: modifiedGyms,
        problems: modifiedProblems,
        sessions: modifiedSessions,
        attempts: modifiedAttempts,
        deletedItems: modifiedDeletions
    )

    let encoder = JSONEncoder()
    encoder.dateEncodingStrategy = .iso8601
    let jsonData = try encoder.encode(deltaRequest)

    var request = URLRequest(url: url)
    request.httpMethod = "POST"
    request.setValue("Bearer \(authToken)", forHTTPHeaderField: "Authorization")
    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
    request.setValue("application/json", forHTTPHeaderField: "Accept")
    request.httpBody = jsonData

    let (data, response) = try await URLSession.shared.data(for: request)

    guard let httpResponse = response as? HTTPURLResponse else {
        throw SyncError.invalidResponse
    }

    switch httpResponse.statusCode {
    case 200:
        break
    case 401:
        throw SyncError.unauthorized
    default:
        throw SyncError.serverError(httpResponse.statusCode)
    }

    let decoder = JSONDecoder()
    let deltaResponse = try decoder.decode(DeltaSyncResponse.self, from: data)

    print(
        "iOS DELTA SYNC: Received gyms=\(deltaResponse.gyms.count), problems=\(deltaResponse.problems.count), sessions=\(deltaResponse.sessions.count), attempts=\(deltaResponse.attempts.count), deletions=\(deltaResponse.deletedItems.count)"
    )

    // Apply server changes to local data
    try await applyDeltaResponse(deltaResponse, dataManager: dataManager)

    // Sync only modified problem images
    try await syncModifiedImages(modifiedProblems: modifiedProblems, dataManager: dataManager)

    // Advance the watermark to the SERVER's clock (not the device's) so
    // client/server clock skew cannot hide future changes.
    if let serverTime = formatter.date(from: deltaResponse.serverTime)
        ?? plainFormatter.date(from: deltaResponse.serverTime)
    {
        lastSyncTime = serverTime
        userDefaults.set(lastSyncTime, forKey: Keys.lastSyncTime)
    }
}
/// Merges a server delta into local data using last-write-wins per item.
///
/// Downloads images referenced by incoming problems, rewrites their paths to
/// local consistent filenames, merges gyms/problems/sessions/attempts by
/// comparing `updatedAt` (or `createdAt` for attempts), applies the combined
/// set of deletion tombstones, persists everything, and refreshes the data
/// state.
///
/// - Parameters:
///   - response: The server's delta (only items changed since our last sync).
///   - dataManager: Local store to merge into.
/// - Throws: Conversion errors from the `Backup*` → model mappings.
private func applyDeltaResponse(_ response: DeltaSyncResponse, dataManager: ClimbingDataManager)
    async throws
{
    // BUGFIX: incoming `updatedAt` strings carry milliseconds. Formatting local
    // dates WITHOUT `.withFractionalSeconds` produced "…T10:30:00Z" while the
    // server sent "…T10:30:00.000Z"; the lexicographic `>=` below then treated
    // equal-or-newer remote items as older ('.' < 'Z') and dropped them.
    // Matching formats makes the string comparison equivalent to a date
    // comparison for same-zone ISO 8601 timestamps.
    let formatter = ISO8601DateFormatter()
    formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]

    // Download images for new/modified problems from the server and remember
    // server-filename → local-consistent-filename so paths can be rewritten.
    var imagePathMapping: [String: String] = [:]
    for problem in response.problems {
        guard let imagePaths = problem.imagePaths, !imagePaths.isEmpty else { continue }

        for (index, imagePath) in imagePaths.enumerated() {
            let serverFilename = URL(fileURLWithPath: imagePath).lastPathComponent
            do {
                let imageData = try await downloadImage(filename: serverFilename)
                let consistentFilename = ImageNamingUtils.generateImageFilename(
                    problemId: problem.id, imageIndex: index)
                let imageManager = ImageManager.shared
                _ = try imageManager.saveImportedImage(imageData, filename: consistentFilename)
                imagePathMapping[serverFilename] = consistentFilename
            } catch SyncError.imageNotFound {
                // Best-effort: a missing image should not abort the whole merge.
                print("Image not found on server: \(serverFilename)")
                continue
            } catch {
                print("Failed to download image \(serverFilename): \(error)")
                continue
            }
        }
    }

    // Merge gyms (last-write-wins on updatedAt).
    for backupGym in response.gyms {
        if let index = dataManager.gyms.firstIndex(where: { $0.id.uuidString == backupGym.id })
        {
            let existing = dataManager.gyms[index]
            if backupGym.updatedAt >= formatter.string(from: existing.updatedAt) {
                dataManager.gyms[index] = try backupGym.toGym()
            }
        } else {
            dataManager.gyms.append(try backupGym.toGym())
        }
    }

    // Merge problems, rewriting server image paths to the local filenames
    // saved above.
    for backupProblem in response.problems {
        var problemToMerge = backupProblem
        if !imagePathMapping.isEmpty, let imagePaths = backupProblem.imagePaths {
            // BUGFIX: the mapping is keyed by the path's last component (as
            // stored during download), so look it up the same way; the old
            // full-path lookup missed whenever the server path had directories.
            let updatedPaths = imagePaths.map { path in
                imagePathMapping[URL(fileURLWithPath: path).lastPathComponent] ?? path
            }
            problemToMerge = BackupProblem(
                id: backupProblem.id,
                gymId: backupProblem.gymId,
                name: backupProblem.name,
                description: backupProblem.description,
                climbType: backupProblem.climbType,
                difficulty: backupProblem.difficulty,
                tags: backupProblem.tags,
                location: backupProblem.location,
                imagePaths: updatedPaths,
                isActive: backupProblem.isActive,
                dateSet: backupProblem.dateSet,
                notes: backupProblem.notes,
                createdAt: backupProblem.createdAt,
                updatedAt: backupProblem.updatedAt
            )
        }

        if let index = dataManager.problems.firstIndex(where: {
            $0.id.uuidString == problemToMerge.id
        }) {
            let existing = dataManager.problems[index]
            if problemToMerge.updatedAt >= formatter.string(from: existing.updatedAt) {
                dataManager.problems[index] = try problemToMerge.toProblem()
            }
        } else {
            dataManager.problems.append(try problemToMerge.toProblem())
        }
    }

    // Merge sessions (last-write-wins on updatedAt).
    for backupSession in response.sessions {
        if let index = dataManager.sessions.firstIndex(where: {
            $0.id.uuidString == backupSession.id
        }) {
            let existing = dataManager.sessions[index]
            if backupSession.updatedAt >= formatter.string(from: existing.updatedAt) {
                dataManager.sessions[index] = try backupSession.toClimbSession()
            }
        } else {
            dataManager.sessions.append(try backupSession.toClimbSession())
        }
    }

    // Merge attempts — attempts are immutable after creation, so compare
    // createdAt rather than updatedAt.
    for backupAttempt in response.attempts {
        if let index = dataManager.attempts.firstIndex(where: {
            $0.id.uuidString == backupAttempt.id
        }) {
            let existing = dataManager.attempts[index]
            if backupAttempt.createdAt >= formatter.string(from: existing.createdAt) {
                dataManager.attempts[index] = try backupAttempt.toAttempt()
            }
        } else {
            dataManager.attempts.append(try backupAttempt.toAttempt())
        }
    }

    // Apply the union of local and server tombstones.
    let allDeletions = dataManager.getDeletedItems() + response.deletedItems
    let uniqueDeletions = Array(Set(allDeletions))
    applyDeletionsToDataManager(deletions: uniqueDeletions, dataManager: dataManager)

    // Save all changes
    dataManager.saveGyms()
    dataManager.saveProblems()
    dataManager.saveSessions()
    dataManager.saveAttempts()

    // Persist the merged tombstone set so future syncs propagate deletions.
    dataManager.clearDeletedItems()
    if let data = try? JSONEncoder().encode(uniqueDeletions) {
        UserDefaults.standard.set(data, forKey: "ascently_deleted_items")
    }

    DataStateManager.shared.updateDataState()
}
/// Removes every record referenced by a deletion tombstone from the
/// in-memory stores (gyms, problems, sessions, attempts).
/// Matching is done on the record's UUID string against the tombstone id.
private func applyDeletionsToDataManager(
    deletions: [DeletedItem], dataManager: ClimbingDataManager
) {
    // Bucket tombstone ids by entity type in a single pass.
    var idsByType: [String: Set<String>] = [:]
    for tombstone in deletions {
        idsByType[tombstone.type, default: []].insert(tombstone.id)
    }
    let gymIds = idsByType["gym"] ?? []
    let problemIds = idsByType["problem"] ?? []
    let sessionIds = idsByType["session"] ?? []
    let attemptIds = idsByType["attempt"] ?? []
    dataManager.gyms.removeAll { gymIds.contains($0.id.uuidString) }
    dataManager.problems.removeAll { problemIds.contains($0.id.uuidString) }
    dataManager.sessions.removeAll { sessionIds.contains($0.id.uuidString) }
    dataManager.attempts.removeAll { attemptIds.contains($0.id.uuidString) }
}
/// Uploads the on-disk images of each problem touched by a delta sync,
/// renaming files to the canonical per-problem/per-index name first.
///
/// - Parameters:
///   - modifiedProblems: backup records for problems changed in this sync.
///   - dataManager: in-memory store used to resolve each backup id to a
///     local `Problem` (problems missing locally are skipped).
/// - Throws: only from `uploadImage` calls made outside the inner do/catch
///   (the per-image catch swallows upload failures after logging them).
private func syncModifiedImages(
    modifiedProblems: [BackupProblem], dataManager: ClimbingDataManager
) async throws {
    guard !modifiedProblems.isEmpty else { return }
    print("iOS DELTA SYNC: Syncing images for \(modifiedProblems.count) modified problems")
    for backupProblem in modifiedProblems {
        guard
            let problem = dataManager.problems.first(where: {
                $0.id.uuidString == backupProblem.id
            })
        else {
            continue
        }
        for (index, imagePath) in problem.imagePaths.enumerated() {
            // Stored path may be absolute or relative; only the filename is used.
            let filename = URL(fileURLWithPath: imagePath).lastPathComponent
            // Canonical name derived from problem id + image index so the
            // server sees stable filenames across devices.
            let consistentFilename = ImageNamingUtils.generateImageFilename(
                problemId: problem.id.uuidString, imageIndex: index)
            let imageManager = ImageManager.shared
            let fullPath = imageManager.imagesDirectory.appendingPathComponent(filename).path
            // NOTE(review): if a previous run already renamed this file to the
            // canonical name, loadImageData on the old path presumably fails and
            // the image is silently skipped — confirm loadImageData semantics.
            if let imageData = imageManager.loadImageData(fromPath: fullPath) {
                do {
                    if filename != consistentFilename {
                        let newPath = imageManager.imagesDirectory.appendingPathComponent(
                            consistentFilename
                        ).path
                        // Best-effort rename; a failed move still uploads the
                        // bytes under the canonical name.
                        try? FileManager.default.moveItem(atPath: fullPath, toPath: newPath)
                    }
                    try await uploadImage(filename: consistentFilename, imageData: imageData)
                    print("Uploaded modified problem image: \(consistentFilename)")
                } catch {
                    // Upload failures are logged and do not abort the batch.
                    print("Failed to upload image \(consistentFilename): \(error)")
                }
            }
        }
    }
}
func uploadImage(filename: String, imageData: Data) async throws {
guard isConfigured else {
throw SyncError.notConfigured
@@ -246,6 +554,17 @@ class SyncService: ObservableObject {
!serverBackup.gyms.isEmpty || !serverBackup.problems.isEmpty
|| !serverBackup.sessions.isEmpty || !serverBackup.attempts.isEmpty
// If both client and server have been synced before, use delta sync
if hasLocalData && hasServerData && lastSyncTime != nil {
print("iOS SYNC: Using delta sync for incremental updates")
try await performDeltaSync(dataManager: dataManager)
// Update last sync time
lastSyncTime = Date()
userDefaults.set(lastSyncTime, forKey: Keys.lastSyncTime)
return
}
if !hasLocalData && hasServerData {
// Case 1: No local data - do full restore from server
print("iOS SYNC: Case 1 - No local data, performing full restore from server")
@@ -286,7 +605,6 @@ class SyncService: ObservableObject {
}
}
/// Parses ISO8601 timestamp to milliseconds for comparison
private func parseISO8601ToMillis(timestamp: String) -> Int64 {
let formatter = ISO8601DateFormatter()
if let date = formatter.date(from: timestamp) {
@@ -1150,7 +1468,6 @@ class SyncService: ObservableObject {
// Get active session IDs to protect their attempts
let activeSessionIds = Set(
local.compactMap { attempt in
// This is a simplified check - in a real implementation you'd want to cross-reference with sessions
return attempt.sessionId
}.filter { sessionId in
// Check if this session ID belongs to an active session

View File

@@ -37,46 +37,36 @@ class DataStateManager {
print("iOS Data state updated to: \(now)")
}
/// Gets the current data state timestamp. This represents when any data was last modified
/// locally.
func getLastModified() -> String {
if let storedTimestamp = userDefaults.string(forKey: Keys.lastModified) {
print("iOS DataStateManager returning stored timestamp: \(storedTimestamp)")
return storedTimestamp
}
// If no timestamp is stored, return epoch time to indicate very old data
// This ensures server data will be considered newer than uninitialized local data
let epochTime = "1970-01-01T00:00:00.000Z"
print("WARNING: No data state timestamp found - returning epoch time: \(epochTime)")
print("No data state timestamp found - returning epoch time: \(epochTime)")
return epochTime
}
/// Sets the data state timestamp to a specific value. Used when importing data from server to
/// sync the state.
/// Overwrites the stored data-state timestamp with an externally supplied
/// value — used when importing server data so local state matches it.
func setLastModified(_ newTimestamp: String) {
    userDefaults.set(newTimestamp, forKey: Keys.lastModified)
    print("Data state set to: \(newTimestamp)")
}
/// Resets the data state (for testing or complete data wipe).
/// Wipes all persisted data-state keys (for testing or a complete data wipe).
func reset() {
    for key in [Keys.lastModified, Keys.initialized] {
        userDefaults.removeObject(forKey: key)
    }
    print("Data state reset")
}
/// Checks if the data state has been initialized.
/// Whether the data state has been initialized; backed by a UserDefaults
/// flag (reads as `false` when the key has never been set).
private func isInitialized() -> Bool {
    return userDefaults.bool(forKey: Keys.initialized)
}
/// Marks the data state as initialized.
/// Persistently marks the data state as initialized.
private func markAsInitialized() {
    userDefaults.set(true, forKey: Keys.initialized)
}
/// Gets debug information about the current state.
/// Returns a one-line human-readable snapshot of the current state for
/// debugging: last-modified timestamp plus the initialized flag.
func getDebugInfo() -> String {
    let lastModified = getLastModified()
    let initialized = isInitialized()
    return "DataState(lastModified=\(lastModified), initialized=\(initialized))"
}

View File

@@ -690,7 +690,6 @@ class ImageManager {
}
private func cleanupOrphanedFiles() {
// This would need access to the data manager to check which files are actually referenced
print("Cleanup would require coordination with data manager")
}

View File

@@ -108,7 +108,6 @@ class ImageNamingUtils {
)
}
/// Generates the canonical filename that should be used for a problem image
static func getCanonicalImageFilename(problemId: String, imageIndex: Int) -> String {
return generateImageFilename(problemId: problemId, imageIndex: imageIndex)
}

View File

@@ -18,6 +18,7 @@ struct ZipUtils {
var fileEntries: [(name: String, data: Data, offset: UInt32)] = []
var currentOffset: UInt32 = 0
// Add metadata
let metadata = createMetadata(
exportData: exportData, referencedImagePaths: referencedImagePaths)
let metadataData = metadata.data(using: .utf8) ?? Data()
@@ -29,6 +30,7 @@ struct ZipUtils {
currentOffset: &currentOffset
)
// Encode JSON data
let encoder = JSONEncoder()
encoder.outputFormatting = .prettyPrinted
encoder.dateEncodingStrategy = .custom { date, encoder in
@@ -46,44 +48,49 @@ struct ZipUtils {
currentOffset: &currentOffset
)
print("Processing \(referencedImagePaths.count) referenced image paths")
// Process images in batches for better performance
print("Processing \(referencedImagePaths.count) images for export")
var successfulImages = 0
let batchSize = 10
let sortedPaths = Array(referencedImagePaths).sorted()
// Pre-allocate capacity for better memory performance
zipData.reserveCapacity(zipData.count + (referencedImagePaths.count * 200_000)) // Estimate 200KB per image
for (index, imagePath) in sortedPaths.enumerated() {
if index % batchSize == 0 {
print("Processing images \(index)/\(sortedPaths.count)")
}
for imagePath in referencedImagePaths {
print("Processing image path: \(imagePath)")
let imageURL = URL(fileURLWithPath: imagePath)
let imageName = imageURL.lastPathComponent
print("Image name: \(imageName)")
if FileManager.default.fileExists(atPath: imagePath) {
print("Image file exists at: \(imagePath)")
do {
let imageData = try Data(contentsOf: imageURL)
print("Image data size: \(imageData.count) bytes")
if imageData.count > 0 {
let imageEntryName = "\(IMAGES_DIR_NAME)/\(imageName)"
try addFileToZip(
filename: imageEntryName,
fileData: imageData,
zipData: &zipData,
fileEntries: &fileEntries,
currentOffset: &currentOffset
)
successfulImages += 1
print("Successfully added image to ZIP: \(imageEntryName)")
} else {
print("Image data is empty for: \(imagePath)")
}
} catch {
print("Failed to read image data for \(imagePath): \(error)")
guard FileManager.default.fileExists(atPath: imagePath) else {
continue
}
do {
let imageData = try Data(contentsOf: imageURL)
if imageData.count > 0 {
let imageEntryName = "\(IMAGES_DIR_NAME)/\(imageName)"
try addFileToZip(
filename: imageEntryName,
fileData: imageData,
zipData: &zipData,
fileEntries: &fileEntries,
currentOffset: &currentOffset
)
successfulImages += 1
}
} else {
print("Image file does not exist at: \(imagePath)")
} catch {
print("Failed to read image: \(imageName)")
}
}
print("Export completed: \(successfulImages)/\(referencedImagePaths.count) images included")
print("Export: included \(successfulImages)/\(referencedImagePaths.count) images")
// Build central directory
centralDirectory.reserveCapacity(fileEntries.count * 100) // Estimate 100 bytes per entry
for entry in fileEntries {
let centralDirEntry = createCentralDirectoryEntry(
filename: entry.name,
@@ -372,12 +379,12 @@ struct ZipUtils {
return data
}
private static func calculateCRC32(data: Data) -> UInt32 {
// CRC32 lookup table for faster calculation
private static let crc32Table: [UInt32] = {
let polynomial: UInt32 = 0xEDB8_8320
var crc: UInt32 = 0xFFFF_FFFF
for byte in data {
crc ^= UInt32(byte)
var table = [UInt32](repeating: 0, count: 256)
for i in 0..<256 {
var crc = UInt32(i)
for _ in 0..<8 {
if crc & 1 != 0 {
crc = (crc >> 1) ^ polynomial
@@ -385,6 +392,19 @@ struct ZipUtils {
crc >>= 1
}
}
table[i] = crc
}
return table
}()
private static func calculateCRC32(data: Data) -> UInt32 {
var crc: UInt32 = 0xFFFF_FFFF
data.withUnsafeBytes { (bytes: UnsafeRawBufferPointer) in
for byte in bytes {
let index = Int((crc ^ UInt32(byte)) & 0xFF)
crc = (crc >> 8) ^ crc32Table[index]
}
}
return ~crc

View File

@@ -653,9 +653,6 @@ class ClimbingDataManager: ObservableObject {
return gym(withId: mostUsedGymId)
}
/// Clean up orphaned data - removes attempts that reference non-existent sessions
/// and removes duplicate attempts. This ensures data integrity and prevents
/// orphaned attempts from appearing in widgets
private func cleanupOrphanedData() {
let validSessionIds = Set(sessions.map { $0.id })
let validProblemIds = Set(problems.map { $0.id })
@@ -761,8 +758,6 @@ class ClimbingDataManager: ObservableObject {
}
}
/// Validate data integrity and return a report
/// This can be called manually to check for issues
func validateDataIntegrity() -> String {
let validSessionIds = Set(sessions.map { $0.id })
let validProblemIds = Set(problems.map { $0.id })
@@ -801,8 +796,6 @@ class ClimbingDataManager: ObservableObject {
return report
}
/// Manually trigger cleanup of orphaned data
/// This can be called from settings or debug menu
func manualDataCleanup() {
cleanupOrphanedData()
successMessage = "Data cleanup completed"
@@ -830,12 +823,12 @@ class ClimbingDataManager: ObservableObject {
}
}
func exportData() -> Data? {
func exportData() async -> Data? {
do {
// Create backup objects on main thread (they access MainActor-isolated properties)
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSSSS"
// Create export data with normalized image paths
let exportData = ClimbDataBackup(
exportedAt: dateFormatter.string(from: Date()),
version: "2.0",
@@ -846,19 +839,30 @@ class ClimbingDataManager: ObservableObject {
attempts: attempts.map { BackupAttempt(from: $0) }
)
// Collect actual image paths from disk for the ZIP
let referencedImagePaths = collectReferencedImagePaths()
print("Starting export with \(referencedImagePaths.count) images")
// Get image manager path info on main thread
let imagesDirectory = ImageManager.shared.imagesDirectory.path
let problemsForImages = problems
let zipData = try ZipUtils.createExportZip(
exportData: exportData,
referencedImagePaths: referencedImagePaths
)
// Move heavy I/O operations to background thread
let zipData = try await Task.detached(priority: .userInitiated) {
// Collect actual image paths from disk for the ZIP
let referencedImagePaths = await Self.collectReferencedImagePathsStatic(
problems: problemsForImages,
imagesDirectory: imagesDirectory)
print("Starting export with \(referencedImagePaths.count) images")
print("Export completed successfully")
successMessage = "Export completed with \(referencedImagePaths.count) images"
let zipData = try await ZipUtils.createExportZip(
exportData: exportData,
referencedImagePaths: referencedImagePaths
)
print("Export completed successfully")
return (zipData, referencedImagePaths.count)
}.value
successMessage = "Export completed with \(zipData.1) images"
clearMessageAfterDelay()
return zipData
return zipData.0
} catch {
let errorMessage = "Export failed: \(error.localizedDescription)"
print("ERROR: \(errorMessage)")
@@ -955,36 +959,36 @@ class ClimbingDataManager: ObservableObject {
extension ClimbingDataManager {
private func collectReferencedImagePaths() -> Set<String> {
let imagesDirectory = ImageManager.shared.imagesDirectory.path
return Self.collectReferencedImagePathsStatic(
problems: problems,
imagesDirectory: imagesDirectory)
}
private static func collectReferencedImagePathsStatic(
problems: [Problem], imagesDirectory: String
) -> Set<String> {
var imagePaths = Set<String>()
print("Starting image path collection...")
print("Total problems: \(problems.count)")
var missingCount = 0
for problem in problems {
if !problem.imagePaths.isEmpty {
print(
"Problem '\(problem.name ?? "Unnamed")' has \(problem.imagePaths.count) images"
)
for imagePath in problem.imagePaths {
print(" - Stored path: \(imagePath)")
// Extract just the filename (migration should have normalized these)
let filename = URL(fileURLWithPath: imagePath).lastPathComponent
let fullPath = ImageManager.shared.getFullPath(from: filename)
print(" - Full disk path: \(fullPath)")
let fullPath = (imagesDirectory as NSString).appendingPathComponent(filename)
if FileManager.default.fileExists(atPath: fullPath) {
print(" ✓ File exists")
imagePaths.insert(fullPath)
} else {
print(" ✗ WARNING: File not found at \(fullPath)")
// Still add it to let ZipUtils handle the logging
missingCount += 1
imagePaths.insert(fullPath)
}
}
}
}
print("Collected \(imagePaths.count) total image paths for export")
print("Export: Collected \(imagePaths.count) images (\(missingCount) missing)")
return imagePaths
}
@@ -1273,7 +1277,9 @@ extension ClimbingDataManager {
) { [weak self] notification in
if let updateCount = notification.userInfo?["updateCount"] as? Int {
print("🔔 Image migration completed with \(updateCount) updates - reloading data")
self?.loadProblems()
Task { @MainActor in
self?.loadProblems()
}
}
}
}

View File

@@ -103,7 +103,6 @@ struct AddEditProblemView: View {
setupInitialGym()
}
.onChange(of: dataManager.gyms) {
// Ensure a gym is selected when gyms are loaded or changed
if selectedGym == nil && !dataManager.gyms.isEmpty {
selectedGym = dataManager.gyms.first
}

View File

@@ -180,10 +180,12 @@ struct DataManagementSection: View {
private func exportDataAsync() {
isExporting = true
Task {
let data = await MainActor.run { dataManager.exportData() }
isExporting = false
if let data = data {
activeSheet = .export(data)
let data = await dataManager.exportData()
await MainActor.run {
isExporting = false
if let data = data {
activeSheet = .export(data)
}
}
}
}

View File

@@ -256,10 +256,6 @@ final class AscentlyTests: XCTestCase {
// MARK: - Active Session Preservation Tests
func testActiveSessionPreservationDuringImport() throws {
// Test that active sessions are preserved during import operations
// This tests the fix for the bug where active sessions disappear after sync
// Simulate an active session that exists locally but not in import data
let activeSessionId = UUID()
let gymId = UUID()

View File

@@ -13,7 +13,7 @@ import (
"time"
)
const VERSION = "2.0.0"
const VERSION = "2.1.0"
func min(a, b int) int {
if a < b {
@@ -39,6 +39,24 @@ type ClimbDataBackup struct {
DeletedItems []DeletedItem `json:"deletedItems"`
}
type DeltaSyncRequest struct {
LastSyncTime string `json:"lastSyncTime"`
Gyms []BackupGym `json:"gyms"`
Problems []BackupProblem `json:"problems"`
Sessions []BackupClimbSession `json:"sessions"`
Attempts []BackupAttempt `json:"attempts"`
DeletedItems []DeletedItem `json:"deletedItems"`
}
type DeltaSyncResponse struct {
ServerTime string `json:"serverTime"`
Gyms []BackupGym `json:"gyms"`
Problems []BackupProblem `json:"problems"`
Sessions []BackupClimbSession `json:"sessions"`
Attempts []BackupAttempt `json:"attempts"`
DeletedItems []DeletedItem `json:"deletedItems"`
}
type BackupGym struct {
ID string `json:"id"`
Name string `json:"name"`
@@ -154,6 +172,174 @@ func (s *SyncServer) loadData() (*ClimbDataBackup, error) {
return &backup, nil
}
// mergeGyms returns a last-writer-wins union of two gym lists keyed by ID.
// Unknown IDs are inserted; known IDs are replaced only when the incoming
// record's UpdatedAt is at least as new (ties favor the update).
// NOTE(review): UpdatedAt is compared lexicographically as a string — this is
// only correct while every timestamp uses one fixed RFC3339 format and zone.
func (s *SyncServer) mergeGyms(existing []BackupGym, updates []BackupGym) []BackupGym {
	byID := make(map[string]BackupGym, len(existing)+len(updates))
	for _, g := range existing {
		byID[g.ID] = g
	}
	for _, g := range updates {
		current, seen := byID[g.ID]
		if !seen || g.UpdatedAt >= current.UpdatedAt {
			byID[g.ID] = g
		}
	}
	merged := make([]BackupGym, 0, len(byID))
	for _, g := range byID {
		merged = append(merged, g)
	}
	return merged
}
// mergeProblems returns a last-writer-wins union of two problem lists keyed
// by ID; on an ID collision the record with the newer (string-compared)
// UpdatedAt wins, with ties going to the incoming update.
func (s *SyncServer) mergeProblems(existing []BackupProblem, updates []BackupProblem) []BackupProblem {
	byID := make(map[string]BackupProblem, len(existing)+len(updates))
	for _, p := range existing {
		byID[p.ID] = p
	}
	for _, p := range updates {
		current, seen := byID[p.ID]
		if !seen || p.UpdatedAt >= current.UpdatedAt {
			byID[p.ID] = p
		}
	}
	merged := make([]BackupProblem, 0, len(byID))
	for _, p := range byID {
		merged = append(merged, p)
	}
	return merged
}
// mergeSessions returns a last-writer-wins union of two session lists keyed
// by ID; on an ID collision the record with the newer (string-compared)
// UpdatedAt wins, with ties going to the incoming update.
func (s *SyncServer) mergeSessions(existing []BackupClimbSession, updates []BackupClimbSession) []BackupClimbSession {
	byID := make(map[string]BackupClimbSession, len(existing)+len(updates))
	for _, sess := range existing {
		byID[sess.ID] = sess
	}
	for _, sess := range updates {
		current, seen := byID[sess.ID]
		if !seen || sess.UpdatedAt >= current.UpdatedAt {
			byID[sess.ID] = sess
		}
	}
	merged := make([]BackupClimbSession, 0, len(byID))
	for _, sess := range byID {
		merged = append(merged, sess)
	}
	return merged
}
// mergeAttempts returns a union of two attempt lists keyed by ID. Unlike the
// other merges, attempts are compared on CreatedAt (they are immutable once
// logged, so creation time is the only ordering available); ties favor the
// incoming update. CreatedAt is compared lexicographically as a string.
func (s *SyncServer) mergeAttempts(existing []BackupAttempt, updates []BackupAttempt) []BackupAttempt {
	byID := make(map[string]BackupAttempt, len(existing)+len(updates))
	for _, a := range existing {
		byID[a.ID] = a
	}
	for _, a := range updates {
		current, seen := byID[a.ID]
		if !seen || a.CreatedAt >= current.CreatedAt {
			byID[a.ID] = a
		}
	}
	merged := make([]BackupAttempt, 0, len(byID))
	for _, a := range byID {
		merged = append(merged, a)
	}
	return merged
}
// mergeDeletedItems unions two tombstone lists. Tombstones are keyed by the
// composite "type:id" (the same id may exist for different entity types);
// on a collision the tombstone with the newer (string-compared) DeletedAt
// wins, with ties going to the incoming update.
func (s *SyncServer) mergeDeletedItems(existing []DeletedItem, updates []DeletedItem) []DeletedItem {
	byKey := make(map[string]DeletedItem, len(existing)+len(updates))
	for _, item := range existing {
		byKey[item.Type+":"+item.ID] = item
	}
	for _, item := range updates {
		key := item.Type + ":" + item.ID
		current, seen := byKey[key]
		if !seen || item.DeletedAt >= current.DeletedAt {
			byKey[key] = item
		}
	}
	merged := make([]DeletedItem, 0, len(byKey))
	for _, item := range byKey {
		merged = append(merged, item)
	}
	return merged
}
// applyDeletions strips tombstoned records out of the backup in place.
// Each entity slice is rebuilt only when at least one tombstone of that type
// exists, so untouched (possibly nil) slices keep their original identity —
// this preserves the original's JSON null-vs-[] behavior.
func (s *SyncServer) applyDeletions(backup *ClimbDataBackup, deletedItems []DeletedItem) {
	// Index tombstones as type -> set of deleted ids.
	tombstones := make(map[string]map[string]bool)
	for _, item := range deletedItems {
		ids := tombstones[item.Type]
		if ids == nil {
			ids = make(map[string]bool)
			tombstones[item.Type] = ids
		}
		ids[item.ID] = true
	}
	if ids := tombstones["gym"]; ids != nil {
		kept := []BackupGym{}
		for _, g := range backup.Gyms {
			if !ids[g.ID] {
				kept = append(kept, g)
			}
		}
		backup.Gyms = kept
	}
	if ids := tombstones["problem"]; ids != nil {
		kept := []BackupProblem{}
		for _, p := range backup.Problems {
			if !ids[p.ID] {
				kept = append(kept, p)
			}
		}
		backup.Problems = kept
	}
	if ids := tombstones["session"]; ids != nil {
		kept := []BackupClimbSession{}
		for _, sess := range backup.Sessions {
			if !ids[sess.ID] {
				kept = append(kept, sess)
			}
		}
		backup.Sessions = kept
	}
	if ids := tombstones["attempt"]; ids != nil {
		kept := []BackupAttempt{}
		for _, a := range backup.Attempts {
			if !ids[a.ID] {
				kept = append(kept, a)
			}
		}
		backup.Attempts = kept
	}
}
func (s *SyncServer) saveData(backup *ClimbDataBackup) error {
backup.ExportedAt = time.Now().UTC().Format(time.RFC3339)
@@ -167,7 +353,6 @@ func (s *SyncServer) saveData(backup *ClimbDataBackup) error {
return err
}
// Ensure images directory exists
if err := os.MkdirAll(s.imagesDir, 0755); err != nil {
return err
}
@@ -315,6 +500,123 @@ func (s *SyncServer) handleImageDownload(w http.ResponseWriter, r *http.Request)
w.Write(imageData)
}
// handleDeltaSync implements POST /sync/delta, the incremental sync endpoint:
// the client uploads everything it changed since its last sync (plus deletion
// tombstones); the server merges that into the stored backup, persists it,
// and replies with only the records whose timestamps fall after the client's
// lastSyncTime. An unparseable lastSyncTime degrades to a full download
// (zero time => everything matches the After filter).
//
// NOTE(review): no locking is visible in this handler — two concurrent delta
// syncs could interleave load/merge/save and lose one side's writes; confirm
// SyncServer serializes requests elsewhere.
func (s *SyncServer) handleDeltaSync(w http.ResponseWriter, r *http.Request) {
	if !s.authenticate(r) {
		log.Printf("Unauthorized delta sync attempt from %s", r.RemoteAddr)
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	var deltaRequest DeltaSyncRequest
	if err := json.NewDecoder(r.Body).Decode(&deltaRequest); err != nil {
		log.Printf("Invalid JSON from %s: %v", r.RemoteAddr, err)
		http.Error(w, "Invalid JSON", http.StatusBadRequest)
		return
	}
	log.Printf("Delta sync from %s: lastSyncTime=%s, gyms=%d, problems=%d, sessions=%d, attempts=%d, deletedItems=%d",
		r.RemoteAddr, deltaRequest.LastSyncTime,
		len(deltaRequest.Gyms), len(deltaRequest.Problems),
		len(deltaRequest.Sessions), len(deltaRequest.Attempts),
		len(deltaRequest.DeletedItems))
	// Load current server data
	serverBackup, err := s.loadData()
	if err != nil {
		log.Printf("Failed to load data: %v", err)
		http.Error(w, "Failed to load data", http.StatusInternalServerError)
		return
	}
	// Merge client changes into server data (last-writer-wins per record;
	// see the merge* helpers for the exact timestamp rules).
	serverBackup.Gyms = s.mergeGyms(serverBackup.Gyms, deltaRequest.Gyms)
	serverBackup.Problems = s.mergeProblems(serverBackup.Problems, deltaRequest.Problems)
	serverBackup.Sessions = s.mergeSessions(serverBackup.Sessions, deltaRequest.Sessions)
	serverBackup.Attempts = s.mergeAttempts(serverBackup.Attempts, deltaRequest.Attempts)
	serverBackup.DeletedItems = s.mergeDeletedItems(serverBackup.DeletedItems, deltaRequest.DeletedItems)
	// Apply deletions to remove deleted items — after the merges, so a
	// tombstone also wins over a record re-uploaded in the same request.
	s.applyDeletions(serverBackup, serverBackup.DeletedItems)
	// Save merged data before building the response; a save failure aborts
	// the sync so the client does not advance its lastSyncTime.
	if err := s.saveData(serverBackup); err != nil {
		log.Printf("Failed to save data: %v", err)
		http.Error(w, "Failed to save data", http.StatusInternalServerError)
		return
	}
	// Parse client's last sync time
	clientLastSync, err := time.Parse(time.RFC3339, deltaRequest.LastSyncTime)
	if err != nil {
		// If parsing fails, send everything
		clientLastSync = time.Time{}
	}
	// Prepare response with items modified since client's last sync.
	// Slices start non-nil so the JSON response encodes [] rather than null.
	response := DeltaSyncResponse{
		ServerTime:   time.Now().UTC().Format(time.RFC3339),
		Gyms:         []BackupGym{},
		Problems:     []BackupProblem{},
		Sessions:     []BackupClimbSession{},
		Attempts:     []BackupAttempt{},
		DeletedItems: []DeletedItem{},
	}
	// Filter gyms modified after client's last sync. Records with timestamps
	// that fail to parse are silently omitted from the response.
	// NOTE(review): records the client itself just uploaded can be echoed
	// back here when their UpdatedAt is after lastSyncTime — presumably
	// harmless since the client merges idempotently, but worth confirming.
	for _, gym := range serverBackup.Gyms {
		gymTime, err := time.Parse(time.RFC3339, gym.UpdatedAt)
		if err == nil && gymTime.After(clientLastSync) {
			response.Gyms = append(response.Gyms, gym)
		}
	}
	// Filter problems modified after client's last sync
	for _, problem := range serverBackup.Problems {
		problemTime, err := time.Parse(time.RFC3339, problem.UpdatedAt)
		if err == nil && problemTime.After(clientLastSync) {
			response.Problems = append(response.Problems, problem)
		}
	}
	// Filter sessions modified after client's last sync
	for _, session := range serverBackup.Sessions {
		sessionTime, err := time.Parse(time.RFC3339, session.UpdatedAt)
		if err == nil && sessionTime.After(clientLastSync) {
			response.Sessions = append(response.Sessions, session)
		}
	}
	// Filter attempts created after client's last sync (attempts are keyed
	// on CreatedAt throughout the sync code, not UpdatedAt)
	for _, attempt := range serverBackup.Attempts {
		attemptTime, err := time.Parse(time.RFC3339, attempt.CreatedAt)
		if err == nil && attemptTime.After(clientLastSync) {
			response.Attempts = append(response.Attempts, attempt)
		}
	}
	// Filter deletions after client's last sync
	for _, deletedItem := range serverBackup.DeletedItems {
		deletedTime, err := time.Parse(time.RFC3339, deletedItem.DeletedAt)
		if err == nil && deletedTime.After(clientLastSync) {
			response.DeletedItems = append(response.DeletedItems, deletedItem)
		}
	}
	log.Printf("Delta sync response to %s: gyms=%d, problems=%d, sessions=%d, attempts=%d, deletedItems=%d",
		r.RemoteAddr,
		len(response.Gyms), len(response.Problems),
		len(response.Sessions), len(response.Attempts),
		len(response.DeletedItems))
	w.Header().Set("Content-Type", "application/json")
	// Encode error is ignored; at this point headers are already sent, so
	// there is nothing useful to return to the client anyway.
	json.NewEncoder(w).Encode(response)
}
func (s *SyncServer) handleSync(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
@@ -354,6 +656,7 @@ func main() {
}
http.HandleFunc("/sync", server.handleSync)
http.HandleFunc("/sync/delta", server.handleDeltaSync)
http.HandleFunc("/health", server.handleHealth)
http.HandleFunc("/images/upload", server.handleImageUpload)
http.HandleFunc("/images/download", server.handleImageDownload)
@@ -362,6 +665,8 @@ func main() {
fmt.Printf("Data file: %s\n", dataFile)
fmt.Printf("Images directory: %s\n", imagesDir)
fmt.Printf("Health check available at /health\n")
fmt.Printf("Delta sync: POST /sync/delta (incremental sync)\n")
fmt.Printf("Full sync: GET /sync (download all), PUT /sync (upload all)\n")
fmt.Printf("Image upload: POST /images/upload?filename=<name>\n")
fmt.Printf("Image download: GET /images/download?filename=<name>\n")