Compare commits


7 Commits

Author SHA1 Message Date
shenlong-tanwen
02456a148e feat: android periodic work manager task 2025-11-03 23:10:35 +05:30
Daniel Dietzler
517c3e1d4c fix: exif gps parsing of malformed data (#23551)
* fix: exif gps parsing of malformed data

* chore: e2e test
2025-11-03 09:02:41 -05:00
Ben
619de2a5e4 fix(web): search bar accessibility (#23550)
* fix: always show search type when search bar is focused

* fix: indicate search type to screen reader users
2025-11-03 08:31:57 -05:00
Mert
79d0e3e1ed fix(ml): ocr inputs not resized correctly (#23541)
* fix resizing, use pillow

* unused import

* linting

* lanczos

* optimizations

fused operations

unused import
2025-11-03 07:21:30 +00:00
github-actions
f5ff36a1f8 chore: version v2.2.2 2025-11-02 21:56:36 +00:00
Alex
b5efc9c16e fix: passing secrets to trigger workflow (#23447)
* fix: passing secrets to trigger workflow

* pass secrets to workflow call
2025-11-02 15:54:35 -06:00
Alex
1036076b0d fix: disable pruning for more investigation (#23531) 2025-11-02 15:54:03 -06:00
28 changed files with 248 additions and 356 deletions

View File

@@ -20,6 +20,30 @@ on:
         required: true
       ANDROID_STORE_PASSWORD:
         required: true
+      APP_STORE_CONNECT_API_KEY_ID:
+        required: true
+      APP_STORE_CONNECT_API_KEY_ISSUER_ID:
+        required: true
+      APP_STORE_CONNECT_API_KEY:
+        required: true
+      IOS_CERTIFICATE_P12:
+        required: true
+      IOS_CERTIFICATE_PASSWORD:
+        required: true
+      IOS_PROVISIONING_PROFILE:
+        required: true
+      IOS_PROVISIONING_PROFILE_SHARE_EXTENSION:
+        required: true
+      IOS_PROVISIONING_PROFILE_WIDGET_EXTENSION:
+        required: true
+      IOS_DEVELOPMENT_PROVISIONING_PROFILE:
+        required: true
+      IOS_DEVELOPMENT_PROVISIONING_PROFILE_SHARE_EXTENSION:
+        required: true
+      IOS_DEVELOPMENT_PROVISIONING_PROFILE_WIDGET_EXTENSION:
+        required: true
+      FASTLANE_TEAM_ID:
+        required: true
   pull_request:
   push:
     branches: [main]

View File

@@ -99,6 +99,20 @@ jobs:
       ALIAS: ${{ secrets.ALIAS }}
       ANDROID_KEY_PASSWORD: ${{ secrets.ANDROID_KEY_PASSWORD }}
       ANDROID_STORE_PASSWORD: ${{ secrets.ANDROID_STORE_PASSWORD }}
+      # iOS secrets
+      APP_STORE_CONNECT_API_KEY_ID: ${{ secrets.APP_STORE_CONNECT_API_KEY_ID }}
+      APP_STORE_CONNECT_API_KEY_ISSUER_ID: ${{ secrets.APP_STORE_CONNECT_API_KEY_ISSUER_ID }}
+      APP_STORE_CONNECT_API_KEY: ${{ secrets.APP_STORE_CONNECT_API_KEY }}
+      IOS_CERTIFICATE_P12: ${{ secrets.IOS_CERTIFICATE_P12 }}
+      IOS_CERTIFICATE_PASSWORD: ${{ secrets.IOS_CERTIFICATE_PASSWORD }}
+      IOS_PROVISIONING_PROFILE: ${{ secrets.IOS_PROVISIONING_PROFILE }}
+      IOS_PROVISIONING_PROFILE_SHARE_EXTENSION: ${{ secrets.IOS_PROVISIONING_PROFILE_SHARE_EXTENSION }}
+      IOS_PROVISIONING_PROFILE_WIDGET_EXTENSION: ${{ secrets.IOS_PROVISIONING_PROFILE_WIDGET_EXTENSION }}
+      IOS_DEVELOPMENT_PROVISIONING_PROFILE: ${{ secrets.IOS_DEVELOPMENT_PROVISIONING_PROFILE }}
+      IOS_DEVELOPMENT_PROVISIONING_PROFILE_SHARE_EXTENSION: ${{ secrets.IOS_DEVELOPMENT_PROVISIONING_PROFILE_SHARE_EXTENSION }}
+      IOS_DEVELOPMENT_PROVISIONING_PROFILE_WIDGET_EXTENSION: ${{ secrets.IOS_DEVELOPMENT_PROVISIONING_PROFILE_WIDGET_EXTENSION }}
+      FASTLANE_TEAM_ID: ${{ secrets.FASTLANE_TEAM_ID }}
     with:
       ref: ${{ needs.bump_version.outputs.ref }}
       environment: production

View File

@@ -1,6 +1,6 @@
 {
   "name": "@immich/cli",
-  "version": "2.2.99",
+  "version": "2.2.100",
   "description": "Command Line Interface (CLI) for Immich",
   "type": "module",
   "exports": "./dist/index.js",

View File

@@ -1,4 +1,8 @@
 [
+  {
+    "label": "v2.2.2",
+    "url": "https://docs.v2.2.2.archive.immich.app"
+  },
   {
     "label": "v2.2.1",
     "url": "https://docs.v2.2.1.archive.immich.app"

View File

@@ -1,6 +1,6 @@
 {
   "name": "immich-e2e",
-  "version": "2.2.1",
+  "version": "2.2.2",
   "description": "",
   "main": "index.js",
   "type": "module",

View File

@@ -1140,6 +1140,16 @@ describe('/asset', () => {
         },
       },
     },
+    {
+      input: 'metadata/gps-position/empty_gps.jpg',
+      expected: {
+        type: AssetTypeEnum.Image,
+        exifInfo: {
+          latitude: null,
+          longitude: null,
+        },
+      },
+    },
   ];

   it.each(tests)(`should upload and generate a thumbnail for different file types`, async ({ input, expected }) => {

View File

@@ -1,8 +1,10 @@
 from typing import Any

+import cv2
 import numpy as np
+from numpy.typing import NDArray
 from PIL import Image
-from rapidocr.ch_ppocr_det import TextDetector as RapidTextDetector
+from rapidocr.ch_ppocr_det.utils import DBPostProcess
 from rapidocr.inference_engine.base import FileInfo, InferSession
 from rapidocr.utils import DownloadFile, DownloadFileInput
 from rapidocr.utils.typings import EngineType, LangDet, OCRVersion, TaskType
@@ -10,11 +12,10 @@ from rapidocr.utils.typings import ModelType as RapidModelType
 from immich_ml.config import log
 from immich_ml.models.base import InferenceModel
-from immich_ml.models.transforms import decode_cv2
 from immich_ml.schemas import ModelFormat, ModelSession, ModelTask, ModelType
 from immich_ml.sessions.ort import OrtSession

-from .schemas import OcrOptions, TextDetectionOutput
+from .schemas import TextDetectionOutput


 class TextDetector(InferenceModel):
@@ -24,13 +25,20 @@ class TextDetector(InferenceModel):
     def __init__(self, model_name: str, **model_kwargs: Any) -> None:
         super().__init__(model_name, **model_kwargs, model_format=ModelFormat.ONNX)
         self.max_resolution = 736
-        self.min_score = 0.5
-        self.score_mode = "fast"
+        self.mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)
+        self.std_inv = np.float32(1.0) / (np.array([0.5, 0.5, 0.5], dtype=np.float32) * 255.0)
         self._empty: TextDetectionOutput = {
-            "image": np.empty(0, dtype=np.float32),
             "boxes": np.empty(0, dtype=np.float32),
             "scores": np.empty(0, dtype=np.float32),
         }
+        self.postprocess = DBPostProcess(
+            thresh=0.3,
+            box_thresh=model_kwargs.get("minScore", 0.5),
+            max_candidates=1000,
+            unclip_ratio=1.6,
+            use_dilation=True,
+            score_mode="fast",
+        )

     def _download(self) -> None:
         model_info = InferSession.get_model_url(
@@ -52,35 +60,65 @@ class TextDetector(InferenceModel):
     def _load(self) -> ModelSession:
         # TODO: support other runtime sessions
-        session = OrtSession(self.model_path)
-        self.model = RapidTextDetector(
-            OcrOptions(
-                session=session.session,
-                limit_side_len=self.max_resolution,
-                limit_type="min",
-                box_thresh=self.min_score,
-                score_mode=self.score_mode,
-            )
-        )
-        return session
+        return OrtSession(self.model_path)

-    def _predict(self, inputs: bytes | Image.Image) -> TextDetectionOutput:
-        results = self.model(decode_cv2(inputs))
-        if results.boxes is None or results.scores is None or results.img is None:
+    # partly adapted from RapidOCR
+    def _predict(self, inputs: Image.Image) -> TextDetectionOutput:
+        w, h = inputs.size
+        if w < 32 or h < 32:
             return self._empty
+        out = self.session.run(None, {"x": self._transform(inputs)})[0]
+        boxes, scores = self.postprocess(out, (h, w))
+        if len(boxes) == 0:
+            return self._empty
         return {
-            "image": results.img,
-            "boxes": np.array(results.boxes, dtype=np.float32),
-            "scores": np.array(results.scores, dtype=np.float32),
+            "boxes": self.sorted_boxes(boxes),
+            "scores": np.array(scores, dtype=np.float32),
         }

+    # adapted from RapidOCR
+    def _transform(self, img: Image.Image) -> NDArray[np.float32]:
+        if img.height < img.width:
+            ratio = float(self.max_resolution) / img.height
+        else:
+            ratio = float(self.max_resolution) / img.width
+
+        resize_h = int(img.height * ratio)
+        resize_w = int(img.width * ratio)
+        resize_h = int(round(resize_h / 32) * 32)
+        resize_w = int(round(resize_w / 32) * 32)
+        resized_img = img.resize((int(resize_w), int(resize_h)), resample=Image.Resampling.LANCZOS)
+
+        img_np: NDArray[np.float32] = cv2.cvtColor(np.array(resized_img, dtype=np.float32), cv2.COLOR_RGB2BGR)  # type: ignore
+        img_np -= self.mean
+        img_np *= self.std_inv
+        img_np = np.transpose(img_np, (2, 0, 1))
+        return np.expand_dims(img_np, axis=0)
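The `_transform` above is the core of "fix(ml): ocr inputs not resized correctly": the short side of the image is scaled toward `max_resolution`, both sides are then rounded to multiples of 32 (note the matching `w < 32 or h < 32` guard in `_predict`), and Pillow's LANCZOS resampling is used per the commit's "use pillow" / "lanczos" bullets. A minimal standalone sketch of that sizing rule — the photo dimensions here are invented:

from PIL import Image

def det_input_size(width: int, height: int, max_resolution: int = 736) -> tuple[int, int]:
    # Scale so the short side lands on max_resolution, then snap both
    # sides to the nearest multiple of 32, as the hunk above does.
    ratio = max_resolution / min(width, height)
    return int(round(width * ratio / 32) * 32), int(round(height * ratio / 32) * 32)

img = Image.new("RGB", (4032, 3024))  # hypothetical 12 MP photo
w, h = det_input_size(*img.size)
resized = img.resize((w, h), resample=Image.Resampling.LANCZOS)
print(resized.size)  # (992, 736): short side pinned to 736, both sides divisible by 32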
+    def sorted_boxes(self, dt_boxes: NDArray[np.float32]) -> NDArray[np.float32]:
+        if len(dt_boxes) == 0:
+            return dt_boxes
+
+        # Sort by y, then identify lines, then sort by (line, x)
+        y_order = np.argsort(dt_boxes[:, 0, 1], kind="stable")
+        sorted_y = dt_boxes[y_order, 0, 1]
+
+        line_ids = np.empty(len(dt_boxes), dtype=np.int32)
+        line_ids[0] = 0
+        np.cumsum(np.abs(np.diff(sorted_y)) >= 10, out=line_ids[1:])
+
+        # Create composite sort key for final ordering
+        # Shift line_ids by large factor, add x for tie-breaking
+        sort_key = line_ids[y_order] * 1e6 + dt_boxes[y_order, 0, 0]
+        final_order = np.argsort(sort_key, kind="stable")
+        sorted_boxes: NDArray[np.float32] = dt_boxes[y_order[final_order]]
+        return sorted_boxes
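`sorted_boxes` vectorizes the reading-order sort (the "optimizations" / "fused operations" bullets): boxes whose top-left y values are within 10 px are treated as one text line, and the composite key orders by line first, x second. A condensed sketch with invented coordinates, with the index bookkeeping simplified to the aligned form:

import numpy as np

boxes = np.array([
    [[200, 12], [260, 12], [260, 40], [200, 40]],  # line 1, right word
    [[10, 8], [80, 8], [80, 36], [10, 36]],        # line 1, left word
    [[15, 60], [90, 60], [90, 88], [15, 88]],      # line 2
], dtype=np.float32)

y_order = np.argsort(boxes[:, 0, 1], kind="stable")  # order by top-left y
line_ids = np.empty(len(boxes), dtype=np.int32)
line_ids[0] = 0
np.cumsum(np.abs(np.diff(boxes[y_order, 0, 1])) >= 10, out=line_ids[1:])  # new line on a >=10 px jump
key = line_ids * 1e6 + boxes[y_order, 0, 0]  # line index dominates, x breaks ties
print(boxes[y_order[np.argsort(key, kind="stable")]][:, 0])
# top-left corners come out left-to-right, top-to-bottom: (10, 8), (200, 12), (15, 60)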
     def configure(self, **kwargs: Any) -> None:
         if (max_resolution := kwargs.get("maxResolution")) is not None:
             self.max_resolution = max_resolution
-            self.model.limit_side_len = max_resolution
         if (min_score := kwargs.get("minScore")) is not None:
             self.min_score = min_score
-            self.model.postprocess_op.box_thresh = min_score
+            self.postprocess.box_thresh = min_score
         if (score_mode := kwargs.get("scoreMode")) is not None:
             self.score_mode = score_mode
-            self.model.postprocess_op.score_mode = score_mode
+            self.postprocess.score_mode = score_mode

View File

@@ -1,9 +1,8 @@
 from typing import Any

-import cv2
 import numpy as np
 from numpy.typing import NDArray
-from PIL.Image import Image
+from PIL import Image
 from rapidocr.ch_ppocr_rec import TextRecInput
 from rapidocr.ch_ppocr_rec import TextRecognizer as RapidTextRecognizer
 from rapidocr.inference_engine.base import FileInfo, InferSession
@@ -14,6 +13,7 @@ from rapidocr.utils.vis_res import VisRes
 from immich_ml.config import log, settings
 from immich_ml.models.base import InferenceModel
+from immich_ml.models.transforms import pil_to_cv2
 from immich_ml.schemas import ModelFormat, ModelSession, ModelTask, ModelType
 from immich_ml.sessions.ort import OrtSession
@@ -65,17 +65,16 @@
         )
         return session

-    def _predict(self, _: Image, texts: TextDetectionOutput) -> TextRecognitionOutput:
-        boxes, img, box_scores = texts["boxes"], texts["image"], texts["scores"]
+    def _predict(self, img: Image.Image, texts: TextDetectionOutput) -> TextRecognitionOutput:
+        boxes, box_scores = texts["boxes"], texts["scores"]
         if boxes.shape[0] == 0:
             return self._empty
         rec = self.model(TextRecInput(img=self.get_crop_img_list(img, boxes)))
         if rec.txts is None:
             return self._empty
-        height, width = img.shape[0:2]
-        boxes[:, :, 0] /= width
-        boxes[:, :, 1] /= height
+        boxes[:, :, 0] /= img.width
+        boxes[:, :, 1] /= img.height

         text_scores = np.array(rec.scores)
         valid_text_score_idx = text_scores > self.min_score
@@ -87,7 +86,7 @@
             "textScore": text_scores[valid_text_score_idx],
         }

-    def get_crop_img_list(self, img: NDArray[np.float32], boxes: NDArray[np.float32]) -> list[NDArray[np.float32]]:
+    def get_crop_img_list(self, img: Image.Image, boxes: NDArray[np.float32]) -> list[NDArray[np.uint8]]:
         img_crop_width = np.maximum(
             np.linalg.norm(boxes[:, 1] - boxes[:, 0], axis=1), np.linalg.norm(boxes[:, 2] - boxes[:, 3], axis=1)
         ).astype(np.int32)
@@ -98,22 +97,55 @@
         pts_std[:, 1:3, 0] = img_crop_width[:, None]
         pts_std[:, 2:4, 1] = img_crop_height[:, None]

-        img_crop_sizes = np.stack([img_crop_width, img_crop_height], axis=1).tolist()
-        imgs: list[NDArray[np.float32]] = []
-        for box, pts_std, dst_size in zip(list(boxes), list(pts_std), img_crop_sizes):
-            M = cv2.getPerspectiveTransform(box, pts_std)
-            dst_img: NDArray[np.float32] = cv2.warpPerspective(
-                img,
-                M,
-                dst_size,
-                borderMode=cv2.BORDER_REPLICATE,
-                flags=cv2.INTER_CUBIC,
-            )  # type: ignore
-            dst_height, dst_width = dst_img.shape[0:2]
+        img_crop_sizes = np.stack([img_crop_width, img_crop_height], axis=1)
+        all_coeffs = self._get_perspective_transform(pts_std, boxes)
+        imgs: list[NDArray[np.uint8]] = []
+        for coeffs, dst_size in zip(all_coeffs, img_crop_sizes):
+            dst_img = img.transform(
+                size=tuple(dst_size),
+                method=Image.Transform.PERSPECTIVE,
+                data=tuple(coeffs),
+                resample=Image.Resampling.BICUBIC,
+            )
+            dst_width, dst_height = dst_img.size
             if dst_height * 1.0 / dst_width >= 1.5:
-                dst_img = np.rot90(dst_img)
-            imgs.append(dst_img)
+                dst_img = dst_img.rotate(90, expand=True)
+            imgs.append(pil_to_cv2(dst_img))
         return imgs

+    def _get_perspective_transform(self, src: NDArray[np.float32], dst: NDArray[np.float32]) -> NDArray[np.float32]:
+        N = src.shape[0]
+        x, y = src[:, :, 0], src[:, :, 1]
+        u, v = dst[:, :, 0], dst[:, :, 1]
+
+        A = np.zeros((N, 8, 9), dtype=np.float32)
+
+        # Fill even rows (0, 2, 4, 6): [x, y, 1, 0, 0, 0, -u*x, -u*y, -u]
+        A[:, ::2, 0] = x
+        A[:, ::2, 1] = y
+        A[:, ::2, 2] = 1
+        A[:, ::2, 6] = -u * x
+        A[:, ::2, 7] = -u * y
+        A[:, ::2, 8] = -u
+
+        # Fill odd rows (1, 3, 5, 7): [0, 0, 0, x, y, 1, -v*x, -v*y, -v]
+        A[:, 1::2, 3] = x
+        A[:, 1::2, 4] = y
+        A[:, 1::2, 5] = 1
+        A[:, 1::2, 6] = -v * x
+        A[:, 1::2, 7] = -v * y
+        A[:, 1::2, 8] = -v
+
+        # Solve using SVD for all matrices at once
+        _, _, Vt = np.linalg.svd(A)
+        H = Vt[:, -1, :].reshape(N, 3, 3)
+        H = H / H[:, 2:3, 2:3]
+
+        # Extract the 8 coefficients for each transformation
+        return np.column_stack(
+            [H[:, 0, 0], H[:, 0, 1], H[:, 0, 2], H[:, 1, 0], H[:, 1, 1], H[:, 1, 2], H[:, 2, 0], H[:, 2, 1]]
+        )  # pyright: ignore[reportReturnType]

     def configure(self, **kwargs: Any) -> None:
         self.min_score = kwargs.get("minScore", self.min_score)
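`_get_perspective_transform` replaces per-box `cv2.getPerspectiveTransform` calls with one batched direct-linear-transform solve: for each quad, eight equations in the nine homography entries are stacked into A, the last right-singular vector from the SVD is the null-space solution, and normalizing by H[2,2] leaves the eight coefficients in the order PIL's `Image.Transform.PERSPECTIVE` expects (they map output-crop coordinates back into the photo — hence the `(pts_std, boxes)` argument order above). A self-contained check on one invented quad; applying the recovered H to each src corner should reproduce the dst corner:

import numpy as np

def perspective_coeffs(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    # src, dst: (N, 4, 2) corner arrays; returns (N, 8) PIL-style coefficients.
    x, y = src[:, :, 0], src[:, :, 1]
    u, v = dst[:, :, 0], dst[:, :, 1]
    A = np.zeros((src.shape[0], 8, 9))
    A[:, ::2, 0], A[:, ::2, 1], A[:, ::2, 2] = x, y, 1
    A[:, ::2, 6], A[:, ::2, 7], A[:, ::2, 8] = -u * x, -u * y, -u
    A[:, 1::2, 3], A[:, 1::2, 4], A[:, 1::2, 5] = x, y, 1
    A[:, 1::2, 6], A[:, 1::2, 7], A[:, 1::2, 8] = -v * x, -v * y, -v
    _, _, Vt = np.linalg.svd(A)          # batched; null vector = last right-singular vector
    H = Vt[:, -1, :].reshape(-1, 3, 3)
    H /= H[:, 2:3, 2:3]                  # fix the scale so H[2, 2] == 1
    return H.reshape(-1, 9)[:, :8]

src = np.array([[[0.0, 0], [100, 0], [100, 32], [0, 32]]])   # upright crop rectangle
dst = np.array([[[5.0, 7], [103, 12], [101, 41], [3, 38]]])  # skewed box in the photo
a, b, c, d, e, f, g, h = perspective_coeffs(src, dst)[0]
for (sx, sy), corner in zip(src[0], dst[0]):
    w = g * sx + h * sy + 1
    print(np.round([(a * sx + b * sy + c) / w, (d * sx + e * sy + f) / w], 2), corner)  # pairs match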

View File

@@ -7,7 +7,6 @@ from typing_extensions import TypedDict

 class TextDetectionOutput(TypedDict):
-    image: npt.NDArray[np.float32]
     boxes: npt.NDArray[np.float32]
     scores: npt.NDArray[np.float32]

View File

@@ -1,6 +1,6 @@
 [project]
 name = "immich-ml"
-version = "2.2.1"
+version = "2.2.2"
 description = ""
 authors = [{ name = "Hau Tran", email = "alex.tran1502@gmail.com" }]
 requires-python = ">=3.10,<4.0"

View File

@@ -43,8 +43,8 @@ class BackgroundEngineLock(context: Context) : BackgroundWorkerLockApi, ImmichPl
     override fun onAttachedToEngine(binding: FlutterPlugin.FlutterPluginBinding) {
         super.onAttachedToEngine(binding)
-        checkAndEnforceBackgroundLock(binding.applicationContext)
         engineCount.incrementAndGet()
+        checkAndEnforceBackgroundLock(binding.applicationContext)

        Log.i(TAG, "Flutter engine attached. Attached Engines count: $engineCount")
    }

View File

@@ -5,8 +5,10 @@ import android.provider.MediaStore
 import android.util.Log
 import androidx.work.BackoffPolicy
 import androidx.work.Constraints
+import androidx.work.ExistingPeriodicWorkPolicy
 import androidx.work.ExistingWorkPolicy
-import androidx.work.OneTimeWorkRequest
+import androidx.work.OneTimeWorkRequestBuilder
+import androidx.work.PeriodicWorkRequestBuilder
 import androidx.work.WorkManager
 import io.flutter.embedding.engine.FlutterEngineCache
 import java.util.concurrent.TimeUnit
@@ -18,6 +20,7 @@ class BackgroundWorkerApiImpl(context: Context) : BackgroundWorkerFgHostApi {
     override fun enable() {
         enqueueMediaObserver(ctx)
+        enqueuePeriodicWorker(ctx)
     }

     override fun saveNotificationMessage(title: String, body: String) {
@@ -27,12 +30,14 @@ class BackgroundWorkerApiImpl(context: Context) : BackgroundWorkerFgHostApi {
     override fun configure(settings: BackgroundWorkerSettings) {
         BackgroundWorkerPreferences(ctx).updateSettings(settings)
         enqueueMediaObserver(ctx)
+        enqueuePeriodicWorker(ctx)
     }

     override fun disable() {
         WorkManager.getInstance(ctx).apply {
             cancelUniqueWork(OBSERVER_WORKER_NAME)
             cancelUniqueWork(BACKGROUND_WORKER_NAME)
+            cancelUniqueWork(PERIODIC_WORKER_NAME)
         }
         Log.i(TAG, "Cancelled background upload tasks")
     }
@@ -40,6 +45,7 @@
     companion object {
         private const val BACKGROUND_WORKER_NAME = "immich/BackgroundWorkerV1"
         private const val OBSERVER_WORKER_NAME = "immich/MediaObserverV1"
+        private const val PERIODIC_WORKER_NAME = "immich/PeriodicBackgroundWorkerV1"

         const val ENGINE_CACHE_KEY = "immich::background_worker::engine"
@@ -55,7 +61,7 @@
                 setRequiresCharging(settings.requiresCharging)
             }.build()

-            val work = OneTimeWorkRequest.Builder(MediaObserver::class.java)
+            val work = OneTimeWorkRequestBuilder<MediaObserver>()
                 .setConstraints(constraints)
                 .build()

             WorkManager.getInstance(ctx)
@@ -67,10 +73,30 @@
             )
         }

+        fun enqueuePeriodicWorker(ctx: Context) {
+            val settings = BackgroundWorkerPreferences(ctx).getSettings()
+            val constraints = Constraints.Builder().apply {
+                setRequiresCharging(settings.requiresCharging)
+            }.build()
+
+            val work =
+                PeriodicWorkRequestBuilder<PeriodicWorker>(
+                    1,
+                    TimeUnit.HOURS,
+                    15,
+                    TimeUnit.MINUTES
+                ).setConstraints(constraints)
+                    .build()
+
+            WorkManager.getInstance(ctx)
+                .enqueueUniquePeriodicWork(PERIODIC_WORKER_NAME, ExistingPeriodicWorkPolicy.UPDATE, work)
+            Log.i(TAG, "Enqueued periodic background worker with name: $PERIODIC_WORKER_NAME")
+        }
+
         fun enqueueBackgroundWorker(ctx: Context) {
             val constraints = Constraints.Builder().setRequiresBatteryNotLow(true).build()

-            val work = OneTimeWorkRequest.Builder(BackgroundWorker::class.java)
+            val work = OneTimeWorkRequestBuilder<BackgroundWorker>()
                 .setConstraints(constraints)
                 .setBackoffCriteria(BackoffPolicy.EXPONENTIAL, 1, TimeUnit.MINUTES)
                 .build()

View File

@@ -0,0 +1,16 @@
+package app.alextran.immich.background
+
+import android.content.Context
+import android.util.Log
+import androidx.work.Worker
+import androidx.work.WorkerParameters
+
+class PeriodicWorker(context: Context, params: WorkerParameters) : Worker(context, params) {
+    private val ctx: Context = context.applicationContext
+
+    override fun doWork(): Result {
+        Log.i("PeriodicWorker", "Periodic worker triggered, starting background worker")
+        BackgroundWorkerApiImpl.enqueueBackgroundWorker(ctx)
+        return Result.success()
+    }
+}

View File

@@ -35,8 +35,8 @@ platform :android do
       task: 'bundle',
       build_type: 'Release',
       properties: {
-        "android.injected.version.code" => 3024,
-        "android.injected.version.name" => "2.2.1",
+        "android.injected.version.code" => 3025,
+        "android.injected.version.name" => "2.2.2",
       }
     )
     upload_to_play_store(skip_upload_apk: true, skip_upload_images: true, skip_upload_screenshots: true, aab: '../build/app/outputs/bundle/release/app-release.aab')

View File

@@ -169,7 +169,7 @@ platform :ios do
       targets: ["Runner", "ShareExtension", "WidgetExtension"]
     )
     increment_version_number(
-      version_number: "2.2.1"
+      version_number: "2.2.2"
     )
     increment_build_number(
       build_number: latest_testflight_build_number + 1,

View File

@@ -132,7 +132,8 @@ class SyncStreamService {
         return;
       // SyncCompleteV1 is used to signal the completion of the sync process. Cleanup stale assets and signal completion
       case SyncEntityType.syncCompleteV1:
-        return _syncStreamRepository.pruneAssets();
+        return;
+      // return _syncStreamRepository.pruneAssets();
       // Request to reset the client state. Clear everything related to remote entities
       case SyncEntityType.syncResetV1:
         return _syncStreamRepository.reset();

View File

@@ -612,15 +612,12 @@ class SyncStreamRepository extends DriftDatabaseRepository {
       final validUsers = {currentUserId, ...partnerIds.nonNulls};

-      // Asset is not owned by the current user or any of their partners and is not part of any (shared) album or memory
+      // Asset is not owned by the current user or any of their partners and is not part of any (shared) album
       // Likely a stale asset that was previously shared but has been removed
       await _db.remoteAssetEntity.deleteWhere((asset) {
         return asset.ownerId.isNotIn(validUsers) &
             asset.id.isNotInQuery(
               _db.remoteAlbumAssetEntity.selectOnly()..addColumns([_db.remoteAlbumAssetEntity.assetId]),
-            ) &
-            asset.id.isNotInQuery(
-              _db.memoryAssetEntity.selectOnly()..addColumns([_db.memoryAssetEntity.assetId]),
             );
       });
     });
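Taken together with the service hunk above, this compare backs the memory-membership subquery out of the prune query and short-circuits `syncCompleteV1` ("disable pruning for more investigation"); the regression test below is deleted along with it. The failure mode that test documented is easy to reproduce in miniature: prune an asset that a memory still references, and the next synced memory-asset insert trips the foreign key. A toy sqlite3 sketch — the two-table schema is illustrative, not Immich's actual one:

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("PRAGMA foreign_keys = ON")
db.execute("CREATE TABLE asset (id TEXT PRIMARY KEY, owner_id TEXT)")
db.execute("CREATE TABLE memory_asset (memory_id TEXT, asset_id TEXT REFERENCES asset (id) ON DELETE CASCADE)")
db.execute("INSERT INTO asset VALUES ('a1', 'other-user')")
db.execute("INSERT INTO memory_asset VALUES ('m1', 'a1')")

# Prune on ownership alone, without a memory-membership subquery: 'a1' goes
# away and the cascade silently drops its memory_asset row too.
db.execute("DELETE FROM asset WHERE owner_id NOT IN ('user-1', 'partner-1')")

# A later sync event re-inserting the memory/asset link now violates the FK.
try:
    db.execute("INSERT INTO memory_asset VALUES ('m1', 'a1')")
except sqlite3.IntegrityError as e:
    print("sync insert fails:", e)  # FOREIGN KEY constraint failed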

View File

@@ -3,7 +3,7 @@ Immich API
 This Dart package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:

-- API version: 2.2.1
+- API version: 2.2.2
 - Generator version: 7.8.0
 - Build package: org.openapitools.codegen.languages.DartClientCodegen

View File

@@ -2,7 +2,7 @@ name: immich_mobile
 description: Immich - selfhosted backup media file on mobile phone
 publish_to: 'none'

-version: 2.2.1+3024
+version: 2.2.2+3025

 environment:
   sdk: '>=3.8.0 <4.0.0'

View File

@@ -1,266 +0,0 @@
-import 'package:drift/native.dart';
-import 'package:flutter_test/flutter_test.dart';
-import 'package:immich_mobile/infrastructure/repositories/db.repository.dart';
-import 'package:immich_mobile/infrastructure/repositories/sync_stream.repository.dart';
-import 'package:openapi/api.dart';
-
-/// This test reproduces the bug where pruneAssets() deletes assets that are part of memories,
-/// causing foreign key constraint failures when trying to insert memory-asset relationships.
-void main() {
-  late DbRepository db;
-  late SyncStreamRepository sut;
-
-  setUp(() async {
-    db = DbRepository(NativeDatabase.memory());
-    sut = SyncStreamRepository(db);
-
-    // Set up test data: Create a user and a partner
-    await sut.updateAuthUsersV1([
-      SyncAuthUserV1(
-        email: 'current-user@test.com',
-        id: 'user-1',
-        isAdmin: false,
-        name: 'Current User',
-        avatarColor: null,
-        hasProfileImage: false,
-        profileChangedAt: DateTime(2025),
-      ),
-    ]);
-
-    await sut.updateUsersV1([
-      SyncUserV1(
-        deletedAt: null,
-        email: 'partner@test.com',
-        id: 'partner-1',
-        name: 'Partner User',
-        avatarColor: null,
-        hasProfileImage: false,
-        profileChangedAt: DateTime(2025),
-      ),
-    ]);
-
-    await sut.updatePartnerV1([
-      SyncPartnerV1(
-        inTimeline: true,
-        sharedById: 'partner-1',
-        sharedWithId: 'user-1',
-      ),
-    ]);
-  });
-
-  tearDown(() async {
-    await db.close();
-  });
-
-  group('pruneAssets - Memory Asset Bug', () {
-    test('BEFORE FIX: pruneAssets() should NOT delete assets that are part of memories', () async {
-      // Step 1: Create an asset owned by someone else (not current user or partner)
-      await sut.updateAssetsV1([
-        SyncAssetV1(
-          checksum: 'checksum-1'.codeUnits,
-          deletedAt: null,
-          deviceAssetId: 'device-1',
-          deviceId: 'device-1',
-          duplicateId: null,
-          duration: null,
-          fileCreatedAt: DateTime(2025, 1, 1),
-          fileModifiedAt: DateTime(2025, 1, 1),
-          id: 'asset-shared-memory',
-          isArchived: false,
-          isFavorite: false,
-          isOffline: false,
-          isTrashed: false,
-          libraryId: null,
-          livePhotoVideoId: null,
-          localDateTime: DateTime(2025, 1, 1),
-          originalFileName: 'shared-memory.jpg',
-          // Asset owned by someone else - should be pruned if not in album/memory
-          ownerId: 'other-user-not-partner',
-          resized: true,
-          stackId: null,
-          thumbhash: null,
-          type: AssetTypeEnum.IMAGE,
-          updatedAt: DateTime(2025, 1, 1),
-          visibility: AssetVisibility.public_,
-        ),
-      ]);
-
-      // Step 2: Create a memory owned by current user
-      await sut.updateMemoriesV1([
-        SyncMemoryV1(
-          createdAt: DateTime(2025, 1, 1),
-          data: {'year': 2025, 'title': 'Test Memory'},
-          deletedAt: null,
-          hideAt: null,
-          id: 'memory-1',
-          isSaved: false,
-          memoryAt: DateTime(2025, 1, 1),
-          ownerId: 'user-1',
-          seenAt: null,
-          showAt: DateTime(2025, 1, 1),
-          type: MemoryType.onThisDay,
-          updatedAt: DateTime(2025, 1, 1),
-        ),
-      ]);
-
-      // Step 3: Link the shared asset to the memory
-      await sut.updateMemoryAssetsV1([
-        SyncMemoryAssetV1(
-          assetId: 'asset-shared-memory',
-          memoryId: 'memory-1',
-        ),
-      ]);
-
-      // Verify the asset and memory-asset relationship exist
-      final assetsBefore = await db.remoteAssetEntity.select().get();
-      final memoryAssetsBefore = await db.memoryAssetEntity.select().get();
-      expect(assetsBefore.length, 1);
-      expect(assetsBefore.first.id, 'asset-shared-memory');
-      expect(memoryAssetsBefore.length, 1);
-
-      // Step 4: Call pruneAssets() - This is where the bug happens
-      await sut.pruneAssets();
-
-      // Step 5: Verify the asset is NOT deleted (because it's in a memory)
-      final assetsAfter = await db.remoteAssetEntity.select().get();
-      expect(
-        assetsAfter.length,
-        1,
-        reason: 'Asset should NOT be pruned because it is part of a memory',
-      );
-      expect(assetsAfter.first.id, 'asset-shared-memory');
-
-      // Step 6: Verify we can still work with memory-asset relationships
-      // This simulates receiving more sync events after pruning
-      await expectLater(
-        sut.updateMemoryAssetsV1([
-          SyncMemoryAssetV1(
-            assetId: 'asset-shared-memory',
-            memoryId: 'memory-1',
-          ),
-        ]),
-        completes,
-        reason: 'Should not throw foreign key constraint error',
-      );
-    });
-
-    test('pruneAssets() SHOULD delete assets not in albums or memories', () async {
-      // Step 1: Create an asset that's truly orphaned (not in album or memory)
-      await sut.updateAssetsV1([
-        SyncAssetV1(
-          checksum: 'checksum-2'.codeUnits,
-          deletedAt: null,
-          deviceAssetId: 'device-2',
-          deviceId: 'device-2',
-          duplicateId: null,
-          duration: null,
-          fileCreatedAt: DateTime(2025, 1, 1),
-          fileModifiedAt: DateTime(2025, 1, 1),
-          id: 'asset-orphaned',
-          isArchived: false,
-          isFavorite: false,
-          isOffline: false,
-          isTrashed: false,
-          libraryId: null,
-          livePhotoVideoId: null,
-          localDateTime: DateTime(2025, 1, 1),
-          originalFileName: 'orphaned.jpg',
-          ownerId: 'other-user-not-partner',
-          resized: true,
-          stackId: null,
-          thumbhash: null,
-          type: AssetTypeEnum.IMAGE,
-          updatedAt: DateTime(2025, 1, 1),
-          visibility: AssetVisibility.public_,
-        ),
-      ]);
-
-      // Verify the asset exists
-      final assetsBefore = await db.remoteAssetEntity.select().get();
-      expect(assetsBefore.length, 1);
-
-      // Call pruneAssets()
-      await sut.pruneAssets();
-
-      // Verify the orphaned asset IS deleted
-      final assetsAfter = await db.remoteAssetEntity.select().get();
-      expect(
-        assetsAfter.length,
-        0,
-        reason: 'Orphaned asset should be pruned',
-      );
-    });
-
-    test('pruneAssets() should NOT delete assets in albums', () async {
-      // Step 1: Create an asset and an album
-      await sut.updateAssetsV1([
-        SyncAssetV1(
-          checksum: 'checksum-3'.codeUnits,
-          deletedAt: null,
-          deviceAssetId: 'device-3',
-          deviceId: 'device-3',
-          duplicateId: null,
-          duration: null,
-          fileCreatedAt: DateTime(2025, 1, 1),
-          fileModifiedAt: DateTime(2025, 1, 1),
-          id: 'asset-in-album',
-          isArchived: false,
-          isFavorite: false,
-          isOffline: false,
-          isTrashed: false,
-          libraryId: null,
-          livePhotoVideoId: null,
-          localDateTime: DateTime(2025, 1, 1),
-          originalFileName: 'in-album.jpg',
-          ownerId: 'other-user-not-partner',
-          resized: true,
-          stackId: null,
-          thumbhash: null,
-          type: AssetTypeEnum.IMAGE,
-          updatedAt: DateTime(2025, 1, 1),
-          visibility: AssetVisibility.public_,
-        ),
-      ]);
-
-      await sut.updateAlbumsV1([
-        SyncAlbumV1(
-          albumName: 'Test Album',
-          albumThumbnailAssetId: null,
-          createdAt: DateTime(2025, 1, 1),
-          deletedAt: null,
-          description: 'Test',
-          id: 'album-1',
-          isActivityEnabled: false,
-          lastModifiedAssetTimestamp: DateTime(2025, 1, 1),
-          order: AlbumUserRole.editor,
-          ownerId: 'user-1',
-          startDate: DateTime(2025, 1, 1),
-          endDate: DateTime(2025, 1, 2),
-          updatedAt: DateTime(2025, 1, 1),
-        ),
-      ]);
-
-      await sut.updateAlbumToAssetsV1([
-        SyncAlbumToAssetV1(
-          albumId: 'album-1',
-          assetId: 'asset-in-album',
-        ),
-      ]);
-
-      // Verify setup
-      final assetsBefore = await db.remoteAssetEntity.select().get();
-      expect(assetsBefore.length, 1);
-
-      // Call pruneAssets()
-      await sut.pruneAssets();
-
-      // Verify asset is NOT deleted (protected by album membership)
-      final assetsAfter = await db.remoteAssetEntity.select().get();
-      expect(
-        assetsAfter.length,
-        1,
-        reason: 'Asset should NOT be pruned because it is in an album',
-      );
-    });
-  });
-}

View File

@@ -10006,7 +10006,7 @@
   "info": {
     "title": "Immich",
     "description": "Immich API",
-    "version": "2.2.1",
+    "version": "2.2.2",
     "contact": {}
   },
   "tags": [],

View File

@@ -1,6 +1,6 @@
 {
   "name": "@immich/sdk",
-  "version": "2.2.1",
+  "version": "2.2.2",
   "description": "Auto-generated TypeScript SDK for the Immich API",
   "type": "module",
   "main": "./build/index.js",

View File

@@ -1,6 +1,6 @@
 /**
  * Immich
- * 2.2.1
+ * 2.2.2
  * DO NOT MODIFY - This file has been generated using oazapfts.
  * See https://www.npmjs.com/package/oazapfts
 */

View File

@@ -1,6 +1,6 @@
 {
   "name": "immich",
-  "version": "2.2.1",
+  "version": "2.2.2",
   "description": "",
   "author": "",
   "private": true,

View File

@@ -236,8 +236,8 @@ export class MetadataService extends BaseService {
       latitude: number | null = null,
       longitude: number | null = null;

     if (this.hasGeo(exifTags)) {
-      latitude = exifTags.GPSLatitude;
-      longitude = exifTags.GPSLongitude;
+      latitude = Number(exifTags.GPSLatitude);
+      longitude = Number(exifTags.GPSLongitude);
       if (reverseGeocoding.enabled) {
         geo = await this.mapRepository.reverseGeocode({ latitude, longitude });
       }
@@ -894,12 +894,10 @@ export class MetadataService extends BaseService {
     };
   }

-  private hasGeo(tags: ImmichTags): tags is ImmichTags & { GPSLatitude: number; GPSLongitude: number } {
-    return (
-      tags.GPSLatitude !== undefined &&
-      tags.GPSLongitude !== undefined &&
-      (tags.GPSLatitude !== 0 || tags.GPSLatitude !== 0)
-    );
+  private hasGeo(tags: ImmichTags) {
+    const lat = Number(tags.GPSLatitude);
+    const lng = Number(tags.GPSLongitude);
+    return !Number.isNaN(lat) && !Number.isNaN(lng) && (lat !== 0 || lng !== 0);
   }

   private getAutoStackId(tags: ImmichTags | null): string | null {
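Two things changed in the hunks above: GPS values are coerced with `Number(...)` before use, and `hasGeo` rejects `NaN` rather than only `undefined` — note that the old guard's doubled comparison `(tags.GPSLatitude !== 0 || tags.GPSLatitude !== 0)` tested latitude twice, so a 0-latitude point was dropped even with a valid longitude. The same predicate in Python terms, with invented tag values (mirroring the new e2e case, where malformed GPS yields null coordinates):

import math

def has_geo(tags: dict) -> bool:
    try:
        lat, lng = float(tags["GPSLatitude"]), float(tags["GPSLongitude"])
    except (KeyError, TypeError, ValueError):
        return False  # tag missing or malformed: exif parsers can return junk strings
    return not math.isnan(lat) and not math.isnan(lng) and (lat != 0 or lng != 0)

print(has_geo({"GPSLatitude": 48.1, "GPSLongitude": 11.6}))       # True
print(has_geo({"GPSLatitude": "garbage", "GPSLongitude": 11.6}))  # False: non-numeric
print(has_geo({"GPSLatitude": 0, "GPSLongitude": 0}))             # False: 0,0 treated as unset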

View File

@@ -1,6 +1,6 @@
 {
   "name": "immich-web",
-  "version": "2.2.1",
+  "version": "2.2.2",
   "license": "GNU Affero General Public License version 3",
   "type": "module",
   "scripts": {

View File

@@ -30,10 +30,10 @@
   let showSuggestions = $state(false);
   let isSearchSuggestions = $state(false);
   let selectedId: string | undefined = $state();
-  let isFocus = $state(false);
   let close: (() => Promise<void>) | undefined;

   const listboxId = generateId();
+  const searchTypeId = generateId();

   onDestroy(() => {
     searchStore.isSearchEnabled = false;
@@ -161,12 +161,10 @@
   const openDropdown = () => {
     showSuggestions = true;
-    isFocus = true;
   };

   const closeDropdown = () => {
     showSuggestions = false;
-    isFocus = false;
     searchHistoryBox?.clearSelection();
   };
@@ -251,6 +249,7 @@
       aria-activedescendant={selectedId ?? ''}
       aria-expanded={showSuggestions && isSearchSuggestions}
       aria-autocomplete="list"
+      aria-describedby={searchTypeId}
       use:shortcuts={[
         { shortcut: { key: 'Escape' }, onShortcut: onEscape },
         { shortcut: { ctrl: true, shift: true, key: 'k' }, onShortcut: onFilterClick },
@@ -287,12 +286,12 @@
     />
   </div>

-  {#if isFocus}
+  {#if searchStore.isSearchEnabled}
     <div
-      class="absolute inset-y-0 flex items-center"
+      id={searchTypeId}
+      class="absolute inset-y-0 flex items-center end-16"
       class:max-md:hidden={value}
-      class:end-16={isFocus}
-      class:end-28={isFocus && value.length > 0}
+      class:end-28={value.length > 0}
    >
      <p
        class="bg-immich-primary text-white dark:bg-immich-dark-primary/90 dark:text-black/75 rounded-full px-3 py-1 text-xs"