repo: Make locking per-OstreeRepo
Previously each thread maintained its own lock file descriptor regardless of whether the thread was using the same `OstreeRepo` as another thread. This was very safe but it made certain multithreaded procedures difficult. For example, if a main thread took an exclusive lock and then spawned worker threads, it would deadlock if one of the worker threads tried to acquire the lock. This moves the file descriptor from thread local storage to the `OstreeRepo` structure so that threads using the same `OstreeRepo` can share the lock. A mutex guards against threads altering the lock state concurrently. Fixes: #2344
This commit is contained in:
parent
eb09207e1a
commit
ccef9784d7
|
|
@ -104,6 +104,13 @@ typedef struct {
|
||||||
fsblkcnt_t max_blocks;
|
fsblkcnt_t max_blocks;
|
||||||
} OstreeRepoTxn;
|
} OstreeRepoTxn;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
GMutex mutex; /* All other members should only be accessed with this held */
|
||||||
|
int fd; /* The open file or flock file descriptor */
|
||||||
|
guint shared; /* Number of shared locks currently held */
|
||||||
|
guint exclusive; /* Number of exclusive locks currently held */
|
||||||
|
} OstreeRepoLock;
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
_OSTREE_FEATURE_NO,
|
_OSTREE_FEATURE_NO,
|
||||||
_OSTREE_FEATURE_MAYBE,
|
_OSTREE_FEATURE_MAYBE,
|
||||||
|
|
@ -159,6 +166,8 @@ struct OstreeRepo {
|
||||||
GWeakRef sysroot; /* Weak to avoid a circular ref; see also `is_system` */
|
GWeakRef sysroot; /* Weak to avoid a circular ref; see also `is_system` */
|
||||||
char *remotes_config_dir;
|
char *remotes_config_dir;
|
||||||
|
|
||||||
|
OstreeRepoLock lock;
|
||||||
|
|
||||||
GMutex txn_lock;
|
GMutex txn_lock;
|
||||||
OstreeRepoTxn txn;
|
OstreeRepoTxn txn;
|
||||||
gboolean txn_locked;
|
gboolean txn_locked;
|
||||||
|
|
|
||||||
|
|
@ -172,52 +172,43 @@ G_DEFINE_TYPE (OstreeRepo, ostree_repo, G_TYPE_OBJECT)
|
||||||
/* Repository locking
|
/* Repository locking
|
||||||
*
|
*
|
||||||
* To guard against objects being deleted (e.g., prune) while they're in
|
* To guard against objects being deleted (e.g., prune) while they're in
|
||||||
* use by another operation is accessing them (e.g., commit), the
|
* use by another operation that is accessing them (e.g., commit), the
|
||||||
* repository must be locked by concurrent writers.
|
* repository must be locked by concurrent writers.
|
||||||
*
|
*
|
||||||
* The locking is implemented by maintaining a thread local table of
|
* The repository locking has several important features:
|
||||||
* lock stacks per repository. This allows thread safe locking since
|
|
||||||
* each thread maintains its own lock stack. See the OstreeRepoLock type
|
|
||||||
* below.
|
|
||||||
*
|
*
|
||||||
* The actual locking is done using either open file descriptor locks or
|
* * There are 2 states - shared and exclusive. Multiple users can hold
|
||||||
* flock locks. This allows the locking to work with concurrent
|
* a shared lock concurrently while only one user can hold an
|
||||||
* processes. The lock file is held on the ".lock" file within the
|
* exclusive lock.
|
||||||
* repository.
|
*
|
||||||
|
* * The lock can be taken recursively so long as each acquisition is paired
|
||||||
|
* with a matching release. The recursion is also latched to the strongest
|
||||||
|
* state. Once an exclusive lock has been taken, it will remain exclusive
|
||||||
|
* until all exclusive locks have been released.
|
||||||
|
*
|
||||||
|
* * It is both multiprocess- and multithread-safe. Threads that share
|
||||||
|
* an OstreeRepo use the lock cooperatively while processes and
|
||||||
|
* threads using separate OstreeRepo structures will block when
|
||||||
|
* acquiring incompatible lock states.
|
||||||
|
*
|
||||||
|
* The actual locking is implemented using either open file descriptor
|
||||||
|
* locks or flock locks. This allows the locking to work with concurrent
|
||||||
|
* processes or concurrent threads using a separate OstreeRepo. The lock
|
||||||
|
* file is held on the ".lock" file within the repository.
|
||||||
*
|
*
|
||||||
* The intended usage is to take a shared lock when writing objects or
|
* The intended usage is to take a shared lock when writing objects or
|
||||||
* reading objects in critical sections. Exclusive locks are taken when
|
* reading objects in critical sections. Exclusive locks are taken when
|
||||||
* deleting objects.
|
* deleting objects.
|
||||||
*
|
*
|
||||||
* To allow fine grained locking within libostree, the lock is
|
* To allow fine grained locking, the lock state is maintained in shared and
|
||||||
* maintained as a stack. The core APIs then push or pop from the stack.
|
* exclusive counters. Callers then push or pop lock types to increment or
|
||||||
* When pushing or popping a lock state identical to the existing or
|
* decrement the counters. When pushing or popping a lock type identical to
|
||||||
* next state, the stack is simply updated. Only when upgrading or
|
* the existing or next state, the lock state is simply updated. Only when
|
||||||
* downgrading the lock (changing to/from unlocked, pushing exclusive on
|
* upgrading or downgrading the lock (changing to/from unlocked, pushing
|
||||||
* shared or popping exclusive to shared) are actual locking operations
|
* exclusive on shared or popping exclusive to shared) are actual locking
|
||||||
* performed.
|
* operations performed.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static void
|
|
||||||
free_repo_lock_table (gpointer data)
|
|
||||||
{
|
|
||||||
GHashTable *lock_table = data;
|
|
||||||
|
|
||||||
if (lock_table != NULL)
|
|
||||||
{
|
|
||||||
g_debug ("Free lock table");
|
|
||||||
g_hash_table_destroy (lock_table);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static GPrivate repo_lock_table = G_PRIVATE_INIT (free_repo_lock_table);
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
int fd;
|
|
||||||
guint shared; /* Number of shared locks */
|
|
||||||
guint exclusive; /* Number of exclusive locks */
|
|
||||||
} OstreeRepoLock;
|
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
guint len;
|
guint len;
|
||||||
int state;
|
int state;
|
||||||
|
|
@ -241,16 +232,18 @@ lock_state_name (int state)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
repo_lock_info (OstreeRepoLock *lock, OstreeRepoLockInfo *out_info)
|
repo_lock_info (OstreeRepo *self, GMutexLocker *locker,
|
||||||
|
OstreeRepoLockInfo *out_info)
|
||||||
{
|
{
|
||||||
g_assert (lock != NULL);
|
g_assert (self != NULL);
|
||||||
|
g_assert (locker != NULL);
|
||||||
g_assert (out_info != NULL);
|
g_assert (out_info != NULL);
|
||||||
|
|
||||||
OstreeRepoLockInfo info;
|
OstreeRepoLockInfo info;
|
||||||
info.len = lock->shared + lock->exclusive;
|
info.len = self->lock.shared + self->lock.exclusive;
|
||||||
if (info.len == 0)
|
if (info.len == 0)
|
||||||
info.state = LOCK_UN;
|
info.state = LOCK_UN;
|
||||||
else if (lock->exclusive > 0)
|
else if (self->lock.exclusive > 0)
|
||||||
info.state = LOCK_EX;
|
info.state = LOCK_EX;
|
||||||
else
|
else
|
||||||
info.state = LOCK_SH;
|
info.state = LOCK_SH;
|
||||||
|
|
@ -259,26 +252,6 @@ repo_lock_info (OstreeRepoLock *lock, OstreeRepoLockInfo *out_info)
|
||||||
*out_info = info;
|
*out_info = info;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
|
||||||
free_repo_lock (gpointer data)
|
|
||||||
{
|
|
||||||
OstreeRepoLock *lock = data;
|
|
||||||
|
|
||||||
if (lock != NULL)
|
|
||||||
{
|
|
||||||
OstreeRepoLockInfo info;
|
|
||||||
repo_lock_info (lock, &info);
|
|
||||||
|
|
||||||
g_debug ("Free lock: state=%s, depth=%u", info.name, info.len);
|
|
||||||
if (lock->fd >= 0)
|
|
||||||
{
|
|
||||||
g_debug ("Closing repo lock file");
|
|
||||||
(void) close (lock->fd);
|
|
||||||
}
|
|
||||||
g_free (lock);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Wrapper to handle flock vs OFD locking based on GLnxLockFile */
|
/* Wrapper to handle flock vs OFD locking based on GLnxLockFile */
|
||||||
static gboolean
|
static gboolean
|
||||||
do_repo_lock (int fd,
|
do_repo_lock (int fd,
|
||||||
|
|
@ -356,42 +329,29 @@ push_repo_lock (OstreeRepo *self,
|
||||||
if (!blocking)
|
if (!blocking)
|
||||||
flags |= LOCK_NB;
|
flags |= LOCK_NB;
|
||||||
|
|
||||||
GHashTable *lock_table = g_private_get (&repo_lock_table);
|
g_autoptr(GMutexLocker) locker = g_mutex_locker_new (&self->lock.mutex);
|
||||||
if (lock_table == NULL)
|
|
||||||
{
|
|
||||||
g_debug ("Creating repo lock table");
|
|
||||||
lock_table = g_hash_table_new_full (NULL, NULL, NULL,
|
|
||||||
(GDestroyNotify)free_repo_lock);
|
|
||||||
g_private_set (&repo_lock_table, lock_table);
|
|
||||||
}
|
|
||||||
|
|
||||||
OstreeRepoLock *lock = g_hash_table_lookup (lock_table, self);
|
if (self->lock.fd == -1)
|
||||||
if (lock == NULL)
|
|
||||||
{
|
{
|
||||||
lock = g_new0 (OstreeRepoLock, 1);
|
|
||||||
g_debug ("Opening repo lock file");
|
g_debug ("Opening repo lock file");
|
||||||
lock->fd = TEMP_FAILURE_RETRY (openat (self->repo_dir_fd, ".lock",
|
self->lock.fd = TEMP_FAILURE_RETRY (openat (self->repo_dir_fd, ".lock",
|
||||||
O_CREAT | O_RDWR | O_CLOEXEC,
|
O_CREAT | O_RDWR | O_CLOEXEC,
|
||||||
DEFAULT_REGFILE_MODE));
|
DEFAULT_REGFILE_MODE));
|
||||||
if (lock->fd < 0)
|
if (self->lock.fd < 0)
|
||||||
{
|
|
||||||
free_repo_lock (lock);
|
|
||||||
return glnx_throw_errno_prefix (error,
|
return glnx_throw_errno_prefix (error,
|
||||||
"Opening lock file %s/.lock failed",
|
"Opening lock file %s/.lock failed",
|
||||||
gs_file_get_path_cached (self->repodir));
|
gs_file_get_path_cached (self->repodir));
|
||||||
}
|
}
|
||||||
g_hash_table_insert (lock_table, self, lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
OstreeRepoLockInfo info;
|
OstreeRepoLockInfo info;
|
||||||
repo_lock_info (lock, &info);
|
repo_lock_info (self, locker, &info);
|
||||||
g_debug ("Push lock: state=%s, depth=%u", info.name, info.len);
|
g_debug ("Push lock: state=%s, depth=%u", info.name, info.len);
|
||||||
|
|
||||||
guint *counter;
|
guint *counter;
|
||||||
if (next_state == LOCK_EX)
|
if (next_state == LOCK_EX)
|
||||||
counter = &(lock->exclusive);
|
counter = &(self->lock.exclusive);
|
||||||
else
|
else
|
||||||
counter = &(lock->shared);
|
counter = &(self->lock.shared);
|
||||||
|
|
||||||
/* Check for overflow */
|
/* Check for overflow */
|
||||||
g_assert_cmpuint (*counter, <, G_MAXUINT);
|
g_assert_cmpuint (*counter, <, G_MAXUINT);
|
||||||
|
|
@ -407,7 +367,7 @@ push_repo_lock (OstreeRepo *self,
|
||||||
|
|
||||||
const char *next_state_name = lock_state_name (next_state);
|
const char *next_state_name = lock_state_name (next_state);
|
||||||
g_debug ("Locking repo %s", next_state_name);
|
g_debug ("Locking repo %s", next_state_name);
|
||||||
if (!do_repo_lock (lock->fd, flags))
|
if (!do_repo_lock (self->lock.fd, flags))
|
||||||
return glnx_throw_errno_prefix (error, "Locking repo %s failed",
|
return glnx_throw_errno_prefix (error, "Locking repo %s failed",
|
||||||
next_state_name);
|
next_state_name);
|
||||||
}
|
}
|
||||||
|
|
@ -426,15 +386,11 @@ pop_repo_lock (OstreeRepo *self,
|
||||||
{
|
{
|
||||||
int flags = blocking ? 0 : LOCK_NB;
|
int flags = blocking ? 0 : LOCK_NB;
|
||||||
|
|
||||||
GHashTable *lock_table = g_private_get (&repo_lock_table);
|
g_autoptr(GMutexLocker) locker = g_mutex_locker_new (&self->lock.mutex);
|
||||||
g_return_val_if_fail (lock_table != NULL, FALSE);
|
g_return_val_if_fail (self->lock.fd != -1, FALSE);
|
||||||
|
|
||||||
OstreeRepoLock *lock = g_hash_table_lookup (lock_table, self);
|
|
||||||
g_return_val_if_fail (lock != NULL, FALSE);
|
|
||||||
g_return_val_if_fail (lock->fd != -1, FALSE);
|
|
||||||
|
|
||||||
OstreeRepoLockInfo info;
|
OstreeRepoLockInfo info;
|
||||||
repo_lock_info (lock, &info);
|
repo_lock_info (self, locker, &info);
|
||||||
g_return_val_if_fail (info.len > 0, FALSE);
|
g_return_val_if_fail (info.len > 0, FALSE);
|
||||||
g_debug ("Pop lock: state=%s, depth=%u", info.name, info.len);
|
g_debug ("Pop lock: state=%s, depth=%u", info.name, info.len);
|
||||||
|
|
||||||
|
|
@ -443,12 +399,12 @@ pop_repo_lock (OstreeRepo *self,
|
||||||
if (lock_type == OSTREE_REPO_LOCK_EXCLUSIVE)
|
if (lock_type == OSTREE_REPO_LOCK_EXCLUSIVE)
|
||||||
{
|
{
|
||||||
state_to_drop = LOCK_EX;
|
state_to_drop = LOCK_EX;
|
||||||
counter = &(lock->exclusive);
|
counter = &(self->lock.exclusive);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
state_to_drop = LOCK_SH;
|
state_to_drop = LOCK_SH;
|
||||||
counter = &(lock->shared);
|
counter = &(self->lock.shared);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Make sure caller specified a valid type to release */
|
/* Make sure caller specified a valid type to release */
|
||||||
|
|
@ -461,14 +417,14 @@ pop_repo_lock (OstreeRepo *self,
|
||||||
next_state = LOCK_UN;
|
next_state = LOCK_UN;
|
||||||
}
|
}
|
||||||
else if (state_to_drop == LOCK_EX)
|
else if (state_to_drop == LOCK_EX)
|
||||||
next_state = (lock->exclusive > 1) ? LOCK_EX : LOCK_SH;
|
next_state = (self->lock.exclusive > 1) ? LOCK_EX : LOCK_SH;
|
||||||
else
|
else
|
||||||
next_state = (lock->exclusive > 0) ? LOCK_EX : LOCK_SH;
|
next_state = (self->lock.exclusive > 0) ? LOCK_EX : LOCK_SH;
|
||||||
|
|
||||||
if (next_state == LOCK_UN)
|
if (next_state == LOCK_UN)
|
||||||
{
|
{
|
||||||
g_debug ("Unlocking repo");
|
g_debug ("Unlocking repo");
|
||||||
if (!do_repo_unlock (lock->fd, flags))
|
if (!do_repo_unlock (self->lock.fd, flags))
|
||||||
return glnx_throw_errno_prefix (error, "Unlocking repo failed");
|
return glnx_throw_errno_prefix (error, "Unlocking repo failed");
|
||||||
}
|
}
|
||||||
else if (info.state == next_state)
|
else if (info.state == next_state)
|
||||||
|
|
@ -480,7 +436,7 @@ pop_repo_lock (OstreeRepo *self,
|
||||||
/* We should never drop from shared to exclusive */
|
/* We should never drop from shared to exclusive */
|
||||||
g_return_val_if_fail (next_state == LOCK_SH, FALSE);
|
g_return_val_if_fail (next_state == LOCK_SH, FALSE);
|
||||||
g_debug ("Returning lock state to shared");
|
g_debug ("Returning lock state to shared");
|
||||||
if (!do_repo_lock (lock->fd, next_state | flags))
|
if (!do_repo_lock (self->lock.fd, next_state | flags))
|
||||||
return glnx_throw_errno_prefix (error,
|
return glnx_throw_errno_prefix (error,
|
||||||
"Setting repo lock to shared failed");
|
"Setting repo lock to shared failed");
|
||||||
}
|
}
|
||||||
|
|
@ -1117,13 +1073,8 @@ ostree_repo_finalize (GObject *object)
|
||||||
g_clear_pointer (&self->remotes, g_hash_table_destroy);
|
g_clear_pointer (&self->remotes, g_hash_table_destroy);
|
||||||
g_mutex_clear (&self->remotes_lock);
|
g_mutex_clear (&self->remotes_lock);
|
||||||
|
|
||||||
GHashTable *lock_table = g_private_get (&repo_lock_table);
|
glnx_close_fd (&self->lock.fd);
|
||||||
if (lock_table)
|
g_mutex_clear (&self->lock.mutex);
|
||||||
{
|
|
||||||
g_hash_table_remove (lock_table, self);
|
|
||||||
if (g_hash_table_size (lock_table) == 0)
|
|
||||||
g_private_replace (&repo_lock_table, NULL);
|
|
||||||
}
|
|
||||||
|
|
||||||
G_OBJECT_CLASS (ostree_repo_parent_class)->finalize (object);
|
G_OBJECT_CLASS (ostree_repo_parent_class)->finalize (object);
|
||||||
}
|
}
|
||||||
|
|
@ -1285,6 +1236,7 @@ ostree_repo_init (OstreeRepo *self)
|
||||||
self->test_error_flags = g_parse_debug_string (g_getenv ("OSTREE_REPO_TEST_ERROR"),
|
self->test_error_flags = g_parse_debug_string (g_getenv ("OSTREE_REPO_TEST_ERROR"),
|
||||||
test_error_keys, G_N_ELEMENTS (test_error_keys));
|
test_error_keys, G_N_ELEMENTS (test_error_keys));
|
||||||
|
|
||||||
|
g_mutex_init (&self->lock.mutex);
|
||||||
g_mutex_init (&self->cache_lock);
|
g_mutex_init (&self->cache_lock);
|
||||||
g_mutex_init (&self->txn_lock);
|
g_mutex_init (&self->txn_lock);
|
||||||
|
|
||||||
|
|
@ -1298,6 +1250,7 @@ ostree_repo_init (OstreeRepo *self)
|
||||||
self->tmp_dir_fd = -1;
|
self->tmp_dir_fd = -1;
|
||||||
self->objects_dir_fd = -1;
|
self->objects_dir_fd = -1;
|
||||||
self->uncompressed_objects_dir_fd = -1;
|
self->uncompressed_objects_dir_fd = -1;
|
||||||
|
self->lock.fd = -1;
|
||||||
self->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_UNKNOWN;
|
self->sysroot_kind = OSTREE_REPO_SYSROOT_KIND_UNKNOWN;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue