From bdc179955635c0a814ba324ea263c875a6733cb4 Mon Sep 17 00:00:00 2001
From: Corinna Vinschen
Date: Thu, 22 Jan 2015 15:32:51 +0000
Subject: [PATCH] * libc/sys/or1k/mlock.c: Fix dependency on libgloss.

---
 newlib/ChangeLog             | 4 ++++
 newlib/libc/sys/or1k/mlock.c | 6 ++++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/newlib/ChangeLog b/newlib/ChangeLog
index e67c2cf87..845dfa54d 100644
--- a/newlib/ChangeLog
+++ b/newlib/ChangeLog
@@ -1,3 +1,7 @@
+2015-01-22  Stefan Wallentowitz
+
+	* libc/sys/or1k/mlock.c: Fix dependency on libgloss.
+
 2015-01-21  Eric Botcazou
 
 	* configure.host: Add Visium support.
diff --git a/newlib/libc/sys/or1k/mlock.c b/newlib/libc/sys/or1k/mlock.c
index 660c3f087..a439da603 100644
--- a/newlib/libc/sys/or1k/mlock.c
+++ b/newlib/libc/sys/or1k/mlock.c
@@ -16,7 +16,7 @@
  */
 
 #include <reent.h>
-#include <or1k-support.h>
+#include <stdint.h>
 
 /* Lock calls from different cores, but allows recursive calls from the same
  * core. The lock is not only atomic to other cores calling malloc, but also
@@ -36,6 +36,8 @@ volatile uint32_t _or1k_malloc_lock_cnt;
 // The exception enable restore of the current mutex holder
 volatile uint32_t _or1k_malloc_lock_restore;
 
+extern uint32_t or1k_sync_cas(void *address, uint32_t compare, uint32_t swap);
+
 /**
  * Recursive lock of the malloc
  */
@@ -59,7 +61,7 @@ void __malloc_lock(struct _reent *ptr) {
 			while (_or1k_malloc_lock != 0) {}
 			// .. and then try to set it atomically. As this may
 			// fail, we need to repeat this
-		} while (or1k_sync_cas(&_or1k_malloc_lock, 0, id) != 0);
+		} while (or1k_sync_cas((void*) &_or1k_malloc_lock, 0, id) != 0);
 	}
 
 	// Store the TEE and IEE flags for later restore
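
Background for reviewers: before this change, mlock.c picked up the prototype of
or1k_sync_cas() from a libgloss header, so newlib could not be built without
libgloss already being present. The patch drops that include in favor of a local
extern declaration of or1k_sync_cas(); the added (void*) cast keeps the call
consistent with that declaration, presumably because &_or1k_malloc_lock is a
pointer to a volatile object while the declared parameter is a plain void *.

The lock that __malloc_lock() implements is a recursive spinlock: a core spins
until the lock word reads 0, then tries to claim it with a compare-and-swap and
retries if another core wins the race; a recursion counter lets the owning core
re-enter. What follows is only a minimal sketch of that pattern, not the newlib
code: it substitutes GCC's __sync_val_compare_and_swap() for or1k_sync_cas(),
uses a made-up core_id() placeholder instead of reading the OpenRISC core-id
SPR, and leaves out the TEE/IEE exception-flag save/restore that the real
implementation performs.

#include <stdint.h>

static volatile uint32_t lock;      /* 0 = free, otherwise id of the holder   */
static volatile uint32_t lock_cnt;  /* recursion depth of the current holder  */

/* Placeholder only: a real port would read the core id from hardware. */
static uint32_t core_id(void) { return 0; }

static void sketch_malloc_lock(void)
{
	uint32_t id = core_id() + 1;	/* ids start at 1 so that 0 means "free" */

	if (lock != id) {		/* not already held by this core */
		do {
			/* Spin until the lock looks free .. */
			while (lock != 0) {}
			/* .. then try to claim it atomically; retry if we lose the race. */
		} while (__sync_val_compare_and_swap(&lock, 0, id) != 0);
	}
	lock_cnt++;			/* count recursive acquisitions */
}

static void sketch_malloc_unlock(void)
{
	if (--lock_cnt == 0)
		lock = 0;		/* last unlock releases the lock */
}

int main(void)
{
	sketch_malloc_lock();
	sketch_malloc_lock();		/* recursive acquisition by the same core */
	sketch_malloc_unlock();
	sketch_malloc_unlock();		/* fully released here */
	return 0;
}

The sketch builds with GCC or Clang and runs single-threaded; the property it is
meant to illustrate is that the lock word is only ever claimed through the CAS,
so two cores spinning on a free lock at the same time cannot both acquire it.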