6b289a1423
This patch replaces the former prominent use of pointers by references wherever feasible. This has the following benefits: * The contract between caller and callee becomes more obvious. When passing a reference, the contract says that the argument cannot be a null pointer. The caller is responsible for ensuring that. Therefore, the use of a reference eliminates the need to add defensive null-pointer checks at the callee site, which sometimes merely exist to be on the safe side. The bottom line is that the code becomes easier to follow. * Reference members must be initialized via an object initializer, which promotes a programming style that avoids intermediate object-construction states. Within core, there are still a few pointers as member variables left though. E.g., caused by the late association of 'Platform_thread' objects with their 'Platform_pd' objects. * If no pointers are present as member variables, we don't need to manually provide declarations of a private copy constructor and an assignment operator to avoid -Weffc++ errors "class ... has pointer data members [-Werror=effc++]". This patch also changes a few system bindings on NOVA and Fiasco.OC, e.g., the return value of the global 'cap_map' accessor has become a reference. Hence, the patch touches a few places outside of core. Fixes #3135
88 lines
2.3 KiB
C++
88 lines
2.3 KiB
C++
/*
 * \brief  Quota-bounds-checking implementation of the 'Ram_allocator'
 *         interface specifically for core
 * \author Norman Feske
 * \date   2017-05-02
 */

/*
 * Copyright (C) 2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */
|
|
|
|
#ifndef _CORE__INCLUDE__CORE_CONSTRAINED_CORE_RAM_H_
|
|
#define _CORE__INCLUDE__CORE_CONSTRAINED_CORE_RAM_H_
|
|
|
|
#include <base/allocator.h>
|
|
|
|
namespace Genode { class Constrained_core_ram; }
|
|
|
|
class Genode::Constrained_core_ram : public Allocator
{
	private:

		/* quota guards of the account that pays for core-local memory */
		Ram_quota_guard &_ram_guard;
		Cap_quota_guard &_cap_guard;

		/* physical backing store used to satisfy the allocations */
		Range_allocator &_core_mem;

		/* bytes currently handed out, accounted at page granularity */
		uint64_t _allocated_bytes { 0 };

	public:

		/**
		 * Constructor
		 *
		 * \param ram_guard  RAM quota to withdraw from on each allocation
		 * \param cap_guard  capability quota to withdraw from
		 * \param core_mem   core-local memory pool backing the allocations
		 */
		Constrained_core_ram(Ram_quota_guard &ram_guard,
		                     Cap_quota_guard &cap_guard,
		                     Range_allocator &core_mem)
		:
			_ram_guard(ram_guard), _cap_guard(cap_guard), _core_mem(core_mem)
		{ }

		~Constrained_core_ram()
		{
			/* every allocation must have been freed by destruction time */
			if (_allocated_bytes)
				error(this, " memory leaking of size ", _allocated_bytes,
				      " in core !");
		}

		/**
		 * Allocate page-granular memory from core's pool, charging quota
		 *
		 * \return true on success, false if the backing pool is exhausted
		 *
		 * The quota reservations below may throw if the guards are depleted.
		 */
		bool alloc(size_t const size, void **ptr) override
		{
			size_t const bytes = align_addr(size, 12);

			/*
			 * Reserve quota up front. The reservations are rolled back
			 * automatically unless explicitly acknowledged.
			 */
			Ram_quota_guard::Reservation ram (_ram_guard, Ram_quota{bytes});
			/* on some kernels we require a cap, on some not XXX */
			Cap_quota_guard::Reservation caps(_cap_guard, Cap_quota{1});

			if (!_core_mem.alloc(bytes, ptr))
				return false;

			/* the allocation succeeded, commit both reservations */
			ram.acknowledge();
			caps.acknowledge();

			_allocated_bytes += bytes;

			return true;
		}

		/**
		 * Return memory to core's pool and refund the charged quota
		 */
		void free(void *ptr, size_t const size) override
		{
			size_t const bytes = align_addr(size, 12);

			_core_mem.free(ptr, bytes);

			_ram_guard.replenish(Ram_quota{bytes});
			/* on some kernels we require a cap, on some not XXX */
			_cap_guard.replenish(Cap_quota{1});

			_allocated_bytes -= bytes;
		}

		size_t consumed()           const override { return _allocated_bytes; }
		size_t overhead(size_t)     const override { return 0; }
		bool   need_size_for_free() const override { return true; }
};
|
|
#endif /* _CORE__INCLUDE__CORE_CONSTRAINED_CORE_RAM_H_ */
|