libdrm: simplify resource accounting

Upgrade to the well-known worst cases required by the GPU multiplexer.
Do not keep track of resources locally; in case resources are
exceeded, they remain so anyway.

issue #4451
This commit is contained in:
Sebastian Sumpf 2022-03-22 10:56:07 +01:00 committed by Christian Helmuth
parent 105e82ad84
commit 49b8232ebd

View File

@ -549,241 +549,24 @@ class Drm_call
/* id space holding all DRM contexts of this session */
using Context_space = Genode::Id_space<Drm::Context>;
Context_space _context_space { };
struct Resource_guard
template <typename FN> void _gpu_op( FN const &fn)
{
struct Upgrade_failed : Genode::Exception { };
struct Invalid_accounting : Genode::Exception { };
Gpu::Connection &_gpu;
Cap_quota cap_donated;
Ram_quota ram_donated;
Cap_quota cap_used;
Ram_quota ram_used;
Genode::int64_t cap_count { 0 };
Genode::int64_t ram_count { 0 };
Genode::int64_t map_count { 0 };
Genode::int64_t map_ppgtt_count { 0 };
Genode::int64_t alloc_count { 0 };
/* true if the not-yet-used part of the donated CAP quota covers 'needed' */
/* (assumes cap_used <= cap_donated; _withdraw() guards that invariant) */
bool _available(Cap_quota needed) const {
return cap_donated.value - cap_used.value >= needed.value; }
/* true if the not-yet-used part of the donated RAM quota covers 'needed' */
bool _available(Ram_quota needed) const {
return ram_donated.value - ram_used.value >= needed.value; }
/* donate additional CAP quota to the GPU session and record the donation */
void _upgrade(Cap_quota amount)
{
	auto const value = amount.value;
	_gpu.upgrade_caps(value);
	cap_donated.value += value;
}

/* donate additional RAM quota to the GPU session and record the donation */
void _upgrade(Ram_quota amount)
{
	auto const value = amount.value;
	_gpu.upgrade_ram(value);
	ram_donated.value += value;
}
/*
 * Account 'caps' as used
 *
 * \throw Invalid_accounting  if the request exceeds the donated quota
 */
void _withdraw(Cap_quota caps)
{
	auto const needed = caps.value;

	if (needed == 0)
		return;

	Cap_quota const avail { cap_donated.value - cap_used.value };

	if (needed > avail.value) {
		Genode::error(__func__, ": assert CAP amount failed, ",
		              "caps: ", needed, " avail: ", avail,
		              " count: ", cap_count);
		throw Invalid_accounting();
	}

	cap_used.value += needed;
	++cap_count;
}
/*
 * Account 'ram' as used
 *
 * \throw Invalid_accounting  if the request exceeds the donated quota
 */
void _withdraw(Ram_quota ram)
{
	if (ram.value == 0)
		return;

	Ram_quota const avail { ram_donated.value - ram_used.value };

	if (ram.value > avail.value) {
		/* fixed copy-paste defect: message previously said "CAP" in the RAM path */
		Genode::error(__func__, ": assert RAM amount failed, ",
		              "ram: ", ram.value, " avail: ", avail,
		              " count: ", ram_count);
		throw Invalid_accounting();
	}

	ram_used.value += ram.value;
	ram_count++;
}
/*
 * Return previously withdrawn CAP quota to the pool
 *
 * \throw Invalid_accounting  if more is returned than was withdrawn
 */
void _replenish(Cap_quota caps)
{
	auto const amount = caps.value;

	if (!amount)
		return;

	if (cap_used.value < amount) {
		Genode::error(__func__, ": assert CAP amount failed, ",
		              "used: ", cap_used.value, " caps: ",
		              amount, " count: ", cap_count);
		throw Invalid_accounting();
	}

	cap_used.value -= amount;
	--cap_count;
}
/*
 * Return previously withdrawn RAM quota to the pool
 *
 * \throw Invalid_accounting  if more is returned than was withdrawn
 */
void _replenish(Ram_quota ram)
{
	auto const amount = ram.value;

	if (!amount)
		return;

	if (ram_used.value < amount) {
		Genode::error(__func__, ": assert RAM amount failed, ",
		              "used: ", ram_used.value, " ram: ", amount,
		              " count: ", ram_count);
		throw Invalid_accounting();
	}

	ram_used.value -= amount;
	--ram_count;
}
/*
 * Per-operation resource costs withdrawn/replenished by this guard.
 * NOTE(review): presumably the "well known worst cases" of the GPU
 * multiplexer mentioned in the commit message — confirm against the
 * GPU session implementation.
 */
enum {
/* caps accounted per alloc_buffer()/free_buffer() pair */
ALLOC_BUFFER_CAP_AMOUNT = 4,
/* caps accounted per map_buffer()/unmap_buffer() pair */
MAP_BUFFER_CAP_AMOUNT = 2,
/* ram (bytes) accounted per map_buffer()/unmap_buffer() pair */
MAP_BUFFER_RAM_AMOUNT = 1024,
};
/* start with zero donated/used quota; quota is donated on demand */
Resource_guard(Gpu::Connection &gpu)
:
_gpu { gpu },
cap_donated { 0 },
ram_donated { 0 },
cap_used { 0 },
ram_used { 0 }
{ }
/*
 * Run 'fn' under retry loops that upgrade the GPU session's CAP/RAM
 * quota on Out_of_caps/Out_of_ram, halving the upgrade step each round,
 * and finally withdraw the total donation from the local accounting.
 *
 * NOTE(review): this span interleaves removed and added lines of a
 * rendered diff — the duplicated 'retry' opener, the stray 'fn()' call,
 * and the '_gpu_session.upgrade_caps(2)' line belong to two different
 * versions of the function. It is not compilable as shown; reconstruct
 * from the original commit before editing.
 */
template <typename FN> void _perform_gpu_op(Cap_quota caps, Ram_quota ram,
FN const &fn)
{
Cap_quota donated_caps { caps.value };
if (!_available(caps)) {
_upgrade(caps);
caps.value /= 2;
}
Ram_quota donated_ram { ram.value };
if (!_available(ram)) {
_upgrade(ram);
ram.value /= 2;
}
Genode::retry<Gpu::Session::Out_of_ram>(
Genode::retry<Gpu::Session::Out_of_ram>(
[&] () {
Genode::retry<Gpu::Session::Out_of_caps>(
[&] () {
Genode::retry<Gpu::Session::Out_of_caps>(
[&] () {
fn();
},
[&] () {
if (caps.value == 0)
throw Upgrade_failed();
_upgrade(caps);
donated_caps.value += caps.value;
caps.value /= 2;
});
fn();
},
[&] () {
if (ram.value == 0)
throw Upgrade_failed();
_upgrade(ram);
donated_ram.value += ram.value;
ram.value /= 2;
_gpu_session.upgrade_caps(2);
});
_withdraw(donated_caps);
_withdraw(donated_ram);
}
/*
 * Allocate a GPU buffer of 'size' bytes, rounded up to page granularity
 *
 * \return pointer to the new buffer, or nullptr if the quota upgrade failed
 */
Buffer *alloc_buffer(Genode::Allocator &alloc, Buffer_space &buffer_space,
                     Genode::size_t size)
{
	/* page-align the requested size */
	Genode::size_t const bytes = (size + 0xffful) & ~0xffful;

	Buffer *result = nullptr;
	try {
		_perform_gpu_op(Cap_quota { ALLOC_BUFFER_CAP_AMOUNT },
		                Ram_quota { bytes }, [&] () {
			result = new (alloc) Buffer(_gpu, bytes, buffer_space);
		});
		alloc_count++;
	} catch (Upgrade_failed) {
		return nullptr;
	}
	return result;
}
/* give back the quota accounted for one buffer of 'size' bytes */
void free_buffer(Genode::size_t size)
{
	--alloc_count;
	_replenish(Cap_quota { ALLOC_BUFFER_CAP_AMOUNT });
	_replenish(Ram_quota { size });
}
/*
 * Map 'buffer' at the GPU session, accounting worst-case CAP/RAM costs
 *
 * \return true on success, false if the quota upgrade failed
 */
bool map_buffer(Buffer &buffer)
{
	Cap_quota caps { MAP_BUFFER_CAP_AMOUNT };
	Ram_quota ram { MAP_BUFFER_RAM_AMOUNT };
	try {
		_perform_gpu_op(caps, ram, [&] () {
			buffer.map_cap =
				_gpu.map_buffer(buffer.id(), true,
				                Gpu::Mapping_attributes::rw());
		});
	} catch (Upgrade_failed) {
		return false;
	}

	/*
	 * Fixed: 'map_count' was decremented in unmap_buffer() and reported
	 * by dump() but never incremented on a successful mapping.
	 */
	map_count++;

	return true;
}
/* release the GPU mapping of 'buffer' and return the accounted quota */
void unmap_buffer(Buffer &buffer)
{
	--map_count;
	_replenish(Cap_quota { MAP_BUFFER_CAP_AMOUNT });
	_replenish(Ram_quota { MAP_BUFFER_RAM_AMOUNT });
	_gpu.unmap_buffer(buffer.id());
}
/* debug helper: print used/donated quota and the operation counters */
/* (deliberately uses the error channel so it shows up in any log config) */
void dump()
{
Genode::error("Resource_guard: ",
"caps: ", cap_used.value, "/", cap_donated.value, " "
"ram: ", ram_used.value, "/", ram_donated.value, " "
"counter: ", alloc_count, "/", map_count, "/", map_ppgtt_count);
}
};
/*
 * Local accounting of quota donated to the GPU session. The initial
 * connection quota covers bringup only; all further donations are
 * tracked by this guard starting from zero.
 */
Resource_guard _resources { _gpu_session };
},
[&] () {
/* heap allocation granularity */
_gpu_session.upgrade_ram(2*1024*1024);
});
}
struct Sync_obj
{
@ -808,22 +591,15 @@ class Drm_call
return offset;
}
if (_resources.map_buffer(b)) {
_gpu_op([&] () {
b.map_cap = _gpu_session.map_buffer(b.id(), true, Gpu::Mapping_attributes::rw());
});
// XXX attach might faile
b.map_offset = static_cast<Offset>(_env.rm().attach(b.map_cap));
offset = b.map_offset;
// XXX attach might faile
b.map_offset = static_cast<Offset>(_env.rm().attach(b.map_cap));
offset = b.map_offset;
_available_gtt_size -= b.size;
} else {
if (b.map_cap.valid()) {
_unmap_buffer(b);
Genode::error("could not attach GEM buffer handle: ", b.id().value);
Genode::sleep_forever();
}
}
_available_gtt_size -= b.size;
return offset;
}
@ -842,25 +618,30 @@ class Drm_call
return offset;
}
/*
 * Detach and unmap a GTT-mapped buffer and reclaim its GTT window.
 *
 * NOTE(review): this span interleaves the pre-change ('h') and
 * post-change ('buffer') versions of _unmap_buffer from a rendered
 * diff — each statement appears twice. Not valid C++ as shown;
 * reconstruct one version before editing.
 */
void _unmap_buffer(Buffer &h)
void _unmap_buffer(Buffer &buffer)
{
if (!h.map_cap.valid())
if (!buffer.map_cap.valid())
return;
_env.rm().detach(h.map_offset);
h.map_offset = 0;
_env.rm().detach(buffer.map_offset);
buffer.map_offset = 0;
_resources.unmap_buffer(h);
_gpu_session.unmap_buffer(buffer.id());
h.map_cap = Genode::Dataspace_capability();
buffer.map_cap = Genode::Dataspace_capability();
_available_gtt_size += h.size;
_available_gtt_size += buffer.size;
}
template <typename FUNC>
void _alloc_buffer(uint64_t const size, FUNC const &fn)
{
Buffer *buffer = _resources.alloc_buffer(_heap, _buffer_space, size);
Buffer *buffer { nullptr };
_gpu_op([&] () {
buffer = new (_heap) Buffer(_gpu_session, Genode::align_addr(size, 12),
_buffer_space);
});
if (buffer)
fn(*buffer);
@ -874,8 +655,6 @@ class Drm_call
/* callee checks for mappings */
_unmap_buffer(b);
_resources.free_buffer(b.size);
_context_space.for_each<Drm::Context>([&] (Drm::Context &context) {
context.free_buffer(b.id()); });
Genode::destroy(&_heap, &b);
@ -952,50 +731,9 @@ class Drm_call
/*
 * DRM_I915_GEM_MMAP_GTT ioctl backend: map the buffer identified by
 * 'p->handle' and apply its tiling mode via set_tiling().
 *
 * NOTE(review): this span shows the removed implementation followed by
 * the commit's replacement stub — the three statements after the first
 * 'return' line belong to the new version and are unreachable as
 * rendered. Not a single coherent function; reconstruct before editing.
 */
int _device_gem_mmap_gtt(void *arg)
{
auto const p = reinterpret_cast<drm_i915_gem_mmap_gtt *>(arg);
Gpu::Buffer_id const id { .value = p->handle };
if (verbose_ioctl) {
Genode::error(__func__, ": ", "handle: ", id.value,
" offset: ", Genode::Hex(p->offset));
}
bool successful = true;
try {
_buffer_space.apply<Buffer>(id, [&] (Buffer &b) {
p->offset = _map_buffer(b);
if (p->offset == 0) {
successful = false;
return;
}
/*
* Judging by iris mode == 0 is I915_TILING_NONE for
* which no fencing seems to be necessary.
*/
if (b.tiling.valid() && b.tiling.mode != 0) {
uint32_t const m = (b.tiling.stride << 16) | (b.tiling.mode == 1 ? 1 : 0);
successful = _gpu_session.set_tiling(b.id(), m);
} else {
successful = true;
}
if (!successful) {
_unmap_buffer(b);
return;
}
});
} catch (Genode::Id_space<Buffer>::Unknown_id) { }
if (verbose_ioctl) {
Genode::error(__func__, ": ", "handle: ", id.value,
" offset: ", Genode::Hex(p->offset),
successful ? " (mapped)" : " (failed)");
}
return successful ? 0 : -1;
Genode::error(__func__, " not implemented");
while (1) ;
return -1;
}
char const *_domain_name(uint32_t d)
@ -1522,22 +1260,6 @@ class Drm_call
return size;
}
/*
 * Sanity check that 'offset' belongs to a GTT-mapped buffer that is
 * large enough for 'length'
 */
bool map_buffer_ggtt(Offset offset, size_t length)
{
	bool found = false;

	_buffer_space.for_each<Buffer>([&] (Buffer &b) {

		if (b.map_offset != offset)
			return;

		if (length > b.size) {
			Genode::error("map_buffer_ggtt: size mismatch");
			return;
		}

		found = true;
	});

	if (!found)
		Genode::error("could not lookup buffer for offset: ", offset);

	return found;
}
void unmap_buffer(void *addr, size_t length)
{
bool found = false;
@ -1555,9 +1277,6 @@ class Drm_call
return;
}
if (b.tiling.valid())
b.tiling = Gpu::Buffer::Tiling();
if (b.map_cap.valid())
_unmap_buffer(b);
@ -1572,32 +1291,6 @@ class Drm_call
}
}
void unmap_buffer_ggtt(void *addr, size_t length)
{
Offset const offset = Offset(addr);
bool handled = false;
_buffer_space.for_each<Buffer>([&] (Buffer &h) {
if (handled) return;
if (h.map_offset != offset) return;
if (length > h.size) { Genode::error("unmap_buffer_ggtt: size mismatch"); return; }
if (!h.map_cap.valid()) {
Genode::error("no valid capability found for offset: ", Genode::Hex(offset));
return;
}
_unmap_buffer(h);
handled = true;
});
if (!handled) {
Genode::error(__func__, ": unknown addr ", addr, "+", Genode::Hex(length));
Genode::sleep_forever();
}
}
void unmap_buffer_ppgtt(__u32 handle)
{
Gpu::Buffer_id const id = { .value = handle };
@ -1641,11 +1334,8 @@ extern "C" void *drm_mmap(void * /* vaddr */, size_t length,
int /* prot */, int /* flags */,
int /* fd */, off_t offset)
{
if (_call.constructed() == false) { errno = EIO; return nullptr; }
/* sanity check if we got a GTT mapped offset */
bool const ok = _call->map_buffer_ggtt(offset, length);
return ok ? (void *)offset : nullptr;
Genode::error(__func__, " called not implemented");
return nullptr;
}
/**