1a59d1b8e0
Based on 1 normalized pattern(s): this program is free software you can redistribute it and or modify it under the terms of the gnu general public license as published by the free software foundation either version 2 of the license or at your option any later version this program is distributed in the hope that it will be useful but without any warranty without even the implied warranty of merchantability or fitness for a particular purpose see the gnu general public license for more details you should have received a copy of the gnu general public license along with this program if not write to the free software foundation inc 59 temple place suite 330 boston ma 02111 1307 usa extracted by the scancode license scanner the SPDX license identifier GPL-2.0-or-later has been chosen to replace the boilerplate/reference in 1334 file(s). Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Allison Randal <allison@lohutok.net> Reviewed-by: Richard Fontana <rfontana@redhat.com> Cc: linux-spdx@vger.kernel.org Link: https://lkml.kernel.org/r/20190527070033.113240726@linutronix.de Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
224 lines
5.0 KiB
C
224 lines
5.0 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
/*
|
|
* SN Platform GRU Driver
|
|
*
|
|
* Dump GRU State
|
|
*
|
|
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/uaccess.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/bitops.h>
|
|
#include <asm/uv/uv_hub.h>
|
|
|
|
#include <linux/nospec.h>
|
|
|
|
#include "gru.h"
|
|
#include "grutables.h"
|
|
#include "gruhandles.h"
|
|
#include "grulib.h"
|
|
|
|
#define CCH_LOCK_ATTEMPTS 10
|
|
|
|
/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) to userspace and advance
 * the caller's output cursor past it.
 *
 * @dp: in/out pointer to the userspace destination cursor
 * @s:  kernel source address of the handle
 *
 * Returns 0 on success, -1 if the copy faults (callers translate
 * this into -EFAULT).
 */
static int gru_user_copy_handle(void __user **dp, void *s)
{
	void __user *dst = *dp;

	if (copy_to_user(dst, s, GRU_HANDLE_BYTES))
		return -1;

	*dp = dst + GRU_HANDLE_BYTES;
	return 0;
}
|
|
|
|
static int gru_dump_context_data(void *grubase,
|
|
struct gru_context_configuration_handle *cch,
|
|
void __user *ubuf, int ctxnum, int dsrcnt,
|
|
int flush_cbrs)
|
|
{
|
|
void *cb, *cbe, *tfh, *gseg;
|
|
int i, scr;
|
|
|
|
gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
|
|
cb = gseg + GRU_CB_BASE;
|
|
cbe = grubase + GRU_CBE_BASE;
|
|
tfh = grubase + GRU_TFH_BASE;
|
|
|
|
for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
|
|
if (flush_cbrs)
|
|
gru_flush_cache(cb);
|
|
if (gru_user_copy_handle(&ubuf, cb))
|
|
goto fail;
|
|
if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
|
|
goto fail;
|
|
if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
|
|
goto fail;
|
|
cb += GRU_HANDLE_STRIDE;
|
|
}
|
|
if (dsrcnt)
|
|
memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
|
|
return 0;
|
|
|
|
fail:
|
|
return -EFAULT;
|
|
}
|
|
|
|
/*
 * Dump all TLB fault maps (TFMs) of a chiplet to userspace.
 *
 * @gru:     GRU chiplet state
 * @ubuf:    userspace destination buffer
 * @ubufend: end of the userspace buffer (for the size check)
 *
 * Returns the number of bytes written, -EFBIG if the buffer is too
 * small, or -EFAULT if a copy to userspace faults.
 */
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	/* Every TFM is dumped; reject buffers that cannot hold them all */
	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			return -EFAULT;
	}

	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
}
|
|
|
|
/*
 * Dump all TLB global handles (TGHs) of a chiplet to userspace.
 *
 * @gru:     GRU chiplet state
 * @ubuf:    userspace destination buffer
 * @ubufend: end of the userspace buffer (for the size check)
 *
 * Returns the number of bytes written, -EFBIG if the buffer is too
 * small, or -EFAULT if a copy to userspace faults.
 */
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	/* Every TGH is dumped; reject buffers that cannot hold them all */
	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			return -EFAULT;
	}

	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
}
|
|
|
|
/*
 * Dump the state of one GRU context to userspace: a header, the CCH,
 * and (if the context is active) its CBR/TFH/CBE handles and optional
 * DSR data.
 *
 * @gru:        GRU chiplet state
 * @ctxnum:     context number to dump
 * @ubuf:       userspace destination buffer
 * @ubufend:    end of the userspace buffer
 * @data_opt:   non-zero to include the DSR data segment
 * @lock_cch:   non-zero to require the CCH lock before dumping handles
 * @flush_cbrs: non-zero to flush CBRs from cache before reading
 *
 * Returns the number of bytes written or a negative errno (-EFAULT on
 * userspace copy failure, -EFBIG if the buffer is too small).
 */
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch;
	/* Fix: ubufcch aliases the user buffer; qualify it __user */
	struct gru_context_configuration_handle __user *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	/* Best-effort lock; on failure we may still dump unlocked state */
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	/*
	 * Fix: the original stored 0 through the userspace pointer
	 * directly (ubufcch->delresp = 0). Use put_user() and handle
	 * the fault.
	 */
	if (cch_locked && put_user(0, &ubufcch->delresp)) {
		unlock_cch_handle(cch);
		return -EFAULT;
	}
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		/* Each CBR contributes 3 cache lines: CBR + TFH + CBE */
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	/* Header goes out last so it reflects what was actually dumped */
	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}
|
|
|
|
int gru_dump_chiplet_request(unsigned long arg)
|
|
{
|
|
struct gru_state *gru;
|
|
struct gru_dump_chiplet_state_req req;
|
|
void __user *ubuf;
|
|
void __user *ubufend;
|
|
int ctxnum, ret, cnt = 0;
|
|
|
|
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
|
|
return -EFAULT;
|
|
|
|
/* Currently, only dump by gid is implemented */
|
|
if (req.gid >= gru_max_gids)
|
|
return -EINVAL;
|
|
req.gid = array_index_nospec(req.gid, gru_max_gids);
|
|
|
|
gru = GID_TO_GRU(req.gid);
|
|
ubuf = req.buf;
|
|
ubufend = req.buf + req.buflen;
|
|
|
|
ret = gru_dump_tfm(gru, ubuf, ubufend);
|
|
if (ret < 0)
|
|
goto fail;
|
|
ubuf += ret;
|
|
|
|
ret = gru_dump_tgh(gru, ubuf, ubufend);
|
|
if (ret < 0)
|
|
goto fail;
|
|
ubuf += ret;
|
|
|
|
for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
|
|
if (req.ctxnum == ctxnum || req.ctxnum < 0) {
|
|
ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
|
|
req.data_opt, req.lock_cch,
|
|
req.flush_cbrs);
|
|
if (ret < 0)
|
|
goto fail;
|
|
ubuf += ret;
|
|
cnt++;
|
|
}
|
|
}
|
|
|
|
if (copy_to_user((void __user *)arg, &req, sizeof(req)))
|
|
return -EFAULT;
|
|
return cnt;
|
|
|
|
fail:
|
|
return ret;
|
|
}
|