linux/drivers/infiniband/hw/hfi1/mmu_rb.h
Christophe JAILLET 5a905e33b2 RDMA/hfi1: Constify struct mmu_rb_ops
'struct mmu_rb_ops' is not modified in this driver.

Constifying this structure moves some data to a read-only section, which
increases overall security.

On x86_64, with allmodconfig, as an example:
Before:
======
   text	   data	    bss	    dec	    hex	filename
  10879	    164	      0	  11043	   2b23	drivers/infiniband/hw/hfi1/pin_system.o

After:
=====
   text	   data	    bss	    dec	    hex	filename
  10907	    140	      0	  11047	   2b27	drivers/infiniband/hw/hfi1/pin_system.o

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Link: https://lore.kernel.org/r/b826dd05eefa5f4d6a7a1b4d191eaf37c714ed04.1719259997.git.christophe.jaillet@wanadoo.fr
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
2024-06-26 10:53:29 -03:00
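
To illustrate what the change enables (the callback and table names below are
placeholders, not identifiers taken from the driver), a caller of this API can
now mark its ops table const so the compiler emits it into a read-only section:

/* Placeholder callbacks; real implementations live in callers such as
 * pin_system.c. filter() must not sleep (see the header below).
 */
static bool example_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return node->addr == addr;
}

static void example_remove(void *ops_arg, struct mmu_rb_node *mnode)
{
	/* release whatever resources back the cached range */
}

/* const: the table goes to .rodata instead of .data, which is what the
 * text/data shift in the size output above reflects.
 */
static const struct mmu_rb_ops example_rb_ops = {
	.filter = example_filter,
	.remove = example_remove,
	/* .evict only needs to be set by callers that use hfi1_mmu_rb_evict() */
};

The handler is then created with hfi1_mmu_rb_register(), whose ops parameter is
now const-qualified, as shown in the header below.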

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 Intel Corporation.
 */

#ifndef _HFI1_MMU_RB_H
#define _HFI1_MMU_RB_H

#include "hfi.h"

struct mmu_rb_node {
	unsigned long addr;
	unsigned long len;
	unsigned long __last;
	struct rb_node node;
	struct mmu_rb_handler *handler;
	struct list_head list;
	struct kref refcount;
};

/* filter and evict must not sleep. Only remove is allowed to sleep. */
struct mmu_rb_ops {
	bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
		       unsigned long len);
	void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
	int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
		     void *evict_arg, bool *stop);
};
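
/*
 * Typical usage (illustrative; the "foo" names below are hypothetical,
 * not taken from this driver): callers embed struct mmu_rb_node in a
 * private tracking structure and recover it with container_of() from
 * the callbacks, e.g.
 *
 *	struct foo_node {
 *		struct mmu_rb_node rb;
 *		struct page **pages;
 *	};
 *
 *	static void foo_remove(void *ops_arg, struct mmu_rb_node *mnode)
 *	{
 *		struct foo_node *fnode =
 *			container_of(mnode, struct foo_node, rb);
 *		// unpin/release fnode->pages here; remove() may sleep
 *	}
 *
 * filter() and evict() are invoked with the handler's spinlock held and
 * therefore must not sleep; remove() is invoked outside the lock (often
 * from the handler's workqueue) and may sleep.
 */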

struct mmu_rb_handler {
	/*
	 * struct mmu_notifier is 56 bytes, and spinlock_t is 4 bytes, so
	 * they fit together in one cache line. mn is relatively rarely
	 * accessed, so co-locating the spinlock with it achieves much of
	 * the cacheline contention reduction of giving the spinlock its own
	 * cacheline without the overhead of doing so.
	 */
	struct mmu_notifier mn;
	spinlock_t lock; /* protect the RB tree */

	/* Begin on a new cacheline boundary here */
	struct rb_root_cached root ____cacheline_aligned_in_smp;
	void *ops_arg;
	const struct mmu_rb_ops *ops;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
	void *free_ptr;
};
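
/*
 * Quick reference for the functions below (details in mmu_rb.c; this is
 * a summary, not a contract):
 * - hfi1_mmu_rb_register() allocates a handler, registers an MMU
 *   notifier for the current mm and returns the handler in *handler.
 * - hfi1_mmu_rb_unregister() unhooks the notifier, flushes pending
 *   removals and frees the handler (via free_ptr).
 * - hfi1_mmu_rb_insert() adds mnode, covering
 *   [mnode->addr, mnode->addr + mnode->len), to the interval tree.
 * - hfi1_mmu_rb_release() is the kref release function for
 *   mmu_rb_node::refcount; the final removal runs on the workqueue.
 * - hfi1_mmu_rb_evict() walks lru_list calling ops->evict() until *stop
 *   is set, then removes the evicted nodes.
 * - hfi1_mmu_rb_get_first() returns the first node overlapping
 *   [addr, addr + len), or NULL, and marks it most recently used.
 */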
int hfi1_mmu_rb_register(void *ops_arg,
			 const struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler);
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode);
void hfi1_mmu_rb_release(struct kref *refcount);
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr,
					  unsigned long len);

#endif /* _HFI1_MMU_RB_H */
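
For orientation, here is a rough end-to-end sketch of how a caller might drive
this API. It assumes the hypothetical example_rb_ops table shown above and a
placeholder foo_ctx context; page pinning, node refcounting and most error
handling are omitted, so this is not the driver's actual flow (see
pin_system.c for a real user).

#include <linux/slab.h>
#include "mmu_rb.h"

/* Hypothetical caller context; not part of the hfi1 sources. */
struct foo_ctx {
	struct mmu_rb_handler *handler;
	struct workqueue_struct *wq;
};

static int foo_init(struct foo_ctx *ctx)
{
	/* The const ops table can live in .rodata thanks to the change above. */
	return hfi1_mmu_rb_register(ctx, &example_rb_ops, ctx->wq,
				    &ctx->handler);
}

static int foo_cache_buffer(struct foo_ctx *ctx, unsigned long addr,
			    unsigned long len)
{
	struct mmu_rb_node *node;

	/* Reuse an existing node if one already overlaps the range. */
	node = hfi1_mmu_rb_get_first(ctx->handler, addr, len);
	if (node)
		return 0;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->addr = addr;
	node->len = len;
	kref_init(&node->refcount);

	/* ... pin the user pages backing [addr, addr + len) here ... */

	return hfi1_mmu_rb_insert(ctx->handler, node);
}

static void foo_fini(struct foo_ctx *ctx)
{
	/* Unregisters the MMU notifier and removes any remaining nodes. */
	hfi1_mmu_rb_unregister(ctx->handler);
}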