mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

IBM Cell Blades used the Cell processor and the "blade" server form factor. They were sold as models QS20, QS21 & QS22 from roughly 2006 to 2012 [1]. They were used in a few supercomputers (eg. Roadrunner) that have since been dismantled, and were not that widely used otherwise. Until recently I still had a working QS22, which meant I was able to keep the platform support working, but unfortunately that machine has now died. I'm not aware of any users. If there is a user that wants to keep the upstream support working, we can look at bringing some of the code back as appropriate. See previous discussion at [2]. Remove the top-level config symbol PPC_IBM_CELL_BLADE, and then the dependent symbols PPC_CELL_NATIVE, PPC_CELL_COMMON, CBE_RAS, PPC_IBM_CELL_RESETBUTTON, PPC_IBM_CELL_POWERBUTTON, CBE_THERM, and AXON_MSI. Then remove the associated C files and headers, and trim unused header content (some is shared with PS3). Note that PPC_CELL_COMMON sounds like it would build code shared with PS3, but it does not. It's a relic from when code was shared between the Blade support and QPACE support. Most of the primary authors already have CREDITS entries, with the exception of Christian, so add one for him. [1]: https://www.theregister.com/2011/06/28/ibm_kills_qs22_blade [2]: https://lore.kernel.org/linuxppc-dev/60581044-df82-40ad-b94c-56468007a93e@app.fastmail.com Acked-by: Arnd Bergmann <arnd@arndb.de> Acked-by: Jeremy Kerr <jk@ozlabs.org> Acked-by: Segher Boessenkool <segher@kernel.crashing.org> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com> Link: https://patch.msgid.link/20241218105523.416573-1-mpe@ellerman.id.au
221 lines · 5 KiB · C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Defines an spu hypervisor abstraction layer.
 *
 * Copyright 2006 Sony Corp.
 */

#if !defined(_SPU_PRIV1_H)
#define _SPU_PRIV1_H

#if defined(__KERNEL__)

#include <linux/types.h>

/* Opaque to this header; defined in the SPU base support code. */
struct spu;
struct spu_context;

/* access to priv1 registers */
struct spu_priv1_ops {
|
|
void (*int_mask_and) (struct spu *spu, int class, u64 mask);
|
|
void (*int_mask_or) (struct spu *spu, int class, u64 mask);
|
|
void (*int_mask_set) (struct spu *spu, int class, u64 mask);
|
|
u64 (*int_mask_get) (struct spu *spu, int class);
|
|
void (*int_stat_clear) (struct spu *spu, int class, u64 stat);
|
|
u64 (*int_stat_get) (struct spu *spu, int class);
|
|
void (*cpu_affinity_set) (struct spu *spu, int cpu);
|
|
u64 (*mfc_dar_get) (struct spu *spu);
|
|
u64 (*mfc_dsisr_get) (struct spu *spu);
|
|
void (*mfc_dsisr_set) (struct spu *spu, u64 dsisr);
|
|
void (*mfc_sdr_setup) (struct spu *spu);
|
|
void (*mfc_sr1_set) (struct spu *spu, u64 sr1);
|
|
u64 (*mfc_sr1_get) (struct spu *spu);
|
|
void (*mfc_tclass_id_set) (struct spu *spu, u64 tclass_id);
|
|
u64 (*mfc_tclass_id_get) (struct spu *spu);
|
|
void (*tlb_invalidate) (struct spu *spu);
|
|
void (*resource_allocation_groupID_set) (struct spu *spu, u64 id);
|
|
u64 (*resource_allocation_groupID_get) (struct spu *spu);
|
|
void (*resource_allocation_enable_set) (struct spu *spu, u64 enable);
|
|
u64 (*resource_allocation_enable_get) (struct spu *spu);
|
|
};
|
|
|
|
extern const struct spu_priv1_ops* spu_priv1_ops;
|
|
|
|
static inline void
|
|
spu_int_mask_and (struct spu *spu, int class, u64 mask)
|
|
{
|
|
spu_priv1_ops->int_mask_and(spu, class, mask);
|
|
}
|
|
|
|
static inline void
|
|
spu_int_mask_or (struct spu *spu, int class, u64 mask)
|
|
{
|
|
spu_priv1_ops->int_mask_or(spu, class, mask);
|
|
}
|
|
|
|
static inline void
|
|
spu_int_mask_set (struct spu *spu, int class, u64 mask)
|
|
{
|
|
spu_priv1_ops->int_mask_set(spu, class, mask);
|
|
}
|
|
|
|
static inline u64
|
|
spu_int_mask_get (struct spu *spu, int class)
|
|
{
|
|
return spu_priv1_ops->int_mask_get(spu, class);
|
|
}
|
|
|
|
static inline void
|
|
spu_int_stat_clear (struct spu *spu, int class, u64 stat)
|
|
{
|
|
spu_priv1_ops->int_stat_clear(spu, class, stat);
|
|
}
|
|
|
|
static inline u64
|
|
spu_int_stat_get (struct spu *spu, int class)
|
|
{
|
|
return spu_priv1_ops->int_stat_get (spu, class);
|
|
}
|
|
|
|
static inline void
|
|
spu_cpu_affinity_set (struct spu *spu, int cpu)
|
|
{
|
|
spu_priv1_ops->cpu_affinity_set(spu, cpu);
|
|
}
|
|
|
|
static inline u64
|
|
spu_mfc_dar_get (struct spu *spu)
|
|
{
|
|
return spu_priv1_ops->mfc_dar_get(spu);
|
|
}
|
|
|
|
static inline u64
|
|
spu_mfc_dsisr_get (struct spu *spu)
|
|
{
|
|
return spu_priv1_ops->mfc_dsisr_get(spu);
|
|
}
|
|
|
|
static inline void
|
|
spu_mfc_dsisr_set (struct spu *spu, u64 dsisr)
|
|
{
|
|
spu_priv1_ops->mfc_dsisr_set(spu, dsisr);
|
|
}
|
|
|
|
static inline void
|
|
spu_mfc_sdr_setup (struct spu *spu)
|
|
{
|
|
spu_priv1_ops->mfc_sdr_setup(spu);
|
|
}
|
|
|
|
static inline void
|
|
spu_mfc_sr1_set (struct spu *spu, u64 sr1)
|
|
{
|
|
spu_priv1_ops->mfc_sr1_set(spu, sr1);
|
|
}
|
|
|
|
static inline u64
|
|
spu_mfc_sr1_get (struct spu *spu)
|
|
{
|
|
return spu_priv1_ops->mfc_sr1_get(spu);
|
|
}
|
|
|
|
static inline void
|
|
spu_mfc_tclass_id_set (struct spu *spu, u64 tclass_id)
|
|
{
|
|
spu_priv1_ops->mfc_tclass_id_set(spu, tclass_id);
|
|
}
|
|
|
|
static inline u64
|
|
spu_mfc_tclass_id_get (struct spu *spu)
|
|
{
|
|
return spu_priv1_ops->mfc_tclass_id_get(spu);
|
|
}
|
|
|
|
static inline void
|
|
spu_tlb_invalidate (struct spu *spu)
|
|
{
|
|
spu_priv1_ops->tlb_invalidate(spu);
|
|
}
|
|
|
|
static inline void
|
|
spu_resource_allocation_groupID_set (struct spu *spu, u64 id)
|
|
{
|
|
spu_priv1_ops->resource_allocation_groupID_set(spu, id);
|
|
}
|
|
|
|
static inline u64
|
|
spu_resource_allocation_groupID_get (struct spu *spu)
|
|
{
|
|
return spu_priv1_ops->resource_allocation_groupID_get(spu);
|
|
}
|
|
|
|
static inline void
|
|
spu_resource_allocation_enable_set (struct spu *spu, u64 enable)
|
|
{
|
|
spu_priv1_ops->resource_allocation_enable_set(spu, enable);
|
|
}
|
|
|
|
static inline u64
|
|
spu_resource_allocation_enable_get (struct spu *spu)
|
|
{
|
|
return spu_priv1_ops->resource_allocation_enable_get(spu);
|
|
}
|
|
|
|
/* spu management abstraction */

/*
 * Backend operations for SPU lifecycle management (discovery, creation,
 * teardown, enable/disable, affinity init).  As with spu_priv1_ops, a
 * single platform implementation is installed behind the pointer below
 * and invoked via the inline wrappers that follow.
 */
struct spu_management_ops {
	int (*enumerate_spus)(int (*fn)(void *data));
	int (*create_spu)(struct spu *spu, void *data);
	int (*destroy_spu)(struct spu *spu);
	void (*enable_spu)(struct spu_context *ctx);
	void (*disable_spu)(struct spu_context *ctx);
	int (*init_affinity)(void);
};

/* Selected by platform setup code; defined elsewhere. */
extern const struct spu_management_ops* spu_management_ops;
static inline int
|
|
spu_enumerate_spus (int (*fn)(void *data))
|
|
{
|
|
return spu_management_ops->enumerate_spus(fn);
|
|
}
|
|
|
|
static inline int
|
|
spu_create_spu (struct spu *spu, void *data)
|
|
{
|
|
return spu_management_ops->create_spu(spu, data);
|
|
}
|
|
|
|
static inline int
|
|
spu_destroy_spu (struct spu *spu)
|
|
{
|
|
return spu_management_ops->destroy_spu(spu);
|
|
}
|
|
|
|
static inline int
|
|
spu_init_affinity (void)
|
|
{
|
|
return spu_management_ops->init_affinity();
|
|
}
|
|
|
|
static inline void
|
|
spu_enable_spu (struct spu_context *ctx)
|
|
{
|
|
spu_management_ops->enable_spu(ctx);
|
|
}
|
|
|
|
static inline void
|
|
spu_disable_spu (struct spu_context *ctx)
|
|
{
|
|
spu_management_ops->disable_spu(ctx);
|
|
}
|
|
|
|
/*
 * The declarations following are put here for convenience
 * and only intended to be used by the platform setup code.
 */

/* Device-tree based management backend (defined elsewhere). */
extern const struct spu_management_ops spu_management_of_ops;

#endif /* __KERNEL__ */
#endif /* _SPU_PRIV1_H */