s390: fix various typos
Fix various typos found with codespell.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 2b70a11955
commit cada938a01
36 changed files with 50 additions and 50 deletions
@@ -67,7 +67,7 @@ ipl_start:
 jz .Lagain1 # skip dataset header
 larl %r13,.L_eof
 clc 0(3,%r4),0(%r13) # if it is EOFx
-jz .Lagain1 # skip dateset trailer
+jz .Lagain1 # skip data set trailer
 lgr %r5,%r2
 la %r6,COMMAND_LINE-PARMAREA(%r12)
 lgr %r7,%r2
@@ -48,7 +48,7 @@
  *
  * Note that the constant definitions below are extended in order to compute
  * intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
- * The righmost doubleword can be 0 to prevent contribution to the result or
+ * The rightmost doubleword can be 0 to prevent contribution to the result or
  * can be multiplied by 1 to perform an XOR without the need for a separate
  * VECTOR EXCLUSIVE OR instruction.
  *
@@ -333,7 +333,7 @@ union ap_qact_ap_info {
 };

 /**
- * ap_qact(): Query AP combatibility type.
+ * ap_qact(): Query AP compatibility type.
  * @qid: The AP queue number
  * @apinfo: On input the info about the AP queue. On output the
  * alternate AP queue info provided by the qact function
@@ -31,7 +31,7 @@
 struct cmbdata {
 __u64 size;
 __u64 elapsed_time;
-/* basic and exended format: */
+/* basic and extended format: */
 __u64 ssch_rsch_count;
 __u64 sample_count;
 __u64 device_connect_time;
@@ -24,7 +24,7 @@
 /*
  * struct dasd_information2_t
  * represents any data about the device, which is visible to userspace.
- * including foramt and featueres.
+ * including format and featueres.
  */
 typedef struct dasd_information2_t {
 unsigned int devno; /* S/390 devno */
@@ -353,7 +353,7 @@ struct pkey_kblob2pkey2 {
  * Is able to find out which type of secure key is given (CCA AES secure
  * key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private
  * key) and tries to find all matching crypto cards based on the MKVP and maybe
- * other criterias (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
+ * other criteria (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
  * with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of
  * APQNs is further filtered by the key's mkvp which needs to match to either
  * the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters
@@ -370,7 +370,7 @@ struct pkey_kblob2pkey2 {
  * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
  * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
  * but the number of apqn targets does not fit into the list, the apqn_targets
- * field is updatedd with the number of reqired entries but there are no apqn
+ * field is updated with the number of required entries but there are no apqn
  * values stored in the list and the ioctl returns with ENOSPC. If no matching
  * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
  */
@@ -408,7 +408,7 @@ struct pkey_apqns4key {
  * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
  * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
  * but the number of apqn targets does not fit into the list, the apqn_targets
- * field is updatedd with the number of reqired entries but there are no apqn
+ * field is updated with the number of required entries but there are no apqn
  * values stored in the list and the ioctl returns with ENOSPC. If no matching
  * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
  */
@@ -516,7 +516,7 @@ void show_code(struct pt_regs *regs)
 if (copy_from_regs(regs, code + end, (void *)addr, 2))
 break;
 }
-/* Code snapshot useable ? */
+/* Code snapshot usable ? */
 if ((regs->psw.addr & 1) || start >= end) {
 printk("%s Code: Bad PSW.\n", mode);
 return;
@@ -14,7 +14,7 @@ static int __init nobp_setup_early(char *str)
 return rc;
 if (enabled && test_facility(82)) {
 /*
- * The user explicitely requested nobp=1, enable it and
+ * The user explicitly requested nobp=1, enable it and
  * disable the expoline support.
  */
 __set_facility(82, alt_stfle_fac_list);
@@ -1865,7 +1865,7 @@ static void cpumsf_pmu_read(struct perf_event *event)
 /* Nothing to do ... updates are interrupt-driven */
 }

-/* Check if the new sampling period/freqeuncy is appropriate.
+/* Check if the new sampling period/frequency is appropriate.
  *
  * Return non-zero on error and zero on passed checks.
  */
@@ -84,7 +84,7 @@ static int paiext_root_alloc(void)
 /* The memory is already zeroed. */
 paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
 if (!paiext_root.mapptr) {
-/* Returing without refcnt adjustment is ok. The
+/* Returning without refcnt adjustment is ok. The
  * error code is handled by paiext_alloc() which
  * decrements refcnt when an event can not be
  * created.
@@ -190,7 +190,7 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
 cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
 : PAI_MODE_COUNTING;
 } else {
-/* Multiple invocation, check whats active.
+/* Multiple invocation, check what is active.
  * Supported are multiple counter events or only one sampling
  * event concurrently at any one time.
  */
@@ -529,7 +529,7 @@ static void __init setup_resources(void)
 res->start = start;
 /*
  * In memblock, end points to the first byte after the
- * range while in resourses, end points to the last byte in
+ * range while in resources, end points to the last byte in
  * the range.
  */
 res->end = end - 1;
@ -113,7 +113,7 @@ early_param("smt", early_smt);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The smp_cpu_state_mutex must be held when changing the state or polarization
|
* The smp_cpu_state_mutex must be held when changing the state or polarization
|
||||||
* member of a pcpu data structure within the pcpu_devices arreay.
|
* member of a pcpu data structure within the pcpu_devices array.
|
||||||
*/
|
*/
|
||||||
DEFINE_MUTEX(smp_cpu_state_mutex);
|
DEFINE_MUTEX(smp_cpu_state_mutex);
|
||||||
|
|
||||||
|
|
|
@@ -702,7 +702,7 @@ static void stp_work_fn(struct work_struct *work)

 if (!check_sync_clock())
 /*
- * There is a usable clock but the synchonization failed.
+ * There is a usable clock but the synchronization failed.
  * Retry after a second.
  */
 mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
@@ -478,7 +478,7 @@ struct trans_exc_code_bits {
 };

 enum {
-FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
+FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
 FSI_STORE = 1, /* Exception was due to store operation */
 FSI_FETCH = 2 /* Exception was due to fetch operation */
 };
@@ -625,7 +625,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * Returns: - zero on success; @gpa contains the resulting absolute address
  * - a negative value if guest access failed due to e.g. broken
  * guest mapping
- * - a positve value if an access exception happened. In this case
+ * - a positive value if an access exception happened. In this case
  * the returned value is the program interruption code as defined
  * by the architecture
  */
@@ -630,7 +630,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 return -EOPNOTSUPP;
 }

-/* process PER, also if the instrution is processed in user space */
+/* process PER, also if the instruction is processed in user space */
 if (vcpu->arch.sie_block->icptstatus & 0x02 &&
 (!rc || rc == -EOPNOTSUPP))
 per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
@@ -4157,7 +4157,7 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 vcpu->run->s.regs.fpc = 0;
 /*
  * Do not reset these registers in the protected case, as some of
- * them are overlayed and they are not accessible in this case
+ * them are overlaid and they are not accessible in this case
  * anyway.
  */
 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
@@ -427,7 +427,7 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)


 /*
- * Register device with the specified KVM. If interpetation facilities are
+ * Register device with the specified KVM. If interpretation facilities are
  * available, enable them and let userspace indicate whether or not they will
  * be used (specify SHM bit to disable).
  */
@@ -273,7 +273,7 @@ static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
 uvcb.header.rc, uvcb.header.rrc);
 WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
 kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
-/* Inteded memory leak on "impossible" error */
+/* Intended memory leak on "impossible" error */
 if (!cc)
 kvm_s390_pv_dealloc_vm(kvm);
 return cc ? -EIO : 0;
@@ -469,7 +469,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
  *
  * This interception will occur at the source cpu when a source cpu sends an
  * external call to a target cpu and the target cpu has the WAIT bit set in
- * its cpuflags. Interception will occurr after the interrupt indicator bits at
+ * its cpuflags. Interception will occur after the interrupt indicator bits at
  * the target cpu have been set. All error cases will lead to instruction
  * interception, therefore nothing is to be checked or prepared.
  */
@@ -502,7 +502,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 scb_s->mso = new_mso;
 scb_s->prefix = new_prefix;

-/* We have to definetly flush the tlb if this scb never ran */
+/* We have to definitely flush the tlb if this scb never ran */
 if (scb_s->ihcpu != 0xffffU)
 scb_s->ihcpu = scb_o->ihcpu;

@@ -899,7 +899,7 @@ static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
 (vaddr & 0xfffffffffffff000UL) |
 /* 52-53: store / fetch */
 (((unsigned int) !write_flag) + 1) << 10,
-/* 62-63: asce id (alway primary == 0) */
+/* 62-63: asce id (always primary == 0) */
 .exc_access_id = 0, /* always primary */
 .op_access_id = 0, /* not MVPG */
 };
@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
  * The r2t parameter specifies the address of the source table. The
  * four pages of the source table are made read-only in the parent gmap
  * address space. A write to the source table area @r2t will automatically
- * remove the shadow r2 table and all of its decendents.
+ * remove the shadow r2 table and all of its descendants.
  *
  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  * shadow table structure is incomplete, -ENOMEM if out of memory and
@@ -558,7 +558,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
  * to any physical address. If missing, allocate segment- and region-
  * table entries along. Meeting a large segment- or region-table entry
  * while traversing is an error, since the function is expected to be
- * called against virtual regions reserverd for 4KB mappings only.
+ * called against virtual regions reserved for 4KB mappings only.
  */
 pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
 {
@@ -163,7 +163,7 @@ static void zpci_handle_cpu_local_irq(bool rescan)
 if (!rescan || irqs_on++)
 /* End of second scan with interrupts on. */
 break;
-/* First scan complete, reenable interrupts. */
+/* First scan complete, re-enable interrupts. */
 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib))
 break;
 bit = 0;
@@ -202,7 +202,7 @@ static void zpci_handle_fallback_irq(void)
 if (irqs_on++)
 /* End of second scan with interrupts on. */
 break;
-/* First scan complete, reenable interrupts. */
+/* First scan complete, re-enable interrupts. */
 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
 break;
 cpu = 0;
@@ -247,7 +247,7 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
 if (irqs_on++)
 /* End of second scan with interrupts on. */
 break;
-/* First scan complete, reenable interrupts. */
+/* First scan complete, re-enable interrupts. */
 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
 break;
 si = 0;
@@ -100,7 +100,7 @@ SYM_CODE_START(purgatory_start)
  * checksum verification only (%r2 = 0 -> verification only).
  *
  * Check now and preserve over C function call by storing in
- * %r10 whith
+ * %r10 with
  * 1 -> checksum verification only
  * 0 -> load new kernel
  */
@@ -152,7 +152,7 @@ static ssize_t ccwgroup_online_show(struct device *dev,

 /*
  * Provide an 'ungroup' attribute so the user can remove group devices no
- * longer needed or accidentially created. Saves memory :)
+ * longer needed or accidentally created. Saves memory :)
  */
 static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
 {
@@ -943,7 +943,7 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
 cdev->private->dev_id.devno, sch->schid.ssid,
 sch->schib.pmcw.dev, rc);
 if (old_enabled) {
-/* Try to reenable the old subchannel. */
+/* Try to re-enable the old subchannel. */
 spin_lock_irq(old_sch->lock);
 cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
 spin_unlock_irq(old_sch->lock);
@@ -310,7 +310,7 @@ static void ccw_device_oper_notify(struct ccw_device *cdev)
 struct subchannel *sch = to_subchannel(cdev->dev.parent);

 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
-/* Reenable channel measurements, if needed. */
+/* Re-enable channel measurements, if needed. */
 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
 /* Save indication for new paths. */
 cdev->private->path_new_mask = sch->vpm;
@@ -947,7 +947,7 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
  */
 sch->lpm = sch->schib.pmcw.pam & sch->opm;
 /*
- * Use the initial configuration since we can't be shure that the old
+ * Use the initial configuration since we can't be sure that the old
  * paths are valid.
  */
 io_subchannel_init_config(sch);
@@ -672,7 +672,7 @@ out_init:
 /*
  * Fetch one ccw.
  * To reduce memory copy, we'll pin the cda page in memory,
- * and to get rid of the cda 2G limitiaion of ccw1, we'll translate
+ * and to get rid of the cda 2G limitation of ccw1, we'll translate
  * direct ccws to idal ccws.
  */
 static int ccwchain_fetch_one(struct ccw1 *ccw,
@@ -787,7 +787,7 @@ void cp_free(struct channel_program *cp)
  * program.
  *
  * These APIs will copy the ccws into kernel-space buffers, and update
- * the guest phsical addresses with their corresponding host physical
+ * the guest physical addresses with their corresponding host physical
  * addresses. Then channel I/O device drivers could issue the
  * translated channel program to real devices to perform an I/O
  * operation.
@@ -497,7 +497,7 @@ static void ap_tasklet_fn(unsigned long dummy)
 enum ap_sm_wait wait = AP_SM_WAIT_NONE;

 /* Reset the indicator if interrupts are used. Thus new interrupts can
- * be received. Doing it in the beginning of the tasklet is therefor
+ * be received. Doing it in the beginning of the tasklet is therefore
  * important that no requests on any AP get lost.
  */
 if (ap_irq_flag)
@@ -2289,7 +2289,7 @@ static int __init ap_module_init(void)
 timer_setup(&ap_config_timer, ap_config_timeout, 0);

 /*
- * Setup the high resultion poll timer.
+ * Setup the high resolution poll timer.
  * If we are running under z/VM adjust polling to z/VM polling rate.
  */
 if (MACHINE_IS_VM)
@@ -359,7 +359,7 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
  * like "+1-16,-32,-0x40,+128" where only single bits or ranges of
  * bits are cleared or set. Distinction is done based on the very
  * first character which may be '+' or '-' for the relative string
- * and othewise assume to be an absolute value string. If parsing fails
+ * and otherwise assume to be an absolute value string. If parsing fails
  * a negative errno value is returned. All arguments and bitmaps are
  * big endian order.
  */
@@ -445,7 +445,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
 q->saved_isc = isc;
 break;
 case AP_RESPONSE_OTHERWISE_CHANGED:
-/* We could not modify IRQ setings: clear new configuration */
+/* We could not modify IRQ settings: clear new configuration */
 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
 kvm_s390_gisc_unregister(kvm, isc);
 break;
@@ -524,7 +524,7 @@ static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
  * Response.status may be set to following Response Code:
  * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
  * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
- * - AP_RESPONSE_NORMAL (0) : in case of successs
+ * - AP_RESPONSE_NORMAL (0) : in case of success
  * Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC.
  * We take the matrix_dev lock to ensure serialization on queues and
  * mediated device access.
@@ -674,7 +674,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 pref_zq = NULL;
 spin_lock(&zcrypt_list_lock);
 for_each_zcrypt_card(zc) {
-/* Check for usable accelarator or CCA card */
+/* Check for usable accelerator or CCA card */
 if (!zc->online || !zc->card->config || zc->card->chkstop ||
 !(zc->card->functions & 0x18000000))
 continue;
@@ -779,7 +779,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 pref_zq = NULL;
 spin_lock(&zcrypt_list_lock);
 for_each_zcrypt_card(zc) {
-/* Check for usable accelarator or CCA card */
+/* Check for usable accelerator or CCA card */
 if (!zc->online || !zc->card->config || zc->card->chkstop ||
 !(zc->card->functions & 0x18000000))
 continue;
@@ -689,7 +689,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
 goto out;
 }

-/* copy the tanslated protected key */
+/* copy the translated protected key */
 switch (prepparm->lv3.ckb.len) {
 case 16 + 32:
 /* AES 128 protected key */
@@ -115,7 +115,7 @@ struct eccprivkeytoken {
 u64 mkvp; /* master key verification pattern */
 u8 opk[48]; /* encrypted object protection key data */
 u16 adatalen; /* associated data length in bytes */
-u16 fseclen; /* formated section length in bytes */
+u16 fseclen; /* formatted section length in bytes */
 u8 more_data[]; /* more data follows */
 } __packed;

@@ -232,7 +232,7 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
  * the number of apqns stored into the list is returned in *nr_apqns. One apqn
  * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
  * may be casted to struct pkey_apqn. The return value is either 0 for success
- * or a negative errno value. If no apqn meeting the criterias is found,
+ * or a negative errno value. If no apqn meeting the criteria is found,
  * -ENODEV is returned.
  */
 int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
@@ -1368,7 +1368,7 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
 goto out;
 }

-/* copy the tanslated protected key */
+/* copy the translated protected key */
 if (wki->pkeysize > *protkeylen) {
 DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
 __func__, wki->pkeysize, *protkeylen);
@@ -131,14 +131,14 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
  * - if minapi > 0 only apqns with API_ord_nr >= minapi
  * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
  * to the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
- * key for this domain. When a wkvp is given there will aways be a re-fetch
+ * key for this domain. When a wkvp is given there will always be a re-fetch
  * of the domain info for the potential apqn - so this triggers an request
  * reply to each apqn eligible.
  * The array of apqn entries is allocated with kmalloc and returned in *apqns;
  * the number of apqns stored into the list is returned in *nr_apqns. One apqn
  * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
  * may be casted to struct pkey_apqn. The return value is either 0 for success
- * or a negative errno value. If no apqn meeting the criterias is found,
+ * or a negative errno value. If no apqn meeting the criteria is found,
  * -ENODEV is returned.
  */
 int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,