Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Overlapping changes in drivers/net/phy/marvell.c, bug fix in 'net'
restricting a HW workaround alongside cleanups in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 34aa83c2fc
Author: David S. Miller <davem@davemloft.net>
Date:   2017-05-26 20:46:35 -04:00
152 changed files with 2371 additions and 1258 deletions


@ -16,6 +16,11 @@ Required properties:
 - reg: Base address of PMIC on Hi6220 SoC.
 - interrupt-controller: Hi655x has internal IRQs (has own IRQ domain).
 - pmic-gpios: The GPIO used by PMIC IRQ.
+- #clock-cells: From common clock binding; shall be set to 0
+
+Optional properties:
+- clock-output-names: From common clock binding to override the
+  default output clock name
 
 Example:
 	pmic: pmic@f8000000 {
@ -24,4 +29,5 @@ Example:
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+		#clock-cells = <0>;
 	}


@ -18,6 +18,8 @@ Optional properties:
"ext_clock" (External clock provided to the card). "ext_clock" (External clock provided to the card).
- post-power-on-delay-ms : Delay in ms after powering the card and - post-power-on-delay-ms : Delay in ms after powering the card and
de-asserting the reset-gpios (if any) de-asserting the reset-gpios (if any)
- power-off-delay-us : Delay in us after asserting the reset-gpios (if any)
during power off of the card.
Example: Example:


@ -15,6 +15,10 @@ Optional properties:
 - phy-reset-active-high : If present then the reset sequence using the GPIO
   specified in the "phy-reset-gpios" property is reversed (H=reset state,
   L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.


@ -16,6 +16,8 @@ ALC880
     6-jack in back, 2-jack in front
 6stack-digout
     6-jack with a SPDIF out
+6stack-automute
+    6-jack with headphone jack detection
 
 ALC260
 ======
@ -62,6 +64,8 @@ lenovo-dock
     Enables docking station I/O for some Lenovos
 hp-gpio-led
     GPIO LED support on HP laptops
+hp-dock-gpio-mic1-led
+    HP dock with mic LED support
 dell-headset-multi
     Headset jack, which can also be used as mic-in
 dell-headset-dock
@ -72,6 +76,12 @@ alc283-sense-combo
     Combo jack sensing on ALC283
 tpt440-dock
     Pin configs for Lenovo Thinkpad Dock support
+tpt440
+    Lenovo Thinkpad T440s setup
+tpt460
+    Lenovo Thinkpad T460/560 setup
+dual-codecs
+    Lenovo laptops with dual codecs
 
 ALC66x/67x/892
 ==============
@ -97,6 +107,8 @@ inv-dmic
     Inverted internal mic workaround
 dell-headset-multi
     Headset jack, which can also be used as mic-in
+dual-codecs
+    Lenovo laptops with dual codecs
 
 ALC680
 ======
@ -114,6 +126,8 @@ inv-dmic
     Inverted internal mic workaround
 no-primary-hp
     VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC)
+dual-codecs
+    ALC1220 dual codecs for Gaming mobos
 
 ALC861/660
 ==========
@ -206,65 +220,47 @@ auto
 Conexant 5045
 =============
-laptop-hpsense
-    Laptop with HP sense (old model laptop)
-laptop-micsense
-    Laptop with Mic sense (old model fujitsu)
-laptop-hpmicsense
-    Laptop with HP and Mic senses
-benq
-    Benq R55E
-laptop-hp530
-    HP 530 laptop
-test
-    for testing/debugging purpose, almost all controls can be
-    adjusted. Appearing only when compiled with $CONFIG_SND_DEBUG=y
+cap-mix-amp
+    Fix max input level on mixer widget
+toshiba-p105
+    Toshiba P105 quirk
+hp-530
+    HP 530 quirk
 
 Conexant 5047
 =============
-laptop
-    Basic Laptop config
-laptop-hp
-    Laptop config for some HP models (subdevice 30A5)
-laptop-eapd
-    Laptop config with EAPD support
-test
-    for testing/debugging purpose, almost all controls can be
-    adjusted. Appearing only when compiled with $CONFIG_SND_DEBUG=y
+cap-mix-amp
+    Fix max input level on mixer widget
 
 Conexant 5051
 =============
-laptop
-    Basic Laptop config (default)
-hp
-    HP Spartan laptop
-hp-dv6736
-    HP dv6736
-hp-f700
-    HP Compaq Presario F700
-ideapad
-    Lenovo IdeaPad laptop
-toshiba
-    Toshiba Satellite M300
+lenovo-x200
+    Lenovo X200 quirk
 
 Conexant 5066
 =============
-laptop
-    Basic Laptop config (default)
-hp-laptop
-    HP laptops, e g G60
-asus
-    Asus K52JU, Lenovo G560
-dell-laptop
-    Dell laptops
-dell-vostro
-    Dell Vostro
-olpc-xo-1_5
-    OLPC XO 1.5
-ideapad
-    Lenovo IdeaPad U150
+stereo-dmic
+    Workaround for inverted stereo digital mic
+gpio1
+    Enable GPIO1 pin
+headphone-mic-pin
+    Enable headphone mic NID 0x18 without detection
+tp410
+    Thinkpad T400 & co quirks
 thinkpad
-    Lenovo Thinkpad
+    Thinkpad mute/mic LED quirk
+lemote-a1004
+    Lemote A1004 quirk
+lemote-a1205
+    Lemote A1205 quirk
+olpc-xo
+    OLPC XO quirk
+mute-led-eapd
+    Mute LED control via EAPD
+hp-dock
+    HP dock support
+mute-led-gpio
+    Mute LED control via GPIO
 
 STAC9200
 ========
@ -444,6 +440,8 @@ dell-eq
     Dell desktops/laptops
 alienware
     Alienware M17x
+asus-mobo
+    Pin configs for ASUS mobo with 5.1/SPDIF out
 auto
     BIOS setup (default)
@ -477,6 +475,8 @@ hp-envy-ts-bass
     Pin fixup for HP Envy TS bass speaker (NID 0x10)
 hp-bnb13-eq
     Hardware equalizer setup for HP laptops
+hp-envy-ts-bass
+    HP Envy TS bass support
 auto
     BIOS setup (default)
@ -496,10 +496,22 @@ auto
 Cirrus Logic CS4206/4207
 ========================
+mbp53
+    MacBook Pro 5,3
 mbp55
     MacBook Pro 5,5
 imac27
     IMac 27 Inch
+imac27_122
+    iMac 12,2
+apple
+    Generic Apple quirk
+mbp101
+    MacBookPro 10,1
+mbp81
+    MacBookPro 8,1
+mba42
+    MacBookAir 4,2
 auto
     BIOS setup (default)
@ -509,6 +521,10 @@ mba6
     MacBook Air 6,1 and 6,2
 gpio0
     Enable GPIO 0 amp
+mbp11
+    MacBookPro 11,2
+macmini
+    MacMini 7,1
 auto
     BIOS setup (default)


@ -81,6 +81,45 @@
 		};
 	};
 
+	reg_sys_5v: regulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "SYS_5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_vdd_3v3: regulator@1 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDD_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+		vin-supply = <&reg_sys_5v>;
+	};
+
+	reg_5v_hub: regulator@2 {
+		compatible = "regulator-fixed";
+		regulator-name = "5V_HUB";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		gpio = <&gpio0 7 0>;
+		regulator-always-on;
+		vin-supply = <&reg_sys_5v>;
+	};
+
+	wl1835_pwrseq: wl1835-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		/* WLAN_EN GPIO */
+		reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+		clocks = <&pmic>;
+		clock-names = "ext_clock";
+		power-off-delay-us = <10>;
+	};
+
 	soc {
 		spi0: spi@f7106000 {
 			status = "ok";
@ -256,11 +295,31 @@
 	/* GPIO blocks 16 thru 19 do not appear to be routed to pins */
 
-	dwmmc_2: dwmmc2@f723f000 {
-		ti,non-removable;
+	dwmmc_0: dwmmc0@f723d000 {
+		cap-mmc-highspeed;
 		non-removable;
-		/* WL_EN */
-		vmmc-supply = <&wlan_en_reg>;
+		bus-width = <0x8>;
+		vmmc-supply = <&ldo19>;
+	};
+
+	dwmmc_1: dwmmc1@f723e000 {
+		card-detect-delay = <200>;
+		cap-sd-highspeed;
+		sd-uhs-sdr12;
+		sd-uhs-sdr25;
+		sd-uhs-sdr50;
+		vqmmc-supply = <&ldo7>;
+		vmmc-supply = <&ldo10>;
+		bus-width = <0x4>;
+		disable-wp;
+		cd-gpios = <&gpio1 0 1>;
+	};
+
+	dwmmc_2: dwmmc2@f723f000 {
+		bus-width = <0x4>;
+		non-removable;
+		vmmc-supply = <&reg_vdd_3v3>;
+		mmc-pwrseq = <&wl1835_pwrseq>;
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
@ -272,18 +331,6 @@
 				interrupts = <3 IRQ_TYPE_EDGE_RISING>;
 			};
 		};
-
-		wlan_en_reg: regulator@1 {
-			compatible = "regulator-fixed";
-			regulator-name = "wlan-en-regulator";
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
-			/* WLAN_EN GPIO */
-			gpio = <&gpio0 5 0>;
-			/* WLAN card specific delay */
-			startup-delay-us = <70000>;
-			enable-active-high;
-		};
 	};
 
 	leds {
@ -330,6 +377,7 @@
 	pmic: pmic@f8000000 {
 		compatible = "hisilicon,hi655x-pmic";
 		reg = <0x0 0xf8000000 0x0 0x1000>;
+		#clock-cells = <0>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;


@ -725,20 +725,10 @@
status = "disabled"; status = "disabled";
}; };
fixed_5v_hub: regulator@0 {
compatible = "regulator-fixed";
regulator-name = "fixed_5v_hub";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-boot-on;
gpio = <&gpio0 7 0>;
regulator-always-on;
};
usb_phy: usbphy { usb_phy: usbphy {
compatible = "hisilicon,hi6220-usb-phy"; compatible = "hisilicon,hi6220-usb-phy";
#phy-cells = <0>; #phy-cells = <0>;
phy-supply = <&fixed_5v_hub>; phy-supply = <&reg_5v_hub>;
hisilicon,peripheral-syscon = <&sys_ctrl>; hisilicon,peripheral-syscon = <&sys_ctrl>;
}; };
@ -766,17 +756,12 @@
 		dwmmc_0: dwmmc0@f723d000 {
 			compatible = "hisilicon,hi6220-dw-mshc";
-			num-slots = <0x1>;
-			cap-mmc-highspeed;
-			non-removable;
 			reg = <0x0 0xf723d000 0x0 0x1000>;
 			interrupts = <0x0 0x48 0x4>;
 			clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
 			reset-names = "reset";
-			bus-width = <0x8>;
-			vmmc-supply = <&ldo19>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func
 				     &emmc_cfg_func &emmc_rst_cfg_func>;
@ -784,13 +769,7 @@
 		dwmmc_1: dwmmc1@f723e000 {
 			compatible = "hisilicon,hi6220-dw-mshc";
-			num-slots = <0x1>;
-			card-detect-delay = <200>;
 			hisilicon,peripheral-syscon = <&ao_ctrl>;
-			cap-sd-highspeed;
-			sd-uhs-sdr12;
-			sd-uhs-sdr25;
-			sd-uhs-sdr50;
 			reg = <0x0 0xf723e000 0x0 0x1000>;
 			interrupts = <0x0 0x49 0x4>;
 			#address-cells = <0x1>;
@ -799,11 +778,6 @@
clock-names = "ciu", "biu"; clock-names = "ciu", "biu";
resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>; resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
reset-names = "reset"; reset-names = "reset";
vqmmc-supply = <&ldo7>;
vmmc-supply = <&ldo10>;
bus-width = <0x4>;
disable-wp;
cd-gpios = <&gpio1 0 1>;
pinctrl-names = "default", "idle"; pinctrl-names = "default", "idle";
pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>; pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>;
pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>; pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>;
@ -811,15 +785,12 @@
 		dwmmc_2: dwmmc2@f723f000 {
 			compatible = "hisilicon,hi6220-dw-mshc";
-			num-slots = <0x1>;
 			reg = <0x0 0xf723f000 0x0 0x1000>;
 			interrupts = <0x0 0x4a 0x4>;
 			clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
 			reset-names = "reset";
-			bus-width = <0x4>;
-			broken-cd;
 			pinctrl-names = "default", "idle";
 			pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>;
 			pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;


@ -628,25 +628,6 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
-void blk_mq_abort_requeue_list(struct request_queue *q)
-{
-	unsigned long flags;
-	LIST_HEAD(rq_list);
-
-	spin_lock_irqsave(&q->requeue_lock, flags);
-	list_splice_init(&q->requeue_list, &rq_list);
-	spin_unlock_irqrestore(&q->requeue_lock, flags);
-
-	while (!list_empty(&rq_list)) {
-		struct request *rq;
-
-		rq = list_first_entry(&rq_list, struct request, queuelist);
-		list_del_init(&rq->queuelist);
-		blk_mq_end_request(rq, -EIO);
-	}
-}
-EXPORT_SYMBOL(blk_mq_abort_requeue_list);
-
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
 	if (tag < tags->nr_tags) {


@ -887,10 +887,10 @@ int blk_register_queue(struct gendisk *disk)
 		goto unlock;
 	}
 
-	if (q->mq_ops)
+	if (q->mq_ops) {
 		__blk_mq_register_dev(dev, q);
-
-	blk_mq_debugfs_register(q);
+		blk_mq_debugfs_register(q);
+	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);


@ -22,11 +22,11 @@ static int throtl_quantum = 32;
 #define DFL_THROTL_SLICE_HD (HZ / 10)
 #define DFL_THROTL_SLICE_SSD (HZ / 50)
 #define MAX_THROTL_SLICE (HZ)
-#define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */
-#define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */
 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
-/* default latency target is 0, eg, guarantee IO latency by default */
-#define DFL_LATENCY_TARGET (0)
+#define MIN_THROTL_BPS (320 * 1024)
+#define MIN_THROTL_IOPS (10)
+#define DFL_LATENCY_TARGET (-1L)
+#define DFL_IDLE_THRESHOLD (0)
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
@ -157,6 +157,7 @@ struct throtl_grp {
 	unsigned long last_check_time;
 	unsigned long latency_target; /* us */
+	unsigned long latency_target_conf; /* us */
 	/* When did we start a new slice */
 	unsigned long slice_start[2];
 	unsigned long slice_end[2];
@ -165,6 +166,7 @@ struct throtl_grp {
 	unsigned long checked_last_finish_time; /* ns / 1024 */
 	unsigned long avg_idletime; /* ns / 1024 */
 	unsigned long idletime_threshold; /* us */
+	unsigned long idletime_threshold_conf; /* us */
 
 	unsigned int bio_cnt; /* total bios */
 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
@ -201,8 +203,6 @@ struct throtl_data
 	unsigned int limit_index;
 	bool limit_valid[LIMIT_CNT];
 
-	unsigned long dft_idletime_threshold; /* us */
-
 	unsigned long low_upgrade_time;
 	unsigned long low_downgrade_time;
@ -294,8 +294,14 @@ static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
 	td = tg->td;
 	ret = tg->bps[rw][td->limit_index];
-	if (ret == 0 && td->limit_index == LIMIT_LOW)
-		return tg->bps[rw][LIMIT_MAX];
+	if (ret == 0 && td->limit_index == LIMIT_LOW) {
+		/* intermediate node or iops isn't 0 */
+		if (!list_empty(&blkg->blkcg->css.children) ||
+		    tg->iops[rw][td->limit_index])
+			return U64_MAX;
+		else
+			return MIN_THROTL_BPS;
+	}
 
 	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
 	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
@ -315,10 +321,17 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 		return UINT_MAX;
+
 	td = tg->td;
 	ret = tg->iops[rw][td->limit_index];
-	if (ret == 0 && tg->td->limit_index == LIMIT_LOW)
-		return tg->iops[rw][LIMIT_MAX];
+	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
+		/* intermediate node or bps isn't 0 */
+		if (!list_empty(&blkg->blkcg->css.children) ||
+		    tg->bps[rw][td->limit_index])
+			return UINT_MAX;
+		else
+			return MIN_THROTL_IOPS;
+	}
 
 	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
 	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
@ -482,6 +495,9 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 	/* LIMIT_LOW will have default value 0 */
 
 	tg->latency_target = DFL_LATENCY_TARGET;
+	tg->latency_target_conf = DFL_LATENCY_TARGET;
+	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
+	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
 
 	return &tg->pd;
 }
@ -510,8 +526,6 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 	tg->td = td;
-	tg->idletime_threshold = td->dft_idletime_threshold;
 }
 
 /*
@ -1349,7 +1363,7 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v)
 	return 0;
 }
 
-static void tg_conf_updated(struct throtl_grp *tg)
+static void tg_conf_updated(struct throtl_grp *tg, bool global)
 {
 	struct throtl_service_queue *sq = &tg->service_queue;
 	struct cgroup_subsys_state *pos_css;
@ -1367,8 +1381,26 @@ static void tg_conf_updated(struct throtl_grp *tg)
 	 * restrictions in the whole hierarchy and allows them to bypass
 	 * blk-throttle.
 	 */
-	blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
-		tg_update_has_rules(blkg_to_tg(blkg));
+	blkg_for_each_descendant_pre(blkg, pos_css,
+			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
+		struct throtl_grp *this_tg = blkg_to_tg(blkg);
+		struct throtl_grp *parent_tg;
+
+		tg_update_has_rules(this_tg);
+		/* ignore root/second level */
+		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
+		    !blkg->parent->parent)
+			continue;
+		parent_tg = blkg_to_tg(blkg->parent);
+		/*
+		 * make sure all children has lower idle time threshold and
+		 * higher latency target
+		 */
+		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
+				parent_tg->idletime_threshold);
+		this_tg->latency_target = max(this_tg->latency_target,
+				parent_tg->latency_target);
+	}
 
 	/*
 	 * We're already holding queue_lock and know @tg is valid. Let's
@ -1413,7 +1445,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
 	else
 		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
 
-	tg_conf_updated(tg);
+	tg_conf_updated(tg, false);
 	ret = 0;
 out_finish:
 	blkg_conf_finish(&ctx);
@ -1497,34 +1529,34 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
 	    tg->iops_conf[READ][off] == iops_dft &&
 	    tg->iops_conf[WRITE][off] == iops_dft &&
 	    (off != LIMIT_LOW ||
-	     (tg->idletime_threshold == tg->td->dft_idletime_threshold &&
-	      tg->latency_target == DFL_LATENCY_TARGET)))
+	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
+	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
 		return 0;
 
-	if (tg->bps_conf[READ][off] != bps_dft)
+	if (tg->bps_conf[READ][off] != U64_MAX)
 		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
 			tg->bps_conf[READ][off]);
-	if (tg->bps_conf[WRITE][off] != bps_dft)
+	if (tg->bps_conf[WRITE][off] != U64_MAX)
 		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
 			tg->bps_conf[WRITE][off]);
-	if (tg->iops_conf[READ][off] != iops_dft)
+	if (tg->iops_conf[READ][off] != UINT_MAX)
 		snprintf(bufs[2], sizeof(bufs[2]), "%u",
 			tg->iops_conf[READ][off]);
-	if (tg->iops_conf[WRITE][off] != iops_dft)
+	if (tg->iops_conf[WRITE][off] != UINT_MAX)
 		snprintf(bufs[3], sizeof(bufs[3]), "%u",
 			tg->iops_conf[WRITE][off]);
 	if (off == LIMIT_LOW) {
-		if (tg->idletime_threshold == ULONG_MAX)
+		if (tg->idletime_threshold_conf == ULONG_MAX)
 			strcpy(idle_time, " idle=max");
 		else
 			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
-				tg->idletime_threshold);
+				tg->idletime_threshold_conf);
 
-		if (tg->latency_target == ULONG_MAX)
+		if (tg->latency_target_conf == ULONG_MAX)
 			strcpy(latency_time, " latency=max");
 		else
 			snprintf(latency_time, sizeof(latency_time),
-				" latency=%lu", tg->latency_target);
+				" latency=%lu", tg->latency_target_conf);
 	}
 
 	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
@ -1563,8 +1595,8 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
 	v[2] = tg->iops_conf[READ][index];
 	v[3] = tg->iops_conf[WRITE][index];
 
-	idle_time = tg->idletime_threshold;
-	latency_time = tg->latency_target;
+	idle_time = tg->idletime_threshold_conf;
+	latency_time = tg->latency_target_conf;
 	while (true) {
 		char tok[27];	/* wiops=18446744073709551616 */
 		char *p;
@ -1623,17 +1655,33 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
 		tg->iops_conf[READ][LIMIT_MAX]);
 	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
 		tg->iops_conf[WRITE][LIMIT_MAX]);
+	tg->idletime_threshold_conf = idle_time;
+	tg->latency_target_conf = latency_time;
 
-	if (index == LIMIT_LOW) {
-		blk_throtl_update_limit_valid(tg->td);
-		if (tg->td->limit_valid[LIMIT_LOW])
-			tg->td->limit_index = LIMIT_LOW;
-		tg->idletime_threshold = (idle_time == ULONG_MAX) ?
-			ULONG_MAX : idle_time;
-		tg->latency_target = (latency_time == ULONG_MAX) ?
-			ULONG_MAX : latency_time;
+	/* force user to configure all settings for low limit */
+	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
+	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
+	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
+	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
+		tg->bps[READ][LIMIT_LOW] = 0;
+		tg->bps[WRITE][LIMIT_LOW] = 0;
+		tg->iops[READ][LIMIT_LOW] = 0;
+		tg->iops[WRITE][LIMIT_LOW] = 0;
+		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
+		tg->latency_target = DFL_LATENCY_TARGET;
+	} else if (index == LIMIT_LOW) {
+		tg->idletime_threshold = tg->idletime_threshold_conf;
+		tg->latency_target = tg->latency_target_conf;
 	}
-	tg_conf_updated(tg);
+
+	blk_throtl_update_limit_valid(tg->td);
+	if (tg->td->limit_valid[LIMIT_LOW]) {
+		if (index == LIMIT_LOW)
+			tg->td->limit_index = LIMIT_LOW;
+	} else
+		tg->td->limit_index = LIMIT_MAX;
+	tg_conf_updated(tg, index == LIMIT_LOW &&
+		tg->td->limit_valid[LIMIT_LOW]);
 	ret = 0;
 out_finish:
 	blkg_conf_finish(&ctx);
@ -1722,17 +1770,25 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg)
 	/*
 	 * cgroup is idle if:
 	 * - single idle is too long, longer than a fixed value (in case user
-	 *   configure a too big threshold) or 4 times of slice
+	 *   configure a too big threshold) or 4 times of idletime threshold
 	 * - average think time is more than threshold
 	 * - IO latency is largely below threshold
 	 */
-	unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice);
+	unsigned long time;
+	bool ret;
 
-	time = min_t(unsigned long, MAX_IDLE_TIME, time);
-	return (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
-	       tg->avg_idletime > tg->idletime_threshold ||
-	       (tg->latency_target && tg->bio_cnt &&
+	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
+	ret = tg->latency_target == DFL_LATENCY_TARGET ||
+	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
+	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
+	      tg->avg_idletime > tg->idletime_threshold ||
+	      (tg->latency_target && tg->bio_cnt &&
 		tg->bad_bio_cnt * 5 < tg->bio_cnt);
+	throtl_log(&tg->service_queue,
+		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
+		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
+		tg->bio_cnt, ret, tg->td->scale);
+	return ret;
 }
 
 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
@ -1828,6 +1884,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
 	struct cgroup_subsys_state *pos_css;
 	struct blkcg_gq *blkg;
 
+	throtl_log(&td->service_queue, "upgrade to max");
 	td->limit_index = LIMIT_MAX;
 	td->low_upgrade_time = jiffies;
 	td->scale = 0;
@ -1850,6 +1907,7 @@ static void throtl_downgrade_state(struct throtl_data *td, int new)
 {
 	td->scale /= 2;
 
+	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
 	if (td->scale) {
 		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
 		return;
@ -2023,6 +2081,11 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
 		td->avg_buckets[i].valid = true;
 		last_latency = td->avg_buckets[i].latency;
 	}
+
+	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+		throtl_log(&td->service_queue,
+			"Latency bucket %d: latency=%ld, valid=%d", i,
+			td->avg_buckets[i].latency, td->avg_buckets[i].valid);
 }
 #else
 static inline void throtl_update_latency_buckets(struct throtl_data *td)
@ -2354,19 +2417,14 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
 	struct throtl_data *td;
-	struct cgroup_subsys_state *pos_css;
-	struct blkcg_gq *blkg;
 
 	td = q->td;
 	BUG_ON(!td);
 
-	if (blk_queue_nonrot(q)) {
+	if (blk_queue_nonrot(q))
 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
-		td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD;
-	} else {
+	else
 		td->throtl_slice = DFL_THROTL_SLICE_HD;
-		td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD;
-	}
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
 	/* if no low limit, use previous default */
 	td->throtl_slice = DFL_THROTL_SLICE_HD;
@ -2375,18 +2433,6 @@ void blk_throtl_register_queue(struct request_queue *q)
 	td->track_bio_latency = !q->mq_ops && !q->request_fn;
 	if (!td->track_bio_latency)
 		blk_stat_enable_accounting(q);
-
-	/*
-	 * some tg are created before queue is fully initialized, eg, nonrot
-	 * isn't initialized yet
-	 */
-	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
-		struct throtl_grp *tg = blkg_to_tg(blkg);
-
-		tg->idletime_threshold = td->dft_idletime_threshold;
-	}
-	rcu_read_unlock();
 }
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
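The core of the io.low change above is the parent/child propagation in tg_conf_updated(): a descendant may only tighten, never loosen, what its parent allows. A minimal standalone sketch of that clamping rule, with simplified stand-in types (struct tg and clamp_against_parent are illustrative names, not the kernel's structs):

struct tg {
	unsigned long idletime_threshold;	/* us */
	unsigned long latency_target;		/* us */
	struct tg *parent;			/* root group has no parent */
};

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

/*
 * The rule from tg_conf_updated(): a child may not idle longer than its
 * parent allows, and may not demand a tighter latency target than its
 * parent guarantees.
 */
static void clamp_against_parent(struct tg *t)
{
	if (!t->parent)
		return;
	t->idletime_threshold = min_ul(t->idletime_threshold,
				       t->parent->idletime_threshold);
	t->latency_target = max_ul(t->latency_target,
				   t->parent->latency_target);
}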


@ -320,8 +320,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 	if (info) {
 		struct partition_meta_info *pinfo = alloc_part_info(disk);
 
-		if (!pinfo)
+		if (!pinfo) {
+			err = -ENOMEM;
 			goto out_free_stats;
+		}
 		memcpy(pinfo, info, sizeof(*info));
 		p->info = pinfo;
 	}


@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
 			continue;
 		bsd_start = le32_to_cpu(p->p_offset);
 		bsd_size = le32_to_cpu(p->p_size);
+		if (memcmp(flavour, "bsd\0", 4) == 0)
+			bsd_start += offset;
 		if (offset == bsd_start && size == bsd_size)
 			/* full parent partition, we have it already */
 			continue;


@ -764,6 +764,44 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
+				     const u8 *key, unsigned int keylen)
+{
+	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+	u8 *buffer, *alignbuffer;
+	unsigned long absize;
+	int ret;
+
+	absize = keylen + alignmask;
+	buffer = kmalloc(absize, GFP_ATOMIC);
+	if (!buffer)
+		return -ENOMEM;
+
+	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+	memcpy(alignbuffer, key, keylen);
+	ret = cipher->setkey(tfm, alignbuffer, keylen);
+	kzfree(buffer);
+	return ret;
+}
+
+static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+
+	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if ((unsigned long)key & alignmask)
+		return skcipher_setkey_unaligned(tfm, key, keylen);
+
+	return cipher->setkey(tfm, key, keylen);
+}
+
 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@ -784,7 +822,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
 	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
 		return crypto_init_skcipher_ops_ablkcipher(tfm);
 
-	skcipher->setkey = alg->setkey;
+	skcipher->setkey = skcipher_setkey;
 	skcipher->encrypt = alg->encrypt;
 	skcipher->decrypt = alg->decrypt;
 	skcipher->ivsize = alg->ivsize;
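With skcipher_setkey() installed as the default setkey hook, callers no longer have to align their key buffers to the algorithm's alignmask themselves. A minimal kernel-side usage sketch under that assumption (demo_setkey is a hypothetical function; the all-zero key is just an example value):

#include <crypto/skcipher.h>
#include <linux/err.h>

static int demo_setkey(void)
{
	struct crypto_skcipher *tfm;
	u8 key[16] = { 0 };	/* example AES-128 key */
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * The core now bounces the key through an aligned buffer when the
	 * algorithm has an alignmask, so this works even if @key happens
	 * not to be suitably aligned.
	 */
	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));

	crypto_free_skcipher(tfm);
	return ret;
}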


@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
 {
-	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct amdgpu_fbdev *afbdev;
 	struct drm_fb_helper *fb_helper;
 	int ret;
 
+	if (!adev)
+		return;
+
+	afbdev = adev->mode_info.rfbdev;
 	if (!afbdev)
 		return;


@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		mutex_unlock(&id_mgr->lock);
 	}
 
-	if (gds_switch_needed) {
+	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
 		id->gds_base = job->gds_base;
 		id->gds_size = job->gds_size;
 		id->gws_base = job->gws_base;
@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
+	atomic64_set(&id->owner, 0);
 	id->gds_base = 0;
 	id->gds_size = 0;
 	id->gws_base = 0;
@ -680,6 +681,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 	id->oa_size = 0;
 }
 
+/**
+ * amdgpu_vm_reset_all_id - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
+
+		for (j = 1; j < id_mgr->num_ids; ++j)
+			amdgpu_vm_reset_id(adev, i, j);
+	}
+}
+
 /**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		adev->vm_manager.seqno[i] = 0;
 
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);


@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 			unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,


@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
 	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
 	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (amdgpu_dpm_get_vrefresh(adev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else


@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v6_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v6_0_hw_fini(adev);
 
 	return 0;
@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v6_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v6_0_is_idle(void *handle)


@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v7_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v7_0_hw_fini(adev);
 
 	return 0;
@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v7_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v7_0_is_idle(void *handle)


@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v8_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v8_0_hw_fini(adev);
 
 	return 0;
@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v8_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v8_0_is_idle(void *handle)


@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v9_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v9_0_hw_fini(adev);
 
 	return 0;
@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v9_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev,
-				"vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v9_0_is_idle(void *handle)


@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
 	return sizeof(struct smu7_power_state);
 }
 
+static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+				 uint32_t vblank_time_us)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t switch_limit_us;
+
+	switch (hwmgr->chip_id) {
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+		switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+		break;
+	default:
+		switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+		break;
+	}
+
+	if (vblank_time_us < switch_limit_us)
+		return true;
+	else
+		return false;
+}
+
 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				struct pp_power_state *request_ps,
@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	bool disable_mclk_switching;
 	bool disable_mclk_switching_for_frame_lock;
 	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info = {0};
 	const struct phm_clock_and_voltage_limits *max_limits;
 	uint32_t i;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	int32_t count;
 	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
+	info.mode_info = &mode_info;
 	data->battery_state = (PP_StateUILabel_Battery ==
 			request_ps->classification.ui_label);
@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	cgs_get_active_displays_info(hwmgr->device, &info);
 
-	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
 	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
 	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
-	disable_mclk_switching = (1 < info.display_count) ||
-				  disable_mclk_switching_for_frame_lock;
+	disable_mclk_switching = ((1 < info.display_count) ||
+				  disable_mclk_switching_for_frame_lock ||
+				  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+				  (mode_info.refresh_rate > 120));
 
 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;
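For context, the vblank_time_us value fed into smu7_vblank_too_short() is simply the blanking interval of the active mode. A hedged sketch of that arithmetic with example 1080p60 timings (struct mode_timing and both helpers are illustrative, not the cgs API):

#include <stdbool.h>
#include <stdint.h>

/* Example timings: 1920x1080 at 60 Hz, 148.5 MHz pixel clock. */
struct mode_timing {
	uint32_t pixel_clock_khz;	/* 148500 */
	uint32_t htotal;		/* 2200 */
	uint32_t vtotal;		/* 1125 */
	uint32_t vdisplay;		/* 1080 */
};

/* Blanking time in microseconds: blanking lines times line duration. */
static uint32_t vblank_time_us(const struct mode_timing *m)
{
	uint64_t blank_lines = m->vtotal - m->vdisplay;

	return (uint32_t)(blank_lines * m->htotal * 1000ULL /
			  m->pixel_clock_khz);	/* 1080p60: about 666 us */
}

/* The rule the patch applies: block mclk switching when blanking is short. */
static bool mclk_switch_blocked(const struct mode_timing *m,
				uint32_t refresh_rate,
				uint32_t switch_limit_us)
{
	return refresh_rate > 120 || vblank_time_us(m) < switch_limit_us;
}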


@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
 	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
-	uint32_t i;
+	int i;
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 		return -EINVAL;


@ -948,8 +948,6 @@ retry:
 	}
 
 out:
-	if (ret && crtc->funcs->page_flip_target)
-		drm_crtc_vblank_put(crtc);
 	if (fb)
 		drm_framebuffer_put(fb);
 	if (crtc->primary->old_fb)
@ -964,5 +962,8 @@ out:
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
+	if (ret && crtc->funcs->page_flip_target)
+		drm_crtc_vblank_put(crtc);
+
 	return ret;
 }


@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			mode_dev->panel_fixed_mode =
 				drm_mode_duplicate(dev, scan);
+			DRM_DEBUG_KMS("Using mode from DDC\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
 
 	/* Failed to get EDID, what about VBT? do we need this? */
-	if (mode_dev->vbt_mode)
-		mode_dev->panel_fixed_mode =
-			drm_mode_duplicate(dev, mode_dev->vbt_mode);
-
-	if (!mode_dev->panel_fixed_mode)
-		if (dev_priv->lfp_lvds_vbt_mode)
-			mode_dev->panel_fixed_mode =
-				drm_mode_duplicate(dev,
-					dev_priv->lfp_lvds_vbt_mode);
+	if (dev_priv->lfp_lvds_vbt_mode) {
+		mode_dev->panel_fixed_mode =
+			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+				DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using mode from VBT\n");
+			goto out;
+		}
+	}
 
 	/*
 	 * If we didn't get EDID, try checking if the panel is already turned
@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
 	if (mode_dev->panel_fixed_mode) {
 		mode_dev->panel_fixed_mode->type |=
 			DRM_MODE_TYPE_PREFERRED;
+		DRM_DEBUG_KMS("Using pre-programmed mode\n");
 		goto out;	/* FIXME: check for quirks */
 	}
 }


@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	if (ret)
 		return;
 
-	cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 	if (fb != old_state->fb) {
 		obj = to_qxl_framebuffer(fb)->obj;
 		user_bo = gem_to_qxl_bo(obj);
@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		qxl_bo_kunmap(cursor_bo);
 		qxl_bo_kunmap(user_bo);
 
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 		cmd->u.set.visible = 1;
 		cmd->u.set.shape = qxl_bo_physical_address(qdev,
 							   cursor_bo, 0);
@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		if (ret)
 			goto out_free_release;
 
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 		cmd->type = QXL_CURSOR_MOVE;
 	}


@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
 	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (r600_dpm_get_vrefresh(rdev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else


@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}


@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}


@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}


@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	if ((radeon_runtime_pm != 0) &&
 	    radeon_has_atpx() &&
 	    ((flags & RADEON_IS_IGP) == 0) &&
-	    !pci_is_thunderbolt_attached(rdev->pdev))
+	    !pci_is_thunderbolt_attached(dev->pdev))
 		flags |= RADEON_IS_PX;
 
 	/* radeon_device_init should report only fatal error


@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
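The same one-character fix repeats across cik.c, evergreen.c, r600.c and si.c: each HPD6 branch read back DC_HPD5_INT_CONTROL before acking. One defensive alternative is a table-driven ack loop; a sketch with made-up register offsets and stub accessors (this is not radeon's actual code, just an illustration of the idea):

#include <stdbool.h>
#include <stdint.h>

/* Made-up offsets: one DC_HPDx_INT_CONTROL register per pin, HPD1..HPD6. */
static const uint32_t hpd_int_control[6] = {
	0x6020, 0x6030, 0x6040, 0x6050, 0x6060, 0x6070,
};
#define DC_HPDx_INT_ACK (1u << 0)	/* example bit position */

/* Stub MMIO accessors so the sketch is self-contained. */
static uint32_t regs[0x8000];
static uint32_t rreg32(uint32_t reg) { return regs[reg]; }
static void wreg32(uint32_t reg, uint32_t val) { regs[reg] = val; }

/*
 * Ack every pending HPD interrupt. Because the register comes from a
 * table, the read and the write can never target different pins, which
 * is exactly the copy-paste slip the four patches above correct.
 */
static void hpd_ack_pending(const bool pending[6])
{
	for (int i = 0; i < 6; i++)
		if (pending[i])
			wreg32(hpd_int_control[i],
			       rreg32(hpd_int_control[i]) | DC_HPDx_INT_ACK);
}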


@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
 		    int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+	void *dmadata = kmalloc(len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
-			       USB_DIR_IN, value, index, data, len, 2000);
+			       USB_DIR_IN, value, index, dmadata, len, 2000);
+
+	memcpy(data, dmadata, len);
+	kfree(dmadata);
+	return ret;
 }
 
 static int usb_write(struct i2c_adapter *adapter, int cmd,
 		     int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+	void *dmadata = kmemdup(data, len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-			       value, index, data, len, 2000);
+			       value, index, dmadata, len, 2000);
+
+	kfree(dmadata);
+	return ret;
 }
 
 static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
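The i2c-tiny-usb fix above exists because the USB core DMA-maps whatever buffer it is handed, so transfer buffers must come from the heap rather than from the caller's stack. A sketch of the same bounce-buffer pattern as a reusable helper (vendor_ctrl_read is a hypothetical name, not an existing kernel API):

#include <linux/slab.h>
#include <linux/usb.h>

/*
 * Issue a vendor IN control transfer into a caller-supplied buffer that
 * may not be DMA-capable (e.g. on-stack), bouncing through a kmalloc'ed
 * copy exactly as the i2c-tiny-usb fix does.
 */
static int vendor_ctrl_read(struct usb_device *udev, u8 request,
			    u16 value, u16 index, void *buf, u16 len)
{
	void *dmadata = kmalloc(len, GFP_KERNEL);
	int ret;

	if (!dmadata)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			      value, index, dmadata, len, 2000);
	if (ret >= 0)
		memcpy(buf, dmadata, len);

	kfree(dmadata);
	return ret;
}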


@ -285,7 +285,7 @@ static int pca955x_probe(struct i2c_client *client,
"slave address 0x%02x\n", "slave address 0x%02x\n",
client->name, chip->bits, client->addr); client->name, chip->bits, client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO; return -EIO;
if (pdata) { if (pdata) {


@ -27,6 +27,7 @@ struct mmc_pwrseq_simple {
 	struct mmc_pwrseq pwrseq;
 	bool clk_enabled;
 	u32 post_power_on_delay_ms;
+	u32 power_off_delay_us;
 	struct clk *ext_clk;
 	struct gpio_descs *reset_gpios;
 };
@ -78,6 +79,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
 	mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
 
+	if (pwrseq->power_off_delay_us)
+		usleep_range(pwrseq->power_off_delay_us,
+			     2 * pwrseq->power_off_delay_us);
+
 	if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
 		clk_disable_unprepare(pwrseq->ext_clk);
 		pwrseq->clk_enabled = false;
@ -119,6 +124,8 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
 	device_property_read_u32(dev, "post-power-on-delay-ms",
 				 &pwrseq->post_power_on_delay_ms);
+	device_property_read_u32(dev, "power-off-delay-us",
+				 &pwrseq->power_off_delay_us);
 
 	pwrseq->pwrseq.dev = dev;
 	pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;


@ -108,7 +108,7 @@ static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
 static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
 {
 	writeq(val, host->base + MIO_EMM_INT(host));
-	if (!host->dma_active || (host->dma_active && !host->has_ciu3))
+	if (!host->has_ciu3)
 		writeq(val, host->base + MIO_EMM_INT_EN(host));
 }
@ -267,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
 	}
 
 	host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
-							 "power-gpios",
+							 "power",
 							 GPIOD_OUT_HIGH);
 	if (IS_ERR(host->global_pwr_gpiod)) {
 		dev_err(&pdev->dev, "Invalid power GPIO\n");
@ -288,11 +288,20 @@ static int octeon_mmc_probe(struct platform_device *pdev)
 		if (ret) {
 			dev_err(&pdev->dev, "Error populating slots\n");
 			octeon_mmc_set_shared_power(host, 0);
-			return ret;
+			goto error;
 		}
 		i++;
 	}
 	return 0;
+
+error:
+	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
+		if (host->slot[i])
+			cvm_mmc_of_slot_remove(host->slot[i]);
+		if (host->slot_pdev[i])
+			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+	}
+	return ret;
 }
 
 static int octeon_mmc_remove(struct platform_device *pdev)


@ -146,6 +146,12 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
 	return 0;
 
 error:
+	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
+		if (host->slot[i])
+			cvm_mmc_of_slot_remove(host->slot[i]);
+		if (host->slot_pdev[i])
+			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+	}
+
 	clk_disable_unprepare(host->clk);
 	return ret;
 }


@ -839,14 +839,14 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
cvm_mmc_reset_bus(slot); cvm_mmc_reset_bus(slot);
if (host->global_pwr_gpiod) if (host->global_pwr_gpiod)
host->set_shared_power(host, 0); host->set_shared_power(host, 0);
else else if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
break; break;
case MMC_POWER_UP: case MMC_POWER_UP:
if (host->global_pwr_gpiod) if (host->global_pwr_gpiod)
host->set_shared_power(host, 1); host->set_shared_power(host, 1);
else else if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
break; break;
} }
@ -968,20 +968,15 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
return -EINVAL; return -EINVAL;
} }
mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); ret = mmc_regulator_get_supply(mmc);
if (IS_ERR(mmc->supply.vmmc)) { if (ret == -EPROBE_DEFER)
if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) return ret;
return -EPROBE_DEFER; /*
/* * Legacy Octeon firmware has no regulator entry, fall back to
* Legacy Octeon firmware has no regulator entry, fall back to * a hard-coded voltage to get a sane OCR.
* a hard-coded voltage to get a sane OCR. */
*/ if (IS_ERR(mmc->supply.vmmc))
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
} else {
ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
if (ret > 0)
mmc->ocr_avail = ret;
}
/* Common MMC bindings */ /* Common MMC bindings */
ret = mmc_of_parse(mmc); ret = mmc_of_parse(mmc);


@ -187,7 +187,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
}; };
static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
.ops = &sdhci_iproc_ops, .ops = &sdhci_iproc_ops,
}; };


@ -787,14 +787,6 @@ int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios)
return ret; return ret;
} }
void xenon_clean_phy(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
kfree(priv->phy_params);
}
static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
const char *phy_name) const char *phy_name)
{ {
@ -819,11 +811,7 @@ static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
if (ret) if (ret)
return ret; return ret;
ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
if (ret)
xenon_clean_phy(host);
return ret;
} }
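xenon_clean_phy() could go because the PHY parameter block is managed memory: assuming it is allocated with devm_kzalloc(), it is freed automatically when probe fails or the driver detaches, so error paths need no manual kfree(). Sketched under that assumption (field names illustrative):

    priv->phy_params = devm_kzalloc(dev, sizeof(*priv->phy_params), GFP_KERNEL);
    if (!priv->phy_params)
        return -ENOMEM;
    /* no matching kfree(): devm releases it on probe failure or remove */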
int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host) int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host)


@ -486,7 +486,7 @@ static int xenon_probe(struct platform_device *pdev)
err = xenon_sdhc_prepare(host); err = xenon_sdhc_prepare(host);
if (err) if (err)
goto clean_phy_param; goto err_clk;
err = sdhci_add_host(host); err = sdhci_add_host(host);
if (err) if (err)
@ -496,8 +496,6 @@ static int xenon_probe(struct platform_device *pdev)
remove_sdhc: remove_sdhc:
xenon_sdhc_unprepare(host); xenon_sdhc_unprepare(host);
clean_phy_param:
xenon_clean_phy(host);
err_clk: err_clk:
clk_disable_unprepare(pltfm_host->clk); clk_disable_unprepare(pltfm_host->clk);
free_pltfm: free_pltfm:
@ -510,8 +508,6 @@ static int xenon_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
xenon_clean_phy(host);
sdhci_remove_host(host, 0); sdhci_remove_host(host, 0);
xenon_sdhc_unprepare(host); xenon_sdhc_unprepare(host);


@ -93,7 +93,6 @@ struct xenon_priv {
}; };
int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios);
void xenon_clean_phy(struct sdhci_host *host);
int xenon_phy_parse_dt(struct device_node *np, int xenon_phy_parse_dt(struct device_node *np,
struct sdhci_host *host); struct sdhci_host *host);
void xenon_soc_pad_ctrl(struct sdhci_host *host, void xenon_soc_pad_ctrl(struct sdhci_host *host,


@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev); unsigned long trans_start = dev_trans_start(slave->dev);
slave->new_link = BOND_LINK_NOCHANGE;
if (slave->link != BOND_LINK_UP) { if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) && if (bond_time_in_interval(bond, trans_start, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) { bond_time_in_interval(bond, slave->last_rx, 1)) {
slave->link = BOND_LINK_UP; slave->new_link = BOND_LINK_UP;
slave_state_changed = 1; slave_state_changed = 1;
/* primary_slave has no meaning in round-robin /* primary_slave has no meaning in round-robin
@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
if (!bond_time_in_interval(bond, trans_start, 2) || if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) { !bond_time_in_interval(bond, slave->last_rx, 2)) {
slave->link = BOND_LINK_DOWN; slave->new_link = BOND_LINK_DOWN;
slave_state_changed = 1; slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX) if (slave->link_failure_count < UINT_MAX)
@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
if (!rtnl_trylock()) if (!rtnl_trylock())
goto re_arm; goto re_arm;
bond_for_each_slave(bond, slave, iter) {
if (slave->new_link != BOND_LINK_NOCHANGE)
slave->link = slave->new_link;
}
if (slave_state_changed) { if (slave_state_changed) {
bond_slave_state_change(bond); bond_slave_state_change(bond);
if (BOND_MODE(bond) == BOND_MODE_XOR) if (BOND_MODE(bond) == BOND_MODE_XOR)
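The monitor now runs in two phases: link changes are only proposed while walking slaves under RCU, and committed later under RTNL, so slave->link never flips without the lock its readers expect. The shape of the pattern, roughly (compute_link_state() is a hypothetical stand-in for the interval checks above):

    /* phase 1: propose under RCU, no state flips yet */
    bond_for_each_slave_rcu(bond, slave, iter)
        slave->new_link = compute_link_state(slave);

    /* phase 2: commit under RTNL */
    if (rtnl_trylock()) {
        bond_for_each_slave(bond, slave, iter)
            if (slave->new_link != BOND_LINK_NOCHANGE)
                slave->link = slave->new_link;
        rtnl_unlock();
    }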


@ -754,13 +754,13 @@ static int ax_init_dev(struct net_device *dev)
ret = ax_mii_init(dev); ret = ax_mii_init(dev);
if (ret) if (ret)
goto out_irq; goto err_out;
ax_NS8390_init(dev, 0); ax_NS8390_init(dev, 0);
ret = register_netdev(dev); ret = register_netdev(dev);
if (ret) if (ret)
goto out_irq; goto err_out;
netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr, ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
@ -768,9 +768,6 @@ static int ax_init_dev(struct net_device *dev)
return 0; return 0;
out_irq:
/* cleanup irq */
free_irq(dev->irq, dev);
err_out: err_out:
return ret; return ret;
} }


@ -5089,9 +5089,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
struct be_adapter *adapter = netdev_priv(dev); struct be_adapter *adapter = netdev_priv(dev);
u8 l4_hdr = 0; u8 l4_hdr = 0;
/* The code below restricts offload features for some tunneled packets. /* The code below restricts offload features for some tunneled and
* Q-in-Q packets.
* Offload features for normal (non tunnel) packets are unchanged. * Offload features for normal (non tunnel) packets are unchanged.
*/ */
features = vlan_features_check(skb, features);
if (!skb->encapsulation || if (!skb->encapsulation ||
!(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
return features; return features;


@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev)
{ {
int err, phy_reset; int err, phy_reset;
bool active_high = false; bool active_high = false;
int msec = 1; int msec = 1, phy_post_delay = 0;
struct device_node *np = pdev->dev.of_node; struct device_node *np = pdev->dev.of_node;
if (!np) if (!np)
@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev)
else if (!gpio_is_valid(phy_reset)) else if (!gpio_is_valid(phy_reset))
return 0; return 0;
err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
/* a valid post-reset delay must be less than 1s */
if (!err && phy_post_delay > 1000)
return -EINVAL;
active_high = of_property_read_bool(np, "phy-reset-active-high"); active_high = of_property_read_bool(np, "phy-reset-active-high");
err = devm_gpio_request_one(&pdev->dev, phy_reset, err = devm_gpio_request_one(&pdev->dev, phy_reset,
@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev)
gpio_set_value_cansleep(phy_reset, !active_high); gpio_set_value_cansleep(phy_reset, !active_high);
if (!phy_post_delay)
return 0;
if (phy_post_delay > 20)
msleep(phy_post_delay);
else
usleep_range(phy_post_delay * 1000,
phy_post_delay * 1000 + 1000);
return 0; return 0;
} }
#else /* CONFIG_OF */ #else /* CONFIG_OF */
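The 20 ms cutoff follows the advice in Documentation/timers/timers-howto.txt: msleep() rounds to jiffies and is too coarse for short sleeps, while usleep_range() is hrtimer-based. A sketch of the same split as a standalone helper:

    /* Sketch: choose the sleep primitive by delay length. */
    static void phy_delay_ms(unsigned int ms)
    {
        if (ms > 20)
            msleep(ms);                    /* long delay: jiffies resolution is fine */
        else
            usleep_range(ms * 1000, ms * 1000 + 1000); /* short: hrtimer precision */
    }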


@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work)
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)), mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in)); msg_to_opcode(ent->in));
mlx5_cmd_comp_handler(dev, 1UL << ent->idx); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
} }
static void cmd_work_handler(struct work_struct *work) static void cmd_work_handler(struct work_struct *work)
@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work)
} }
cmd->ent_arr[ent->idx] = ent; cmd->ent_arr[ent->idx] = ent;
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
lay = get_inst(cmd, ent->idx); lay = get_inst(cmd, ent->idx);
ent->lay = lay; ent->lay = lay;
memset(lay, 0, sizeof(*lay)); memset(lay, 0, sizeof(*lay));
@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work)
if (ent->callback) if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
/* Skip sending command to fw if internal error */
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
u8 status = 0;
u32 drv_synd;
ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
MLX5_SET(mbox_out, ent->out, status, status);
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
return;
}
/* ring doorbell after the descriptor is valid */ /* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb(); wmb();
@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent); poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */ /* make sure we read the descriptor after ownership is SW */
rmb(); rmb();
mlx5_cmd_comp_handler(dev, 1UL << ent->idx); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
} }
} }
@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
wait_for_completion(&ent->done); wait_for_completion(&ent->done);
} else if (!wait_for_completion_timeout(&ent->done, timeout)) { } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
ent->ret = -ETIMEDOUT; ent->ret = -ETIMEDOUT;
mlx5_cmd_comp_handler(dev, 1UL << ent->idx); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
} }
err = ent->ret; err = ent->ret;
@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
} }
} }
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{ {
struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent; struct mlx5_cmd_work_ent *ent;
@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem; struct semaphore *sem;
ent = cmd->ent_arr[i]; ent = cmd->ent_arr[i];
/* if we already completed the command, ignore it */
if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
&ent->state)) {
/* only real completion can free the cmd slot */
if (!forced) {
mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
ent->idx);
free_ent(cmd, ent->idx);
}
continue;
}
if (ent->callback) if (ent->callback)
cancel_delayed_work(&ent->cb_timeout_work); cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue) if (ent->page_queue)
@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status); ent->ret, deliv_status_to_str(ent->status), ent->status);
} }
free_ent(cmd, ent->idx);
/* only real completion will free the entry slot */
if (!forced)
free_ent(cmd, ent->idx);
if (ent->callback) { if (ent->callback) {
ds = ent->ts2 - ent->ts1; ds = ent->ts2 - ent->ts1;


@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
#define MLX5_IB_GRH_BYTES 40 #define MLX5_IB_GRH_BYTES 40
#define MLX5_IPOIB_ENCAP_LEN 4 #define MLX5_IPOIB_ENCAP_LEN 4
#define MLX5_GID_SIZE 16 #define MLX5_GID_SIZE 16
#define MLX5_IPOIB_PSEUDO_LEN 20
#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe, struct mlx5_cqe64 *cqe,
@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct net_device *netdev = rq->netdev; struct net_device *netdev = rq->netdev;
char *pseudo_header;
u8 *dgid; u8 *dgid;
u8 g; u8 g;
@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
if (likely(netdev->features & NETIF_F_RXHASH)) if (likely(netdev->features & NETIF_F_RXHASH))
mlx5e_skb_set_hash(cqe, skb); mlx5e_skb_set_hash(cqe, skb);
/* push 20 bytes of ipoib pseudo header; the 4 encap bytes are already present */
pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
skb_reset_mac_header(skb); skb_reset_mac_header(skb);
skb_pull(skb, MLX5_IPOIB_ENCAP_LEN); skb_pull(skb, MLX5_IPOIB_HARD_LEN);
skb->dev = netdev; skb->dev = netdev;


@ -43,6 +43,7 @@
#include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h> #include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h> #include <net/vxlan.h>
#include <net/arp.h> #include <net/arp.h>
#include "en.h" #include "en.h"
@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
if (e->flags & MLX5_ENCAP_ENTRY_VALID) if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_encap_dealloc(priv->mdev, e->encap_id); mlx5_encap_dealloc(priv->mdev, e->encap_id);
hlist_del_rcu(&e->encap_hlist); hash_del_rcu(&e->encap_hlist);
kfree(e->encap_header); kfree(e->encap_header);
kfree(e); kfree(e);
} }
@ -925,11 +926,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
struct mlx5e_tc_flow_parse_attr *parse_attr) struct mlx5e_tc_flow_parse_attr *parse_attr)
{ {
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
int i, action_size, nactions, max_actions, first, last; int i, action_size, nactions, max_actions, first, last, first_z;
void *s_masks_p, *a_masks_p, *vals_p; void *s_masks_p, *a_masks_p, *vals_p;
u32 s_mask, a_mask, val;
struct mlx5_fields *f; struct mlx5_fields *f;
u8 cmd, field_bsize; u8 cmd, field_bsize;
u32 s_mask, a_mask;
unsigned long mask; unsigned long mask;
void *action; void *action;
@ -946,7 +947,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
for (i = 0; i < ARRAY_SIZE(fields); i++) { for (i = 0; i < ARRAY_SIZE(fields); i++) {
f = &fields[i]; f = &fields[i];
/* avoid seeing bits set from previous iterations */ /* avoid seeing bits set from previous iterations */
s_mask = a_mask = mask = val = 0; s_mask = 0;
a_mask = 0;
s_masks_p = (void *)set_masks + f->offset; s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset; a_masks_p = (void *)add_masks + f->offset;
@ -981,12 +983,12 @@ static int offload_pedit_fields(struct pedit_headers *masks,
memset(a_masks_p, 0, f->size); memset(a_masks_p, 0, f->size);
} }
memcpy(&val, vals_p, f->size);
field_bsize = f->size * BITS_PER_BYTE; field_bsize = f->size * BITS_PER_BYTE;
first_z = find_first_zero_bit(&mask, field_bsize);
first = find_first_bit(&mask, field_bsize); first = find_first_bit(&mask, field_bsize);
last = find_last_bit(&mask, field_bsize); last = find_last_bit(&mask, field_bsize);
if (first > 0 || last != (field_bsize - 1)) { if (first > 0 || last != (field_bsize - 1) || first_z < last) {
printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n", printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
mask); mask);
return -EOPNOTSUPP; return -EOPNOTSUPP;
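The extra first_z test closes a gap in the old contiguity check: a mask could start at bit 0 and end at the top bit yet still contain a hole. Worked on an 8-bit field, mask 0xff gives first = 0, last = 7, first_z = 8, so it is offloaded; mask 0xf7 also gives first = 0 and last = 7, but first_z = 3 < last exposes the hole and the rewrite is rejected:

    /* Sketch: only a full, hole-free mask over the field is offloadable. */
    unsigned long mask = 0xf7;                    /* hole at bit 3 */
    int first   = find_first_bit(&mask, 8);      /* 0 */
    int last    = find_last_bit(&mask, 8);       /* 7 */
    int first_z = find_first_zero_bit(&mask, 8); /* 3 */

    if (first > 0 || last != 8 - 1 || first_z < last)
        return -EOPNOTSUPP;                      /* partial rewrite */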
@ -1002,11 +1004,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
} }
if (field_bsize == 32) if (field_bsize == 32)
MLX5_SET(set_action_in, action, data, ntohl(val)); MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
else if (field_bsize == 16) else if (field_bsize == 16)
MLX5_SET(set_action_in, action, data, ntohs(val)); MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
else if (field_bsize == 8) else if (field_bsize == 8)
MLX5_SET(set_action_in, action, data, val); MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
action += action_size; action += action_size;
nactions++; nactions++;
@ -1109,6 +1111,28 @@ out_err:
return err; return err;
} }
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
TCA_CSUM_UPDATE_FLAG_UDP;
/* The HW recalcs checksums only if re-writing headers */
if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
netdev_warn(priv->netdev,
"TC csum action is only offloaded with pedit\n");
return false;
}
if (update_flags & ~prot_flags) {
netdev_warn(priv->netdev,
"can't offload TC csum action for some header/s - flags %#x\n",
update_flags);
return false;
}
return true;
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow) struct mlx5e_tc_flow *flow)
@ -1149,6 +1173,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue; continue;
} }
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, attr->action,
tcf_csum_update_flags(a)))
continue;
return -EOPNOTSUPP;
}
if (is_tcf_skbedit_mark(a)) { if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a); u32 mark = tcf_skbedit_mark(a);
@ -1651,6 +1683,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue; continue;
} }
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, attr->action,
tcf_csum_update_flags(a)))
continue;
return -EOPNOTSUPP;
}
if (is_tcf_mirred_egress_redirect(a)) { if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a); int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev, *encap_dev = NULL; struct net_device *out_dev, *encap_dev = NULL;


@ -425,7 +425,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break; break;
case MLX5_EVENT_TYPE_CMD: case MLX5_EVENT_TYPE_CMD:
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
break; break;
case MLX5_EVENT_TYPE_PORT_CHANGE: case MLX5_EVENT_TYPE_PORT_CHANGE:


@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
mlx5_core_dbg(dev, "vector 0x%llx\n", vector); mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
mlx5_cmd_comp_handler(dev, vector); mlx5_cmd_comp_handler(dev, vector, true);
return; return;
no_trig: no_trig:


@ -613,7 +613,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv; struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr; struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@ -623,18 +622,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask); priv->irq_info[i].mask);
err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); #ifdef CONFIG_SMP
if (err) { if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
irq); #endif
goto err_clear_mask;
}
return 0; return 0;
err_clear_mask:
free_cpumask_var(priv->irq_info[i].mask);
return err;
} }
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)


@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
goto nla_put_failure; goto nla_put_failure;
if (ip_tunnel_info_af(info) == AF_INET) { if (rtnl_dereference(geneve->sock4)) {
if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
info->key.u.ipv4.dst)) info->key.u.ipv4.dst))
goto nla_put_failure; goto nla_put_failure;
@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(info->key.tun_flags & TUNNEL_CSUM))) !!(info->key.tun_flags & TUNNEL_CSUM)))
goto nla_put_failure; goto nla_put_failure;
}
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
} else { if (rtnl_dereference(geneve->sock6)) {
if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
&info->key.u.ipv6.dst)) &info->key.u.ipv6.dst))
goto nla_put_failure; goto nla_put_failure;
@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
!geneve->use_udp6_rx_checksums)) !geneve->use_udp6_rx_checksums))
goto nla_put_failure; goto nla_put_failure;
#endif
} }
#endif
if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||


@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
/* Check if there's an existing gtpX device to configure */ /* Check if there's an existing gtpX device to configure */
dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
if (dev->netdev_ops == &gtp_netdev_ops) if (dev && dev->netdev_ops == &gtp_netdev_ops)
gtp = netdev_priv(dev); gtp = netdev_priv(dev);
put_net(net); put_net(net);


@ -108,7 +108,7 @@ config MDIO_MOXART
config MDIO_OCTEON config MDIO_OCTEON
tristate "Octeon and some ThunderX SOCs MDIO buses" tristate "Octeon and some ThunderX SOCs MDIO buses"
depends on 64BIT depends on 64BIT
depends on HAS_IOMEM depends on HAS_IOMEM && OF_MDIO
select MDIO_CAVIUM select MDIO_CAVIUM
help help
This module provides a driver for the Octeon and ThunderX MDIO This module provides a driver for the Octeon and ThunderX MDIO


@ -278,35 +278,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
{ {
int err; int err;
/* The Marvell PHY has an errata which requires
* that certain registers get written in order
* to restart autonegotiation
*/
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
err = phy_write(phydev, 0x1d, 0x1f);
if (err < 0)
return err;
err = phy_write(phydev, 0x1e, 0x200c);
if (err < 0)
return err;
err = phy_write(phydev, 0x1d, 0x5);
if (err < 0)
return err;
err = phy_write(phydev, 0x1e, 0);
if (err < 0)
return err;
err = phy_write(phydev, 0x1e, 0x100);
if (err < 0)
return err;
err = marvell_set_polarity(phydev, phydev->mdix_ctrl); err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0) if (err < 0)
return err; return err;
@ -339,6 +310,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0; return 0;
} }
static int m88e1101_config_aneg(struct phy_device *phydev)
{
int err;
/* This Marvell PHY has an erratum which requires
* that certain registers get written in order
* to restart autonegotiation
*/
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
err = phy_write(phydev, 0x1d, 0x1f);
if (err < 0)
return err;
err = phy_write(phydev, 0x1e, 0x200c);
if (err < 0)
return err;
err = phy_write(phydev, 0x1d, 0x5);
if (err < 0)
return err;
err = phy_write(phydev, 0x1e, 0);
if (err < 0)
return err;
err = phy_write(phydev, 0x1e, 0x100);
if (err < 0)
return err;
return marvell_config_aneg(phydev);
}
static int m88e1111_config_aneg(struct phy_device *phydev) static int m88e1111_config_aneg(struct phy_device *phydev)
{ {
int err; int err;
@ -1975,7 +1982,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT, .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe, .probe = marvell_probe,
.config_init = &marvell_config_init, .config_init = &marvell_config_init,
.config_aneg = &marvell_config_aneg, .config_aneg = &m88e1101_config_aneg,
.read_status = &genphy_read_status, .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt, .ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr, .config_intr = &marvell_config_intr,


@ -310,13 +310,6 @@ skip:
return -ENODEV; return -ENODEV;
} }
/* Some devices don't initialise properly. In particular
* the packet filter is not reset. There are devices that
* don't do reset all the way. So the packet filter should
* be set to a sane initial value.
*/
usbnet_cdc_update_filter(dev);
return 0; return 0;
bad_desc: bad_desc:
@ -325,6 +318,30 @@ bad_desc:
} }
EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
/* like usbnet_generic_cdc_bind() but handles filter initialization
* correctly
*/
int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
{
int rv;
rv = usbnet_generic_cdc_bind(dev, intf);
if (rv < 0)
goto bail_out;
/* Some devices don't initialise properly. In particular
* the packet filter is not reset. There are devices that
* don't do reset all the way. So the packet filter should
* be set to a sane initial value.
*/
usbnet_cdc_update_filter(dev);
bail_out:
return rv;
}
EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind);
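An Ethernet-style minidriver then points its bind hook at the new helper instead of the generic one; a sketched driver_info (flags illustrative, callbacks as exported above):

    static const struct driver_info cdc_info = {
        .description = "CDC Ethernet Device",
        .flags       = FLAG_ETHER | FLAG_POINTTOPOINT,
        .bind        = usbnet_ether_cdc_bind,  /* resets the packet filter */
        .unbind      = usbnet_cdc_unbind,
        .status      = usbnet_cdc_status,
    };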
void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
{ {
struct cdc_state *info = (void *) &dev->data; struct cdc_state *info = (void *) &dev->data;
@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
< sizeof(struct cdc_state))); < sizeof(struct cdc_state)));
status = usbnet_generic_cdc_bind(dev, intf); status = usbnet_ether_cdc_bind(dev, intf);
if (status < 0) if (status < 0)
return status; return status;


@ -1989,6 +1989,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_poll_controller = virtnet_netpoll, .ndo_poll_controller = virtnet_netpoll,
#endif #endif
.ndo_xdp = virtnet_xdp, .ndo_xdp = virtnet_xdp,
.ndo_features_check = passthru_features_check,
}; };
static void virtnet_config_changed_work(struct work_struct *work) static void virtnet_config_changed_work(struct work_struct *work)


@ -925,6 +925,29 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
} }
#ifdef CONFIG_BLK_DEV_INTEGRITY #ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
u16 bs)
{
struct nvme_ns *ns = disk->private_data;
u16 old_ms = ns->ms;
u8 pi_type = 0;
ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
/* PI implementation requires metadata equal to t10 pi tuple size */
if (ns->ms == sizeof(struct t10_pi_tuple))
pi_type = id->dps & NVME_NS_DPS_PI_MASK;
if (blk_get_integrity(disk) &&
(ns->pi_type != pi_type || ns->ms != old_ms ||
bs != queue_logical_block_size(disk->queue) ||
(ns->ms && ns->ext)))
blk_integrity_unregister(disk);
ns->pi_type = pi_type;
}
static void nvme_init_integrity(struct nvme_ns *ns) static void nvme_init_integrity(struct nvme_ns *ns)
{ {
struct blk_integrity integrity; struct blk_integrity integrity;
@ -951,6 +974,10 @@ static void nvme_init_integrity(struct nvme_ns *ns)
blk_queue_max_integrity_segments(ns->queue, 1); blk_queue_max_integrity_segments(ns->queue, 1);
} }
#else #else
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
u16 bs)
{
}
static void nvme_init_integrity(struct nvme_ns *ns) static void nvme_init_integrity(struct nvme_ns *ns)
{ {
} }
@ -997,37 +1024,22 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{ {
struct nvme_ns *ns = disk->private_data; struct nvme_ns *ns = disk->private_data;
u8 lbaf, pi_type; u16 bs;
u16 old_ms;
unsigned short bs;
old_ms = ns->ms;
lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
ns->lba_shift = id->lbaf[lbaf].ds;
ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
/* /*
* If identify namespace failed, use default 512 byte block size so * If identify namespace failed, use default 512 byte block size so
* the block layer can use it before failing read/write for 0 capacity. * the block layer can use it before failing read/write for 0 capacity.
*/ */
ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
if (ns->lba_shift == 0) if (ns->lba_shift == 0)
ns->lba_shift = 9; ns->lba_shift = 9;
bs = 1 << ns->lba_shift; bs = 1 << ns->lba_shift;
/* XXX: PI implementation requires metadata equal t10 pi tuple size */
pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
id->dps & NVME_NS_DPS_PI_MASK : 0;
blk_mq_freeze_queue(disk->queue); blk_mq_freeze_queue(disk->queue);
if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
ns->ms != old_ms ||
bs != queue_logical_block_size(disk->queue) ||
(ns->ms && ns->ext)))
blk_integrity_unregister(disk);
ns->pi_type = pi_type; if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
nvme_prep_integrity(disk, id, bs);
blk_queue_logical_block_size(ns->queue, bs); blk_queue_logical_block_size(ns->queue, bs);
if (ns->ms && !blk_get_integrity(disk) && !ns->ext) if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
nvme_init_integrity(ns); nvme_init_integrity(ns);
if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk)) if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
@ -1605,7 +1617,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
} }
memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
if (ctrl->ops->is_fabrics) { if (ctrl->ops->flags & NVME_F_FABRICS) {
ctrl->icdoff = le16_to_cpu(id->icdoff); ctrl->icdoff = le16_to_cpu(id->icdoff);
ctrl->ioccsz = le32_to_cpu(id->ioccsz); ctrl->ioccsz = le32_to_cpu(id->ioccsz);
ctrl->iorcsz = le32_to_cpu(id->iorcsz); ctrl->iorcsz = le32_to_cpu(id->iorcsz);
@ -2098,7 +2110,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (ns->ndev) if (ns->ndev)
nvme_nvm_unregister_sysfs(ns); nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk); del_gendisk(ns->disk);
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue); blk_cleanup_queue(ns->queue);
} }
@ -2436,8 +2447,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
continue; continue;
revalidate_disk(ns->disk); revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue); blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true); /*
* Forcibly start all queues to avoid having stuck requests.
* Note that we must ensure the queues are not stopped
* when the final removal happens.
*/
blk_mq_start_hw_queues(ns->queue);
/* draining requests in requeue list */
blk_mq_kick_requeue_list(ns->queue);
} }
mutex_unlock(&ctrl->namespaces_mutex); mutex_unlock(&ctrl->namespaces_mutex);
} }


@ -45,8 +45,6 @@ enum nvme_fc_queue_flags {
#define NVMEFC_QUEUE_DELAY 3 /* ms units */ #define NVMEFC_QUEUE_DELAY 3 /* ms units */
#define NVME_FC_MAX_CONNECT_ATTEMPTS 1
struct nvme_fc_queue { struct nvme_fc_queue {
struct nvme_fc_ctrl *ctrl; struct nvme_fc_ctrl *ctrl;
struct device *dev; struct device *dev;
@ -165,8 +163,6 @@ struct nvme_fc_ctrl {
struct work_struct delete_work; struct work_struct delete_work;
struct work_struct reset_work; struct work_struct reset_work;
struct delayed_work connect_work; struct delayed_work connect_work;
int reconnect_delay;
int connect_attempts;
struct kref ref; struct kref ref;
u32 flags; u32 flags;
@ -1376,9 +1372,9 @@ done:
complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
if (!complete_rq) { if (!complete_rq) {
if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
status = cpu_to_le16(NVME_SC_ABORT_REQ); status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
if (blk_queue_dying(rq->q)) if (blk_queue_dying(rq->q))
status |= cpu_to_le16(NVME_SC_DNR); status |= cpu_to_le16(NVME_SC_DNR << 1);
} }
nvme_end_request(rq, status, result); nvme_end_request(rq, status, result);
} else } else
@ -1751,7 +1747,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
dev_warn(ctrl->ctrl.device, dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport association error detected: %s\n", "NVME-FC{%d}: transport association error detected: %s\n",
ctrl->cnum, errmsg); ctrl->cnum, errmsg);
dev_info(ctrl->ctrl.device, dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: resetting controller\n", ctrl->cnum); "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
/* stop the queues on error, cleanup is in reset thread */ /* stop the queues on error, cleanup is in reset thread */
@ -2195,9 +2191,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
if (!opts->nr_io_queues) if (!opts->nr_io_queues)
return 0; return 0;
dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
opts->nr_io_queues);
nvme_fc_init_io_queues(ctrl); nvme_fc_init_io_queues(ctrl);
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
@ -2268,9 +2261,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
if (ctrl->queue_count == 1) if (ctrl->queue_count == 1)
return 0; return 0;
dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
opts->nr_io_queues);
nvme_fc_init_io_queues(ctrl); nvme_fc_init_io_queues(ctrl);
ret = blk_mq_reinit_tagset(&ctrl->tag_set); ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@ -2306,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
int ret; int ret;
bool changed; bool changed;
ctrl->connect_attempts++; ++ctrl->ctrl.opts->nr_reconnects;
/* /*
* Create the admin queue * Create the admin queue
@ -2403,9 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed); WARN_ON_ONCE(!changed);
ctrl->connect_attempts = 0; ctrl->ctrl.opts->nr_reconnects = 0;
kref_get(&ctrl->ctrl.kref);
if (ctrl->queue_count > 1) { if (ctrl->queue_count > 1) {
nvme_start_queues(&ctrl->ctrl); nvme_start_queues(&ctrl->ctrl);
@ -2536,26 +2524,32 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
/* /*
* tear down the controller * tear down the controller
* This will result in the last reference on the nvme ctrl to * After the last reference on the nvme ctrl is removed,
* expire, calling the transport nvme_fc_nvme_ctrl_freed() callback. * the transport nvme_fc_nvme_ctrl_freed() callback will be
* From there, the transport will tear down its logical queues and * invoked. From there, the transport will tear down its
* association. * logical queues and association.
*/ */
nvme_uninit_ctrl(&ctrl->ctrl); nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl);
} }
static bool
__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
return true;
if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
return true;
return false;
}
static int static int
__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
{ {
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
return -EBUSY;
if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
} }
/* /*
@ -2580,6 +2574,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
return ret; return ret;
} }
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
/* If we are resetting/deleting then do nothing */
if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
ctrl->ctrl.state == NVME_CTRL_LIVE);
return;
}
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
ctrl->cnum, status);
if (nvmf_should_reconnect(&ctrl->ctrl)) {
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
ctrl->ctrl.opts->reconnect_delay * HZ);
} else {
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) "
"reached. Removing controller\n",
ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
}
}
static void static void
nvme_fc_reset_ctrl_work(struct work_struct *work) nvme_fc_reset_ctrl_work(struct work_struct *work)
{ {
@ -2591,34 +2614,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
nvme_fc_delete_association(ctrl); nvme_fc_delete_association(ctrl);
ret = nvme_fc_create_association(ctrl); ret = nvme_fc_create_association(ctrl);
if (ret) { if (ret)
dev_warn(ctrl->ctrl.device, nvme_fc_reconnect_or_delete(ctrl, ret);
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", else
ctrl->cnum, ret);
if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) "
"reached. Removing controller\n",
ctrl->cnum, ctrl->connect_attempts);
if (!nvme_change_ctrl_state(&ctrl->ctrl,
NVME_CTRL_DELETING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to change state "
"to DELETING\n", ctrl->cnum);
return;
}
WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
return;
}
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
ctrl->cnum, ctrl->reconnect_delay);
queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
ctrl->reconnect_delay * HZ);
} else
dev_info(ctrl->ctrl.device, dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: controller reset complete\n", ctrl->cnum); "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
} }
@ -2632,7 +2630,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
{ {
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
dev_warn(ctrl->ctrl.device, dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum); "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
@ -2649,7 +2647,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc", .name = "fc",
.module = THIS_MODULE, .module = THIS_MODULE,
.is_fabrics = true, .flags = NVME_F_FABRICS,
.reg_read32 = nvmf_reg_read32, .reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64, .reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32, .reg_write32 = nvmf_reg_write32,
@ -2671,34 +2669,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
struct nvme_fc_ctrl, connect_work); struct nvme_fc_ctrl, connect_work);
ret = nvme_fc_create_association(ctrl); ret = nvme_fc_create_association(ctrl);
if (ret) { if (ret)
dev_warn(ctrl->ctrl.device, nvme_fc_reconnect_or_delete(ctrl, ret);
"NVME-FC{%d}: Reconnect attempt failed (%d)\n", else
ctrl->cnum, ret);
if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) "
"reached. Removing controller\n",
ctrl->cnum, ctrl->connect_attempts);
if (!nvme_change_ctrl_state(&ctrl->ctrl,
NVME_CTRL_DELETING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to change state "
"to DELETING\n", ctrl->cnum);
return;
}
WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
return;
}
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
ctrl->cnum, ctrl->reconnect_delay);
queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
ctrl->reconnect_delay * HZ);
} else
dev_info(ctrl->ctrl.device, dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: controller reconnect complete\n", "NVME-FC{%d}: controller reconnect complete\n",
ctrl->cnum); ctrl->cnum);
@ -2755,7 +2728,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
ctrl->reconnect_delay = opts->reconnect_delay;
spin_lock_init(&ctrl->lock); spin_lock_init(&ctrl->lock);
/* io queue count */ /* io queue count */
@ -2819,7 +2791,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->ctrl.opts = NULL; ctrl->ctrl.opts = NULL;
/* initiate nvme ctrl ref counting teardown */ /* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl); nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
/* as we're past the point where we transition to the ref /* as we're past the point where we transition to the ref
* counting teardown path, if we return a bad pointer here, * counting teardown path, if we return a bad pointer here,
@ -2835,6 +2806,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
kref_get(&ctrl->ctrl.kref);
dev_info(ctrl->ctrl.device, dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n", "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
ctrl->cnum, ctrl->ctrl.opts->subsysnqn); ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
@ -2971,7 +2944,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
static struct nvmf_transport_ops nvme_fc_transport = { static struct nvmf_transport_ops nvme_fc_transport = {
.name = "fc", .name = "fc",
.required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
.allowed_opts = NVMF_OPT_RECONNECT_DELAY, .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
.create_ctrl = nvme_fc_create_ctrl, .create_ctrl = nvme_fc_create_ctrl,
}; };


@ -208,7 +208,9 @@ struct nvme_ns {
struct nvme_ctrl_ops { struct nvme_ctrl_ops {
const char *name; const char *name;
struct module *module; struct module *module;
bool is_fabrics; unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);


@ -263,7 +263,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
dev_warn(dev->dev, "unable to set dbbuf\n"); dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
/* Free memory and continue on */ /* Free memory and continue on */
nvme_dbbuf_dma_free(dev); nvme_dbbuf_dma_free(dev);
} }
@ -1394,11 +1394,11 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
&pci_status); &pci_status);
if (result == PCIBIOS_SUCCESSFUL) if (result == PCIBIOS_SUCCESSFUL)
dev_warn(dev->dev, dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
csts, pci_status); csts, pci_status);
else else
dev_warn(dev->dev, dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
csts, result); csts, result);
} }
@ -1740,8 +1740,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
*/ */
if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
dev->q_depth = 2; dev->q_depth = 2;
dev_warn(dev->dev, "detected Apple NVMe controller, set " dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
"queue depth=%u to work around controller resets\n", "set queue depth=%u to work around controller resets\n",
dev->q_depth); dev->q_depth);
} }
@ -1759,7 +1759,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
if (dev->cmbsz) { if (dev->cmbsz) {
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL)) &dev_attr_cmb.attr, NULL))
dev_warn(dev->dev, dev_warn(dev->ctrl.device,
"failed to add sysfs attribute for CMB\n"); "failed to add sysfs attribute for CMB\n");
} }
} }
@ -2047,6 +2047,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie", .name = "pcie",
.module = THIS_MODULE, .module = THIS_MODULE,
.flags = NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvme_pci_reg_read32, .reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32, .reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64, .reg_read64 = nvme_pci_reg_read64,
@ -2293,6 +2294,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54), { PCI_VDEVICE(INTEL, 0x0a54),
.driver_data = NVME_QUIRK_STRIPE_SIZE | .driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, }, NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, }, .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */


@ -1038,6 +1038,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
nvme_rdma_wr_error(cq, wc, "SEND"); nvme_rdma_wr_error(cq, wc, "SEND");
} }
static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
{
int sig_limit;
/*
* We signal completion every queue depth/2 and also handle the
* degenerate case of a device with queue_depth=1, where we
* would need to signal every message.
*/
sig_limit = max(queue->queue_size / 2, 1);
return (++queue->sig_count % sig_limit) == 0;
}
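With the old hard-coded 32, a queue of depth 1 could complete its sends without ever raising a signalled completion; max(queue_size / 2, 1) restores the invariant that some in-flight send is always signalled. Worked through: queue_size = 1 gives sig_limit = 1, so every send is signalled, while queue_size = 128 gives sig_limit = 64, signalling one send in every 64.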
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
struct ib_send_wr *first, bool flush) struct ib_send_wr *first, bool flush)
@ -1065,9 +1078,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
* Would have been way too obvious to handle this in hardware or * Would have been way too obvious to handle this in hardware or
* at least the RDMA stack.. * at least the RDMA stack..
* *
* This messy and racy code snippet is copy and pasted from the iSER
* initiator, and the magic '32' comes from there as well.
*
* Always signal the flushes. The magic request used for the flush * Always signal the flushes. The magic request used for the flush
* sequencer is not allocated in our driver's tagset and it's * sequencer is not allocated in our driver's tagset and it's
* triggered to be freed by blk_cleanup_queue(). So we need to * triggered to be freed by blk_cleanup_queue(). So we need to
@ -1075,7 +1085,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
* embedded in request's payload, is not freed when __ib_process_cq() * embedded in request's payload, is not freed when __ib_process_cq()
* calls wr_cqe->done(). * calls wr_cqe->done().
*/ */
if ((++queue->sig_count % 32) == 0 || flush) if (nvme_rdma_queue_sig_limit(queue) || flush)
wr.send_flags |= IB_SEND_SIGNALED; wr.send_flags |= IB_SEND_SIGNALED;
if (first) if (first)
@ -1782,7 +1792,7 @@ static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.name = "rdma", .name = "rdma",
.module = THIS_MODULE, .module = THIS_MODULE,
.is_fabrics = true, .flags = NVME_F_FABRICS,
.reg_read32 = nvmf_reg_read32, .reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64, .reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32, .reg_write32 = nvmf_reg_write32,


@ -558,7 +558,7 @@ static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
.name = "loop", .name = "loop",
.module = THIS_MODULE, .module = THIS_MODULE,
.is_fabrics = true, .flags = NVME_F_FABRICS,
.reg_read32 = nvmf_reg_read32, .reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64, .reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32, .reg_write32 = nvmf_reg_write32,


@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void)
arch_initcall_sync(of_platform_default_populate_init); arch_initcall_sync(of_platform_default_populate_init);
#endif #endif
static int of_platform_device_destroy(struct device *dev, void *data) int of_platform_device_destroy(struct device *dev, void *data)
{ {
/* Do not touch devices not populated from the device tree */ /* Do not touch devices not populated from the device tree */
if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED))
@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data)
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(of_platform_device_destroy);
/** /**
* of_platform_depopulate() - Remove devices populated from device tree * of_platform_depopulate() - Remove devices populated from device tree


@ -252,7 +252,34 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
static int imx6q_pcie_abort_handler(unsigned long addr, static int imx6q_pcie_abort_handler(unsigned long addr,
unsigned int fsr, struct pt_regs *regs) unsigned int fsr, struct pt_regs *regs)
{ {
return 0; unsigned long pc = instruction_pointer(regs);
unsigned long instr = *(unsigned long *)pc;
int reg = (instr >> 12) & 15;
/*
* If the instruction being executed was a read,
* make it look like it read all-ones.
*/
if ((instr & 0x0c100000) == 0x04100000) {
unsigned long val;
if (instr & 0x00400000)
val = 255;
else
val = -1;
regs->uregs[reg] = val;
regs->ARM_pc += 4;
return 0;
}
if ((instr & 0x0e100090) == 0x00100090) {
regs->uregs[reg] = -1;
regs->ARM_pc += 4;
return 0;
}
return 1;
} }
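For readers without the ARM encodings at hand, and assuming the abort always comes from an MMIO read: (instr & 0x0c100000) == 0x04100000 matches single-data-transfer loads (LDR/LDRB with the L bit set), bit 22 selecting the byte form, while (instr & 0x0e100090) == 0x00100090 matches the extra-load group (LDRH/LDRSB/LDRSH). In both cases the handler fakes an all-ones read into Rt (bits 15:12) and steps the PC past the faulting instruction:

    /* Sketch of the recovery for one decoded load: */
    regs->uregs[(instr >> 12) & 15] = ~0UL;  /* destination register reads all-ones */
    regs->ARM_pc += 4;                       /* resume after the faulting load */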
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
@ -819,8 +846,8 @@ static int __init imx6_pcie_init(void)
* we can install the handler here without risking it * we can install the handler here without risking it
* accessing some uninitialized driver state. * accessing some uninitialized driver state.
*/ */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort"); "external abort on non-linefetch");
return platform_driver_register(&imx6_pcie_driver); return platform_driver_register(&imx6_pcie_driver);
} }


@ -6,6 +6,7 @@ menu "PCI Endpoint"
config PCI_ENDPOINT config PCI_ENDPOINT
bool "PCI Endpoint Support" bool "PCI Endpoint Support"
depends on HAS_DMA
help help
Enable this configuration option to support configurable PCI Enable this configuration option to support configurable PCI
endpoint. This should be enabled if the platform has a PCI endpoint. This should be enabled if the platform has a PCI


@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
if (!pm_runtime_suspended(dev) if (!pm_runtime_suspended(dev)
|| pci_target_state(pci_dev) != pci_dev->current_state || pci_target_state(pci_dev) != pci_dev->current_state
|| platform_pci_need_resume(pci_dev)) || platform_pci_need_resume(pci_dev)
|| (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
return false; return false;
/* /*


@@ -1291,7 +1291,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
 	cdev = &stdev->cdev;
 	cdev_init(cdev, &switchtec_fops);
 	cdev->owner = THIS_MODULE;
-	cdev->kobj.parent = &dev->kobj;

 	return stdev;

@@ -1442,12 +1441,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
 	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
 	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
 	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
-	stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
+	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
 	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
 	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
 	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
 	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

+	if (stdev->partition_count < 1)
+		stdev->partition_count = 1;
+
 	init_pff(stdev);

 	pci_set_drvdata(pdev, stdev);

@@ -1479,11 +1481,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
 		  SWITCHTEC_EVENT_EN_IRQ,
 		  &stdev->mmio_part_cfg->mrpc_comp_hdr);

-	rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
-	if (rc)
-		goto err_put;
-
-	rc = device_add(&stdev->dev);
+	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
 	if (rc)
 		goto err_devadd;

@@ -1492,7 +1490,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
 	return 0;

 err_devadd:
-	cdev_del(&stdev->cdev);
 	stdev_kill(stdev);
 err_put:
 	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));

@@ -1506,8 +1503,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);

-	device_del(&stdev->dev);
-	cdev_del(&stdev->cdev);
+	cdev_device_del(&stdev->cdev, &stdev->dev);
 	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
 	dev_info(&stdev->dev, "unregistered.\n");
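The switchtec hunks above fold the open-coded cdev_add()/device_add() pair into the combined cdev_device_add() helper, which also makes the manual kobj.parent assignment unnecessary. A minimal sketch of the pattern (the wrapper names and fops are hypothetical):

#include <linux/cdev.h>
#include <linux/device.h>

/* Registration: cdev_device_add() registers the cdev and its struct device
 * together and ties their lifetimes, replacing the two-step sequence and
 * its partial-failure unwind.
 */
static int my_register(struct cdev *cdev, struct device *dev,
		       const struct file_operations *fops)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;
	return cdev_device_add(cdev, dev);
}

/* Teardown mirrors it with one call. */
static void my_unregister(struct cdev *cdev, struct device *dev)
{
	cdev_device_del(cdev, dev);
}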
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
 		goto bye;
 	}

-	mempool_free(mbp, hw->mb_mempool);
 	if (finicsum != cfcsum) {
 		csio_warn(hw,
 			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",

@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
 	rv = csio_hw_validate_caps(hw, mbp);
 	if (rv != 0)
 		goto bye;

+	mempool_free(mbp, hw->mb_mempool);
+	mbp = NULL;
+
 	/*
 	 * Note that we're operating with parameters
 	 * not supplied by the driver, rather than from hard-wired
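The csio_hw hunks move mempool_free() from before csio_hw_validate_caps(hw, mbp) to after it: the old placement returned the mailbox buffer to the pool while it was still about to be used. The shape of the fix, condensed as a sketch:

	rv = csio_hw_validate_caps(hw, mbp);	/* last use of mbp */
	if (rv != 0)
		goto bye;
	mempool_free(mbp, hw->mb_mempool);	/* free only after last use */
	mbp = NULL;				/* so the error path cannot double-free */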
@@ -1422,7 +1422,7 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
 	fp = fc_frame_alloc(lport, sizeof(*rtv));
 	if (!fp) {
 		rjt_data.reason = ELS_RJT_UNAB;
-		rjt_data.reason = ELS_EXPL_INSUF_RES;
+		rjt_data.explan = ELS_EXPL_INSUF_RES;
 		fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
 		goto drop;
 	}
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
 	uint32_t buffer_tag;	/* used for tagged queue ring */
 };

+struct lpfc_nvmet_ctxbuf {
+	struct list_head list;
+	struct lpfc_nvmet_rcv_ctx *context;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_sglq *sglq;
+};
+
 struct lpfc_dma_pool {
 	struct lpfc_dmabuf *elements;
 	uint32_t max_count;

@@ -163,9 +170,7 @@ struct rqb_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint16_t total_size;
 	uint16_t bytes_recv;
-	void *context;
-	struct lpfc_iocbq *iocbq;
-	struct lpfc_sglq *sglq;
+	uint16_t idx;
 	struct lpfc_queue *hrq;	/* ptr to associated Header RQ */
 	struct lpfc_queue *drq;	/* ptr to associated Data RQ */
 };

@@ -670,6 +675,8 @@ struct lpfc_hba {
 					/* INIT_LINK mailbox command */
 #define LS_NPIV_FAB_SUPPORTED 0x2	/* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4	/* intr handler should ignore ERATT */
+#define LS_MDS_LINK_DOWN      0x8	/* MDS Diagnostics Link Down */
+#define LS_MDS_LOOPBACK      0x16	/* MDS Diagnostics Link Up (Loopback) */

 	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1	/* This flag is set when eratt handled */

@@ -777,7 +784,6 @@ struct lpfc_hba {
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_io_channel;
 	uint32_t cfg_nvmet_mrq;
-	uint32_t cfg_nvmet_mrq_post;
 	uint32_t cfg_enable_nvmet;
 	uint32_t cfg_nvme_enable_fb;
 	uint32_t cfg_nvmet_fb_size;

@@ -943,6 +949,7 @@ struct lpfc_hba {
 	struct pci_pool *lpfc_mbuf_pool;
 	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
 	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
+	struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
 	struct pci_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
 	struct pci_pool *txrdy_payload_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;

@@ -1228,7 +1235,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
 static inline struct lpfc_sli_ring *
 lpfc_phba_elsring(struct lpfc_hba *phba)
 {
-	if (phba->sli_rev == LPFC_SLI_REV4)
-		return phba->sli4_hba.els_wq->pring;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (phba->sli4_hba.els_wq)
+			return phba->sli4_hba.els_wq->pring;
+		else
+			return NULL;
+	}
 	return &phba->sli.sli3_ring[LPFC_ELS_RING];
 }
@@ -60,9 +60,9 @@
 #define LPFC_MIN_DEVLOSS_TMO	1
 #define LPFC_MAX_DEVLOSS_TMO	255

-#define LPFC_DEF_MRQ_POST	256
-#define LPFC_MIN_MRQ_POST	32
-#define LPFC_MAX_MRQ_POST	512
+#define LPFC_DEF_MRQ_POST	512
+#define LPFC_MIN_MRQ_POST	512
+#define LPFC_MAX_MRQ_POST	2048

 /*
  * Write key size should be multiple of 4. If write key is changed

@@ -205,8 +205,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				atomic_read(&tgtp->xmt_ls_rsp_error));

 		len += snprintf(buf+len, PAGE_SIZE-len,
-				"FCP: Rcv %08x Drop %08x\n",
+				"FCP: Rcv %08x Release %08x Drop %08x\n",
 				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->xmt_fcp_release),
 				atomic_read(&tgtp->rcv_fcp_cmd_drop));

 		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=

@@ -218,15 +219,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		}

 		len += snprintf(buf+len, PAGE_SIZE-len,
-				"FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n",
+				"FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
+				"drop %08x\n",
 				atomic_read(&tgtp->xmt_fcp_read),
 				atomic_read(&tgtp->xmt_fcp_read_rsp),
 				atomic_read(&tgtp->xmt_fcp_write),
-				atomic_read(&tgtp->xmt_fcp_rsp));
-		len += snprintf(buf+len, PAGE_SIZE-len,
-				"FCP Rsp: abort %08x drop %08x\n",
-				atomic_read(&tgtp->xmt_fcp_abort),
+				atomic_read(&tgtp->xmt_fcp_rsp),
 				atomic_read(&tgtp->xmt_fcp_drop));

 		len += snprintf(buf+len, PAGE_SIZE-len,

@@ -236,10 +234,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				atomic_read(&tgtp->xmt_fcp_rsp_drop));

 		len += snprintf(buf+len, PAGE_SIZE-len,
-				"ABORT: Xmt %08x Err %08x Cmpl %08x",
+				"ABORT: Xmt %08x Cmpl %08x\n",
+				atomic_read(&tgtp->xmt_fcp_abort),
+				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
+				atomic_read(&tgtp->xmt_abort_sol),
+				atomic_read(&tgtp->xmt_abort_unsol),
 				atomic_read(&tgtp->xmt_abort_rsp),
-				atomic_read(&tgtp->xmt_abort_rsp_error),
-				atomic_read(&tgtp->xmt_abort_cmpl));
+				atomic_read(&tgtp->xmt_abort_rsp_error));
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"IO_CTX: %08x outstanding %08x total %x",
+				phba->sli4_hba.nvmet_ctx_cnt,
+				phba->sli4_hba.nvmet_io_wait_cnt,
+				phba->sli4_hba.nvmet_io_wait_total);

 		len += snprintf(buf+len, PAGE_SIZE-len, "\n");

 		return len;

@@ -3311,14 +3321,6 @@ LPFC_ATTR_R(nvmet_mrq,
 	    1, 1, 16,
 	    "Specify number of RQ pairs for processing NVMET cmds");

-/*
- * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
- *
- */
-LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
-	    LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
-	    "Specify number of buffers to post on every MRQ");
-
 /*
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values: 1 - register just FCP

@@ -5154,7 +5156,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_suppress_rsp,
 	&dev_attr_lpfc_nvme_io_channel,
 	&dev_attr_lpfc_nvmet_mrq,
-	&dev_attr_lpfc_nvmet_mrq_post,
 	&dev_attr_lpfc_nvme_enable_fb,
 	&dev_attr_lpfc_nvmet_fb_size,
 	&dev_attr_lpfc_enable_bg,

@@ -6194,7 +6195,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
 	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
-	lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);

 	/* Initialize first burst. Target vs Initiator are different. */
 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);

@@ -6291,7 +6291,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 		/* Not NVME Target mode. Turn off Target parameters. */
 		phba->nvmet_support = 0;
 		phba->cfg_nvmet_mrq = 0;
-		phba->cfg_nvmet_mrq_post = 0;
 		phba->cfg_nvmet_fb_size = 0;
 	}
@@ -75,6 +75,10 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
+int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+			struct lpfc_queue *drq, int count, int idx);

 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);

@@ -246,16 +250,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
 void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
 struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
 void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
-void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-			struct lpfc_dmabuf *mp);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+			    struct lpfc_nvmet_ctxbuf *ctxp);
 int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 			       struct fc_frame_header *fc_hdr);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
 				     uint16_t);
 int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
 		     struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
-int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
-			struct lpfc_queue *dq, int count);
 int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
 void lpfc_unregister_fcf(struct lpfc_hba *);
 void lpfc_unregister_fcf_rescan(struct lpfc_hba *);

@@ -271,6 +273,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
 void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);

 int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba);
 int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
 void lpfc_mem_free_all(struct lpfc_hba *);
@@ -2092,6 +2092,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
 	ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
 	ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
+	ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
 	ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
 	size = FOURBYTES + 32;
 	ad->AttrLen = cpu_to_be16(size);
@@ -797,11 +797,6 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				atomic_read(&tgtp->xmt_fcp_write),
 				atomic_read(&tgtp->xmt_fcp_rsp));

-		len += snprintf(buf + len, size - len,
-				"FCP Rsp: abort %08x drop %08x\n",
-				atomic_read(&tgtp->xmt_fcp_abort),
-				atomic_read(&tgtp->xmt_fcp_drop));
-
 		len += snprintf(buf + len, size - len,
 				"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
 				atomic_read(&tgtp->xmt_fcp_rsp_cmpl),

@@ -809,10 +804,16 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				atomic_read(&tgtp->xmt_fcp_rsp_drop));

 		len += snprintf(buf + len, size - len,
-				"ABORT: Xmt %08x Err %08x Cmpl %08x",
+				"ABORT: Xmt %08x Cmpl %08x\n",
+				atomic_read(&tgtp->xmt_fcp_abort),
+				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+		len += snprintf(buf + len, size - len,
+				"ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
+				atomic_read(&tgtp->xmt_abort_sol),
+				atomic_read(&tgtp->xmt_abort_unsol),
 				atomic_read(&tgtp->xmt_abort_rsp),
-				atomic_read(&tgtp->xmt_abort_rsp_error),
-				atomic_read(&tgtp->xmt_abort_cmpl));
+				atomic_read(&tgtp->xmt_abort_rsp_error));

 		len += snprintf(buf + len, size - len, "\n");

@@ -841,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			}
 			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		}
+
+		len += snprintf(buf + len, size - len,
+				"IO_CTX: %08x outstanding %08x total %08x\n",
+				phba->sli4_hba.nvmet_ctx_cnt,
+				phba->sli4_hba.nvmet_io_wait_cnt,
+				phba->sli4_hba.nvmet_io_wait_total);
 	} else {
 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 			return len;

@@ -1959,6 +1966,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
 		atomic_set(&tgtp->rcv_ls_req_out, 0);
 		atomic_set(&tgtp->rcv_ls_req_drop, 0);
 		atomic_set(&tgtp->xmt_ls_abort, 0);
+		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
 		atomic_set(&tgtp->xmt_ls_rsp, 0);
 		atomic_set(&tgtp->xmt_ls_drop, 0);
 		atomic_set(&tgtp->xmt_ls_rsp_error, 0);

@@ -1967,19 +1975,22 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
 		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
 		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
 		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
-		atomic_set(&tgtp->xmt_fcp_abort, 0);
 		atomic_set(&tgtp->xmt_fcp_drop, 0);
 		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
 		atomic_set(&tgtp->xmt_fcp_read, 0);
 		atomic_set(&tgtp->xmt_fcp_write, 0);
 		atomic_set(&tgtp->xmt_fcp_rsp, 0);
+		atomic_set(&tgtp->xmt_fcp_release, 0);
 		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
 		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
 		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+		atomic_set(&tgtp->xmt_fcp_abort, 0);
+		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
+		atomic_set(&tgtp->xmt_abort_sol, 0);
+		atomic_set(&tgtp->xmt_abort_unsol, 0);
 		atomic_set(&tgtp->xmt_abort_rsp, 0);
 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
-		atomic_set(&tgtp->xmt_abort_cmpl, 0);
 	}
 	return nbytes;
 }

@@ -3070,11 +3081,11 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
 			qp->assoc_qid, qp->q_cnt_1,
 			(unsigned long long)qp->q_cnt_4);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-			"\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			"\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
 			qp->queue_id, qp->entry_count,
 			qp->entry_size, qp->host_index,
-			qp->hba_index);
+			qp->hba_index, qp->entry_repost);
 	len += snprintf(pbuffer + len,
 			LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 	return len;

@@ -3121,11 +3132,11 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
 			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
 			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-			"\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			"\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
 			qp->queue_id, qp->entry_count,
 			qp->entry_size, qp->host_index,
-			qp->hba_index);
+			qp->hba_index, qp->entry_repost);

 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");

@@ -3143,20 +3154,20 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
 			"\t\t%s RQ info: ", rqtype);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
-			"trunc:x%x rcv:x%llx]\n",
+			"posted:x%x rcv:x%llx]\n",
 			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
 			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-			"\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-			"HOST-IDX[%04d], PORT-IDX[%04d]\n",
+			"\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
 			qp->queue_id, qp->entry_count, qp->entry_size,
-			qp->host_index, qp->hba_index);
+			qp->host_index, qp->hba_index, qp->entry_repost);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-			"\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-			"HOST-IDX[%04d], PORT-IDX[%04d]\n",
+			"\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
 			datqp->queue_id, datqp->entry_count,
 			datqp->entry_size, datqp->host_index,
-			datqp->hba_index);
+			datqp->hba_index, datqp->entry_repost);
 	return len;
 }

@@ -3242,10 +3253,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
 			eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
 			(unsigned long long)qp->q_cnt_4);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-			"EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
 			qp->queue_id, qp->entry_count, qp->entry_size,
-			qp->host_index, qp->hba_index);
+			qp->host_index, qp->hba_index, qp->entry_repost);

 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");

 	return len;

@@ -5855,8 +5866,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 			atomic_dec(&lpfc_debugfs_hba_count);
 		}

-		debugfs_remove(lpfc_debugfs_root); /* lpfc */
-		lpfc_debugfs_root = NULL;
+		if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+			debugfs_remove(lpfc_debugfs_root); /* lpfc */
+			lpfc_debugfs_root = NULL;
+		}
 	}
 #endif
 	return;
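A side note on the `len += snprintf(buf + len, size - len, ...)` accumulation used throughout these stat dumps: snprintf() returns the length that *would* have been written, so once the buffer fills, `len` can exceed `size` and the subsequent `size - len` arguments become bogus. scnprintf(), which returns the bytes actually stored, is the safe accumulator; a sketch (variable names illustrative only):

	len += scnprintf(buf + len, size - len, "FCP Rsp Cmpl: %08x\n", cmpl);
	len += scnprintf(buf + len, size - len, "ABORT: Xmt %08x\n", xmt);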
@@ -90,6 +90,7 @@ struct lpfc_nodelist {
 #define NLP_FCP_INITIATOR  0x10	/* entry is an FCP Initiator */
 #define NLP_NVME_TARGET    0x20	/* entry is a NVME Target */
 #define NLP_NVME_INITIATOR 0x40	/* entry is a NVME Initiator */
+#define NLP_NVME_DISCOVERY 0x80	/* entry has NVME disc srvc */

 	uint16_t	nlp_fc4_type;	/* FC types node supports. */
 					/* Assigned from GID_FF, only
@@ -1047,6 +1047,13 @@ stop_rr_fcf_flogi:
 				 irsp->ulpStatus, irsp->un.ulpWord[4],
 				 irsp->ulpTimeout);

+		/* If this is not a loop open failure, bail out */
+		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+					IOERR_LOOP_OPEN_FAILURE)))
+			goto flogifail;
+
 		/* FLOGI failed, so there is no fabric */
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);

@@ -2077,16 +2084,19 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	if (irsp->ulpStatus) {
 		/* Check for retry */
+		ndlp->fc4_prli_sent--;
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
 			/* ELS command is being retried */
-			ndlp->fc4_prli_sent--;
 			goto out;
 		}
 		/* PRLI failed */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
+				 "data: x%x\n",
 				 ndlp->nlp_DID, irsp->ulpStatus,
-				 irsp->un.ulpWord[4]);
+				 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 		if (lpfc_error_lost_link(irsp))
 			goto out;

@@ -7441,6 +7451,13 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 	 */
 	spin_lock_irq(&phba->hbalock);
 	pring = lpfc_phba_elsring(phba);
+
+	/* Bail out if we've no ELS wq, like in PCI error recovery case. */
+	if (unlikely(!pring)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_lock(&pring->ring_lock);

@@ -8667,7 +8684,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	lpfc_do_scr_ns_plogi(phba, vport);
 	goto out;
 fdisc_failed:
-	if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)
+	if (vport->fc_vport &&
+	    (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 	/* Cancel discovery timer */
 	lpfc_can_disctmo(vport);
@@ -693,15 +693,16 @@ lpfc_work_done(struct lpfc_hba *phba)
 		pring = lpfc_phba_elsring(phba);
 		status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
 		status >>= (4*LPFC_ELS_RING);
-		if ((status & HA_RXMASK) ||
-		    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
-		    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+		if (pring && (status & HA_RXMASK ||
+			      pring->flag & LPFC_DEFERRED_RING_EVENT ||
+			      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
 			if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
 				/* Set the lpfc data pending flag */
 				set_bit(LPFC_DATA_READY, &phba->data_flags);
 			} else {
-				if (phba->link_state >= LPFC_LINK_UP) {
+				if (phba->link_state >= LPFC_LINK_UP ||
+				    phba->link_flag & LS_MDS_LOOPBACK) {
 					pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 					lpfc_sli_handle_slow_ring_event(phba, pring,
 									(status &
@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy {

 #define LPFC_HDR_BUF_SIZE 128
 #define LPFC_DATA_BUF_SIZE 2048
+#define LPFC_NVMET_DATA_BUF_SIZE 128
 struct rq_context {
 	uint32_t word0;
 #define lpfc_rq_context_rqe_count_SHIFT	16	/* Version 0 Only */

@@ -4420,6 +4421,19 @@ struct fcp_treceive64_wqe {
 };
 #define TXRDY_PAYLOAD_LEN	12

+#define CMD_SEND_FRAME	0xE1
+
+struct send_frame_wqe {
+	struct ulp_bde64 bde;		/* words 0-2 */
+	uint32_t frame_len;		/* word 3 */
+	uint32_t fc_hdr_wd0;		/* word 4 */
+	uint32_t fc_hdr_wd1;		/* word 5 */
+	struct wqe_common wqe_com;	/* words 6-11 */
+	uint32_t fc_hdr_wd2;		/* word 12 */
+	uint32_t fc_hdr_wd3;		/* word 13 */
+	uint32_t fc_hdr_wd4;		/* word 14 */
+	uint32_t fc_hdr_wd5;		/* word 15 */
+};

 union lpfc_wqe {
 	uint32_t words[16];

@@ -4438,7 +4452,7 @@ union lpfc_wqe {
 	struct fcp_trsp64_wqe fcp_trsp;
 	struct fcp_tsend64_wqe fcp_tsend;
 	struct fcp_treceive64_wqe fcp_treceive;
+	struct send_frame_wqe send_frame;
 };

 union lpfc_wqe128 {
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
 			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
-			lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 		}
 	}

@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 {
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
-	uint16_t nvmet_xri_cnt, tot_cnt;
+	uint16_t nvmet_xri_cnt;
 	LIST_HEAD(nvmet_sgl_list);
 	int rc;

@@ -3389,15 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 	 * update on pci function's nvmet xri-sgl list
 	 */
 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-	tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-	if (nvmet_xri_cnt > tot_cnt) {
-		phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
-		nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				"6301 NVMET post-sgl count changed to %d\n",
-				phba->cfg_nvmet_mrq_post);
-	}
+
+	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;

 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 		/* els xri-sgl expanded */

@@ -4546,6 +4540,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
 	pmb->vport = phba->pport;

 	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
+		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
+
+		switch (phba->sli4_hba.link_state.status) {
+		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
+			phba->link_flag |= LS_MDS_LINK_DOWN;
+			break;
+		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
+			phba->link_flag |= LS_MDS_LOOPBACK;
+			break;
+		default:
+			break;
+		}
+
 		/* Parse and translate status field */
 		mb = &pmb->u.mb;
 		mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,

@@ -5830,6 +5837,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
+
 		/* Fast-path XRI aborted CQ Event work queue list */
 		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
 	}

@@ -5837,6 +5847,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* This abort list used by worker thread */
 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
 	spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);

 	/*
 	 * Initialize driver internal slow-path work queues

@@ -5951,16 +5962,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
 			if (wwn == lpfc_enable_nvmet[i]) {
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+				if (lpfc_nvmet_mem_alloc(phba))
+					break;
+				phba->nvmet_support = 1; /* a match */
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"6017 NVME Target %016llx\n",
 						wwn);
-				phba->nvmet_support = 1; /* a match */
 #else
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"6021 Can't enable NVME Target."
 						" NVME_TARGET_FC infrastructure"
 						" is not in kernel\n");
 #endif
+				break;
 			}
 		}
 	}

@@ -6269,7 +6285,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
-static void
+void
 lpfc_free_iocb_list(struct lpfc_hba *phba)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

@@ -6297,7 +6313,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
 *	0 - successful
 *	other values - error
 **/
-static int
+int
 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL;

@@ -6525,7 +6541,6 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 	uint16_t rpi_limit, curr_rpi_range;
 	struct lpfc_dmabuf *dmabuf;
 	struct lpfc_rpi_hdr *rpi_hdr;
-	uint32_t rpi_count;

 	/*
 	 * If the SLI4 port supports extents, posting the rpi header isn't

@@ -6538,8 +6553,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 		return NULL;

 	/* The limit on the logical index is just the max_rpi count. */
-	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
-		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
+	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

 	spin_lock_irq(&phba->hbalock);
 	/*

@@ -6550,18 +6564,10 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 	curr_rpi_range = phba->sli4_hba.next_rpi;
 	spin_unlock_irq(&phba->hbalock);

-	/*
-	 * The port has a limited number of rpis. The increment here
-	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
-	 * and to allow the full max_rpi range per port.
-	 */
-	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
-		rpi_count = rpi_limit - curr_rpi_range;
-	else
-		rpi_count = LPFC_RPI_HDR_COUNT;
-
-	if (!rpi_count)
+	/* Reached full RPI range */
+	if (curr_rpi_range == rpi_limit)
 		return NULL;
+
 	/*
 	 * First allocate the protocol header region for the port. The
 	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.

@@ -6595,13 +6601,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)

 	/* The rpi_hdr stores the logical index only. */
 	rpi_hdr->start_rpi = curr_rpi_range;
+	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
 	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

-	/*
-	 * The next_rpi stores the next logical module-64 rpi value used
-	 * to post physical rpis in subsequent rpi postings.
-	 */
-	phba->sli4_hba.next_rpi += rpi_count;
 	spin_unlock_irq(&phba->hbalock);
 	return rpi_hdr;

@@ -8172,7 +8174,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 			/* Create NVMET Receive Queue for header */
 			qdesc = lpfc_sli4_queue_alloc(phba,
 						      phba->sli4_hba.rq_esize,
-						      phba->sli4_hba.rq_ecount);
+						      LPFC_NVMET_RQE_DEF_COUNT);
 			if (!qdesc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"3146 Failed allocate "

@@ -8194,7 +8196,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 			/* Create NVMET Receive Queue for data */
 			qdesc = lpfc_sli4_queue_alloc(phba,
 						      phba->sli4_hba.rq_esize,
-						      phba->sli4_hba.rq_ecount);
+						      LPFC_NVMET_RQE_DEF_COUNT);
 			if (!qdesc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"3156 Failed allocate "

@@ -8325,46 +8327,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 }

-int
-lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
-		    struct lpfc_queue *drq, int count)
-{
-	int rc, i;
-	struct lpfc_rqe hrqe;
-	struct lpfc_rqe drqe;
-	struct lpfc_rqb *rqbp;
-	struct rqb_dmabuf *rqb_buffer;
-	LIST_HEAD(rqb_buf_list);
-
-	rqbp = hrq->rqbp;
-	for (i = 0; i < count; i++) {
-		rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
-		if (!rqb_buffer)
-			break;
-		rqb_buffer->hrq = hrq;
-		rqb_buffer->drq = drq;
-		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
-	}
-	while (!list_empty(&rqb_buf_list)) {
-		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
-				 hbuf.list);
-
-		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
-		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
-		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
-		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
-		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
-		if (rc < 0) {
-			(rqbp->rqb_free_buffer)(phba, rqb_buffer);
-		} else {
-			list_add_tail(&rqb_buffer->hbuf.list,
-				      &rqbp->rqb_buffer_list);
-			rqbp->buffer_count++;
-		}
-	}
-	return 1;
-}
-
 int
 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
 {

@@ -8784,9 +8746,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		goto out_destroy;
 	}

-	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
-	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
-
 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
 			    phba->sli4_hba.els_cq, LPFC_USOL);
 	if (rc) {

@@ -11110,7 +11069,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host  *shost = NULL;
-	int error, cnt;
+	int error;
 	uint32_t cfg_mode, intr_mode;

 	/* Allocate memory for HBA structure */

@@ -11144,22 +11103,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_unset_pci_mem_s4;
 	}

-	cnt = phba->cfg_iocb_cnt * 1024;
-	if (phba->nvmet_support)
-		cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
-
-	/* Initialize and populate the iocb list per host */
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2821 initialize iocb list %d total %d\n",
-			phba->cfg_iocb_cnt, cnt);
-	error = lpfc_init_iocb_list(phba, cnt);
-	if (error) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1413 Failed to initialize iocb list.\n");
-		goto out_unset_driver_resource_s4;
-	}
-
 	INIT_LIST_HEAD(&phba->active_rrq_list);
 	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

@@ -11168,7 +11111,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1414 Failed to set up driver resource.\n");
-		goto out_free_iocb_list;
+		goto out_unset_driver_resource_s4;
 	}

 	/* Get the default values for Model Name and Description */

@@ -11268,8 +11211,6 @@ out_destroy_shost:
 	lpfc_destroy_shost(phba);
 out_unset_driver_resource:
 	lpfc_unset_driver_resource_phase2(phba);
-out_free_iocb_list:
-	lpfc_free_iocb_list(phba);
 out_unset_driver_resource_s4:
 	lpfc_sli4_driver_resource_unset(phba);
 out_unset_pci_mem_s4:
@@ -214,6 +214,21 @@ fail_free_drb_pool:
 	return -ENOMEM;
 }

+int
+lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
+{
+	phba->lpfc_nvmet_drb_pool =
+		pci_pool_create("lpfc_nvmet_drb_pool",
+				phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+				SGL_ALIGN_SZ, 0);
+	if (!phba->lpfc_nvmet_drb_pool) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6024 Can't enable NVME Target - no memory\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
 /**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for

@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba)

 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
+	if (phba->lpfc_nvmet_drb_pool)
+		pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+	phba->lpfc_nvmet_drb_pool = NULL;
 	if (phba->lpfc_drb_pool)
 		pci_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;

@@ -611,8 +629,6 @@ struct rqb_dmabuf *
 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 {
 	struct rqb_dmabuf *dma_buf;
-	struct lpfc_iocbq *nvmewqe;
-	union lpfc_wqe128 *wqe;

 	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
 	if (!dma_buf)

@@ -624,69 +640,15 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
-					    &dma_buf->dbuf.phys);
+	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+					    GFP_KERNEL, &dma_buf->dbuf.phys);
 	if (!dma_buf->dbuf.virt) {
 		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
 			      dma_buf->hbuf.phys);
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
-
-	dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
-				   GFP_KERNEL);
-	if (!dma_buf->context) {
-		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		return NULL;
-	}
-
-	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-	if (!dma_buf->iocbq) {
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"2621 Ran out of nvmet iocb/WQEs\n");
-		return NULL;
-	}
-	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
-	nvmewqe = dma_buf->iocbq;
-	wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
-
-	/* Initialize WQE */
-	memset(wqe, 0, sizeof(union lpfc_wqe));
-	/* Word 7 */
-	bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
-	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-	bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
-	/* Word 10 */
-	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-	bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
-	bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
-
-	dma_buf->iocbq->context1 = NULL;
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
-	dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	if (!dma_buf->sglq) {
-		lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"6132 Ran out of nvmet XRIs\n");
-		return NULL;
-	}
+	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
 	return dma_buf;
 }

@@ -705,20 +667,9 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-	unsigned long flags;
-
-	__lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
-	dmab->sglq->state = SGL_FREED;
-	dmab->sglq->ndlp = NULL;
-
-	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
-	list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
-
-	lpfc_sli_release_iocbq(phba, dmab->iocbq);
-	kfree(dmab->context);
 	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	pci_pool_free(phba->lpfc_nvmet_drb_pool,
+		      dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
 }

@@ -803,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
 	if (rc < 0) {
 		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6409 Cannot post to RQ %d: %x %x\n",
+				rqb_entry->hrq->queue_id,
+				rqb_entry->hrq->host_index,
+				rqb_entry->hrq->hba_index);
 	} else {
 		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
 		rqbp->buffer_count++;
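The new lpfc_nvmet_drb_pool above follows the usual pci_pool lifecycle (the legacy wrappers around the dma_pool API): created once at setup, alloc/free per receive buffer, destroyed at teardown. Condensed, with the sizes taken from the hunks above:

	pool = pci_pool_create("name", pdev, LPFC_NVMET_DATA_BUF_SIZE,
			       SGL_ALIGN_SZ, 0);	/* setup */
	virt = pci_pool_alloc(pool, GFP_KERNEL, &phys);	/* per buffer */
	pci_pool_free(pool, virt, phys);		/* per buffer */
	pci_pool_destroy(pool);				/* teardown */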
@@ -1944,7 +1944,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

 		/* Target driver cannot solicit NVME FB. */
 		if (bf_get_be32(prli_tgt, nvpr)) {
+			/* Complete the nvme target roles.  The transport
+			 * needs to know if the rport is capable of
+			 * discovery in addition to its role.
+			 */
 			ndlp->nlp_type |= NLP_NVME_TARGET;
+			if (bf_get_be32(prli_disc, nvpr))
+				ndlp->nlp_type |= NLP_NVME_DISCOVERY;
 			if ((bf_get_be32(prli_fba, nvpr) == 1) &&
 			    (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
 			    (phba->cfg_nvme_enable_fb) &&
@ -142,7 +142,7 @@ out:
} }
/** /**
* lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
* @phba: HBA buffer is associated with * @phba: HBA buffer is associated with
* @ctxp: context to clean up * @ctxp: context to clean up
* @mp: Buffer to free * @mp: Buffer to free
@ -155,24 +155,113 @@ out:
* Returns: None * Returns: None
**/ **/
void void
lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
struct lpfc_dmabuf *mp)
{ {
if (ctxp) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
if (ctxp->flag) struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, struct lpfc_nvmet_tgtport *tgtp;
"6314 rq_post ctx xri x%x flag x%x\n", struct fc_frame_header *fc_hdr;
ctxp->oxid, ctxp->flag); struct rqb_dmabuf *nvmebuf;
struct lpfc_dmabuf *hbufp;
uint32_t *payload;
uint32_t size, oxid, sid, rc;
unsigned long iflag;
if (ctxp->txrdy) { if (ctxp->txrdy) {
pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
ctxp->txrdy_phys); ctxp->txrdy_phys);
ctxp->txrdy = NULL; ctxp->txrdy = NULL;
ctxp->txrdy_phys = 0; ctxp->txrdy_phys = 0;
}
ctxp->state = LPFC_NVMET_STE_FREE;
} }
lpfc_rq_buf_free(phba, mp); ctxp->state = LPFC_NVMET_STE_FREE;
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
if (phba->sli4_hba.nvmet_io_wait_cnt) {
hbufp = &nvmebuf->hbuf;
list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
nvmebuf, struct rqb_dmabuf,
hbuf.list);
phba->sli4_hba.nvmet_io_wait_cnt--;
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
iflag);
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
size = nvmebuf->bytes_recv;
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
memset(ctxp, 0, sizeof(ctxp->ctx));
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->state = LPFC_NVMET_STE_RCV;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on) {
ctxp->ts_cmd_nvme = ktime_get_ns();
ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
ctxp->ts_nvme_data = 0;
ctxp->ts_data_wqput = 0;
ctxp->ts_isr_data = 0;
ctxp->ts_data_nvme = 0;
ctxp->ts_nvme_status = 0;
ctxp->ts_status_wqput = 0;
ctxp->ts_isr_status = 0;
ctxp->ts_status_nvme = 0;
}
#endif
atomic_inc(&tgtp->rcv_fcp_cmd_in);
/*
* The calling sequence should be:
* nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
* lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
* When we return from nvmet_fc_rcv_fcp_req, all relevant info
* the NVME command / FC header is stored.
* A buffer has already been reposted for this IO, so just free
* the nvmebuf.
*/
rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
payload, size);
/* Process FCP command */
if (rc == 0) {
atomic_inc(&tgtp->rcv_fcp_cmd_out);
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
return;
}
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
ctxp->oxid, rc,
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
lpfc_nvmet_defer_release(phba, ctxp);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
return;
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
list_add_tail(&ctx_buf->list,
&phba->sli4_hba.lpfc_nvmet_ctx_list);
phba->sli4_hba.nvmet_ctx_cnt++;
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
#endif
} }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@ -502,6 +591,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
"6150 LS Drop IO x%x: Prep\n", "6150 LS Drop IO x%x: Prep\n",
ctxp->oxid); ctxp->oxid);
lpfc_in_buf_free(phba, &nvmebuf->dbuf); lpfc_in_buf_free(phba, &nvmebuf->dbuf);
atomic_inc(&nvmep->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
ctxp->sid, ctxp->oxid); ctxp->sid, ctxp->oxid);
return -ENOMEM; return -ENOMEM;
@ -545,6 +635,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
lpfc_nlp_put(nvmewqeq->context1); lpfc_nlp_put(nvmewqeq->context1);
lpfc_in_buf_free(phba, &nvmebuf->dbuf); lpfc_in_buf_free(phba, &nvmebuf->dbuf);
atomic_inc(&nvmep->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
return -ENXIO; return -ENXIO;
} }
@ -612,9 +703,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
ctxp->oxid, rsp->op, rsp->rsplen); ctxp->oxid, rsp->op, rsp->rsplen);
ctxp->flag |= LPFC_NVMET_IO_INP;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
if (rc == WQE_SUCCESS) { if (rc == WQE_SUCCESS) {
ctxp->flag |= LPFC_NVMET_IO_INP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (!phba->ktime_on) if (!phba->ktime_on)
return 0; return 0;
@ -692,6 +783,7 @@ static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp) struct nvmefc_tgt_fcp_req *rsp)
{ {
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
struct lpfc_nvmet_rcv_ctx *ctxp = struct lpfc_nvmet_rcv_ctx *ctxp =
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba; struct lpfc_hba *phba = ctxp->phba;
@ -710,10 +802,12 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
ctxp->state, 0); ctxp->state, 0);
atomic_inc(&lpfc_nvmep->xmt_fcp_release);
if (aborting) if (aborting)
return; return;
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
} }
static struct nvmet_fc_target_template lpfc_tgttemplate = { static struct nvmet_fc_target_template lpfc_tgttemplate = {
@ -734,17 +828,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
}; };
void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
unsigned long flags;
list_for_each_entry_safe(
ctx_buf, next_ctx_buf,
&phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
spin_lock_irqsave(
&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
list_del_init(&ctx_buf->list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
flags);
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
}
int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf;
struct lpfc_iocbq *nvmewqe;
union lpfc_wqe128 *wqe;
int i;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6403 Allocate NVMET resources for %d XRIs\n",
phba->sli4_hba.nvmet_xri_cnt);
/* For all nvmet xris, allocate resources needed to process a
* received command on a per xri basis.
*/
for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
if (!ctx_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6404 Ran out of memory for NVMET\n");
return -ENOMEM;
}
ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
GFP_KERNEL);
if (!ctx_buf->context) {
kfree(ctx_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6405 Ran out of NVMET "
"context memory\n");
return -ENOMEM;
}
ctx_buf->context->ctxbuf = ctx_buf;
ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!ctx_buf->iocbq) {
kfree(ctx_buf->context);
kfree(ctx_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6406 Ran out of NVMET iocb/WQEs\n");
return -ENOMEM;
}
ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
nvmewqe = ctx_buf->iocbq;
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));
/* Word 7 */
bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
/* Word 10 */
bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
ctx_buf->iocbq->context1 = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
if (!ctx_buf->sglq) {
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
kfree(ctx_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
spin_lock(&phba->sli4_hba.nvmet_io_lock);
list_add_tail(&ctx_buf->list,
&phba->sli4_hba.lpfc_nvmet_ctx_list);
spin_unlock(&phba->sli4_hba.nvmet_io_lock);
}
phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
return 0;
}
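The two routines above preallocate one NVMET context buffer (iocbq plus SGL plus receive context) per XRI at targetport setup and recycle them through phba->sli4_hba.lpfc_nvmet_ctx_list, so the receive hot path never allocates memory. A minimal standalone sketch of that preallocate-and-recycle pool pattern, with a pthread mutex standing in for the driver's spinlocks (ctx_pool, ctx_get and ctx_put are illustrative names, not driver symbols):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	struct ctx *next;	/* free-list link */
	int xri;		/* the exchange resource this context backs */
};

struct ctx_pool {
	pthread_mutex_t lock;
	struct ctx *free_head;
	int free_cnt;
};

static int ctx_pool_setup(struct ctx_pool *p, int cnt)
{
	p->free_head = NULL;
	p->free_cnt = 0;
	pthread_mutex_init(&p->lock, NULL);
	for (int i = 0; i < cnt; i++) {
		struct ctx *c = calloc(1, sizeof(*c));

		if (!c)
			return -1;	/* caller runs the cleanup path, as in the patch */
		c->xri = i;
		c->next = p->free_head;
		p->free_head = c;
		p->free_cnt++;
	}
	return 0;
}

static struct ctx *ctx_get(struct ctx_pool *p)
{
	struct ctx *c;

	pthread_mutex_lock(&p->lock);
	c = p->free_head;
	if (c) {
		p->free_head = c->next;
		p->free_cnt--;
	}
	pthread_mutex_unlock(&p->lock);
	return c;	/* NULL means "defer the IO", never "allocate" */
}

static void ctx_put(struct ctx_pool *p, struct ctx *c)
{
	pthread_mutex_lock(&p->lock);
	c->next = p->free_head;
	p->free_head = c;
	p->free_cnt++;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct ctx_pool pool;
	struct ctx *c;

	if (ctx_pool_setup(&pool, 4))
		return 1;
	c = ctx_get(&pool);
	printf("got xri %d, %d contexts left\n", c->xri, pool.free_cnt);
	ctx_put(&pool, c);
	return 0;
}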
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_nvmet_tgtport *tgtp;
struct nvmet_fc_port_info pinfo;
- int error = 0;
+ int error;
if (phba->targetport)
return 0;
error = lpfc_nvmet_setup_io_context(phba);
if (error)
return error;
memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
@@ -772,13 +977,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
&phba->pcidev->dev,
&phba->targetport);
#else
- error = -ENOMEM;
+ error = -ENOENT;
#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6025 Cannot register NVME targetport "
"x%x\n", error);
phba->targetport = NULL;
lpfc_nvmet_cleanup_io_context(phba);
} else {
tgtp = (struct lpfc_nvmet_tgtport *)
phba->targetport->private;
@@ -795,6 +1003,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->rcv_ls_req_out, 0);
atomic_set(&tgtp->rcv_ls_req_drop, 0);
atomic_set(&tgtp->xmt_ls_abort, 0);
atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
atomic_set(&tgtp->xmt_ls_rsp, 0);
atomic_set(&tgtp->xmt_ls_drop, 0);
atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -802,18 +1011,21 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
atomic_set(&tgtp->xmt_fcp_abort, 0);
atomic_set(&tgtp->xmt_fcp_drop, 0);
atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
atomic_set(&tgtp->xmt_fcp_read, 0);
atomic_set(&tgtp->xmt_fcp_write, 0);
atomic_set(&tgtp->xmt_fcp_rsp, 0);
atomic_set(&tgtp->xmt_fcp_release, 0);
atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
atomic_set(&tgtp->xmt_fcp_abort, 0);
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
atomic_set(&tgtp->xmt_abort_unsol, 0);
atomic_set(&tgtp->xmt_abort_sol, 0);
atomic_set(&tgtp->xmt_abort_rsp, 0);
atomic_set(&tgtp->xmt_abort_rsp_error, 0);
atomic_set(&tgtp->xmt_abort_cmpl, 0);
}
return error;
}
@@ -864,7 +1076,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
- if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+ if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
/* Check if we already received a free context call
@@ -885,7 +1097,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
lpfc_set_rrq_active(phba, ndlp,
- ctxp->rqb_buffer->sglq->sli4_lxritag,
+ ctxp->ctxbuf->sglq->sli4_lxritag,
rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
@@ -894,8 +1106,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
"6318 XB aborted %x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
if (rrq_empty)
lpfc_worker_wake_up(phba);
return;
@@ -923,7 +1135,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
- if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+ if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
@@ -975,6 +1187,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
init_completion(&tgtp->tport_unreg_done);
nvmet_fc_unregister_targetport(phba->targetport);
wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;
#endif
@@ -1010,6 +1223,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
oxid = 0;
size = 0;
sid = 0;
ctxp = NULL;
goto dropit;
}
@@ -1104,39 +1318,71 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_ctxbuf *ctx_buf;
uint32_t *payload;
- uint32_t size, oxid, sid, rc;
+ uint32_t size, oxid, sid, rc, qno;
unsigned long iflag;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t id;
#endif
ctx_buf = NULL;
if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6157 FCP Drop IO\n");
+ "6157 NVMET FCP Drop IO\n");
oxid = 0;
size = 0;
sid = 0;
ctxp = NULL;
goto dropit;
}
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
if (phba->sli4_hba.nvmet_ctx_cnt) {
list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
phba->sli4_hba.nvmet_ctx_cnt--;
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
size = nvmebuf->bytes_recv;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
id = smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT)
phba->cpucheck_rcv_io[id]++;
}
#endif
lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
oxid, size, smp_processor_id());
if (!ctx_buf) {
/* Queue this NVME IO to process later */
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
list_add_tail(&nvmebuf->hbuf.list,
&phba->sli4_hba.lpfc_nvmet_io_wait_list);
phba->sli4_hba.nvmet_io_wait_cnt++;
phba->sli4_hba.nvmet_io_wait_total++;
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
iflag);
/* Post a brand new DMA buffer to RQ */
qno = nvmebuf->idx;
lpfc_post_rq_buffer(
phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
return;
}
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
size = nvmebuf->bytes_recv;
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
sid = sli4_sid_from_fc_hdr(fc_hdr);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
+ ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
if (ctxp == NULL) {
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6158 FCP Drop IO x%x: Alloc\n",
oxid);
lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
/* Cannot send ABTS without context */
return;
}
memset(ctxp, 0, sizeof(ctxp->ctx));
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
@@ -1146,9 +1392,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->state = LPFC_NVMET_STE_RCV;
ctxp->rqb_buffer = nvmebuf;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1164,22 +1410,16 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->ts_isr_status = 0;
ctxp->ts_status_nvme = 0;
}
if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
id = smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT)
phba->cpucheck_rcv_io[id]++;
}
#endif
lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
oxid, size, smp_processor_id());
atomic_inc(&tgtp->rcv_fcp_cmd_in);
/*
* The calling sequence should be:
* nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
* lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
* When we return from nvmet_fc_rcv_fcp_req, all relevant info in
* the NVME command / FC header is stored, so we are free to repost
* the buffer.
*/
rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
payload, size);
@@ -1187,26 +1427,32 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
/* Process FCP command */
if (rc == 0) {
atomic_inc(&tgtp->rcv_fcp_cmd_out);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
}
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6159 FCP Drop IO x%x: err x%x\n",
- ctxp->oxid, rc);
+ "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+ ctxp->oxid, rc,
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
dropit:
lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
oxid, size, sid);
if (oxid) {
lpfc_nvmet_defer_release(phba, ctxp);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
}
- if (nvmebuf) {
- nvmebuf->iocbq->hba_wqidx = 0;
- /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
- lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
- }
+ if (ctx_buf)
+ lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
+ if (nvmebuf)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
#endif
}
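When no free context is available, the receive path above parks the frame on lpfc_nvmet_io_wait_list and posts a brand-new buffer to the same RQ index, deferring the command instead of dropping it. A compressed sketch of that defer-and-repost decision; the helpers are stubs for the driver's wait list and lpfc_post_rq_buffer(), not real APIs:

#include <stdio.h>

struct frame { int oxid; };

/* Stubs standing in for the driver's context pool, wait list and RQ repost. */
static int ctx_available(void) { return 0; }	/* pool exhausted */
static void park_on_wait_list(struct frame *f) { printf("parked oxid x%x\n", f->oxid); }
static void post_new_rq_buffer(void) { printf("reposted one RQ buffer\n"); }
static void process_cmd(struct frame *f) { printf("processing oxid x%x\n", f->oxid); }

static void rcv_fcp_cmd(struct frame *f)
{
	if (!ctx_available()) {
		/* Same shape as the !ctx_buf branch above: keep the DMA
		 * buffer (it still holds the command) and refill the RQ. */
		park_on_wait_list(f);
		post_new_rq_buffer();
		return;
	}
	process_cmd(f);
}

int main(void)
{
	struct frame f = { .oxid = 0x1234 };

	rcv_fcp_cmd(&f);
	return 0;
}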
@@ -1258,7 +1504,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint64_t isr_timestamp)
{
if (phba->nvmet_support == 0) {
- lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
}
lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
@@ -1459,7 +1705,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe = ctxp->wqeq;
if (nvmewqe == NULL) {
/* Allocate buffer for command wqe */
- nvmewqe = ctxp->rqb_buffer->iocbq;
+ nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6110 lpfc_nvmet_prep_fcp_wqe: No "
@@ -1486,7 +1732,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
+ sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
switch (rsp->op) {
case NVMET_FCOP_READDATA:
case NVMET_FCOP_READDATA_RSP:
@@ -1811,7 +2057,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- atomic_inc(&tgtp->xmt_abort_cmpl);
+ if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
ctxp->state = LPFC_NVMET_STE_DONE;
@@ -1826,6 +2073,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6165 ABORT cmpl: xri x%x flg x%x (%d) "
@@ -1834,15 +2082,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
*/
if (released)
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ /* This is the iocbq for the abort, not the command */
lpfc_sli_release_iocbq(phba, cmdwqe);
/* Since iaab/iaar are NOT set, there is no work left.
@@ -1876,7 +2125,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- atomic_inc(&tgtp->xmt_abort_cmpl);
+ if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
if (!ctxp) {
/* if context is clear, related io already complete */
@@ -1906,6 +2156,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6316 ABTS cmpl xri x%x flg x%x (%x) "
@@ -1913,15 +2164,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->oxid, ctxp->flag, released,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
*/
if (released)
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
/* Since iaab/iaar are NOT set, there is no work left.
* For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
@@ -1952,7 +2203,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- atomic_inc(&tgtp->xmt_abort_cmpl);
+ atomic_inc(&tgtp->xmt_ls_abort_cmpl);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
@@ -1983,10 +2234,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
sid, xri, ctxp->wqeq->sli4_xritag);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
ctxp->wqeq = ctxp->rqb_buffer->iocbq;
ctxp->wqeq->hba_wqidx = 0;
}
ndlp = lpfc_findnode_did(phba->pport, sid);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@@ -2082,7 +2329,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
- ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+ ctxp->wqeq = ctxp->ctxbuf->iocbq;
ctxp->wqeq->hba_wqidx = 0;
}
@@ -2103,6 +2350,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Issue ABTS for this WQE based on iotag */
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
@@ -2127,6 +2375,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6163 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x oxid x%x\n",
@@ -2139,6 +2388,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Outstanding abort is in progress */
if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6164 Outstanding NVME I/O Abort Request "
"still pending on oxid x%x\n",
@@ -2189,9 +2439,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->context2 = ctxp;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
- if (rc == WQE_SUCCESS)
+ if (rc == WQE_SUCCESS) {
atomic_inc(&tgtp->xmt_abort_sol);
return 0;
}
atomic_inc(&tgtp->xmt_abort_rsp_error);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2214,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
- ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+ ctxp->wqeq = ctxp->ctxbuf->iocbq;
ctxp->wqeq->hba_wqidx = 0;
}
@@ -2230,11 +2483,11 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
atomic_inc(&tgtp->xmt_abort_rsp);
return 0;
}
aerr:
atomic_inc(&tgtp->xmt_abort_rsp_error);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
@@ -2269,6 +2522,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
}
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
spin_lock_irqsave(&phba->hbalock, flags);
@@ -2278,7 +2532,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
- atomic_inc(&tgtp->xmt_abort_rsp);
+ atomic_inc(&tgtp->xmt_abort_unsol);
return 0;
}

View file

@@ -22,6 +22,7 @@
********************************************************************/
#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVMET_RQE_DEF_COUNT 512
#define LPFC_NVMET_SUCCESS_LEN 12
/* Used for NVME Target */
@@ -34,6 +35,7 @@ struct lpfc_nvmet_tgtport {
atomic_t rcv_ls_req_out;
atomic_t rcv_ls_req_drop;
atomic_t xmt_ls_abort;
atomic_t xmt_ls_abort_cmpl;
/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
atomic_t xmt_ls_rsp;
@@ -47,9 +49,9 @@ struct lpfc_nvmet_tgtport {
atomic_t rcv_fcp_cmd_in;
atomic_t rcv_fcp_cmd_out;
atomic_t rcv_fcp_cmd_drop;
atomic_t xmt_fcp_release;
/* Stats counters - lpfc_nvmet_xmt_fcp_op */
atomic_t xmt_fcp_abort;
atomic_t xmt_fcp_drop;
atomic_t xmt_fcp_read_rsp;
atomic_t xmt_fcp_read;
@@ -62,12 +64,13 @@ struct lpfc_nvmet_tgtport {
atomic_t xmt_fcp_rsp_drop;
- /* Stats counters - lpfc_nvmet_unsol_issue_abort */
+ /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
atomic_t xmt_fcp_abort;
atomic_t xmt_fcp_abort_cmpl;
atomic_t xmt_abort_sol;
atomic_t xmt_abort_unsol;
atomic_t xmt_abort_rsp;
atomic_t xmt_abort_rsp_error;
/* Stats counters - lpfc_nvmet_xmt_abort_cmp */
atomic_t xmt_abort_cmpl;
};
struct lpfc_nvmet_rcv_ctx {
@@ -103,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t ts_isr_cmd;
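The counters above are plain atomics, bumped on each event and read without locking when the driver dumps statistics. A standalone sketch of that pattern with C11 atomics; the struct and dump helper here are illustrative, and only three of the counters are shown:

#include <stdatomic.h>
#include <stdio.h>

struct tgtport_stats {
	atomic_int rcv_fcp_cmd_in;
	atomic_int rcv_fcp_cmd_out;
	atomic_int xmt_fcp_release;
};

static struct tgtport_stats stats;	/* static: zero-initialized */

static void dump_stats(struct tgtport_stats *s)
{
	/* Relaxed loads: the counters are advisory, not synchronizing. */
	printf("FCP: in %d out %d released %d\n",
	       atomic_load_explicit(&s->rcv_fcp_cmd_in, memory_order_relaxed),
	       atomic_load_explicit(&s->rcv_fcp_cmd_out, memory_order_relaxed),
	       atomic_load_explicit(&s->xmt_fcp_release, memory_order_relaxed));
}

int main(void)
{
	atomic_fetch_add(&stats.rcv_fcp_cmd_in, 1);	/* cf. atomic_inc() */
	atomic_fetch_add(&stats.rcv_fcp_cmd_out, 1);
	dump_stats(&stats);
	return 0;
}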

View file

@@ -74,6 +74,8 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
@@ -479,22 +481,23 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
put_index = hq->host_index;
- temp_hrqe = hq->qe[hq->host_index].rqe;
+ temp_hrqe = hq->qe[put_index].rqe;
temp_drqe = dq->qe[dq->host_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
- if (hq->host_index != dq->host_index)
+ if (put_index != dq->host_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
- if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+ if (((put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
- hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+ hq->host_index = ((put_index + 1) % hq->entry_count);
dq->host_index = ((dq->host_index + 1) % dq->entry_count);
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
if (!(hq->host_index % hq->entry_repost)) {
@@ -5906,7 +5909,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
bf_set(lpfc_mbx_set_feature_mds,
&mbox->u.mqe.un.set_feature, 1);
bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
- &mbox->u.mqe.un.set_feature, 0);
+ &mbox->u.mqe.un.set_feature, 1);
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
@@ -6512,6 +6515,50 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
(phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}
int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, int count, int idx)
{
int rc, i;
struct lpfc_rqe hrqe;
struct lpfc_rqe drqe;
struct lpfc_rqb *rqbp;
struct rqb_dmabuf *rqb_buffer;
LIST_HEAD(rqb_buf_list);
rqbp = hrq->rqbp;
for (i = 0; i < count; i++) {
/* IF RQ is already full, don't bother */
if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
break;
rqb_buffer = rqbp->rqb_alloc_buffer(phba);
if (!rqb_buffer)
break;
rqb_buffer->hrq = hrq;
rqb_buffer->drq = drq;
rqb_buffer->idx = idx;
list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
}
while (!list_empty(&rqb_buf_list)) {
list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
hbuf.list);
hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
if (rc < 0) {
rqbp->rqb_free_buffer(phba, rqb_buffer);
} else {
list_add_tail(&rqb_buffer->hbuf.list,
&rqbp->rqb_buffer_list);
rqbp->buffer_count++;
}
}
return 1;
}
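lpfc_post_rq_buffer() above first collects the allocated buffers on a local list, then publishes each header/data pair through lpfc_sli4_rq_put(), which only succeeds while both queues advance in lockstep. A self-contained sketch of such a paired put, mirroring the put_index snapshot that the lpfc_sli4_rq_put() hunk above reads once and reuses (types and names are illustrative):

#include <stdio.h>

#define ENTRY_COUNT 8

struct rq {
	int host_index;		/* next slot software fills */
	int hba_index;		/* next slot hardware consumes */
	int entries[ENTRY_COUNT];
};

/* Post one header/data pair; both queues must be at the same slot. */
static int rq_pair_put(struct rq *hq, struct rq *dq, int hval, int dval)
{
	int put_index = hq->host_index;	/* snapshot once, use throughout */

	if (put_index != dq->host_index)
		return -1;		/* queues out of step */
	if (((put_index + 1) % ENTRY_COUNT) == hq->hba_index)
		return -2;		/* ring full */
	hq->entries[put_index] = hval;
	dq->entries[put_index] = dval;
	hq->host_index = (put_index + 1) % ENTRY_COUNT;
	dq->host_index = (put_index + 1) % ENTRY_COUNT;
	return put_index;
}

int main(void)
{
	struct rq hq = { 0 }, dq = { 0 };

	printf("posted at slot %d\n", rq_pair_put(&hq, &dq, 1, 2));
	return 0;
}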
/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
@@ -6524,7 +6571,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i;
+ int rc, i, cnt;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -6875,6 +6922,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_destroy_queue;
}
phba->sli4_hba.nvmet_xri_cnt = rc;
cnt = phba->cfg_iocb_cnt * 1024;
/* We need 1 iocbq for every SGL, for IO processing */
cnt += phba->sli4_hba.nvmet_xri_cnt;
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2821 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}
lpfc_nvmet_create_targetport(phba);
} else {
/* update host scsi xri-sgl sizes and mappings */
@@ -6894,28 +6956,34 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"and mapping: %d\n", rc);
goto out_destroy_queue;
}
cnt = phba->cfg_iocb_cnt * 1024;
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2820 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6301 Failed to init iocb list.\n");
goto out_destroy_queue;
}
}
if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
/* Post initial buffers to all RQs created */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
- rqbp->entry_count = 256;
+ rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
rqbp->buffer_count = 0;
/* Divide by 4 and round down to multiple of 16 */
rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8;
phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc;
phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc;
lpfc_post_rq_buffer(
phba, phba->sli4_hba.nvmet_mrq_hdr[i],
phba->sli4_hba.nvmet_mrq_data[i],
- phba->cfg_nvmet_mrq_post);
+ LPFC_NVMET_RQE_DEF_COUNT, i);
}
}
@@ -7082,6 +7150,7 @@ out_unset_queue:
/* Unset all the queues set up in this routine when error out */
lpfc_sli4_queue_unset(phba);
out_destroy_queue:
lpfc_free_iocb_list(phba);
lpfc_sli4_queue_destroy(phba);
out_stop_timers:
lpfc_stop_hba_timers(phba);
@@ -8621,8 +8690,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
memset(wqe, 0, sizeof(union lpfc_wqe128));
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
- wqe->generic.wqe_com.word10 = 0;
+ if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
+ /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word7 = 0;
+ wqe->generic.wqe_com.word10 = 0;
+ }
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
@@ -9116,6 +9188,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
break;
case CMD_SEND_FRAME:
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
return 0;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -12788,6 +12864,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
struct fc_frame_header *fc_hdr;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct lpfc_nvmet_tgtport *tgtp;
struct hbq_dmabuf *dma_buf;
uint32_t status, rq_id;
unsigned long iflags;
@@ -12808,7 +12885,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
hrq->RQ_buf_trunc++;
case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12819,6 +12895,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
goto out;
}
hrq->RQ_rcv_buf++;
hrq->RQ_buf_posted--;
memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
/* If a NVME LS event (type 0x28), treat it as Fast path */
@@ -12832,8 +12909,21 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
case FC_STATUS_INSUFF_BUF_FRM_DISC:
if (phba->nvmet_support) {
tgtp = phba->targetport->private;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
"6402 RQE Error x%x, posted %d err_cnt "
"%d: %x %x %x\n",
status, hrq->RQ_buf_posted,
hrq->RQ_no_posted_buf,
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
/* fallthrough */
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12951,7 +13041,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
if (!(++ecount % cq->entry_repost))
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ break;
cq->CQ_mbox++;
}
break;
@@ -12965,7 +13055,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
cqe);
if (!(++ecount % cq->entry_repost))
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ break;
}
/* Track the max number of CQEs processed in 1 EQ */
@@ -13135,6 +13225,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_queue *drq;
struct rqb_dmabuf *dma_buf;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t status, rq_id;
unsigned long iflags;
uint32_t fctl, idx;
@@ -13165,8 +13256,6 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
hrq->RQ_buf_trunc++;
break;
case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13178,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
hrq->RQ_rcv_buf++;
hrq->RQ_buf_posted--;
fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
/* Just some basic sanity checks on FCP Command frame */
@@ -13200,14 +13290,23 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
drop:
lpfc_in_buf_free(phba, &dma_buf->dbuf);
break;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
case FC_STATUS_INSUFF_BUF_FRM_DISC: case FC_STATUS_INSUFF_BUF_FRM_DISC:
if (phba->nvmet_support) {
tgtp = phba->targetport->private;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
"6401 RQE Error x%x, posted %d err_cnt "
"%d: %x %x %x\n",
status, hrq->RQ_buf_posted,
hrq->RQ_no_posted_buf,
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
/* fallthrough */
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
}
out:
@@ -13361,7 +13460,7 @@ process_cq:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
if (!(++ecount % cq->entry_repost))
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ break;
}
/* Track the max number of CQEs processed in 1 EQ */
@@ -13452,7 +13551,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
if (!(++ecount % cq->entry_repost))
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ break;
}
/* Track the max number of CQEs processed in 1 EQ */
@@ -13534,7 +13633,7 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
while ((eqe = lpfc_sli4_eq_get(eq))) {
lpfc_sli4_fof_handle_eqe(phba, eqe);
if (!(++ecount % eq->entry_repost))
- lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+ break;
eq->EQ_processed++;
}
@@ -13651,7 +13750,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
if (!(++ecount % fpeq->entry_repost))
- lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+ break;
fpeq->EQ_processed++;
}
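The repeated change above from lpfc_sli4_cq_release(..., LPFC_QUEUE_NOARM) / lpfc_sli4_eq_release(...) to break appears to turn entry_repost into a per-pass work budget: each handler invocation drains at most entry_repost entries and then returns, resuming on the next interrupt or poll. A toy illustration of that budgeted polling loop (stubbed queue, illustrative constant):

#include <stdio.h>

#define ENTRY_REPOST 8	/* stand-in for eq/cq->entry_repost */

/* Stub: pretend 20 entries are pending on the queue. */
static int get_next_entry(void)
{
	static int pending = 20;

	return pending-- > 0;
}

static int poll_queue(void)
{
	int ecount = 0;

	while (get_next_entry()) {
		/* ... handle the entry ... */
		if (!(++ecount % ENTRY_REPOST))
			break;	/* budget spent; resume on the next pass */
	}
	return ecount;
}

int main(void)
{
	printf("handled %d entries this pass\n", poll_queue());
	return 0;
}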
@@ -13832,17 +13931,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
}
queue->entry_size = entry_size;
queue->entry_count = entry_count;
/*
* entry_repost is calculated based on the number of entries in the
* queue. This works out except for RQs. If buffers are NOT initially
* posted for every RQE, entry_repost should be adjusted accordingly.
*/
queue->entry_repost = (entry_count >> 3);
if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
queue->phba = phba;
/* entry_repost will be set during q creation */
return queue;
out_fail:
lpfc_sli4_queue_free(queue);
@@ -14073,6 +14165,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
status = -ENXIO;
eq->host_index = 0;
eq->hba_index = 0;
eq->entry_repost = LPFC_EQ_REPOST;
mempool_free(mbox, phba->mbox_mem_pool);
return status;
@@ -14146,9 +14239,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
- "entry cnt %d sz %d pg cnt %d repost %d\n",
+ "entry cnt %d sz %d pg cnt %d\n",
cq->entry_count, cq->entry_size,
- cq->page_count, cq->entry_repost);
+ cq->page_count);
if (cq->entry_count < 256) {
status = -EINVAL;
goto out;
@@ -14201,6 +14294,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->assoc_qid = eq->queue_id;
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
out:
mempool_free(mbox, phba->mbox_mem_pool);
@@ -14392,6 +14486,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->assoc_qid = eq->queue_id;
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
rc = 0;
list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14640,6 +14735,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
mq->subtype = subtype;
mq->host_index = 0;
mq->hba_index = 0;
mq->entry_repost = LPFC_MQ_REPOST;
/* link the mq onto the parent cq child list */
list_add_tail(&mq->list, &cq->child_list);
@@ -14864,34 +14960,6 @@ out:
return status;
}
/**
* lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
* @phba: HBA structure that indicates port to create a queue on.
* @rq: The queue structure to use for the receive queue.
* @qno: The associated HBQ number
*
*
* For SLI4 we need to adjust the RQ repost value based on
* the number of buffers that are initially posted to the RQ.
*/
void
lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
uint32_t cnt;
/* sanity check on queue memory */
if (!rq)
return;
cnt = lpfc_hbq_defs[qno]->entry_count;
/* Recalc repost for RQs based on buffers initially posted */
cnt = (cnt >> 3);
if (cnt < LPFC_QUEUE_MIN_REPOST)
cnt = LPFC_QUEUE_MIN_REPOST;
rq->entry_repost = cnt;
}
/**
* lpfc_rq_create - Create a Receive Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
@@ -15077,6 +15145,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
hrq->entry_repost = LPFC_RQ_REPOST;
/* now create the data queue */
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -15087,7 +15156,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
bf_set(lpfc_rq_context_rqe_count_1,
&rq_create->u.request.context, hrq->entry_count);
- rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+ if (subtype == LPFC_NVMET)
+ rq_create->u.request.context.buffer_size =
+ LPFC_NVMET_DATA_BUF_SIZE;
+ else
+ rq_create->u.request.context.buffer_size =
+ LPFC_DATA_BUF_SIZE;
bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
LPFC_RQE_SIZE_8);
bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
@@ -15124,8 +15198,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
LPFC_RQ_RING_SIZE_4096);
break;
}
- bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
- LPFC_DATA_BUF_SIZE);
+ if (subtype == LPFC_NVMET)
+ bf_set(lpfc_rq_context_buf_size,
+ &rq_create->u.request.context,
+ LPFC_NVMET_DATA_BUF_SIZE);
+ else
+ bf_set(lpfc_rq_context_buf_size,
+ &rq_create->u.request.context,
+ LPFC_DATA_BUF_SIZE);
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id);
@@ -15158,6 +15238,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
drq->entry_repost = LPFC_RQ_REPOST;
/* link the header and data RQs onto the parent cq child list */
list_add_tail(&hrq->list, &cq->child_list);
@@ -15270,7 +15351,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
cq->queue_id);
bf_set(lpfc_rq_context_data_size,
&rq_create->u.request.context,
- LPFC_DATA_BUF_SIZE);
+ LPFC_NVMET_DATA_BUF_SIZE);
bf_set(lpfc_rq_context_hdr_size,
&rq_create->u.request.context,
LPFC_HDR_BUF_SIZE);
@@ -15315,6 +15396,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
hrq->entry_repost = LPFC_RQ_REPOST;
drq->db_format = LPFC_DB_RING_FORMAT;
drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -15323,6 +15405,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
drq->entry_repost = LPFC_RQ_REPOST;
list_add_tail(&hrq->list, &cq->child_list);
list_add_tail(&drq->list, &cq->child_list);
@@ -16063,6 +16146,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
struct fc_vft_header *fc_vft_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
#define FC_RCTL_MDS_DIAGS 0xF4
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -16090,6 +16175,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
case FC_RCTL_F_BSY: /* fabric busy to data frame */
case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
case FC_RCTL_LCR: /* link credit reset */
case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
case FC_RCTL_END: /* end */
break;
case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
@@ -16099,12 +16185,16 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
default:
goto drop;
}
#define FC_TYPE_VENDOR_UNIQUE 0xFF
switch (fc_hdr->fh_type) {
case FC_TYPE_BLS:
case FC_TYPE_ELS:
case FC_TYPE_FCP:
case FC_TYPE_CT:
case FC_TYPE_NVME:
case FC_TYPE_VENDOR_UNIQUE:
break;
case FC_TYPE_IP:
case FC_TYPE_ILS:
@@ -16115,12 +16205,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
- lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
- lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
- be32_to_cpu(header[0]), be32_to_cpu(header[1]),
- be32_to_cpu(header[2]), be32_to_cpu(header[3]),
- be32_to_cpu(header[4]), be32_to_cpu(header[5]),
- be32_to_cpu(header[6]));
+ (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
+ lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+ (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
+ "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
+ fc_hdr->fh_type, be32_to_cpu(header[0]),
+ be32_to_cpu(header[1]), be32_to_cpu(header[2]),
+ be32_to_cpu(header[3]), be32_to_cpu(header[4]),
+ be32_to_cpu(header[5]), be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -16926,6 +17018,96 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
lpfc_sli_release_iocbq(phba, iocbq);
}
static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_dmabuf *pcmd = cmdiocb->context2;
if (pcmd && pcmd->virt)
pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
lpfc_sli_release_iocbq(phba, cmdiocb);
}
static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
struct hbq_dmabuf *dmabuf)
{
struct fc_frame_header *fc_hdr;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq = NULL;
union lpfc_wqe *wqe;
struct lpfc_dmabuf *pcmd = NULL;
uint32_t frame_len;
int rc;
fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
/* Send the received frame back */
iocbq = lpfc_sli_get_iocbq(phba);
if (!iocbq)
goto exit;
/* Allocate buffer for command payload */
pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (pcmd)
pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
&pcmd->phys);
if (!pcmd || !pcmd->virt)
goto exit;
INIT_LIST_HEAD(&pcmd->list);
/* copyin the payload */
memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
/* fill in BDE's for command */
iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
iocbq->context2 = pcmd;
iocbq->vport = vport;
iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
/*
* Setup rest of the iocb as though it were a WQE
* Build the SEND_FRAME WQE
*/
wqe = (union lpfc_wqe *)&iocbq->iocb;
wqe->send_frame.frame_len = frame_len;
wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
iocbq->iocb.ulpLe = 1;
iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
if (rc == IOCB_ERROR)
goto exit;
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
exit:
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2023 Unable to process MDS loopback frame\n");
if (pcmd && pcmd->virt)
pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
lpfc_sli_release_iocbq(phba, iocbq);
lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
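The loopback handler above echoes the received frame by packing its 24-byte FC header into six 32-bit WQE words, byte-swapped from big-endian wire order. A standalone sketch of just that packing step, using ntohl as a userspace stand-in for be32_to_cpu and memcpy to sidestep the cast's alignment concerns (values are made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 24-byte FC header as it arrives off the wire (big-endian). */
	uint8_t fc_hdr[24] = { 0xf4, 0x00, 0x12, 0x34 };
	uint32_t wd[6];

	for (int i = 0; i < 6; i++) {
		uint32_t raw;

		memcpy(&raw, fc_hdr + 4 * i, sizeof(raw));
		wd[i] = ntohl(raw);	/* cf. be32_to_cpu(*((uint32_t *)fc_hdr + i)) */
	}
	printf("fc_hdr_wd0 = 0x%08x\n", wd[0]);	/* 0xf4001234: r_ctl 0xF4 */
	return 0;
}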
/**
* lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
* @phba: Pointer to HBA context object.
@@ -16964,6 +17146,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fcfi = bf_get(lpfc_rcqe_fcf_id,
&dmabuf->cq_event.cqe.rcqe_cmpl);
if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
vport = phba->pport;
/* Handle MDS Loopback frames */
lpfc_sli4_handle_mds_loopback(vport, dmabuf);
return;
}
/* d_id this frame is directed to */
did = sli4_did_from_fc_hdr(fc_hdr);
@@ -17137,6 +17326,14 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
} else {
/*
* The next_rpi stores the next logical module-64 rpi value used
* to post physical rpis in subsequent rpi postings.
*/
spin_lock_irq(&phba->hbalock);
phba->sli4_hba.next_rpi = rpi_page->next_rpi;
spin_unlock_irq(&phba->hbalock);
} }
return rc; return rc;
} }
@ -18717,7 +18914,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags); spin_lock_irqsave(&pring->ring_lock, iflags);
ctxp = pwqe->context2; ctxp = pwqe->context2;
sglq = ctxp->rqb_buffer->sglq; sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) { if (pwqe->sli4_xritag == NO_XRI) {
pwqe->sli4_lxritag = sglq->sli4_lxritag; pwqe->sli4_lxritag = sglq->sli4_lxritag;
pwqe->sli4_xritag = sglq->sli4_xritag; pwqe->sli4_xritag = sglq->sli4_xritag;


@@ -24,7 +24,6 @@
 #define LPFC_XRI_EXCH_BUSY_WAIT_TMO     10000
 #define LPFC_XRI_EXCH_BUSY_WAIT_T1      10
 #define LPFC_XRI_EXCH_BUSY_WAIT_T2      30000
-#define LPFC_RELEASE_NOTIFICATION_INTERVAL  32
 #define LPFC_RPI_LOW_WATER_MARK         10
 
 #define LPFC_UNREG_FCF 1
@@ -155,7 +154,11 @@ struct lpfc_queue {
     uint32_t entry_count;   /* Number of entries to support on the queue */
     uint32_t entry_size;    /* Size of each queue entry. */
     uint32_t entry_repost;  /* Count of entries before doorbell is rung */
-#define LPFC_QUEUE_MIN_REPOST   8
+#define LPFC_EQ_REPOST      8
+#define LPFC_MQ_REPOST      8
+#define LPFC_CQ_REPOST      64
+#define LPFC_RQ_REPOST      64
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL  32  /* For WQs */
     uint32_t queue_id;      /* Queue ID assigned by the hardware */
     uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
     uint32_t page_count;    /* Number of pages allocated for this queue */
@@ -195,7 +198,7 @@ struct lpfc_queue {
 /* defines for RQ stats */
 #define RQ_no_posted_buf    q_cnt_1
 #define RQ_no_buf_found     q_cnt_2
-#define RQ_buf_trunc        q_cnt_3
+#define RQ_buf_posted       q_cnt_3
 #define RQ_rcv_buf          q_cnt_4
 
     uint64_t isr_timestamp;
@@ -617,12 +620,17 @@ struct lpfc_sli4_hba {
     uint16_t scsi_xri_start;
     uint16_t els_xri_cnt;
     uint16_t nvmet_xri_cnt;
+    uint16_t nvmet_ctx_cnt;
+    uint16_t nvmet_io_wait_cnt;
+    uint16_t nvmet_io_wait_total;
     struct list_head lpfc_els_sgl_list;
     struct list_head lpfc_abts_els_sgl_list;
     struct list_head lpfc_nvmet_sgl_list;
     struct list_head lpfc_abts_nvmet_ctx_list;
     struct list_head lpfc_abts_scsi_buf_list;
    struct list_head lpfc_abts_nvme_buf_list;
+    struct list_head lpfc_nvmet_ctx_list;
+    struct list_head lpfc_nvmet_io_wait_list;
     struct lpfc_sglq **lpfc_sglq_active_list;
     struct list_head lpfc_rpi_hdr_list;
     unsigned long *rpi_bmask;
@@ -654,6 +662,7 @@ struct lpfc_sli4_hba {
     spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
     spinlock_t sgl_list_lock; /* list of aborted els IOs */
     spinlock_t nvmet_io_lock;
+    spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
     uint32_t physical_port;
 
     /* CPU to vector mapping information */
@@ -661,8 +670,6 @@ struct lpfc_sli4_hba {
     uint16_t num_online_cpu;
     uint16_t num_present_cpu;
     uint16_t curr_disp_cpu;
-
-    uint16_t nvmet_mrq_post_idx;
 };
 
 enum lpfc_sge_type {
@@ -698,6 +705,7 @@ struct lpfc_rpi_hdr {
     struct lpfc_dmabuf *dmabuf;
     uint32_t page_count;
     uint32_t start_rpi;
+    uint16_t next_rpi;
 };
 
 struct lpfc_rsrc_blks {
@@ -762,7 +770,6 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
 int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
             struct lpfc_queue **drqp, struct lpfc_queue **cqp,
             uint32_t subtype);
-void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
 int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
 int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
 int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);


@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.12"
+#define LPFC_DRIVER_VERSION "11.2.0.14"
 #define LPFC_DRIVER_NAME        "lpfc"
 
 /* Used for SLI 2/3 */


@@ -1851,7 +1851,7 @@ static int scsi_mq_prep_fn(struct request *req)
 
     /* zero out the cmd, except for the embedded scsi_request */
     memset((char *)cmd + sizeof(cmd->req), 0,
-        sizeof(*cmd) - sizeof(cmd->req));
+        sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
 
     req->special = cmd;
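
The scsi_mq_prep_fn() fix above widens the memset: each blk-mq command is allocated with shost->hostt->cmd_size bytes of LLD-private data behind the generic struct, and clearing only sizeof(*cmd) left that trailer holding state from the previous request. A minimal userspace sketch of the layout and the corrected length (names here are hypothetical, not the kernel's):

#include <stdlib.h>
#include <string.h>

struct req  { int tag; };                   /* survives the reset   */
struct cmnd { struct req req; int flags; }; /* generic command part */

/* one allocation: generic command plus cmd_size driver-private bytes */
static struct cmnd *cmnd_alloc(size_t cmd_size)
{
    return calloc(1, sizeof(struct cmnd) + cmd_size);
}

/* clear everything behind the embedded request, including the
 * driver-private trailer -- the short memset was the bug */
static void cmnd_reinit(struct cmnd *cmd, size_t cmd_size)
{
    memset((char *)cmd + sizeof(cmd->req), 0,
           sizeof(*cmd) - sizeof(cmd->req) + cmd_size);
}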


@@ -827,21 +827,32 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
     struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
     u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
     u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+    int ret;
 
     if (!(rq->cmd_flags & REQ_NOUNMAP)) {
         switch (sdkp->zeroing_mode) {
         case SD_ZERO_WS16_UNMAP:
-            return sd_setup_write_same16_cmnd(cmd, true);
+            ret = sd_setup_write_same16_cmnd(cmd, true);
+            goto out;
         case SD_ZERO_WS10_UNMAP:
-            return sd_setup_write_same10_cmnd(cmd, true);
+            ret = sd_setup_write_same10_cmnd(cmd, true);
+            goto out;
         }
     }
 
     if (sdp->no_write_same)
         return BLKPREP_INVALID;
+
     if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
-        return sd_setup_write_same16_cmnd(cmd, false);
-    return sd_setup_write_same10_cmnd(cmd, false);
+        ret = sd_setup_write_same16_cmnd(cmd, false);
+    else
+        ret = sd_setup_write_same10_cmnd(cmd, false);
+
+out:
+    if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
+        return sd_zbc_write_lock_zone(cmd);
+
+    return ret;
 }
 
 static void sd_config_write_same(struct scsi_disk *sdkp)
@@ -948,6 +959,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
     rq->__data_len = sdp->sector_size;
     ret = scsi_init_io(cmd);
     rq->__data_len = nr_bytes;
+
+    if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
+        sd_zbc_write_unlock_zone(cmd);
+
     return ret;
 }
 
@@ -1567,17 +1582,21 @@ out:
     return retval;
 }
 
-static int sd_sync_cache(struct scsi_disk *sdkp)
+static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
 {
     int retries, res;
     struct scsi_device *sdp = sdkp->device;
     const int timeout = sdp->request_queue->rq_timeout
         * SD_FLUSH_TIMEOUT_MULTIPLIER;
-    struct scsi_sense_hdr sshdr;
+    struct scsi_sense_hdr my_sshdr;
 
     if (!scsi_device_online(sdp))
         return -ENODEV;
 
+    /* caller might not be interested in sense, but we need it */
+    if (!sshdr)
+        sshdr = &my_sshdr;
+
     for (retries = 3; retries > 0; --retries) {
         unsigned char cmd[10] = { 0 };
 
@@ -1586,7 +1605,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
          * Leave the rest of the command zero to indicate
          * flush everything.
          */
-        res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+        res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
                 timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
         if (res == 0)
             break;
@@ -1596,11 +1615,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
         sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
 
         if (driver_byte(res) & DRIVER_SENSE)
-            sd_print_sense_hdr(sdkp, &sshdr);
+            sd_print_sense_hdr(sdkp, sshdr);
+
         /* we need to evaluate the error return  */
-        if (scsi_sense_valid(&sshdr) &&
-            (sshdr.asc == 0x3a ||   /* medium not present */
-             sshdr.asc == 0x20))    /* invalid command */
+        if (scsi_sense_valid(sshdr) &&
+            (sshdr->asc == 0x3a ||  /* medium not present */
+             sshdr->asc == 0x20))   /* invalid command */
                 /* this is no error here */
                 return 0;
 
@@ -3444,7 +3464,7 @@ static void sd_shutdown(struct device *dev)
     if (sdkp->WCE && sdkp->media_present) {
         sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
-        sd_sync_cache(sdkp);
+        sd_sync_cache(sdkp, NULL);
     }
 
     if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
@@ -3456,6 +3476,7 @@ static void sd_shutdown(struct device *dev)
 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 {
     struct scsi_disk *sdkp = dev_get_drvdata(dev);
+    struct scsi_sense_hdr sshdr;
     int ret = 0;
 
     if (!sdkp)  /* E.g.: runtime suspend following sd_remove() */
@@ -3463,12 +3484,23 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 
     if (sdkp->WCE && sdkp->media_present) {
         sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
-        ret = sd_sync_cache(sdkp);
+        ret = sd_sync_cache(sdkp, &sshdr);
+
         if (ret) {
             /* ignore OFFLINE device */
             if (ret == -ENODEV)
-                ret = 0;
-            goto done;
+                return 0;
+
+            if (!scsi_sense_valid(&sshdr) ||
+                sshdr.sense_key != ILLEGAL_REQUEST)
+                return ret;
+
+            /*
+             * sshdr.sense_key == ILLEGAL_REQUEST means this drive
+             * doesn't support sync. There's not much to do and
+             * suspend shouldn't fail.
+             */
+            ret = 0;
         }
     }
 
@@ -3480,7 +3512,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
             ret = 0;
     }
 
-done:
     return ret;
 }
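
The reworked sd_sync_cache() is the optional out-parameter idiom: the function needs sense data for its own error handling either way, so a NULL argument falls back to a stack local, while a caller that wants to distinguish "drive can't sync" from a real failure (as sd_suspend_common() now does) passes its own buffer. A minimal userspace sketch of the shape, with hypothetical names:

#include <stddef.h>

struct sense { int key; int asc; };

#define ILLEGAL_REQ 0x05

static int issue_flush(struct sense *s) { s->key = 0; s->asc = 0; return 0; }

static int sync_cache(struct sense *sshdr)
{
    struct sense local;

    if (!sshdr)         /* caller not interested, but we still need it */
        sshdr = &local;
    return issue_flush(sshdr);  /* sense also drives our own retries */
}

static int suspend_path(void)
{
    struct sense sshdr;
    int ret = sync_cache(&sshdr);

    /* a drive that merely rejects the command shouldn't fail suspend */
    if (ret && sshdr.key == ILLEGAL_REQ)
        ret = 0;
    return ret;
}

static void shutdown_path(void)
{
    sync_cache(NULL);   /* best effort, sense not needed */
}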


@@ -2074,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
         if ((1 == resp->done) && (!resp->sg_io_owned) &&
             ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
             resp->done = 2; /* guard against other readers */
-            break;
+            write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+            return resp;
         }
     }
     write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-    return resp;
+    return NULL;
 }
 
 /* always adds to end of list */
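
The sg_get_rq_mark() rewrite is a small locking-hygiene pattern worth noting: return the claimed entry from inside the walk, right after dropping the lock, and make the fall-out-of-the-loop path return NULL, so the function can never hand back a stale iterator value when nothing matched. A userspace sketch of the same shape, with hypothetical names:

#include <pthread.h>
#include <stddef.h>

struct req {
    struct req *next;
    int id;
    int done;   /* 1 = completed, 2 = claimed by a reader */
};

static pthread_rwlock_t rq_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct req *rq_list;

static struct req *get_rq_mark(int pack_id)
{
    struct req *resp;

    pthread_rwlock_wrlock(&rq_lock);
    for (resp = rq_list; resp; resp = resp->next) {
        if (resp->done == 1 &&
            (pack_id == -1 || resp->id == pack_id)) {
            resp->done = 2; /* guard against other readers */
            pthread_rwlock_unlock(&rq_lock);
            return resp;    /* only a real match escapes */
        }
    }
    pthread_rwlock_unlock(&rq_lock);
    return NULL;            /* never a leftover iterator value */
}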


@@ -7698,6 +7698,12 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
     ufshcd_add_spm_lvl_sysfs_nodes(hba);
 }
 
+static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
+{
+    device_remove_file(hba->dev, &hba->rpm_lvl_attr);
+    device_remove_file(hba->dev, &hba->spm_lvl_attr);
+}
+
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -7735,6 +7741,7 @@ EXPORT_SYMBOL(ufshcd_shutdown);
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
+    ufshcd_remove_sysfs_nodes(hba);
     scsi_remove_host(hba->host);
     /* disable interrupts */
     ufshcd_disable_intr(hba, hba->intr_mask);


@@ -1671,8 +1671,12 @@ static long ceph_fallocate(struct file *file, int mode,
     }
 
     size = i_size_read(inode);
-    if (!(mode & FALLOC_FL_KEEP_SIZE))
+    if (!(mode & FALLOC_FL_KEEP_SIZE)) {
         endoff = offset + length;
+        ret = inode_newsize_ok(inode, endoff);
+        if (ret)
+            goto unlock;
+    }
 
     if (fi->fmode & CEPH_FILE_MODE_LAZY)
         want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;


@@ -1280,7 +1280,6 @@ xfs_bmap_read_extents(
     xfs_bmbt_rec_t      *frp;
     xfs_fsblock_t       nextbno;
     xfs_extnum_t        num_recs;
-    xfs_extnum_t        start;
 
     num_recs = xfs_btree_get_numrecs(block);
     if (unlikely(i + num_recs > room)) {
@@ -1303,7 +1302,6 @@ xfs_bmap_read_extents(
          * Copy records into the extent records.
          */
         frp = XFS_BMBT_REC_ADDR(mp, block, 1);
-        start = i;
         for (j = 0; j < num_recs; j++, i++, frp++) {
             xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
             trp->l0 = be64_to_cpu(frp->l0);
@@ -2065,8 +2063,10 @@ xfs_bmap_add_extent_delay_real(
         }
         temp = xfs_bmap_worst_indlen(bma->ip, temp);
         temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
-        diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
-            (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+        diff = (int)(temp + temp2 -
+                 (startblockval(PREV.br_startblock) -
+                  (bma->cur ?
+                       bma->cur->bc_private.b.allocated : 0)));
         if (diff > 0) {
             error = xfs_mod_fdblocks(bma->ip->i_mount,
                          -((int64_t)diff), false);
@@ -2123,7 +2123,6 @@ xfs_bmap_add_extent_delay_real(
         temp = da_new;
         if (bma->cur)
             temp += bma->cur->bc_private.b.allocated;
-        ASSERT(temp <= da_old);
         if (temp < da_old)
             xfs_mod_fdblocks(bma->ip->i_mount,
                     (int64_t)(da_old - temp), false);


@@ -4395,7 +4395,7 @@ xfs_btree_visit_blocks(
             xfs_btree_readahead_ptr(cur, ptr, 1);
 
             /* save for the next iteration of the loop */
-            lptr = *ptr;
+            xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
         }
 
         /* for each buffer in the level */
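
The xfs_btree_visit_blocks() one-liner swaps a whole-union assignment for xfs_btree_copy_ptrs(), which memcpy()s only the bytes valid for the btree's pointer format; reading the full 64-bit member through a pointer that may only be 32-bit aligned inside the block can fault on strict-alignment CPUs. A sketch of that hazard, with hypothetical names:

#include <stdint.h>
#include <string.h>

union bptr {
    uint64_t l; /* long format: compiler assumes 8-byte alignment */
    uint32_t s; /* short format: may sit at a 4-byte boundary */
};

/* unsafe on strict-alignment CPUs: a union assignment loads all
 * 8 bytes through a possibly 4-byte-aligned pointer */
static void copy_ptr_unsafe(union bptr *dst, const union bptr *src)
{
    *dst = *src;
}

/* safe: memcpy has no alignment requirement and copies only the
 * bytes the current format actually uses */
static void copy_ptr_safe(union bptr *dst, const void *src, int long_format)
{
    memcpy(dst, src, long_format ? sizeof(uint64_t) : sizeof(uint32_t));
}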


@@ -1629,13 +1629,28 @@ xfs_refcount_recover_cow_leftovers(
     if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
         return -EOPNOTSUPP;
 
-    error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+    INIT_LIST_HEAD(&debris);
+
+    /*
+     * In this first part, we use an empty transaction to gather up
+     * all the leftover CoW extents so that we can subsequently
+     * delete them.  The empty transaction is used to avoid
+     * a buffer lock deadlock if there happens to be a loop in the
+     * refcountbt because we're allowed to re-grab a buffer that is
+     * already attached to our transaction.  When we're done
+     * recording the CoW debris we cancel the (empty) transaction
+     * and everything goes away cleanly.
+     */
+    error = xfs_trans_alloc_empty(mp, &tp);
     if (error)
         return error;
-    cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+    error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+    if (error)
+        goto out_trans;
+    cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
     /* Find all the leftover CoW staging extents. */
-    INIT_LIST_HEAD(&debris);
     memset(&low, 0, sizeof(low));
     memset(&high, 0, sizeof(high));
     low.rc.rc_startblock = XFS_REFC_COW_START;
@@ -1645,10 +1660,11 @@ xfs_refcount_recover_cow_leftovers(
     if (error)
         goto out_cursor;
     xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-    xfs_buf_relse(agbp);
+    xfs_trans_brelse(tp, agbp);
+    xfs_trans_cancel(tp);
 
     /* Now iterate the list to free the leftovers */
-    list_for_each_entry(rr, &debris, rr_list) {
+    list_for_each_entry_safe(rr, n, &debris, rr_list) {
         /* Set up transaction. */
         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
         if (error)
@@ -1676,8 +1692,16 @@ xfs_refcount_recover_cow_leftovers(
         error = xfs_trans_commit(tp);
         if (error)
             goto out_free;
+
+        list_del(&rr->rr_list);
+        kmem_free(rr);
     }
 
+    return error;
+out_defer:
+    xfs_defer_cancel(&dfops);
+out_trans:
+    xfs_trans_cancel(tp);
 out_free:
     /* Free the leftover list */
     list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1688,11 +1712,6 @@ out_free:
 
 out_cursor:
     xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-    xfs_buf_relse(agbp);
-    goto out_free;
-
-out_defer:
-    xfs_defer_cancel(&dfops);
-    xfs_trans_cancel(tp);
-    goto out_free;
+    xfs_trans_brelse(tp, agbp);
+    goto out_trans;
 }
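
The restructured xfs_refcount_recover_cow_leftovers() is a two-phase gather-then-process pattern: phase one walks the btree under the empty transaction and only records work items on a private list; phase two gives each item its own real transaction and unlinks/frees it as soon as it is done, so an error mid-way leaves exactly the unprocessed tail for the shared cleanup path. A userspace sketch of the list half of that pattern, with hypothetical names:

#include <stdlib.h>

struct recovery {
    struct recovery *next;
    long startblock;
    long blockcount;
};

/* phase two worker: one "transaction" per item; hypothetical */
static int free_one(struct recovery *rr)
{
    return 0;   /* 0 on success, nonzero on error */
}

static int process_leftovers(struct recovery **head)
{
    struct recovery *rr, *n;
    int error = 0;

    /* "safe" walk: fetch next before freeing the current node */
    for (rr = *head; rr; rr = n) {
        n = rr->next;
        error = free_one(rr);
        if (error)
            break;      /* tail stays for the cleanup path */
        *head = n;      /* unlink... */
        free(rr);       /* ...and free what's already done */
    }
    return error;
}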


@@ -582,9 +582,13 @@ xfs_getbmap(
         }
         break;
     default:
+        /* Local format data forks report no extents. */
+        if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+            bmv->bmv_entries = 0;
+            return 0;
+        }
         if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-            ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-            ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+            ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
             return -EINVAL;
 
         if (xfs_get_extsz_hint(ip) ||
@@ -712,7 +716,7 @@ xfs_getbmap(
              * extents.
              */
             if (map[i].br_startblock == DELAYSTARTBLOCK &&
-                map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+                map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
                 ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 
             if (map[i].br_startblock == HOLESTARTBLOCK &&


@@ -1043,49 +1043,17 @@ xfs_find_get_desired_pgoff(
 
     index = startoff >> PAGE_SHIFT;
     endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-    end = endoff >> PAGE_SHIFT;
+    end = (endoff - 1) >> PAGE_SHIFT;
     do {
         int     want;
         unsigned    nr_pages;
         unsigned int    i;
 
-        want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+        want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
         nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                       want);
-        /*
-         * No page mapped into given range.  If we are searching holes
-         * and if this is the first time we got into the loop, it means
-         * that the given offset is landed in a hole, return it.
-         *
-         * If we have already stepped through some block buffers to find
-         * holes but they all contains data.  In this case, the last
-         * offset is already updated and pointed to the end of the last
-         * mapped page, if it does not reach the endpoint to search,
-         * that means there should be a hole between them.
-         */
-        if (nr_pages == 0) {
-            /* Data search found nothing */
-            if (type == DATA_OFF)
-                break;
-
-            ASSERT(type == HOLE_OFF);
-            if (lastoff == startoff || lastoff < endoff) {
-                found = true;
-                *offset = lastoff;
-            }
-            break;
-        }
-
-        /*
-         * At lease we found one page.  If this is the first time we
-         * step into the loop, and if the first page index offset is
-         * greater than the given search offset, a hole was found.
-         */
-        if (type == HOLE_OFF && lastoff == startoff &&
-            lastoff < page_offset(pvec.pages[0])) {
-            found = true;
+        if (nr_pages == 0)
             break;
-        }
 
         for (i = 0; i < nr_pages; i++) {
             struct page *page = pvec.pages[i];
@@ -1098,18 +1066,18 @@
              * file mapping. However, page->index will not change
              * because we have a reference on the page.
              *
-             * Searching done if the page index is out of range.
-             * If the current offset is not reaches the end of
-             * the specified search range, there should be a hole
-             * between them.
+             * If current page offset is beyond where we've ended,
+             * we've found a hole.
              */
-            if (page->index > end) {
-                if (type == HOLE_OFF && lastoff < endoff) {
-                    *offset = lastoff;
-                    found = true;
-                }
+            if (type == HOLE_OFF && lastoff < endoff &&
+                lastoff < page_offset(pvec.pages[i])) {
+                found = true;
+                *offset = lastoff;
                 goto out;
             }
 
+            /* Searching done if the page index is out of range. */
+            if (page->index > end)
+                goto out;
+
             lock_page(page);
             /*
@@ -1151,21 +1119,20 @@
 
         /*
          * The number of returned pages less than our desired, search
-         * done.  In this case, nothing was found for searching data,
-         * but we found a hole behind the last offset.
+         * done.
          */
-        if (nr_pages < want) {
-            if (type == HOLE_OFF) {
-                *offset = lastoff;
-                found = true;
-            }
+        if (nr_pages < want)
             break;
-        }
 
         index = pvec.pages[i - 1]->index + 1;
         pagevec_release(&pvec);
     } while (index <= end);
 
+    /* No page at lastoff and we are not done - we found a hole. */
+    if (type == HOLE_OFF && lastoff < endoff) {
+        *offset = lastoff;
+        found = true;
+    }
+
 out:
     pagevec_release(&pvec);
     return found;
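
The xfs_find_get_desired_pgoff() rewrite collapses several special-cased hole checks into one invariant: a hole exists wherever the running offset lags the next mapped page, plus a single post-loop check for a trailing hole. A simplified userspace sketch of that structure, with hypothetical names (a sorted array stands in for the page cache):

#include <stdbool.h>
#include <stddef.h>

/* pages[]: sorted indices of mapped pages within [start, end] */
static bool find_hole(const long *pages, size_t npages,
              long start, long end, long *hole)
{
    long lastoff = start;   /* first offset not yet proven mapped */
    size_t i;

    for (i = 0; i < npages && pages[i] <= end; i++) {
        if (pages[i] > lastoff) {   /* gap before this page */
            *hole = lastoff;
            return true;
        }
        lastoff = pages[i] + 1;
    }

    /* no page at lastoff and we are not done - we found a hole */
    if (lastoff <= end) {
        *hole = lastoff;
        return true;
    }
    return false;
}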


@@ -828,6 +828,7 @@ xfs_getfsmap(
     struct xfs_fsmap        dkeys[2];   /* per-dev keys */
     struct xfs_getfsmap_dev     handlers[XFS_GETFSMAP_DEVS];
     struct xfs_getfsmap_info    info = { NULL };
+    bool                use_rmap;
     int             i;
     int             error = 0;
 
@@ -837,12 +838,14 @@ xfs_getfsmap(
         !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1]))
         return -EINVAL;
 
+    use_rmap = capable(CAP_SYS_ADMIN) &&
+           xfs_sb_version_hasrmapbt(&mp->m_sb);
     head->fmh_entries = 0;
 
     /* Set up our device handlers. */
     memset(handlers, 0, sizeof(handlers));
     handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev);
-    if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+    if (use_rmap)
         handlers[0].fn = xfs_getfsmap_datadev_rmapbt;
     else
         handlers[0].fn = xfs_getfsmap_datadev_bnobt;


@@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                 bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 bool blk_mq_queue_stopped(struct request_queue *q);


@@ -3,6 +3,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/string.h>
+
 #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
 
 /*
@@ -12,12 +14,10 @@
  */
 
 # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
-extern const char *ceph_file_part(const char *s, int len);
 # define dout(fmt, ...)                     \
     pr_debug("%.*s %12.12s:%-4d : " fmt,            \
          8 - (int)sizeof(KBUILD_MODNAME), "    ",   \
-         ceph_file_part(__FILE__, sizeof(__FILE__)),    \
-         __LINE__, ##__VA_ARGS__)
+         kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
 # else
 /* faux printk call just to see any compiler warnings. */
 # define dout(fmt, ...) do {                    \
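
The dout() change drops ceph's private ceph_file_part() helper in favor of the generic kbasename() that <linux/string.h> now provides: it returns the component after the last '/' of a path, or the whole string if there is none. A userspace stand-in for reference:

#include <string.h>

/* equivalent of the kernel's kbasename(): last path component */
static const char *base_name(const char *path)
{
    const char *tail = strrchr(path, '/');

    return tail ? tail + 1 : path;
}

/* base_name("net/ceph/osd_client.c") == "osd_client.c" */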
