A600PVPU1ARHC

Signed-off-by: BlackMesa123 <brother12@hotmail.it>
This commit is contained in:
BlackMesa123 2018-10-25 16:58:42 +02:00
parent d4b7048986
commit f3dbc97663
1487 changed files with 293381 additions and 17841 deletions

View File

@ -1,3 +1,17 @@
dtb-$(CONFIG_MACH_EXYNOS7884_A6ELTE_USA) := \
exynos7884-a6elte_usa_00.dtb \
exynos7884-a6elte_usa_03.dtb
dtb-$(CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN) := \
exynos7885-jackpot2lte_eur_open_00.dtb \
exynos7885-jackpot2lte_eur_open_01.dtb \
exynos7885-jackpot2lte_eur_open_02.dtb \
exynos7885-jackpot2lte_eur_open_03.dtb \
exynos7885-jackpot2lte_eur_open_04.dtb \
exynos7885-jackpot2lte_eur_open_05.dtb \
exynos7885-jackpot2lte_eur_open_06.dtb \
exynos7885-jackpot2lte_eur_open_07.dtb
dtb-$(CONFIG_MACH_EXYNOS7885_JACKPOTLTE_CAN_OPEN) := \
exynos7885-jackpotlte_can_open_00.dtb \
exynos7885-jackpotlte_can_open_01.dtb \
@ -31,16 +45,6 @@ dtb-$(CONFIG_MACH_EXYNOS7885_JACKPOTLTE_KOR) := \
exynos7885-jackpotlte_kor_06.dtb \
exynos7885-jackpotlte_kor_07.dtb
dtb-$(CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN) := \
exynos7885-jackpot2lte_eur_open_00.dtb \
exynos7885-jackpot2lte_eur_open_01.dtb \
exynos7885-jackpot2lte_eur_open_02.dtb \
exynos7885-jackpot2lte_eur_open_03.dtb \
exynos7885-jackpot2lte_eur_open_04.dtb \
exynos7885-jackpot2lte_eur_open_05.dtb \
exynos7885-jackpot2lte_eur_open_06.dtb \
exynos7885-jackpot2lte_eur_open_07.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)

View File

@ -0,0 +1,583 @@
/*
* A6 2018 US Battery parameters device tree file for board IDs 04 and higher
*
* Copyright (C) 2018 Samsung Electronics, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
/ {
/*
 * ALIVE-domain pin controller: interrupt pin configuration for the
 * S2MU005 fuel gauge and interface-PMIC IRQ lines (both set to
 * pin-function 0, i.e. input on Samsung pinctrl).
 */
pinctrl@11CB0000 {
fuel_irq: fuel-irq {
samsung,pins = "gpa2-4";
samsung,pin-function = <0>;
samsung,pin-pud = <2>;
samsung,pin-drv = <0>;
};
if_pmic_irq: if-pmic-irq {
samsung,pins = "gpa2-2";
samsung,pin-function = <0>;
samsung,pin-pud = <0>;
samsung,pin-drv = <0>;
};
};
/*
 * I2C bus hosting the S2MU005 fuel gauge at address 0x3B.
 * Alert thresholds: 1% SoC / 3000 mV; fuel_int wired to gpa2-4
 * (matches the fuel_irq pinctrl entry above).
 */
i2c@13830000 {
status = "okay";
s2mu005-fuelgauge@3b {
compatible = "samsung,s2mu005-fuelgauge";
reg = <0x3B>;
pinctrl-names = "default";
pinctrl-0 = <&fuel_irq>;
fuelgauge,fuel_int = <&gpa2 4 0>;
fuelgauge,fuel_alert_soc = <1>;
fuelgauge,fuel_alert_vol = <3000>;
fuelgauge,low_temp_limit = <100>;
fuelgauge,sw_vbat_l_recovery_vol = <3465>;
fuelgauge,capacity_max = <1000>;
fuelgauge,capacity_max_margin = <200>;
fuelgauge,capacity_min = <0>;
fuelgauge,capacity_calculation_type = <28>;
fuelgauge,capacity_full = <3000>;	/* mAh; matches battery,battery_full_capacity */
fuelgauge,type_str = "SDI";
fuelgauge,model_type = <1>;
fuelgauge,fg_log_enable = <1>;
};
};
/*
 * Fuel-gauge calibration data for the 3000 mAh SDI pack.
 * battery,battery_data packs FIVE parameter sets; presumably one per
 * float-voltage step of the battery-aging table (4.35/4.33/4.31/4.29/
 * 4.24 V — see battery,age_data in the battery node) — TODO confirm
 * against the s2mu005 fuelgauge driver's parsing order.
 * Each set contains: table3 (OCV lookup), table4, batcap (register
 * bytes, order noted inline), accum, soc_arr_val and ocv_arr_val.
 */
battery_params {
battery,battery_data =
<
/***** battery data for 4.35 *****/
/* battery,battery_table3 */
176 11 19 11 119 10 221 9 102 9
248 8 144 8 28 8 191 7 128 7
40 7 211 6 154 6 114 6 83 6
60 6 31 6 249 5 196 5 146 5
28 5 209 1 240 8 129 8 18 8
163 7 52 7 197 6 86 6 231 5
120 5 9 5 154 4 43 4 188 3
77 3 222 2 111 2 0 2 145 1
34 1 179 0 68 0 213 15
/* battery,battery_table4 */
97 97 97 97 97 96 96 97 96 96
96 96 97 98 100 101 104 107 113 124
138 206
/* battery,batcap */
0x2D 0xB4 0x0B 0x6D /* [0x0F] [0x0E] [0x11] [0x10] */
/* battery,accum */
0x3B 0x08 /* [0x44] [0x45] */
/* battery,soc_arr_val */
11170 10628 10086 9544 9002 8460 7917 7376 6834 6292
5750 5208 4666 4124 3582 3040 2498 1956 1414 872
330 (-212)
/* battery,ocv_arr_val */
44608 43844 43080 42328 41746 41210 40702 40134 39682 39376
38944 38530 38251 38056 37907 37791 37650 37464 37208 36964
36386 32272
/***** battery data for 4.33 *****/
/* battery,battery_table3 */
175 11 19 11 119 10 221 9 101 9
248 8 144 8 28 8 191 7 128 7
40 7 211 6 154 6 114 6 84 6
60 6 31 6 249 5 196 5 146 5
28 5 209 1 216 8 106 8 252 7
142 7 33 7 179 6 69 6 215 5
105 5 251 4 141 4 32 4 178 3
68 3 214 2 104 2 250 1 140 1
30 1 177 0 67 0 213 15
/* battery,battery_table4 */
97 97 97 97 97 97 96 97 96 96
96 96 97 98 100 101 104 107 113 124
138 104
/* battery,batcap */
0x2D 0xA0 0x0B 0x68 /* [0x0F] [0x0E] [0x11] [0x10] */
/* battery,accum */
0x3B 0x08 /* [0x44] [0x45] */
/* battery,soc_arr_val */
11054 10518 9982 9445 8909 8372 7836 7300 6763 6227
5689 5154 4618 4081 3545 3008 2472 1936 1399 863
325 (-210)
/* battery,ocv_arr_val */
44607 43844 43080 42329 41745 41211 40702 40134 39682 39376
38945 38531 38251 38056 37908 37791 37650 37464 37208 36964
36386 32272
/***** battery data for 4.31 *****/
/* battery,battery_table3 */
176 11 19 11 119 10 221 9 101 9
248 8 144 8 28 8 191 7 128 7
40 7 211 6 154 6 114 6 84 6
60 6 31 6 249 5 196 5 146 5
28 5 209 1 240 8 129 8 18 8
163 7 52 7 197 6 86 6 231 5
120 5 9 5 154 4 43 4 188 3
77 3 222 2 111 2 0 2 145 1
34 1 179 0 68 0 213 15
/* battery,battery_table4 */
97 97 97 97 97 97 96 97 96 96
96 96 97 98 100 101 104 107 113 124
138 104
/* battery,batcap */
0x2D 0x00 0x0B 0x40 /* [0x0F] [0x0E] [0x11] [0x10] */
/* battery,accum */
0x3B 0x08 /* [0x44] [0x45] */
/* battery,soc_arr_val */
11170 10628 10086 9544 9002 8460 7917 7376 6834 6292
5750 5208 4666 4124 3582 3040 2498 1956 1414 872
330 (-212)
/* battery,ocv_arr_val */
44608 43844 43080 42328 41745 41210 40702 40134 39682 39376
38944 38531 38251 38056 37908 37792 37650 37464 37208 36964
36386 32272
/***** battery data for 4.29 *****/
/* battery,battery_table3 */
175 11 19 11 119 10 221 9 102 9
248 8 144 8 28 8 191 7 128 7
40 7 211 6 154 6 114 6 84 6
60 6 31 6 249 5 196 5 146 5
28 5 209 1 16 9 160 8 47 8
190 7 78 7 221 6 109 6 252 5
139 5 27 5 170 4 58 4 201 3
89 3 232 2 119 2 7 2 150 1
38 1 181 0 69 0 212 15
/* battery,battery_table4 */
97 97 97 97 97 97 96 97 96 96
96 96 97 98 100 101 104 107 113 124
138 104
/* battery,batcap */
0x2C 0x60 0x0B 0x18 /* [0x0F] [0x0E] [0x11] [0x10] */
/* battery,accum */
0x3B 0x08 /* [0x44] [0x45] */
/* battery,soc_arr_val */
11329 10779 10229 9680 9130 8580 8031 7481 6931 6381
5832 5282 4732 4183 3633 3083 2533 1984 1434 884
335 (-214)
/* battery,ocv_arr_val */
44607 43844 43080 42328 41746 41210 40704 40134 39682 39376
38945 38531 38251 38056 37908 37792 37650 37464 37208 36964
36387 32272
/***** battery data for 4.24 *****/
/* battery,battery_table3 */
176 11 19 11 119 10 221 9 101 9
248 8 144 8 28 8 191 7 128 7
40 7 211 6 154 6 114 6 84 6
60 6 31 6 249 5 196 5 146 5
28 5 209 1 146 9 27 9 164 8
46 8 183 7 64 7 201 6 82 6
219 5 100 5 237 4 119 4 0 4
137 3 18 3 155 2 36 2 173 1
54 1 191 0 72 0 210 15
/* battery,battery_table4 */
97 97 97 97 97 97 96 97 96 96
96 96 97 98 100 101 104 107 113 124
138 104
/* battery,batcap */
0x2A 0x80 0x0A 0xA0 /* [0x0F] [0x0E] [0x11] [0x10] */
/* battery,accum */
0x3B 0x08 /* [0x44] [0x45] */
/* battery,soc_arr_val */
11964 11384 10803 10223 9642 9062 8481 7901 7320 6740
6159 5578 4998 4417 3837 3256 2676 2095 1515 934
354 (-227)
/* battery,ocv_arr_val */
44608 43844 43080 42328 41745 41210 40702 40134 39682 39376
38944 38531 38251 38056 37908 37792 37650 37464 37208 36964
36387 32272
>;
/*
 * Stand-alone copies of the first (4.35 V) parameter set; these
 * duplicate the data inside battery,battery_data above — presumably
 * the fallback used when the driver does not parse battery_data.
 * TODO confirm against driver behavior.
 */
battery,battery_table1 = <
176 11 19 11 119 10 221 9 102 9
248 8 144 8 28 8 191 7 128 7
40 7 211 6 154 6 114 6 83 6
60 6 31 6 249 5 196 5 146 5
28 5 209 1 240 8 129 8 18 8
163 7 52 7 197 6 86 6 231 5
120 5 9 5 154 4 43 4 188 3
77 3 222 2 111 2 0 2 145 1
34 1 179 0 68 0 213 15
>;
battery,battery_table2 = <
97 97 97 97 97 96 96 97 96 96
96 96 97 98 100 101 104 107 113 124
138 206
>;
battery,battery_table3 = <
176 11 19 11 119 10 221 9 102 9
248 8 144 8 28 8 191 7 128 7
40 7 211 6 154 6 114 6 83 6
60 6 31 6 249 5 196 5 146 5
28 5 209 1 240 8 129 8 18 8
163 7 52 7 197 6 86 6 231 5
120 5 9 5 154 4 43 4 188 3
77 3 222 2 111 2 0 2 145 1
34 1 179 0 68 0 213 15
>;
battery,battery_table4 = <
97 97 97 97 97 96 96 97 96 96
96 96 97 98 100 101 104 107 113 124
138 206
>;
battery,batcap = <0x2D 0xB4 0x0B 0x6D>; /* [0x0F] [0x0E] [0x11] [0x10] */
battery,soc_arr_evt1 = <
11170 10628 10086 9544 9002 8460 7917 7376 6834 6292
5750 5208 4666 4124 3582 3040 2498 1956 1414 872
330 (-212)
>;
battery,ocv_arr_evt1 = <
44608 43844 43080 42328 41746 41210 40702 40134 39682 39376
38944 38530 38251 38056 37907 37791 37650 37464 37208 36964
36386 32272
>;
battery,soc_arr_evt2 = <
11170 10628 10086 9544 9002 8460 7917 7376 6834 6292
5750 5208 4666 4124 3582 3040 2498 1956 1414 872
330 (-212)
>;
battery,ocv_arr_evt2 = <
44608 43844 43080 42328 41746 41210 40702 40134 39682 39376
38944 38530 38251 38056 37907 37791 37650 37464 37208 36964
36386 32272
>;
battery,FG_Accumulative_rate_evt2 = <0x3B 0x08>; /* 0x44 0x45 */
battery,data_ver = <0x04>;
};
/*
 * sec-battery platform node: charging policy, thermistor tables and
 * swelling/aging protection for the 3000 mAh pack.
 * Temperatures are in 0.1 degC units (e.g. <500> = 50.0 C); currents in
 * mA; voltages in mV unless noted.
 */
battery {
status = "okay";
compatible = "samsung,sec-battery";
pinctrl-names = "default";
battery,vendor = "Battery";
battery,charger_name = "s2mu005-charger";
battery,fuelgauge_name = "s2mu005-fuelgauge";
battery,technology = <2>; /* POWER_SUPPLY_TECHNOLOGY_LION */
battery,fgsrc_switch_name = "s2mu005-fuelgauge"; /* Fuelgauge voltage source */
battery,batt_data_version = <2>;
battery,chip_vendor = "LSI";
battery,temp_adc_type = <1>; /* SEC_BATTERY_ADC_TYPE_AP */
battery,temp_check_type = <2>; /* SEC_BATTERY_TEMP_CHECK_TEMP */
battery,thermal_source = <2>; /* SEC_BATTERY_THERMAL_SOURCE_ADC */
battery,polling_time = <10 30 30 30 3600>;
/*
 * ADC-to-temperature lookup: temp_table_adc[i] maps to
 * temp_table_data[i] (both tables have 106 entries, 90.0 C down
 * to -20.0 C).
 */
battery,temp_table_adc = <
282 294 306 318 331 343 356 368 381 394
405 416 426 437 448 464 480 495 511 527
544 560 577 593 610 631 652 673 694 715
740 765 791 816 841 869 897 924 952 980
1012 1044 1076 1108 1140 1174 1208 1241 1275 1309
1347 1385 1422 1460 1498 1542 1586 1631 1675 1719
1763 1807 1851 1895 1939 1985 2032 2078 2125 2171
2222 2273 2323 2374 2425 2469 2513 2557 2601 2645
2683 2721 2758 2796 2834 2878 2922 2967 3011 3055
3094 3133 3171 3210 3249 3279 3309 3340 3370 3400
3427 3454 3482 3509 3536
3561 3582 3602 3622 3643 3663
>;
battery,temp_table_data = <
900 890 880 870 860 850 840 830 820 810
800 790 780 770 760 750 740 730 720 710
700 690 680 670 660 650 640 630 620 610
600 590 580 570 560 550 540 530 520 510
500 490 480 470 460 450 440 430 420 410
400 390 380 370 360 350 340 330 320 310
300 290 280 270 260 250 240 230 220 210
200 190 180 170 160 150 140 130 120 110
100 90 80 70 60 50 40 30 20 10
0 (-10) (-20) (-30) (-40) (-50) (-60) (-70) (-80) (-90)
(-100) (-110) (-120) (-130) (-140)
(-150) (-160) (-170) (-180) (-190) (-200)
>;
battery,inbat_voltage = <1>;
/* In-battery voltage ADC lookup: adc[i] -> data[i] (mV), 22 entries each */
battery,inbat_voltage_table_adc = <2839 2822 2803 2777 2751 2708 2694 2670 2650 2612
2591 2558 2530 2501 2469 2445 2424 2393 2351 2335
2296 2275>;
battery,inbat_voltage_table_data = <4400 4380 4350 4300 4250 4200 4150 4100 4050 4000
3950 3900 3850 3800 3750 3700 3650 3600 3550 3500
3450 3400>;
battery,adc_check_count = <5>;
battery,cable_check_type = <4>; /* SEC_BATTERY_CABLE_CHECK_PSY */
battery,cable_source_type = <1>; /* SEC_BATTERY_CABLE_SOURCE_EXTERNAL */
battery,polling_type = <1>; /* SEC_BATTERY_MONITOR_ALARM */
battery,monitor_initial_count = <0>;
battery,pre_afc_input_current = <500>;
battery,battery_check_type = <0>; /* SEC_BATTERY_CHECK_NONE */
battery,check_count = <0>;
battery,ovp_uvlo_check_type = <3>; /* SEC_BATTERY_OVP_UVLO_CHGPOLLING */
battery,temp_check_count = <1>;
/* Identical temperature limits across event/normal/lpm modes */
battery,temp_highlimit_threshold_event = <800>;
battery,temp_highlimit_recovery_event = <750>;
battery,temp_high_threshold_event = <500>;
battery,temp_high_recovery_event = <450>;
battery,temp_low_threshold_event = <0>;
battery,temp_low_recovery_event = <50>;
battery,temp_highlimit_threshold_normal = <800>;
battery,temp_highlimit_recovery_normal = <750>;
battery,temp_high_threshold_normal = <500>;
battery,temp_high_recovery_normal = <450>;
battery,temp_low_threshold_normal = <0>;
battery,temp_low_recovery_normal = <50>;
battery,temp_highlimit_threshold_lpm = <800>;
battery,temp_highlimit_recovery_lpm = <750>;
battery,temp_high_threshold_lpm = <500>;
battery,temp_high_recovery_lpm = <450>;
battery,temp_low_threshold_lpm = <0>;
battery,temp_low_recovery_lpm = <50>;
battery,wpc_high_threshold_normal = <450>;
battery,wpc_high_recovery_normal = <400>;
battery,wpc_low_threshold_normal = <0>;
battery,wpc_low_recovery_normal = <50>;
battery,full_check_type = <2>; /* SEC_BATTERY_FULLCHARGED_FG_CURRENT */
battery,full_check_type_2nd = <2>; /* SEC_BATTERY_FULLCHARGED_FG_CURRENT */
battery,full_check_count = <1>;
battery,chg_gpio_full_check = <0>;
battery,chg_polarity_full_check = <1>;
/* SEC_BATTERY_FULL_CONDITION_SOC |
 * SEC_BATTERY_FULL_CONDITION_NOTIMEFULL |
 * SEC_BATTERY_FULL_CONDITION_VCELL
 */
battery,full_condition_type = <13>;
battery,full_condition_soc = <93>;
battery,full_condition_vcell = <4250>;
battery,recharge_check_count = <1>;
battery,recharge_condition_type = <4>; /* SEC_BATTERY_RECHARGE_CONDITION_VCELL */
battery,recharge_condition_soc = <98>;
battery,recharge_condition_vcell = <4250>;
battery,charging_total_time = <14400>;	/* seconds */
battery,hv_charging_total_time = <10800>;
battery,normal_charging_total_time = <18000>;
battery,usb_charging_total_time = <36000>;
battery,recharging_total_time = <5400>;
battery,charging_reset_time = <0>;
battery,chg_float_voltage = <4350>;
/* Swelling (high/low temperature) charge derating */
battery,swelling_high_temp_block = <410>;
battery,swelling_high_temp_recov = <390>;
battery,swelling_wc_high_temp_recov = <390>;
battery,swelling_low_temp_block_1st = <150>;
battery,swelling_low_temp_recov_1st = <200>;
battery,swelling_low_temp_block_2nd = <50>;
battery,swelling_low_temp_recov_2nd = <100>;
battery,swelling_low_temp_current = <700>; /* 0.3C */
battery,swelling_low_temp_topoff = <150>; /* 0.05C */
battery,swelling_high_temp_current = <1050>; /* 0.45C */
battery,swelling_high_temp_topoff = <150>; /* 0.05C */
battery,swelling_wc_high_temp_current = <700>; /* 0.3C */
battery,swelling_wc_low_temp_current = <700>; /* 0.3C */
battery,swelling_drop_float_voltage = <4150>;
battery,swelling_high_rechg_voltage = <4000>;
battery,swelling_low_rechg_voltage = <4000>;
battery,siop_event_check_type = <1>;
battery,siop_call_cv_current = <330>;
battery,siop_call_cc_current = <330>;
battery,siop_input_limit_current = <1200>;
battery,siop_charging_limit_current = <1000>;
battery,battery_full_capacity = <3000>;
/* cycle, chg_float_voltage, recharge_condition_vcell, full_condition_vcell, full_condition_soc */
battery,age_data = <0 4350 4250 4250 93 /*4.35*/
200 4330 4230 4230 92 /*4.33*/
250 4310 4210 4210 91 /*4.31*/
300 4290 4190 4190 90 /*4.29*/
1000 4240 4140 4140 89 /*4.24*/
>;
battery,standard_curr = <1750>;
battery,expired_time = <13800>; /* 160 + 70 minutes */
battery,recharging_expired_time = <5400>;
battery,cisd_max_voltage_thr = <5000>; /* battery ovp detection voltage */
battery,recovery_cable;
io-channels = <&exynos_adc 1>, <&exynos_adc 2>;
io-channel-names = "adc-temp", "adc-in-bat";
/* NOTE(review): #io-channel-cells is normally a PROVIDER property and
 * <1> for exynos-adc; <5> on this consumer node looks wrong — verify
 * against the ADC binding and other boards. */
#io-channel-cells = <5>;
io-channel-ranges;
};
/* sec_battery_cable
 * 0 UNKNOWN,
 * 1 NONE,
 * 2 PREPARE_TA,
 * 3 TA,
 * 4 USB,
 * 5 USB_CDP,
 * 6 9V_TA,
 * 7 9V_ERR,
 * 8 9V_UNKNOWN,
 * 9 12V_TA,
 * 10 WIRELESS,
 * 11 HV_WIRELESS,
 * 12 PMA_WIRELESS,
 * 13 WIRELESS_PACK,
 * 14 WIRELESS_PACK_TA,
 * 15 WIRELESS_STAND,
 * 16 WIRELESS_HV_STAND,
 * 17 QC20,
 * 18 QC30,
 * 19 PDIC,
 * 20 UARTOFF,
 * 21 OTG,
 * 22 LAN_HUB,
 * 23 POWER_SHARING,
 * 24 HMT_CONNECTED,
 * 25 HMT_CHARGE,
 * 26 HV_TA_CHG_LIMIT,
 * 27 WIRELESS_VEHICLE,
 * 28 WIRELESS_HV_VEHICLE,
 * 29 PREPARE_WIRELESS_HV,
 * 30 TIMEOUT,
 * 31 CABLE_MAX,
 */
/*
 * Per-cable-type input/charging current limits (mA). cable_number
 * values refer to the sec_battery_cable enumeration above; cable
 * types not listed in any group fall back to the defaults.
 */
cable-info {
default_input_current = <1550>;
default_charging_current = <1750>;
full_check_current_1st = <300>;
full_check_current_2nd = <150>;
current_group_1 {
cable_number = <1 4 19 21 22 23 30>;	/* NONE/USB/PDIC/OTG/LAN_HUB/POWER_SHARING/TIMEOUT */
input_current = <500>;
charging_current = <500>;
};
current_group_2 {
cable_number = <2 25>;	/* PREPARE_TA, HMT_CHARGE */
input_current = <1000>;
charging_current = <1000>;
};
current_group_3 {
cable_number = <5>;	/* USB_CDP */
input_current = <1500>;
charging_current = <1500>;
};
current_group_4 {
cable_number = <6 7 8>;	/* 9V TA variants */
input_current = <1650>;
charging_current = <2150>;
};
current_group_5 {
cable_number = <9>;	/* 12V_TA */
input_current = <1650>;
charging_current = <2150>;
};
current_group_6 {
cable_number = <10 12 14 15 27>;	/* wireless variants */
input_current = <900>;
charging_current = <1200>;
};
current_group_7 {
cable_number = <13>;	/* WIRELESS_PACK */
input_current = <700>;
charging_current = <1200>;
};
current_group_8 {
cable_number = <24>;	/* HMT_CONNECTED */
input_current = <1000>;
charging_current = <450>;
};
current_group_9 {
cable_number = <26>;	/* HV_TA_CHG_LIMIT */
input_current = <2000>;
charging_current = <1800>;
};
current_group_10 {
cable_number = <11 16 28>;	/* HV wireless variants */
input_current = <650>;
charging_current = <1200>;
};
current_group_11 {
cable_number = <29>;	/* PREPARE_WIRELESS_HV */
input_current = <500>;
charging_current = <1200>;
};
};
/*
 * S2MU005 switching charger configuration.
 * NOTE(review): "disable" is not a standard DT status value — the spec
 * defines "disabled". Samsung's of_device_is_available() check treats
 * any value other than "okay"/"ok" as unavailable, so this presumably
 * still disables the node, but confirm the driver does not string-match
 * this exact value before "fixing" it.
 */
s2mu005-charger {
status = "disable";
compatible = "samsung,s2mu005-charger";
battery,is_1MHz_switching = <1>;
battery,chg_gpio_en = <0>;
battery,chg_polarity_en = <0>;
battery,chg_gpio_status = <0>;
battery,chg_polarity_status = <0>;
battery,chg_float_voltage = <4350>;	/* mV; matches battery node */
};
};

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,126 @@
/*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
 * Egis ET5xx fingerprint sensor on SPI bus 1 (PDMA-driven), with
 * pinctrl states for power-on/off in both normal and TrustZone modes.
 */
/ {
/* ALIVE-domain pins: sensor interrupt (data-ready) line */
pinctrl@11CB0000 {
btp_irq: btp-irq {
samsung,pins = "gpa0-5";
samsung,pin-function = <0>;
samsung,pin-pud = <0>;
samsung,pin-drv = <3>;
};
btp_irq_sleep: btp-irq-sleep {
samsung,pins = "gpa0-5";
samsung,pin-function = <0>;
samsung,pin-pud = <1>;
samsung,pin-drv = <3>;
};
};
/* TOP-domain pins: sensor LDO/reset outputs and SPI1 bus states */
pinctrl@139B0000 {
btp_ldo: btp-ldo {
samsung,pins = "gpg3-3";
samsung,pin-function = <1>;
samsung,pin-pud = <0>;
samsung,pin-drv = <0>;
};
btp_reset: btp-reset {
samsung,pins = "gpg3-2";
samsung,pin-function = <1>;
samsung,pin-pud = <0>;
samsung,pin-drv = <3>;
};
/* SPI_FP */
spi1_bus_inactive: spi1-bus-inactive {
samsung,pins = "gpp6-0", "gpp6-1", "gpp6-3";
samsung,pin-function = <1>;
samsung,pin-pud = <0>;
samsung,pin-drv = <2>;
};
spi1_miso_inactive: spi1-miso-inactive {
samsung,pins = "gpp6-2";
samsung,pin-function = <0>;
samsung,pin-pud = <1>;
samsung,pin-drv = <2>;
};
spi1_clk: spi1-clk {
samsung,pins = "gpp6-0";
samsung,pin-function = <2>;
samsung,pin-pud = <1>;
samsung,pin-drv = <2>;
};
spi1_cs: spi1-cs {
samsung,pins = "gpp6-1";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
samsung,pin-drv = <2>;
};
spi1_miso: spi1-miso {
samsung,pins = "gpp6-2";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
samsung,pin-drv = <2>;
};
spi1_mosi: spi1-mosi {
samsung,pins = "gpp6-3";
samsung,pin-function = <2>;
samsung,pin-pud = <1>;
samsung,pin-drv = <2>;
};
};
/*
 * SPI1 controller: pinctrl handled by the sensor child (states below),
 * so the controller's own pinctrl properties are deleted.
 */
spi_1: spi@13910000 {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
/delete-property/ pinctrl-names;
/delete-property/ pinctrl-0;
samsung,spi-fifosize = <256>;
interrupts = <0 256 0>;
dma-mode;
dmas = <&pdma0 21
&pdma0 20>;
dma-names = "tx", "rx";
etspi-spi@0 {
compatible = "etspi,et5xx";
reg = <0>;
spi-max-frequency = <16000000>;
pinctrl-names = "default", "pins_poweron", "pins_poweroff", "pins_poweron_tz", "pins_poweroff_tz";
pinctrl-0 = <&btp_irq_sleep &btp_reset &btp_ldo>;
pinctrl-1 = <&btp_irq &spi1_clk &spi1_cs &spi1_miso &spi1_mosi>;
pinctrl-2 = <&btp_irq_sleep &spi1_bus_inactive &spi1_miso_inactive>;
pinctrl-3 = <&btp_irq>;
pinctrl-4 = <&btp_irq_sleep>;
gpio-controller;
#gpio-cells = <2>;
etspi-sleepPin = <&gpg3 2 0x00>;	/* same pin as btp_reset */
etspi-drdyPin = <&gpa0 5 0x00>;	/* same pin as btp_irq */
etspi-ldoPin = <&gpg3 3 0x00>;	/* same pin as btp_ldo */
etspi-chipid = "ET516";
etspi-orient = <0>;
controller-data {
samsung,spi-feedback-delay = <0>;
samsung,spi-chip-select-mode = <0>;
};
};
};
};

View File

@ -0,0 +1,142 @@
/*
* SAMSUNG UNIVERSAL7885 board device tree source
*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/dts-v1/;
#include "exynos7884-a6elte_common.dtsi"
#include "exynos7884-a6elte_usa_gpio_00.dtsi"
#include "exynos7884-a6elte_fingerprint-sensor_00.dtsi"
/*
 * Board definition for hw_rev 0-2 (rev00): Synaptics TD4100 touch,
 * Coreriver touchkey, TAOS TMD3725 proximity/light, A96T3x6 grip
 * sensor, and sec_vib vibrator (disabled).
 * NOTE(review): model strings say "VZW" while the included GPIO file
 * and dtb names say "usa" — confirm which carrier variant this is.
 */
/ {
model = "Samsung A6ELTE VZW rev00 board based on EXYNOS7884";
model_info-chip = <7885>;
model_info-platform = "android";
model_info-subtype = "samsung";
model_info-hw_rev = <0>;
model_info-hw_rev_end = <2>;
compatible = "samsung, A6ELTE VZW rev00", "samsung,Universal7884";
/* Touchscreen I2C bus */
i2c_4:i2c@13870000 {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
samsung,i2c-max-bus-freq = <400000>;
touchscreen@4b {
compatible = "synaptics,rmi4";
reg = <0x4b>;
pinctrl-names = "default", "on_state", "off_state";
pinctrl-0 = <&attn_irq>;
pinctrl-1 = <&i2c_on>;
pinctrl-2 = <&i2c_off>;
synaptics,irq_gpio = <&gpa0 0 0>;
synaptics,irq_type = <8200>;
synaptics,max_coords = <720 1280>; /* x y */
synaptics,num_lines = <32 18>; /* rx tx */
synaptics,ub-i2c-addr = <0x2c>;
synaptics,reset-delay-ms = <150>;
synaptics,firmware_name = "tsp_synaptics/td4100_j7pop.fw";
synaptics,firmware_name_bl = "tsp_synaptics/td4100_j7pop_recovery.fw";
};
/* Alternate touch controller at 0x48 is not fitted on this board */
touchscreen@48 {
status = "disabled";
};
};
/* Bit-banged/GPIO I2C for the Coreriver touchkey */
i2c@20 {
gpios = <&gpc1 6 0 &gpc1 7 0>;
touchkey@20 {
coreriver,scl-gpio = <&gpc1 7 0>;
coreriver,sda-gpio = <&gpc1 6 0>;
coreriver,bringup;
};
};
/* Sensor I2C bus: TAOS TMD3725 proximity/ambient-light sensor */
i2c_5:i2c@13880000 {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c5_bus>;
tmd3725@39 {
compatible = "taos,tmd3725";
reg = <0x39>;
pinctrl-names = "default";
pinctrl-0 = <&prox_int>;
interrupt-parent = <&gpa0>;
interrupts = <4 0 0>;
taos,irq_gpio = <&gpa0 4 0x00>;
taos,vled_ldo_pin = <&gpg3 1 0x00>;
taos,prox_thd_det_hi = <55>;
taos,prox_thd_still_det_low = <40>;
taos,prox_thd_still_det_hi = <250>;
taos,prox_thd_rel_low = <130>;
taos,coef_r = <(-830)>;
taos,coef_g = <1100>;
taos,coef_b = <(-1180)>;
taos,coef_c = <1000>;
taos,dgf = <842>;
taos,cct_coef = <3521>;
taos,cct_offset = <2095>;
};
};
/* Grip sensor interrupt pin (EINT, function 0xf) */
pinctrl@11CB0000 {
grip_int: grip_int {
samsung,pins = "gpa0-6";
samsung,pin-function = <0xf>;
samsung,pin-pud = <0>;
samsung,pin-drv = <0>;
};
};
/* Grip sensor LDO enable output (no pin-drv set, uses reset default) */
pinctrl@139B0000 {
grip_ldo: grip_ldo {
samsung,pins = "gpg1-6";
samsung,pin-function = <1>;
samsung,pin-pud = <0>;
};
};
/* VIBRATOR */
sec_vib {
compatible = "sec_vib";
sec_vib_pdata {
status = "disabled";
sec_vib,max_timeout = <10000>;
sec_vib,ldo_type = <1>;
sec_vib,ldo_en = <&gpp0 3 1>;
};
};
/* USI HSI2C bus for the A96T3x6 grip sensor */
hsi2c_8: hsi2c@13980000 {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&hs_i2c8_bus>;
samsung,ch-qactive-off; /*property for usi hsi2c */
a96t3x6@20 {
compatible = "a96t3x6";
pinctrl-names = "default";
pinctrl-0 = <&grip_int &grip_ldo>;
reg = <0x20>;
interrupt-parent = <&gpa0>;
interrupts = <6 0 0>;
a96t3x6,irq_gpio = <&gpa0 6 0>;
a96t3x6,ldo_en = <&gpg1 6 0>;
a96t3x6,fw_path = "abov/a96t346_a6elte.fw";
a96t3x6,firmup_cmd = <0x38>;
};
};
};

View File

@ -0,0 +1,102 @@
/*
* SAMSUNG UNIVERSAL7885 board device tree source
*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/dts-v1/;
#include "exynos7884-a6elte_common.dtsi"
#include "exynos7884-a6elte_usa_gpio_03.dtsi"
#include "exynos7884-a6elte_fingerprint-sensor_00.dtsi"
/*
 * Board definition for hw_rev 3+ (rev03). Same peripherals as rev00
 * except the grip sensor (A96T3x6 node, grip pinctrl and hsi2c_8) is
 * dropped; uses the rev03 GPIO include.
 * NOTE(review): model strings say "VZW" while the included GPIO file
 * and dtb names say "usa" — confirm which carrier variant this is.
 */
/ {
model = "Samsung A6ELTE VZW rev03 board based on EXYNOS7884";
model_info-chip = <7885>;
model_info-platform = "android";
model_info-subtype = "samsung";
model_info-hw_rev = <3>;
model_info-hw_rev_end = <255>;
compatible = "samsung, A6ELTE VZW rev03", "samsung,Universal7884";
/* Touchscreen I2C bus */
i2c_4:i2c@13870000 {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
samsung,i2c-max-bus-freq = <400000>;
touchscreen@4b {
compatible = "synaptics,rmi4";
reg = <0x4b>;
pinctrl-names = "default", "on_state", "off_state";
pinctrl-0 = <&attn_irq>;
pinctrl-1 = <&i2c_on>;
pinctrl-2 = <&i2c_off>;
synaptics,irq_gpio = <&gpa0 0 0>;
synaptics,irq_type = <8200>;
synaptics,max_coords = <720 1280>; /* x y */
synaptics,num_lines = <32 18>; /* rx tx */
synaptics,ub-i2c-addr = <0x2c>;
synaptics,reset-delay-ms = <150>;
synaptics,firmware_name = "tsp_synaptics/td4100_j7pop.fw";
synaptics,firmware_name_bl = "tsp_synaptics/td4100_j7pop_recovery.fw";
};
/* Alternate touch controller at 0x48 is not fitted on this board */
touchscreen@48 {
status = "disabled";
};
};
/* VIBRATOR */
sec_vib {
compatible = "sec_vib";
sec_vib_pdata {
status = "disabled";
sec_vib,max_timeout = <10000>;
sec_vib,ldo_type = <1>;
sec_vib,ldo_en = <&gpp0 3 1>;
};
};
/* Bit-banged/GPIO I2C for the Coreriver touchkey */
i2c@20 {
gpios = <&gpc1 6 0 &gpc1 7 0>;
touchkey@20 {
coreriver,scl-gpio = <&gpc1 7 0>;
coreriver,sda-gpio = <&gpc1 6 0>;
coreriver,bringup;
};
};
/* Sensor I2C bus: TAOS TMD3725 proximity/ambient-light sensor */
i2c_5:i2c@13880000 {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c5_bus>;
tmd3725@39 {
compatible = "taos,tmd3725";
reg = <0x39>;
pinctrl-names = "default";
pinctrl-0 = <&prox_int>;
interrupt-parent = <&gpa0>;
interrupts = <4 0 0>;
taos,irq_gpio = <&gpa0 4 0x00>;
taos,vled_ldo_pin = <&gpg3 1 0x00>;
taos,prox_thd_det_hi = <55>;
taos,prox_thd_still_det_low = <40>;
taos,prox_thd_still_det_hi = <250>;
taos,prox_thd_rel_low = <130>;
taos,coef_r = <(-830)>;
taos,coef_g = <1100>;
taos,coef_b = <(-1180)>;
taos,coef_c = <1000>;
taos,dgf = <842>;
taos,cct_coef = <3521>;
taos,cct_offset = <2095>;
};
};
};

View File

@ -0,0 +1,214 @@
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "exynos_gpio_config_macros.dtsi"
/* 0x11CB_0000(ALIVE): etc0~1, gpa0~2, gpq0 */
&pinctrl_0 {
/*
 * Note:
 * Please do not make "sleep-state" node for GPA group GPIOs.
 * GPA group doesn't have power-down status.
 */
pinctrl-names = "default";
pinctrl-0 = <&initial0>;
/* Park all not-connected ALIVE pins as pulled-down inputs */
initial0: initial-state {
PIN_IN(gpa0-1, DOWN, LV1); /* NC */
PIN_IN(gpa1-1, DOWN, LV1); /* NC */
PIN_IN(gpa1-2, DOWN, LV1); /* NC */
PIN_IN(gpa1-3, DOWN, LV1); /* NC */
PIN_IN(gpa1-4, DOWN, LV1); /* NC */
PIN_IN(gpa2-3, DOWN, LV1); /* NC */
PIN_IN(gpa2-5, DOWN, LV1); /* NC */
PIN_FUNC(gpa2-7, DOWN, LV3); /* WLAN_UART */
PIN_IN(gpq0-1, DOWN, LV1); /* NC */
};
};
/* 0x148F_0000(DISPAUD): gpb0~2 */
&pinctrl_1 {
pinctrl-names = "sleep";
pinctrl-0 = <&sleep1>;
/* All audio I2S lines become pulled-down inputs during sleep */
sleep1: sleep-state {
PIN_SLP(gpb0-0, INPUT, DOWN); /* PM_I2S0_CLK */
PIN_SLP(gpb0-1, INPUT, DOWN); /* PM_I2S0_BCLK */
PIN_SLP(gpb0-2, INPUT, DOWN); /* PM_I2S0_SYNC */
PIN_SLP(gpb0-3, INPUT, DOWN); /* PM_I2S0_DO */
PIN_SLP(gpb0-4, INPUT, DOWN); /* PM_I2S0_DI */
PIN_SLP(gpb1-0, INPUT, DOWN); /* FB_I2S1_SDI */
PIN_SLP(gpb1-1, INPUT, DOWN); /* NC */
PIN_SLP(gpb1-2, INPUT, DOWN); /* NC */
PIN_SLP(gpb1-3, INPUT, DOWN); /* NC */
PIN_SLP(gpb2-0, INPUT, DOWN); /* SPK_I2S3_BCLK */
PIN_SLP(gpb2-1, INPUT, DOWN); /* SPK_I2S3_WS */
PIN_SLP(gpb2-2, INPUT, DOWN); /* SPK_I2S3_DO */
PIN_SLP(gpb2-3, INPUT, DOWN); /* SPK_I2S3_DI */
PIN_SLP(gpb2-4, INPUT, DOWN); /* FM_SPDY_TO_S612 */
};
};
/* 0x1343_0000(FSYS): gpf0,2~4 */
&pinctrl_2 {
pinctrl-names = "sleep";
pinctrl-0 = <&sleep2>;
/* eMMC (SD_0) and SD card (SD_2) pad states during sleep */
sleep2: sleep-state {
PIN_SLP(gpf0-0, OUT0, NONE); /* SD_0_CLK */
PIN_SLP(gpf0-1, OUT1, NONE); /* SD_0_CMD */
PIN_SLP(gpf0-2, INPUT, DOWN); /* SD_0_RDQS */
PIN_SLP(gpf0-3, PREV, NONE); /* SD_0_HWreset */
PIN_SLP(gpf2-0, OUT0, NONE); /* SD_0_DATA_0 */
PIN_SLP(gpf2-1, OUT0, NONE); /* SD_0_DATA_1 */
PIN_SLP(gpf2-2, OUT0, NONE); /* SD_0_DATA_2 */
PIN_SLP(gpf2-3, OUT0, NONE); /* SD_0_DATA_3 */
PIN_SLP(gpf2-4, OUT0, NONE); /* SD_0_DATA_4 */
PIN_SLP(gpf2-5, OUT0, NONE); /* SD_0_DATA_5 */
PIN_SLP(gpf2-6, OUT0, NONE); /* SD_0_DATA_6 */
PIN_SLP(gpf2-7, OUT0, NONE); /* SD_0_DATA_7 */
PIN_SLP(gpf4-0, OUT0, NONE); /* SD_2_CLK */
PIN_SLP(gpf4-1, OUT0, NONE); /* SD_2_CMD */
PIN_SLP(gpf4-2, OUT0, NONE); /* SD_2_DATA_0 */
PIN_SLP(gpf4-3, OUT0, NONE); /* SD_2_DATA_1 */
PIN_SLP(gpf4-4, OUT0, NONE); /* SD_2_DATA_2 */
PIN_SLP(gpf4-5, OUT0, NONE); /* SD_2_DATA_3 */
PIN_SLP(gpf3-0, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-1, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-2, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-3, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-4, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-5, INPUT, DOWN); /* NC */
};
};
/* 0x139B_0000(TOP): gpp0~8, gpg0~4, gpc0~2 */
&pinctrl_3 {
pinctrl-names = "default","sleep";
pinctrl-0 = <&initial3>;
pinctrl-1 = <&sleep3>;
/* Boot-time defaults: park NC pins, pre-set LDO/flash outputs low,
 * read HW revision straps as floating inputs */
initial3: initial-state {
PIN_IN(gpp0-0, DOWN, LV1); /* NC */
PIN_IN(gpp0-1, DOWN, LV1); /* NC */
PIN_IN(gpp4-4, DOWN, LV1); /* NC */
PIN_IN(gpp4-5, DOWN, LV1); /* NC */
PIN_OUT_SET_PULL(gpp0-3, 0, LV1, DOWN); /* MOT_LDO */
PIN_OUT_SET_PULL(gpg1-2, 0, LV1, DOWN); /* CAM_FLASH_EN */
PIN_OUT_SET_PULL(gpg1-3, 0, LV1, DOWN); /* CAM_TORCH_EN */
PIN_IN(gpg0-1, DOWN, LV1); /* NC */
PIN_IN(gpg3-6, DOWN, LV1); /* NC */
PIN_IN(gpg3-7, NONE, LV1); /* HW_REV0 */
PIN_IN(gpg4-0, NONE, LV1); /* HW_REV1 */
PIN_IN(gpg4-1, NONE, LV1); /* HW_REV2 */
PIN_IN(gpc0-2, DOWN, LV1); /* NC */
PIN_IN(gpc0-3, DOWN, LV1); /* NC */
PIN_IN(gpc2-0, DOWN, LV1); /* NC */
PIN_IN(gpc2-1, DOWN, LV1); /* NC */
PIN_IN(gpc2-2, DOWN, LV1); /* NC */
PIN_IN(gpc2-3, DOWN, LV1); /* NC */
/* PIN_IN(gpc2-6, DOWN, LV1);*/ /* NC */
/* PIN_IN(gpc2-7, DOWN, LV1);*/ /* NC */
PIN_OUT_SET(gpg1-4, 1, LV1); /* IF_PMIC_RST */
};
sleep3: sleep-state {
PIN_SLP(gpp0-0, INPUT, DOWN); /* NC */
PIN_SLP(gpp0-1, INPUT, DOWN); /* NC */
PIN_SLP(gpp0-3, OUT0, DOWN); /* MOT_LDO */
PIN_SLP(gpp1-0, INPUT, NONE); /* FG_I2C_SCL */
PIN_SLP(gpp1-1, INPUT, NONE); /* FG_I2C_SDA */
PIN_SLP(gpp1-2, PREV, NONE); /* IF_PMIC_I2C_SCL */
PIN_SLP(gpp1-3, PREV, NONE); /* IF_PMIC_I2C_SDA */
PIN_SLP(gpp2-0, INPUT, DOWN); /* NC */
PIN_SLP(gpp2-1, INPUT, DOWN); /* NC */
PIN_SLP(gpp2-2, INPUT, DOWN); /* NC */
PIN_SLP(gpp2-3, INPUT, DOWN); /* NC */
PIN_SLP(gpp3-0, INPUT, NONE); /* SPK_AMP_I2C_SCL */
PIN_SLP(gpp3-1, INPUT, NONE); /* SPK_AMP_I2C_SDA */
PIN_SLP(gpp3-2, INPUT, DOWN); /* NC */
PIN_SLP(gpp4-0, INPUT, NONE); /* TSP_I2C_SCL */
PIN_SLP(gpp4-1, INPUT, NONE); /* TSP_I2C_SDA — original comment said SCL twice; presumed copy/paste typo, verify against schematic */
PIN_SLP(gpp4-2, INPUT, NONE); /* PROXY_I2C_SDA */
PIN_SLP(gpp4-3, INPUT, NONE); /* PROXY_I2C_SCL */
PIN_SLP(gpp4-4, INPUT, DOWN); /* NC */
PIN_SLP(gpp4-5, INPUT, DOWN); /* NC */
PIN_SLP(gpp6-0, OUT0, NONE); /* BTP_SPI_CLK */
PIN_SLP(gpp6-1, OUT0, NONE); /* BTP_SPI_CS_N */
PIN_SLP(gpp6-2, INPUT, DOWN); /* BTP_SPI_MISO */
PIN_SLP(gpp6-3, OUT0, NONE); /* BTP_SPI_MOSI */
PIN_SLP(gpp6-4, PREV, NONE); /* NFC_PVDD_EN */
PIN_SLP(gpp7-0, INPUT, NONE); /* GRIP_SDA_1P8 */
PIN_SLP(gpp7-1, INPUT, NONE); /* GRIP_SCL_1P8 */
PIN_SLP(gpp8-0, INPUT, NONE); /* SENSOR_SDA_1P8 */
PIN_SLP(gpp8-1, INPUT, NONE); /* SENSOR_SCL_1P8 */
PIN_SLP(gpg0-1, INPUT, DOWN); /* NC */
PIN_SLP(gpg1-0, INPUT, DOWN); /* NC */
PIN_SLP(gpg1-1, PREV, NONE); /* PMIC_WRSTBI */
PIN_SLP(gpg1-2, OUT0, DOWN); /* CAM_FLASH_EN */
PIN_SLP(gpg1-3, PREV, NONE); /* CAM_TORCH_EN */
PIN_SLP(gpg1-4, PREV, NONE); /* IF_PMIC_RST */
PIN_SLP(gpg1-6, PREV, NONE); /* GRIP_EN */
PIN_SLP(gpg1-7, PREV, NONE); /* MLCD_RST */
PIN_SLP(gpg2-1, OUT0, DOWN); /* TSP_LDO_EN */
PIN_SLP(gpg2-6, PREV, NONE); /* LCD_PWR_EN */
PIN_SLP(gpg2-7, INPUT, DOWN); /* NC */
PIN_SLP(gpg3-1, PREV, DOWN); /* MST_PWR_EN */
PIN_SLP(gpg3-2, PREV, NONE); /* BTP_RST_N */
PIN_SLP(gpg3-3, PREV, NONE); /* BTP_LDO_EN */
PIN_SLP(gpg3-4, PREV, NONE); /* BTP_RST_N — NOTE(review): same label as gpg3-2; one of the two is likely mislabeled, verify */
PIN_SLP(gpg3-5, PREV, NONE); /* SSP_RST */
PIN_SLP(gpg3-6, INPUT, DOWN); /* NC */
PIN_SLP(gpg3-7, INPUT, NONE); /* HW_REV0 */
PIN_SLP(gpg4-0, INPUT, NONE); /* HW_REV1 */
PIN_SLP(gpg4-1, INPUT, NONE); /* HW_REV2 */
PIN_SLP(gpc0-2, INPUT, DOWN); /* NC */
PIN_SLP(gpc0-3, INPUT, DOWN); /* NC */
PIN_SLP(gpc1-0, INPUT, DOWN); /* FCAM1_I2C_SCL */
PIN_SLP(gpc1-1, INPUT, DOWN); /* FCAM1_I2C_SDA */
PIN_SLP(gpc1-2, INPUT, DOWN); /* RCAM_I2C_SCL */
PIN_SLP(gpc1-3, INPUT, DOWN); /* RCAM_I2C_SDA */
PIN_SLP(gpc1-4, INPUT, DOWN); /* RCAM_AF_EEP_I2C_SCL */
PIN_SLP(gpc1-5, INPUT, DOWN); /* RCAM_AF_EEP_I2C_SDA */
PIN_SLP(gpc1-6, INPUT, NONE); /* TOUCH_SDA_1P8 */
PIN_SLP(gpc1-7, INPUT, NONE); /* TOUCH_SCL_1P8 */
PIN_SLP(gpc2-0, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-1, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-2, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-3, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-4, INPUT, NONE); /* NFC_I2C_SDA */
PIN_SLP(gpc2-5, INPUT, NONE); /* NFC_I2C_SCL */
PIN_SLP(gpc2-6, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-7, INPUT, DOWN); /* NC */
};
};

View File

@ -0,0 +1,216 @@
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "exynos_gpio_config_macros.dtsi"
/* 0x11CB_0000(ALIVE): etc0~1, gpa0~2, gpq0 */
&pinctrl_0 {
/*
* Note:
* Please do not make "sleep-state" node for GPA group GPIOs.
* GPA group doesn't have power-down status.
*/
pinctrl-names = "default";
pinctrl-0 = <&initial0>;
initial0: initial-state {
PIN_IN(gpa0-1, DOWN, LV1); /* NC */
PIN_IN(gpa0-6, DOWN, LV1); /* NC */
PIN_IN(gpa1-1, DOWN, LV1); /* NC */
PIN_IN(gpa1-2, DOWN, LV1); /* NC */
PIN_IN(gpa1-3, DOWN, LV1); /* NC */
PIN_IN(gpa1-4, DOWN, LV1); /* NC */
PIN_IN(gpa2-3, DOWN, LV1); /* NC */
PIN_IN(gpa2-5, DOWN, LV1); /* NC */
PIN_FUNC(gpa2-7, DOWN, LV3); /* WLAN_UART */
PIN_IN(gpq0-1, DOWN, LV1); /* NC */
};
};
/* 0x148F_0000(DISPAUD): gpb0~2 */
&pinctrl_1 {
pinctrl-names = "sleep";
pinctrl-0 = <&sleep1>;
sleep1: sleep-state {
PIN_SLP(gpb0-0, INPUT, DOWN); /* PM_I2S0_CLK */
PIN_SLP(gpb0-1, INPUT, DOWN); /* PM_I2S0_BCLK */
PIN_SLP(gpb0-2, INPUT, DOWN); /* PM_I2S0_SYNC */
PIN_SLP(gpb0-3, INPUT, DOWN); /* PM_I2S0_DO */
PIN_SLP(gpb0-4, INPUT, DOWN); /* PM_I2S0_DI */
PIN_SLP(gpb1-0, INPUT, DOWN); /* FB_I2S1_SDI */
PIN_SLP(gpb1-1, INPUT, DOWN); /* NC */
PIN_SLP(gpb1-2, INPUT, DOWN); /* NC */
PIN_SLP(gpb1-3, INPUT, DOWN); /* NC */
PIN_SLP(gpb2-0, INPUT, DOWN); /* SPK_I2S3_BCLK */
PIN_SLP(gpb2-1, INPUT, DOWN); /* SPK_I2S3_WS */
PIN_SLP(gpb2-2, INPUT, DOWN); /* SPK_I2S3_DO */
PIN_SLP(gpb2-3, INPUT, DOWN); /* SPK_I2S3_DI */
PIN_SLP(gpb2-4, INPUT, DOWN); /* FM_SPDY_TO_S612 */
};
};
/* 0x1343_0000(FSYS): gpf0,2~4 */
&pinctrl_2 {
pinctrl-names = "sleep";
pinctrl-0 = <&sleep2>;
sleep2: sleep-state {
PIN_SLP(gpf0-0, OUT0, NONE); /* SD_0_CLK */
PIN_SLP(gpf0-1, OUT1, NONE); /* SD_0_CMD */
PIN_SLP(gpf0-2, INPUT, DOWN); /* SD_0_RDQS */
PIN_SLP(gpf0-3, PREV, NONE); /* SD_0_HWreset */
PIN_SLP(gpf2-0, OUT0, NONE); /* SD_0_DATA_0 */
PIN_SLP(gpf2-1, OUT0, NONE); /* SD_0_DATA_1 */
PIN_SLP(gpf2-2, OUT0, NONE); /* SD_0_DATA_2 */
PIN_SLP(gpf2-3, OUT0, NONE); /* SD_0_DATA_3 */
PIN_SLP(gpf2-4, OUT0, NONE); /* SD_0_DATA_4 */
PIN_SLP(gpf2-5, OUT0, NONE); /* SD_0_DATA_5 */
PIN_SLP(gpf2-6, OUT0, NONE); /* SD_0_DATA_6 */
PIN_SLP(gpf2-7, OUT0, NONE); /* SD_0_DATA_7 */
PIN_SLP(gpf4-0, OUT0, NONE); /* SD_2_CLK */
PIN_SLP(gpf4-1, OUT0, NONE); /* SD_2_CMD */
PIN_SLP(gpf4-2, OUT0, NONE); /* SD_2_DATA_0 */
PIN_SLP(gpf4-3, OUT0, NONE); /* SD_2_DATA_1 */
PIN_SLP(gpf4-4, OUT0, NONE); /* SD_2_DATA_2 */
PIN_SLP(gpf4-5, OUT0, NONE); /* SD_2_DATA_3 */
PIN_SLP(gpf3-0, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-1, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-2, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-3, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-4, INPUT, DOWN); /* NC */
PIN_SLP(gpf3-5, INPUT, DOWN); /* NC */
};
};
/* 0x139B_0000(TOP): gpp0~8, gpg0~4, gpc0~2 */
&pinctrl_3 {
pinctrl-names = "default","sleep";
pinctrl-0 = <&initial3>;
pinctrl-1 = <&sleep3>;
initial3: initial-state {
PIN_IN(gpp0-0, DOWN, LV1); /* NC */
PIN_IN(gpp0-1, DOWN, LV1); /* NC */
PIN_IN(gpp4-4, DOWN, LV1); /* NC */
PIN_IN(gpp4-5, DOWN, LV1); /* NC */
PIN_OUT_SET_PULL(gpp0-3, 0, LV1, DOWN); /* MOT_LDO */
PIN_OUT_SET_PULL(gpg1-2, 0, LV1, DOWN); /* CAM_FLASH_EN */
PIN_OUT_SET_PULL(gpg1-3, 0, LV1, DOWN); /* CAM_TORCH_EN */
PIN_IN(gpg0-1, DOWN, LV1); /* NC */
PIN_IN(gpg3-6, DOWN, LV1); /* NC */
PIN_IN(gpg3-7, NONE, LV1); /* HW_REV0 */
PIN_IN(gpg4-0, NONE, LV1); /* HW_REV1 */
PIN_IN(gpg4-1, NONE, LV1); /* HW_REV2 */
PIN_IN(gpc0-2, DOWN, LV1); /* NC */
PIN_IN(gpc0-3, DOWN, LV1); /* NC */
PIN_IN(gpc2-0, DOWN, LV1); /* NC */
PIN_IN(gpc2-1, DOWN, LV1); /* NC */
PIN_IN(gpc2-2, DOWN, LV1); /* NC */
PIN_IN(gpc2-3, DOWN, LV1); /* NC */
/* PIN_IN(gpc2-6, DOWN, LV1);*/ /* NC */
/* PIN_IN(gpc2-7, DOWN, LV1);*/ /* NC */
PIN_OUT_SET(gpg1-4, 1, LV1); /* IF_PMIC_RST */
};
sleep3: sleep-state {
PIN_SLP(gpp0-0, INPUT, DOWN); /* NC */
PIN_SLP(gpp0-1, INPUT, DOWN); /* NC */
PIN_SLP(gpp0-3, OUT0, DOWN); /* MOT_LDO */
PIN_SLP(gpp1-0, INPUT, NONE); /* FG_I2C_SCL */
PIN_SLP(gpp1-1, INPUT, NONE); /* FG_I2C_SDA */
PIN_SLP(gpp1-2, PREV, NONE); /* IF_PMIC_I2C_SCL */
PIN_SLP(gpp1-3, PREV, NONE); /* IF_PMIC_I2C_SDA */
PIN_SLP(gpp2-0, INPUT, DOWN); /* NC */
PIN_SLP(gpp2-1, INPUT, DOWN); /* NC */
PIN_SLP(gpp2-2, INPUT, DOWN); /* NC */
PIN_SLP(gpp2-3, INPUT, DOWN); /* NC */
PIN_SLP(gpp3-0, INPUT, NONE); /* SPK_AMP_I2C_SCL */
PIN_SLP(gpp3-1, INPUT, NONE); /* SPK_AMP_I2C_SDA */
PIN_SLP(gpp3-2, INPUT, DOWN); /* NC */
PIN_SLP(gpp4-0, INPUT, NONE); /* TSP_I2C_SCL */
PIN_SLP(gpp4-1, INPUT, NONE); /* TSP_I2C_SCL */
PIN_SLP(gpp4-2, INPUT, NONE); /* PROXY_I2C_SDA */
PIN_SLP(gpp4-3, INPUT, NONE); /* PROXY_I2C_SCL */
PIN_SLP(gpp4-4, INPUT, DOWN); /* NC */
PIN_SLP(gpp4-5, INPUT, DOWN); /* NC */
PIN_SLP(gpp6-0, OUT0, NONE); /* BTP_SPI_CLK */
PIN_SLP(gpp6-1, OUT0, NONE); /* BTP_SPI_CS_N */
PIN_SLP(gpp6-2, INPUT, DOWN); /* BTP_SPI_MISO */
PIN_SLP(gpp6-3, OUT0, NONE); /* BTP_SPI_MOSI */
PIN_SLP(gpp6-4, PREV, NONE); /* NFC_PVDD_EN */
PIN_SLP(gpp7-0, INPUT, DOWN); /* NC */
PIN_SLP(gpp7-1, INPUT, DOWN); /* NC */
PIN_SLP(gpp8-0, INPUT, NONE); /* SENSOR_SDA_1P8 */
PIN_SLP(gpp8-1, INPUT, NONE); /* SENSOR_SCL_1P8 */
PIN_SLP(gpg0-1, INPUT, DOWN); /* NC */
PIN_SLP(gpg1-0, INPUT, DOWN); /* NC */
PIN_SLP(gpg1-1, PREV, NONE); /* PMIC_WRSTBI */
PIN_SLP(gpg1-2, OUT0, DOWN); /* CAM_FLASH_EN */
PIN_SLP(gpg1-3, PREV, NONE); /* CAM_TORCH_EN */
PIN_SLP(gpg1-4, PREV, NONE); /* IF_PMIC_RST */
PIN_SLP(gpg1-6, INPUT, DOWN); /* NC */
PIN_SLP(gpg1-7, PREV, NONE); /* MLCD_RST */
PIN_SLP(gpg2-1, OUT0, DOWN); /* TSP_LDO_EN */
PIN_SLP(gpg2-6, PREV, NONE); /* LCD_PWR_EN */
PIN_SLP(gpg2-7, INPUT, DOWN); /* NC */
PIN_SLP(gpg3-1, PREV, DOWN); /* MST_PWR_EN */
PIN_SLP(gpg3-2, PREV, NONE); /* BTP_RST_N */
PIN_SLP(gpg3-3, PREV, NONE); /* BTP_LDO_EN */
PIN_SLP(gpg3-4, PREV, NONE); /* BTP_RST_N */
PIN_SLP(gpg3-5, PREV, NONE); /* SSP_RST */
PIN_SLP(gpg3-6, INPUT, DOWN); /* NC */
PIN_SLP(gpg3-7, INPUT, NONE); /* HW_REV0 */
PIN_SLP(gpg4-0, INPUT, NONE); /* HW_REV1 */
PIN_SLP(gpg4-1, INPUT, NONE); /* HW_REV2 */
PIN_SLP(gpc0-2, INPUT, DOWN); /* NC */
PIN_SLP(gpc0-3, INPUT, DOWN); /* NC */
PIN_SLP(gpc1-0, INPUT, DOWN); /* FCAM1_I2C_SCL */
PIN_SLP(gpc1-1, INPUT, DOWN); /* FCAM1_I2C_SDA */
PIN_SLP(gpc1-2, INPUT, DOWN); /* RCAM_I2C_SCL */
PIN_SLP(gpc1-3, INPUT, DOWN); /* RCAM_I2C_SDA */
PIN_SLP(gpc1-4, INPUT, DOWN); /* RCAM_AF_EEP_I2C_SCL */
PIN_SLP(gpc1-5, INPUT, DOWN); /* RCAM_AF_EEP_I2C_SDA */
PIN_SLP(gpc1-6, INPUT, NONE); /* TOUCH_SDA_1P8 */
PIN_SLP(gpc1-7, INPUT, NONE); /* TOUCH_SCL_1P8 */
PIN_SLP(gpc2-0, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-1, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-2, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-3, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-4, INPUT, NONE); /* NFC_I2C_SDA */
PIN_SLP(gpc2-5, INPUT, NONE); /* NFC_I2C_SCL */
PIN_SLP(gpc2-6, INPUT, DOWN); /* NC */
PIN_SLP(gpc2-7, INPUT, DOWN); /* NC */
};
};

View File

@ -0,0 +1,98 @@
/*
* SAMSUNG EXYNOS device tree source for reserved-memory
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/memreserve/ 0xE0000000 0xD00000;
/ {
reserved-memory {
#address-cells = <2>;
#size-cells = <1>;
ranges;
fb_handover: framebuffer@0xEC000000 {
compatible = "exynos,dpu_fb_handover";
/* 1 full screen size = lcd_width * lcd_height * 4 */
reg = <0x0 0xEC000000 0x1400000>;
};
dram_test {
compatible = "exynos7885,dram_test";
reg = <0x0 0x80002000 0x2000>;
};
modem_if {
compatible = "exynos,modem_if";
reg = <0x0 0xF5400000 0x8800000>;
};
cp_ram_logging {
compatible = "exynos,cp_ram_logging";
reg = <0x0 0xFDC00000 0x2000000>;
};
gnss_reserved: gnss_if {
compatible = "exynos,gnss_if";
reg = <0x0 0xFFC00000 0x400000>;
};
sec_debug_magic {
compatible = "exynos,sec_debug_magic";
reg = <0x0 0x80000000 0x1000>;
};
tima_mem: tima_region@0xB8000000 {
compatible = "removed-dma-pool";
reg = <0x0 0xB8000000 0x200000>;
};
seclog_mem {
compatible = "exynos,seclog";
reg = <0x0 0xC0000000 0x80000>;
};
wifibt_if {
compatible = "exynos,wifibt_if";
reg = <0x0 0xE9000000 0x400000>;
};
abox_rmem: abox_rmem@0xE9400000 {
compatible = "exynos,abox_rmem";
reg = <0x0 0xE9400000 0x2800000>;
};
btabox_rmem {
compatible = "exynos,btabox_rmem";
size = <0x400000>;
};
crypto {
compatible = "exynos8890-ion,crypto";
ion,reusable;
size = <0x1800000>;
alignment = <0x0 0x00400000>;
id = <1>;
};
video_stream {
compatible = "exynos8890-ion,vstream";
ion,secure;
ion,reusable;
/* TUI : 1080x2220 32bpp 3frames < 28MiB*/
/* DRM : 72 MiB */
size = <0x4800000>;
alignment = <0x0 0x00400000>;
id = <3>;
compat-id = <25 31>;
alloc-ranges = <0x0 0x80000000 0x80000000>;
};
};
};

36
arch/arm64/boot/dts/exynos/exynos7885-display-lcd.dtsi Normal file → Executable file
View File

@ -138,6 +138,7 @@
dsc_slice_num = <0>; /* count of dsc slice */
data_lane = <4>; /* number of using data lane */
vt_compensation = <1851>; /* for difference between wordclk and vclk at video mode */
clklane_onoff = <1>;
};
s6d7at0b: s6d7at0b_hd {
@ -159,6 +160,7 @@
dsc_slice_num = <0>; /* count of dsc slice */
data_lane = <4>; /* number of using data lane */
vt_compensation = <742>; /* for difference between wordclk and vclk at video mode */
clklane_onoff = <1>;
};
ea8061s: ea8061s_hd {
@ -184,8 +186,9 @@
data_lane = <4>; /* number of using data lane */
vt_compensation = <706>; /* for difference between wordclk and vclk at video mode */
clklane_onoff = <1>;
};
s6e8aa5: s6e8aa5_hdp {
mode = <0>;
resolution = <720 1480>;
@ -196,9 +199,9 @@
timing,h-porch = <26 62 2>;
/* VBP VFP VSW */
timing,v-porch = <10 12 2>;
timing,dsi-hs-clk = <500>;
timing,dsi-hs-clk = <498>;
timing,dsi-escape-clk = <16>;
timing,pms = <1 77 2>;
timing,pms = <3 230 2>;
mic_en = <0>; /* 0: Disable, 1: Enable */
mic_ratio = <0>; /* 0: 1/2 mic, 1: 1/3 mic */
mic_ver = <0>; /* 0: mic v1.1, 1: v1.2, 2: v2.0 */
@ -208,7 +211,30 @@
dsc_slice_num = <0>; /* count of dsc slice */
data_lane = <4>; /* number of using data lane */
vt_compensation = <575>; /* for difference between wordclk and vclk at video mode */
vt_compensation = <570>; /* for difference between wordclk and vclk at video mode */
clklane_onoff = <1>;
};
sn65dsi86_hx8896: sn65dsi86_hx8896 {
mode = <0>; /* 0: video mode, 1: DP command mode, 2: MIPI command mode */
resolution = <1920 1080>;
size = <382 214>; /* 381.888(H) x 213.812(V) */
timing,refresh = <60>;
/* HBP HFP HSW */
timing,h-porch = <142 48 32>;
/* VBP VFP VSW */
timing,v-porch = <13 3 6>;
timing,dsi-hs-clk = <923>;
timing,pms = <1 71 1>;
timing,dsi-escape-clk = <16>;
mic_en = <0>; /* 0: Disable, 1: Enable */
mic_ratio = <0>; /* 0: 1/2 mic, 1: 1/3 mic */
mic_ver = <0>; /* 0: mic v1.1, 1: v1.2, 2: v2.0 */
type_of_ddi = <0>; /* 0: Samsung Mobile, 1: MAGNA, 2: Normal(Etc) */
dsc_en = <0>; /* 0: Disable, 1: Enable */
dsc_cnt = <0>; /* used DSC count */
dsc_slice_num = <0>; /* count of dsc slice */
data_lane = <4>; /* number of using data lane */
vt_compensation = <1348>; /* for difference between wordclk and vclk at video mode */
};
};

57
arch/arm64/boot/dts/exynos/modem-s327ap-sipc-pdata.dtsi Normal file → Executable file
View File

@ -30,7 +30,7 @@
mif,link_types = <0x200>; /* LINKDEV_SHMEM */
mif,link_name = "shmem";
mif,link_attrs = <0x7C9>; /* XMIT_BTDLR(0x400) | DUMP_ALIGNED (0x200) | BOOT_ALIGNED (0x100) | MEM_DUMP (0x80) | MEM_BOOT (0x40) | DPRAM_MAGIC (0x08) | SBD_IPC (0x01) */
mif,num_iodevs = <22>;
mif,num_iodevs = <23>;
/* mbox pdata */
mbx_ap2cp_msg = <0>;
@ -105,6 +105,19 @@
iodevs {
io_device_0 {
iod,name = "umts_cass";
iod,id = <35>;
iod,format = <1>;
iod,io_type = <0>;
iod,links = <0x200>;
iod,attrs = <0x2082>;
iod,max_tx_size = <2048>;
iod,ul_num_buffers = <16>;
iod,ul_buffer_size = <2048>;
iod,dl_num_buffers = <16>;
iod,dl_buffer_size = <2048>;
};
io_device_1 {
iod,name = "umts_ipc0";
iod,id = <235>;
iod,format = <0>; /* IPC_FMT */
@ -117,7 +130,7 @@
iod,dl_num_buffers = <32>;
iod,dl_buffer_size = <4096>;
};
io_device_1 {
io_device_2 {
iod,name = "umts_ipc1";
iod,id = <236>;
iod,format = <0>; /* IPC_FMT */
@ -130,7 +143,7 @@
iod,dl_num_buffers = <32>;
iod,dl_buffer_size = <4096>;
};
io_device_2 {
io_device_3 {
iod,name = "umts_rfs0";
iod,id = <245>;
iod,format = <2>; /* IPC_RFS */
@ -143,7 +156,7 @@
iod,dl_num_buffers = <512>;
iod,dl_buffer_size = <2048>;
};
io_device_3 {
io_device_4 {
iod,name = "umts_csd";
iod,id = <1>;
iod,format = <1>;
@ -156,7 +169,7 @@
iod,dl_num_buffers = <64>;
iod,dl_buffer_size = <2048>;
};
io_device_4 {
io_device_5 {
iod,name = "umts_router";
iod,id = <25>;
iod,format = <1>;
@ -170,7 +183,7 @@
iod,dl_num_buffers = <16>;
iod,dl_buffer_size = <2048>;
};
io_device_5 {
io_device_6 {
iod,name = "umts_dm0";
iod,id = <28>;
iod,format = <1>;
@ -183,7 +196,7 @@
iod,dl_num_buffers = <128>;
iod,dl_buffer_size = <2048>;
};
io_device_6 {
io_device_7 {
iod,name = "rmnet0";
iod,id = <10>;
iod,format = <1>;
@ -196,7 +209,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_7 {
io_device_8 {
iod,name = "rmnet1";
iod,id = <11>;
iod,format = <1>;
@ -209,7 +222,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_8 {
io_device_9 {
iod,name = "rmnet2";
iod,id = <12>;
iod,format = <1>;
@ -222,7 +235,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_9 {
io_device_10 {
iod,name = "rmnet3";
iod,id = <13>;
iod,format = <1>;
@ -235,7 +248,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_10 {
io_device_11 {
iod,name = "rmnet4";
iod,id = <14>;
iod,format = <1>;
@ -247,7 +260,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_11 {
io_device_12 {
iod,name = "rmnet5";
iod,id = <15>;
iod,format = <1>;
@ -259,7 +272,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_12 {
io_device_13 {
iod,name = "rmnet6";
iod,id = <16>;
iod,format = <1>;
@ -271,7 +284,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_13 {
io_device_14 {
iod,name = "rmnet7";
iod,id = <17>;
iod,format = <1>;
@ -283,7 +296,7 @@
iod,dl_num_buffers = <0>;
iod,dl_buffer_size = <2048>;
};
io_device_14 {
io_device_15 {
iod,name = "multipdp_hiprio";
iod,id = <0>;
iod,format = <3>; /* IPC_MULTI_RAW */
@ -296,7 +309,7 @@
iod,dl_num_buffers = <256>;
iod,dl_buffer_size = <2048>;
};
io_device_15 {
io_device_16 {
iod,name = "multipdp";
iod,id = <0>;
iod,format = <3>; /* IPC_MULTI_RAW */
@ -309,7 +322,7 @@
iod,dl_num_buffers = <1024>;
iod,dl_buffer_size = <2048>;
};
io_device_16 {
io_device_17 {
iod,name = "umts_boot0";
iod,id = <215>;
iod,format = <4>; /* IPC_BOOT */
@ -318,7 +331,7 @@
iod,attrs = <0x02>;
iod,app = "CBD";
};
io_device_17 {
io_device_18 {
iod,name = "umts_ramdump0";
iod,id = <225>;
iod,format = <5>; /* IPC_DUMP */
@ -327,7 +340,7 @@
iod,attrs = <0x202>; /* ATTR_NO_CHECK_MAXQ | ATTR_SIPC5 */
iod,app = "CBD";
};
io_device_18 {
io_device_19 {
iod,name = "umts_atc0";
iod,id = <32>;
iod,format = <1>;
@ -342,7 +355,7 @@
iod,dl_buffer_size = <2048>;
iod,option_region = "usa_vzw";
};
io_device_19 {
io_device_20 {
iod,name = "smd4";
iod,id = <33>;
iod,format = <1>;
@ -355,7 +368,7 @@
iod,dl_buffer_size = <2048>;
iod,option_region = "kor_single";
};
io_device_20 {
io_device_21 {
iod,name = "umts_ciq0";
iod,id = <26>;
iod,format = <1>;
@ -368,7 +381,7 @@
iod,dl_buffer_size = <2048>;
iod,option_region = "usa_att";
};
io_device_21 {
io_device_22 {
iod,name = "umts_mdmi";
iod,id = <34>;
iod,format = <1>;

File diff suppressed because it is too large Load Diff

View File

@ -362,12 +362,12 @@ CONFIG_ARCH_EXYNOS7=y
#
# SAMSUNG EXYNOS SoCs Support
#
# CONFIG_MACH_EXYNOS7885_NONE is not set
# CONFIG_MACH_EXYNOS7884_A6ELTE_USA is not set
CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN=y
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_CAN_OPEN is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_EUR_OPEN is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_JPN_DCM is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_KOR is not set
CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN=y
CONFIG_EXYNOS_DTBTOOL=y
CONFIG_EXYNOS_DTBH_PLATFORM_CODE=0x50a6
CONFIG_EXYNOS_DTBH_SUBTYPE_CODE=0x217584da
@ -1137,6 +1137,7 @@ CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
# CONFIG_NFC_PN533 is not set
# CONFIG_NFC_PN547 is not set
# CONFIG_NFC_SIM is not set
CONFIG_SAMSUNG_NFC=y
CONFIG_ESE_P3_LSI=y
@ -1394,6 +1395,8 @@ CONFIG_HICCUP_CHARGER=y
# CONFIG_CP_UART_SWITCH is not set
# CONFIG_NEW_FACTORY_JIGONB is not set
# CONFIG_MUIC_INCOMPATIBLE_VZW is not set
# CONFIG_MUIC_S2MU005_SUPPORT_HMT is not set
# CONFIG_MUIC_UNIVERSAL is not set
#
# SCSI device support
@ -1835,9 +1838,12 @@ CONFIG_TOUCHSCREEN_DUMP_MODE=y
# CONFIG_TOUCHSCREEN_ZINITIX_ZT75XX_TCLM is not set
CONFIG_TOUCHSCREEN_SEC_TS=y
CONFIG_TOUCHSCREEN_SEC_TS_GLOVEMODE=y
# CONFIG_TOUCHSCREEN_SEC_TS_A7Y18 is not set
# CONFIG_TOUCHSCREEN_SEC_INCELL_TS is not set
# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_TD4X00 is not set
# CONFIG_TOUCHSCREEN_MELFAS_MMS438 is not set
# CONFIG_TOUCHSCREEN_FTS is not set
# CONFIG_TOUCHSCREEN_IST40XX is not set
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_AD714X is not set
# CONFIG_INPUT_BMA150 is not set
@ -2539,9 +2545,11 @@ CONFIG_USE_CSI_DMAOUT_FEATURE=y
CONFIG_CAMERA_CIS_SELECT=y
# CONFIG_CAMERA_CIS_6B2_OBJ is not set
# CONFIG_CAMERA_CIS_5E2_OBJ is not set
# CONFIG_CAMERA_CIS_5E9_OBJ is not set
# CONFIG_CAMERA_CIS_5E3_OBJ is not set
# CONFIG_CAMERA_CIS_4H5YC_OBJ is not set
# CONFIG_CAMERA_CIS_4H5_OBJ is not set
# CONFIG_CAMERA_CIS_4HA_OBJ is not set
# CONFIG_CAMERA_CIS_2P2_OBJ is not set
CONFIG_CAMERA_CIS_2P6_OBJ=y
# CONFIG_CAMERA_CIS_2P8_OBJ is not set
@ -2569,6 +2577,7 @@ CONFIG_CAMERA_CIS_3P8SP_OBJ=y
# CONFIG_CAMERA_CIS_SR556_OBJ is not set
# CONFIG_CAMERA_CIS_SR556B_OBJ is not set
CONFIG_CAMERA_CIS_SR846_OBJ=y
# CONFIG_CAMERA_CIS_IMX576_OBJ is not set
# CONFIG_CAMERA_CIS_VIRTUAL_OBJ is not set
CONFIG_CAMERA_ACT_SELECT=y
# CONFIG_CAMERA_ACT_AK7348_OBJ is not set
@ -2638,7 +2647,9 @@ CONFIG_CAMERA_JACKPOT=y
# CONFIG_CAMERA_J7TOPE is not set
# CONFIG_CAMERA_J7DUO is not set
# CONFIG_CAMERA_JACKPOT_JPN is not set
# CONFIG_CAMERA_GVIEW2 is not set
# CONFIG_CAMERA_A6E is not set
# CONFIG_CAMERA_A7Y18 is not set
# CONFIG_CAMERA_OTPROM_SUPPORT_FRONT is not set
# CONFIG_VIDEO_EXYNOS_CAMERA_POSTPROCESS is not set
CONFIG_MEDIA_EXYNOS=y
@ -2763,6 +2774,7 @@ CONFIG_MALI_TMIX=y
# CONFIG_MALI_TMIX_R3P0 is not set
# CONFIG_MALI_TMIX_R8P0 is not set
CONFIG_MALI_TMIX_R9P0=y
# CONFIG_MALI_TMIX_R10P0 is not set
CONFIG_MALI_MIDGARD=y
# CONFIG_MALI_GATOR_SUPPORT is not set
# CONFIG_MALI_MIDGARD_DVFS is not set
@ -2847,13 +2859,14 @@ CONFIG_DISPLAY_USE_INFO=y
CONFIG_EXYNOS_DECON_MDNIE_LITE=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA3 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A5Y18 is not set
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A7Y18=y
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT2=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT is not set
# CONFIG_EXYNOS_DECON_LCD_TD4100_J3TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_S6D7AT0B_J7TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_EA8061S_J7DUO is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_A6ELTE is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_FEEL2 is not set
# CONFIG_EXYNOS_DECON_LCD_SN65DSI86_GVIEW2 is not set
CONFIG_STATE_NOTIFIER=y
# CONFIG_FB_SSD1307 is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@ -2959,10 +2972,12 @@ CONFIG_SND_SOC_COMPRESS=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS8895_COD3033 is not set
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885=y
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035_MULTI is not set
CONFIG_SND_SOC_SAMSUNG_ABOX=y
CONFIG_SEC_SND_SYNCHRONIZED_IPC=y
# CONFIG_SND_SOC_BT_SHARED_SRATE is not set
CONFIG_SND_SOC_FM=y
CONFIG_SND_SOC_SAMSUNG_ABOX_FREE_RMEM=y
CONFIG_SND_SOC_SAMSUNG_MAILBOX=y
# CONFIG_SND_SOC_SAMSUNG_VTS is not set
@ -2996,6 +3011,7 @@ CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_CS42XX8_I2C is not set
# CONFIG_SND_SOC_CS4349 is not set
CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_COD30XX_EXT_ANT is not set
# CONFIG_SND_SOC_ES8328 is not set
# CONFIG_SND_SOC_GTM601 is not set
# CONFIG_SND_SOC_MAX98506 is not set
@ -3019,6 +3035,7 @@ CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_TAS571X is not set
CONFIG_SND_SOC_TFA9872=y
# CONFIG_SND_SOC_TFA9879 is not set
# CONFIG_SND_SOC_TFA9896 is not set
# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
# CONFIG_SND_SOC_TLV320AIC31XX is not set
@ -4272,9 +4289,11 @@ CONFIG_BATTERY_NOTIFIER=y
CONFIG_BATTERY_SAMSUNG_V2=y
# CONFIG_CHARGING_VZWCONCEPT is not set
# CONFIG_BATTERY_AGE_FORECAST_DETACHABLE is not set
# CONFIG_FUELGAUGE_ASOC_FROM_CYCLES is not set
# CONFIG_ENG_BATTERY_CONCEPT is not set
# CONFIG_AFC_CURR_CONTROL_BY_TEMP is not set
CONFIG_BATTERY_CISD=y
# CONFIG_FG_FULLCAP_FROM_BATTERY is not set
# CONFIG_FUELGAUGE_MAX77865 is not set
CONFIG_FUELGAUGE_S2MU004=y
# CONFIG_FUELGAUGE_S2MU005 is not set
@ -4440,12 +4459,22 @@ CONFIG_SENSORS_SSP_BAROMETER_LPS22H=y
# CONFIG_SENSORS_YAS539 is not set
# CONFIG_SENSORS_GP2AP070S is not set
# CONFIG_SENSORS_STK3013 is not set
# CONFIG_SENSORS_CM36658 is not set
# CONFIG_SENSORS_CM36686 is not set
# CONFIG_SENSORS_CM3323 is not set
# CONFIG_SENSORS_CM36672P is not set
# CONFIG_SENSORS_TMD3725 is not set
# CONFIG_SENSORS_A96T3X6_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_WIFI is not set
# CONFIG_SENSORS_A96T3X6_WIFI_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_SUB is not set
# CONFIG_SENSORS_A96T3X6_SUB_VIEW2 is not set
# CONFIG_SENSORS_TC3XXK is not set
# CONFIG_SENSORS_TC3XXK_A7Y18LTE is not set
CONFIG_MOTOR_ZH915=y
# CONFIG_SEC_VIB is not set
CONFIG_MOTOR_S2MU004=y
# CONFIG_ISA1000 is not set
CONFIG_FIVE_USE_TRUSTONIC=y
CONFIG_FIVE_TRUSTLET_PATH="five/ffffffff000000000000000000000072.tlbin"

View File

@ -362,12 +362,12 @@ CONFIG_ARCH_EXYNOS7=y
#
# SAMSUNG EXYNOS SoCs Support
#
# CONFIG_MACH_EXYNOS7885_NONE is not set
# CONFIG_MACH_EXYNOS7884_A6ELTE_USA is not set
# CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_CAN_OPEN is not set
CONFIG_MACH_EXYNOS7885_JACKPOTLTE_EUR_OPEN=y
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_JPN_DCM is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_KOR is not set
# CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN is not set
CONFIG_EXYNOS_DTBTOOL=y
CONFIG_EXYNOS_DTBH_PLATFORM_CODE=0x50a6
CONFIG_EXYNOS_DTBH_SUBTYPE_CODE=0x217584da
@ -1137,6 +1137,7 @@ CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
# CONFIG_NFC_PN533 is not set
# CONFIG_NFC_PN547 is not set
# CONFIG_NFC_SIM is not set
CONFIG_SAMSUNG_NFC=y
CONFIG_ESE_P3_LSI=y
@ -1394,6 +1395,8 @@ CONFIG_HICCUP_CHARGER=y
# CONFIG_CP_UART_SWITCH is not set
# CONFIG_NEW_FACTORY_JIGONB is not set
# CONFIG_MUIC_INCOMPATIBLE_VZW is not set
# CONFIG_MUIC_S2MU005_SUPPORT_HMT is not set
# CONFIG_MUIC_UNIVERSAL is not set
#
# SCSI device support
@ -1835,9 +1838,12 @@ CONFIG_TOUCHSCREEN_DUMP_MODE=y
# CONFIG_TOUCHSCREEN_ZINITIX_ZT75XX_TCLM is not set
CONFIG_TOUCHSCREEN_SEC_TS=y
CONFIG_TOUCHSCREEN_SEC_TS_GLOVEMODE=y
# CONFIG_TOUCHSCREEN_SEC_TS_A7Y18 is not set
# CONFIG_TOUCHSCREEN_SEC_INCELL_TS is not set
# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_TD4X00 is not set
# CONFIG_TOUCHSCREEN_MELFAS_MMS438 is not set
# CONFIG_TOUCHSCREEN_FTS is not set
# CONFIG_TOUCHSCREEN_IST40XX is not set
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_AD714X is not set
# CONFIG_INPUT_BMA150 is not set
@ -2539,9 +2545,11 @@ CONFIG_USE_CSI_DMAOUT_FEATURE=y
CONFIG_CAMERA_CIS_SELECT=y
# CONFIG_CAMERA_CIS_6B2_OBJ is not set
# CONFIG_CAMERA_CIS_5E2_OBJ is not set
# CONFIG_CAMERA_CIS_5E9_OBJ is not set
# CONFIG_CAMERA_CIS_5E3_OBJ is not set
# CONFIG_CAMERA_CIS_4H5YC_OBJ is not set
# CONFIG_CAMERA_CIS_4H5_OBJ is not set
# CONFIG_CAMERA_CIS_4HA_OBJ is not set
# CONFIG_CAMERA_CIS_2P2_OBJ is not set
CONFIG_CAMERA_CIS_2P6_OBJ=y
# CONFIG_CAMERA_CIS_2P8_OBJ is not set
@ -2569,6 +2577,7 @@ CONFIG_CAMERA_CIS_3P8SP_OBJ=y
# CONFIG_CAMERA_CIS_SR556_OBJ is not set
# CONFIG_CAMERA_CIS_SR556B_OBJ is not set
CONFIG_CAMERA_CIS_SR846_OBJ=y
# CONFIG_CAMERA_CIS_IMX576_OBJ is not set
# CONFIG_CAMERA_CIS_VIRTUAL_OBJ is not set
CONFIG_CAMERA_ACT_SELECT=y
# CONFIG_CAMERA_ACT_AK7348_OBJ is not set
@ -2638,7 +2647,9 @@ CONFIG_CAMERA_JACKPOT=y
# CONFIG_CAMERA_J7TOPE is not set
# CONFIG_CAMERA_J7DUO is not set
# CONFIG_CAMERA_JACKPOT_JPN is not set
# CONFIG_CAMERA_GVIEW2 is not set
# CONFIG_CAMERA_A6E is not set
# CONFIG_CAMERA_A7Y18 is not set
# CONFIG_CAMERA_OTPROM_SUPPORT_FRONT is not set
# CONFIG_VIDEO_EXYNOS_CAMERA_POSTPROCESS is not set
CONFIG_MEDIA_EXYNOS=y
@ -2763,6 +2774,7 @@ CONFIG_MALI_TMIX=y
# CONFIG_MALI_TMIX_R3P0 is not set
# CONFIG_MALI_TMIX_R8P0 is not set
CONFIG_MALI_TMIX_R9P0=y
# CONFIG_MALI_TMIX_R10P0 is not set
CONFIG_MALI_MIDGARD=y
# CONFIG_MALI_GATOR_SUPPORT is not set
# CONFIG_MALI_MIDGARD_DVFS is not set
@ -2847,13 +2859,14 @@ CONFIG_DISPLAY_USE_INFO=y
CONFIG_EXYNOS_DECON_MDNIE_LITE=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA3 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7 is not set
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A5Y18=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A7Y18 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT2 is not set
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT=y
# CONFIG_EXYNOS_DECON_LCD_TD4100_J3TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_S6D7AT0B_J7TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_EA8061S_J7DUO is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_A6ELTE is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_FEEL2 is not set
# CONFIG_EXYNOS_DECON_LCD_SN65DSI86_GVIEW2 is not set
CONFIG_STATE_NOTIFIER=y
# CONFIG_FB_SSD1307 is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@ -2959,10 +2972,12 @@ CONFIG_SND_SOC_COMPRESS=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS8895_COD3033 is not set
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885=y
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035_MULTI is not set
CONFIG_SND_SOC_SAMSUNG_ABOX=y
CONFIG_SEC_SND_SYNCHRONIZED_IPC=y
# CONFIG_SND_SOC_BT_SHARED_SRATE is not set
CONFIG_SND_SOC_FM=y
CONFIG_SND_SOC_SAMSUNG_ABOX_FREE_RMEM=y
CONFIG_SND_SOC_SAMSUNG_MAILBOX=y
# CONFIG_SND_SOC_SAMSUNG_VTS is not set
@ -2996,6 +3011,7 @@ CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_CS42XX8_I2C is not set
# CONFIG_SND_SOC_CS4349 is not set
CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_COD30XX_EXT_ANT is not set
# CONFIG_SND_SOC_ES8328 is not set
# CONFIG_SND_SOC_GTM601 is not set
# CONFIG_SND_SOC_MAX98506 is not set
@ -3019,6 +3035,7 @@ CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_TAS571X is not set
CONFIG_SND_SOC_TFA9872=y
# CONFIG_SND_SOC_TFA9879 is not set
# CONFIG_SND_SOC_TFA9896 is not set
# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
# CONFIG_SND_SOC_TLV320AIC31XX is not set
@ -4272,9 +4289,11 @@ CONFIG_BATTERY_NOTIFIER=y
CONFIG_BATTERY_SAMSUNG_V2=y
# CONFIG_CHARGING_VZWCONCEPT is not set
# CONFIG_BATTERY_AGE_FORECAST_DETACHABLE is not set
# CONFIG_FUELGAUGE_ASOC_FROM_CYCLES is not set
# CONFIG_ENG_BATTERY_CONCEPT is not set
# CONFIG_AFC_CURR_CONTROL_BY_TEMP is not set
CONFIG_BATTERY_CISD=y
# CONFIG_FG_FULLCAP_FROM_BATTERY is not set
# CONFIG_FUELGAUGE_MAX77865 is not set
CONFIG_FUELGAUGE_S2MU004=y
# CONFIG_FUELGAUGE_S2MU005 is not set
@ -4440,12 +4459,22 @@ CONFIG_SENSORS_SSP_BAROMETER_LPS22H=y
# CONFIG_SENSORS_YAS539 is not set
# CONFIG_SENSORS_GP2AP070S is not set
# CONFIG_SENSORS_STK3013 is not set
# CONFIG_SENSORS_CM36658 is not set
# CONFIG_SENSORS_CM36686 is not set
# CONFIG_SENSORS_CM3323 is not set
# CONFIG_SENSORS_CM36672P is not set
# CONFIG_SENSORS_TMD3725 is not set
# CONFIG_SENSORS_A96T3X6_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_WIFI is not set
# CONFIG_SENSORS_A96T3X6_WIFI_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_SUB is not set
# CONFIG_SENSORS_A96T3X6_SUB_VIEW2 is not set
# CONFIG_SENSORS_TC3XXK is not set
# CONFIG_SENSORS_TC3XXK_A7Y18LTE is not set
CONFIG_MOTOR_ZH915=y
# CONFIG_SEC_VIB is not set
CONFIG_MOTOR_S2MU004=y
# CONFIG_ISA1000 is not set
CONFIG_FIVE_USE_TRUSTONIC=y
CONFIG_FIVE_TRUSTLET_PATH="five/ffffffff000000000000000000000072.tlbin"

View File

@ -362,12 +362,12 @@ CONFIG_ARCH_EXYNOS7=y
#
# SAMSUNG EXYNOS SoCs Support
#
# CONFIG_MACH_EXYNOS7885_NONE is not set
# CONFIG_MACH_EXYNOS7884_A6ELTE_USA is not set
# CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN is not set
CONFIG_MACH_EXYNOS7885_JACKPOTLTE_CAN_OPEN=y
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_EUR_OPEN is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_JPN_DCM is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_KOR is not set
# CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN is not set
CONFIG_EXYNOS_DTBTOOL=y
CONFIG_EXYNOS_DTBH_PLATFORM_CODE=0x50a6
CONFIG_EXYNOS_DTBH_SUBTYPE_CODE=0x217584da
@ -1137,6 +1137,7 @@ CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
# CONFIG_NFC_PN533 is not set
# CONFIG_NFC_PN547 is not set
# CONFIG_NFC_SIM is not set
CONFIG_SAMSUNG_NFC=y
CONFIG_ESE_P3_LSI=y
@ -1394,6 +1395,8 @@ CONFIG_HICCUP_CHARGER=y
# CONFIG_CP_UART_SWITCH is not set
# CONFIG_NEW_FACTORY_JIGONB is not set
# CONFIG_MUIC_INCOMPATIBLE_VZW is not set
# CONFIG_MUIC_S2MU005_SUPPORT_HMT is not set
# CONFIG_MUIC_UNIVERSAL is not set
#
# SCSI device support
@ -1835,9 +1838,12 @@ CONFIG_TOUCHSCREEN_DUMP_MODE=y
# CONFIG_TOUCHSCREEN_ZINITIX_ZT75XX_TCLM is not set
CONFIG_TOUCHSCREEN_SEC_TS=y
CONFIG_TOUCHSCREEN_SEC_TS_GLOVEMODE=y
# CONFIG_TOUCHSCREEN_SEC_TS_A7Y18 is not set
# CONFIG_TOUCHSCREEN_SEC_INCELL_TS is not set
# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_TD4X00 is not set
# CONFIG_TOUCHSCREEN_MELFAS_MMS438 is not set
# CONFIG_TOUCHSCREEN_FTS is not set
# CONFIG_TOUCHSCREEN_IST40XX is not set
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_AD714X is not set
# CONFIG_INPUT_BMA150 is not set
@ -2539,9 +2545,11 @@ CONFIG_USE_CSI_DMAOUT_FEATURE=y
CONFIG_CAMERA_CIS_SELECT=y
# CONFIG_CAMERA_CIS_6B2_OBJ is not set
# CONFIG_CAMERA_CIS_5E2_OBJ is not set
# CONFIG_CAMERA_CIS_5E9_OBJ is not set
# CONFIG_CAMERA_CIS_5E3_OBJ is not set
# CONFIG_CAMERA_CIS_4H5YC_OBJ is not set
# CONFIG_CAMERA_CIS_4H5_OBJ is not set
# CONFIG_CAMERA_CIS_4HA_OBJ is not set
# CONFIG_CAMERA_CIS_2P2_OBJ is not set
CONFIG_CAMERA_CIS_2P6_OBJ=y
# CONFIG_CAMERA_CIS_2P8_OBJ is not set
@ -2569,6 +2577,7 @@ CONFIG_CAMERA_CIS_3P8SP_OBJ=y
# CONFIG_CAMERA_CIS_SR556_OBJ is not set
# CONFIG_CAMERA_CIS_SR556B_OBJ is not set
CONFIG_CAMERA_CIS_SR846_OBJ=y
# CONFIG_CAMERA_CIS_IMX576_OBJ is not set
# CONFIG_CAMERA_CIS_VIRTUAL_OBJ is not set
CONFIG_CAMERA_ACT_SELECT=y
# CONFIG_CAMERA_ACT_AK7348_OBJ is not set
@ -2638,7 +2647,9 @@ CONFIG_CAMERA_JACKPOT=y
# CONFIG_CAMERA_J7TOPE is not set
# CONFIG_CAMERA_J7DUO is not set
# CONFIG_CAMERA_JACKPOT_JPN is not set
# CONFIG_CAMERA_GVIEW2 is not set
# CONFIG_CAMERA_A6E is not set
# CONFIG_CAMERA_A7Y18 is not set
# CONFIG_CAMERA_OTPROM_SUPPORT_FRONT is not set
# CONFIG_VIDEO_EXYNOS_CAMERA_POSTPROCESS is not set
CONFIG_MEDIA_EXYNOS=y
@ -2763,6 +2774,7 @@ CONFIG_MALI_TMIX=y
# CONFIG_MALI_TMIX_R3P0 is not set
# CONFIG_MALI_TMIX_R8P0 is not set
CONFIG_MALI_TMIX_R9P0=y
# CONFIG_MALI_TMIX_R10P0 is not set
CONFIG_MALI_MIDGARD=y
# CONFIG_MALI_GATOR_SUPPORT is not set
# CONFIG_MALI_MIDGARD_DVFS is not set
@ -2847,13 +2859,14 @@ CONFIG_DISPLAY_USE_INFO=y
CONFIG_EXYNOS_DECON_MDNIE_LITE=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA3 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7 is not set
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A5Y18=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A7Y18 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT2 is not set
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT=y
# CONFIG_EXYNOS_DECON_LCD_TD4100_J3TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_S6D7AT0B_J7TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_EA8061S_J7DUO is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_A6ELTE is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_FEEL2 is not set
# CONFIG_EXYNOS_DECON_LCD_SN65DSI86_GVIEW2 is not set
CONFIG_STATE_NOTIFIER=y
# CONFIG_FB_SSD1307 is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@ -2959,10 +2972,12 @@ CONFIG_SND_SOC_COMPRESS=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS8895_COD3033 is not set
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885=y
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035_MULTI is not set
CONFIG_SND_SOC_SAMSUNG_ABOX=y
CONFIG_SEC_SND_SYNCHRONIZED_IPC=y
# CONFIG_SND_SOC_BT_SHARED_SRATE is not set
CONFIG_SND_SOC_FM=y
CONFIG_SND_SOC_SAMSUNG_ABOX_FREE_RMEM=y
CONFIG_SND_SOC_SAMSUNG_MAILBOX=y
# CONFIG_SND_SOC_SAMSUNG_VTS is not set
@ -2996,6 +3011,7 @@ CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_CS42XX8_I2C is not set
# CONFIG_SND_SOC_CS4349 is not set
CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_COD30XX_EXT_ANT is not set
# CONFIG_SND_SOC_ES8328 is not set
# CONFIG_SND_SOC_GTM601 is not set
# CONFIG_SND_SOC_MAX98506 is not set
@ -3019,6 +3035,7 @@ CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_TAS571X is not set
CONFIG_SND_SOC_TFA9872=y
# CONFIG_SND_SOC_TFA9879 is not set
# CONFIG_SND_SOC_TFA9896 is not set
# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
# CONFIG_SND_SOC_TLV320AIC31XX is not set
@ -4272,9 +4289,11 @@ CONFIG_BATTERY_NOTIFIER=y
CONFIG_BATTERY_SAMSUNG_V2=y
# CONFIG_CHARGING_VZWCONCEPT is not set
# CONFIG_BATTERY_AGE_FORECAST_DETACHABLE is not set
# CONFIG_FUELGAUGE_ASOC_FROM_CYCLES is not set
# CONFIG_ENG_BATTERY_CONCEPT is not set
# CONFIG_AFC_CURR_CONTROL_BY_TEMP is not set
CONFIG_BATTERY_CISD=y
# CONFIG_FG_FULLCAP_FROM_BATTERY is not set
# CONFIG_FUELGAUGE_MAX77865 is not set
CONFIG_FUELGAUGE_S2MU004=y
# CONFIG_FUELGAUGE_S2MU005 is not set
@ -4440,12 +4459,22 @@ CONFIG_SENSORS_SSP_BAROMETER_LPS22H=y
# CONFIG_SENSORS_YAS539 is not set
# CONFIG_SENSORS_GP2AP070S is not set
# CONFIG_SENSORS_STK3013 is not set
# CONFIG_SENSORS_CM36658 is not set
# CONFIG_SENSORS_CM36686 is not set
# CONFIG_SENSORS_CM3323 is not set
# CONFIG_SENSORS_CM36672P is not set
# CONFIG_SENSORS_TMD3725 is not set
# CONFIG_SENSORS_A96T3X6_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_WIFI is not set
# CONFIG_SENSORS_A96T3X6_WIFI_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_SUB is not set
# CONFIG_SENSORS_A96T3X6_SUB_VIEW2 is not set
# CONFIG_SENSORS_TC3XXK is not set
# CONFIG_SENSORS_TC3XXK_A7Y18LTE is not set
CONFIG_MOTOR_ZH915=y
# CONFIG_SEC_VIB is not set
CONFIG_MOTOR_S2MU004=y
# CONFIG_ISA1000 is not set
CONFIG_FIVE_USE_TRUSTONIC=y
CONFIG_FIVE_TRUSTLET_PATH="five/ffffffff000000000000000000000072.tlbin"

View File

@ -362,12 +362,12 @@ CONFIG_ARCH_EXYNOS7=y
#
# SAMSUNG EXYNOS SoCs Support
#
# CONFIG_MACH_EXYNOS7885_NONE is not set
# CONFIG_MACH_EXYNOS7884_A6ELTE_USA is not set
# CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_CAN_OPEN is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_EUR_OPEN is not set
# CONFIG_MACH_EXYNOS7885_JACKPOTLTE_JPN_DCM is not set
CONFIG_MACH_EXYNOS7885_JACKPOTLTE_KOR=y
# CONFIG_MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN is not set
CONFIG_EXYNOS_DTBTOOL=y
CONFIG_EXYNOS_DTBH_PLATFORM_CODE=0x50a6
CONFIG_EXYNOS_DTBH_SUBTYPE_CODE=0x217584da
@ -1137,6 +1137,7 @@ CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
# CONFIG_NFC_PN533 is not set
# CONFIG_NFC_PN547 is not set
# CONFIG_NFC_SIM is not set
CONFIG_SAMSUNG_NFC=y
CONFIG_ESE_P3_LSI=y
@ -1394,6 +1395,8 @@ CONFIG_HICCUP_CHARGER=y
# CONFIG_CP_UART_SWITCH is not set
# CONFIG_NEW_FACTORY_JIGONB is not set
# CONFIG_MUIC_INCOMPATIBLE_VZW is not set
# CONFIG_MUIC_S2MU005_SUPPORT_HMT is not set
# CONFIG_MUIC_UNIVERSAL is not set
#
# SCSI device support
@ -1835,9 +1838,12 @@ CONFIG_TOUCHSCREEN_DUMP_MODE=y
# CONFIG_TOUCHSCREEN_ZINITIX_ZT75XX_TCLM is not set
CONFIG_TOUCHSCREEN_SEC_TS=y
CONFIG_TOUCHSCREEN_SEC_TS_GLOVEMODE=y
# CONFIG_TOUCHSCREEN_SEC_TS_A7Y18 is not set
# CONFIG_TOUCHSCREEN_SEC_INCELL_TS is not set
# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_TD4X00 is not set
# CONFIG_TOUCHSCREEN_MELFAS_MMS438 is not set
# CONFIG_TOUCHSCREEN_FTS is not set
# CONFIG_TOUCHSCREEN_IST40XX is not set
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_AD714X is not set
# CONFIG_INPUT_BMA150 is not set
@ -2539,9 +2545,11 @@ CONFIG_USE_CSI_DMAOUT_FEATURE=y
CONFIG_CAMERA_CIS_SELECT=y
# CONFIG_CAMERA_CIS_6B2_OBJ is not set
# CONFIG_CAMERA_CIS_5E2_OBJ is not set
# CONFIG_CAMERA_CIS_5E9_OBJ is not set
# CONFIG_CAMERA_CIS_5E3_OBJ is not set
# CONFIG_CAMERA_CIS_4H5YC_OBJ is not set
# CONFIG_CAMERA_CIS_4H5_OBJ is not set
# CONFIG_CAMERA_CIS_4HA_OBJ is not set
# CONFIG_CAMERA_CIS_2P2_OBJ is not set
CONFIG_CAMERA_CIS_2P6_OBJ=y
# CONFIG_CAMERA_CIS_2P8_OBJ is not set
@ -2569,6 +2577,7 @@ CONFIG_CAMERA_CIS_3P8SP_OBJ=y
# CONFIG_CAMERA_CIS_SR556_OBJ is not set
# CONFIG_CAMERA_CIS_SR556B_OBJ is not set
CONFIG_CAMERA_CIS_SR846_OBJ=y
# CONFIG_CAMERA_CIS_IMX576_OBJ is not set
# CONFIG_CAMERA_CIS_VIRTUAL_OBJ is not set
CONFIG_CAMERA_ACT_SELECT=y
# CONFIG_CAMERA_ACT_AK7348_OBJ is not set
@ -2638,7 +2647,9 @@ CONFIG_CAMERA_JACKPOT=y
# CONFIG_CAMERA_J7TOPE is not set
# CONFIG_CAMERA_J7DUO is not set
# CONFIG_CAMERA_JACKPOT_JPN is not set
# CONFIG_CAMERA_GVIEW2 is not set
# CONFIG_CAMERA_A6E is not set
# CONFIG_CAMERA_A7Y18 is not set
# CONFIG_CAMERA_OTPROM_SUPPORT_FRONT is not set
# CONFIG_VIDEO_EXYNOS_CAMERA_POSTPROCESS is not set
CONFIG_MEDIA_EXYNOS=y
@ -2780,6 +2791,7 @@ CONFIG_MALI_TMIX=y
# CONFIG_MALI_TMIX_R3P0 is not set
# CONFIG_MALI_TMIX_R8P0 is not set
CONFIG_MALI_TMIX_R9P0=y
# CONFIG_MALI_TMIX_R10P0 is not set
CONFIG_MALI_MIDGARD=y
# CONFIG_MALI_GATOR_SUPPORT is not set
# CONFIG_MALI_MIDGARD_DVFS is not set
@ -2864,13 +2876,14 @@ CONFIG_DISPLAY_USE_INFO=y
CONFIG_EXYNOS_DECON_MDNIE_LITE=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA3 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7 is not set
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A5Y18=y
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_A7Y18 is not set
# CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT2 is not set
CONFIG_EXYNOS_DECON_LCD_S6E3FA7_JACKPOT=y
# CONFIG_EXYNOS_DECON_LCD_TD4100_J3TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_S6D7AT0B_J7TOPE is not set
# CONFIG_EXYNOS_DECON_LCD_EA8061S_J7DUO is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_A6ELTE is not set
# CONFIG_EXYNOS_DECON_LCD_S6E8AA5_FEEL2 is not set
# CONFIG_EXYNOS_DECON_LCD_SN65DSI86_GVIEW2 is not set
CONFIG_STATE_NOTIFIER=y
# CONFIG_FB_SSD1307 is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@ -2976,10 +2989,12 @@ CONFIG_SND_SOC_COMPRESS=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS8895_COD3033 is not set
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885=y
CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035=y
# CONFIG_SND_SOC_SAMSUNG_EXYNOS7885_COD3035_MULTI is not set
CONFIG_SND_SOC_SAMSUNG_ABOX=y
CONFIG_SEC_SND_SYNCHRONIZED_IPC=y
# CONFIG_SND_SOC_BT_SHARED_SRATE is not set
# CONFIG_SND_SOC_FM is not set
CONFIG_SND_SOC_SAMSUNG_ABOX_FREE_RMEM=y
CONFIG_SND_SOC_SAMSUNG_MAILBOX=y
# CONFIG_SND_SOC_SAMSUNG_VTS is not set
@ -3013,6 +3028,7 @@ CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_CS42XX8_I2C is not set
# CONFIG_SND_SOC_CS4349 is not set
CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_COD30XX_EXT_ANT is not set
# CONFIG_SND_SOC_ES8328 is not set
# CONFIG_SND_SOC_GTM601 is not set
# CONFIG_SND_SOC_MAX98506 is not set
@ -3036,6 +3052,7 @@ CONFIG_SND_SOC_COD3035X=y
# CONFIG_SND_SOC_TAS571X is not set
CONFIG_SND_SOC_TFA9872=y
# CONFIG_SND_SOC_TFA9879 is not set
# CONFIG_SND_SOC_TFA9896 is not set
# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
# CONFIG_SND_SOC_TLV320AIC31XX is not set
@ -4289,9 +4306,11 @@ CONFIG_BATTERY_NOTIFIER=y
CONFIG_BATTERY_SAMSUNG_V2=y
# CONFIG_CHARGING_VZWCONCEPT is not set
# CONFIG_BATTERY_AGE_FORECAST_DETACHABLE is not set
# CONFIG_FUELGAUGE_ASOC_FROM_CYCLES is not set
# CONFIG_ENG_BATTERY_CONCEPT is not set
# CONFIG_AFC_CURR_CONTROL_BY_TEMP is not set
CONFIG_BATTERY_CISD=y
# CONFIG_FG_FULLCAP_FROM_BATTERY is not set
# CONFIG_FUELGAUGE_MAX77865 is not set
CONFIG_FUELGAUGE_S2MU004=y
# CONFIG_FUELGAUGE_S2MU005 is not set
@ -4457,12 +4476,22 @@ CONFIG_SENSORS_SSP_BAROMETER_LPS22H=y
# CONFIG_SENSORS_YAS539 is not set
# CONFIG_SENSORS_GP2AP070S is not set
# CONFIG_SENSORS_STK3013 is not set
# CONFIG_SENSORS_CM36658 is not set
# CONFIG_SENSORS_CM36686 is not set
# CONFIG_SENSORS_CM3323 is not set
# CONFIG_SENSORS_CM36672P is not set
# CONFIG_SENSORS_TMD3725 is not set
# CONFIG_SENSORS_A96T3X6_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_WIFI is not set
# CONFIG_SENSORS_A96T3X6_WIFI_VIEW2 is not set
# CONFIG_SENSORS_A96T3X6_SUB is not set
# CONFIG_SENSORS_A96T3X6_SUB_VIEW2 is not set
# CONFIG_SENSORS_TC3XXK is not set
# CONFIG_SENSORS_TC3XXK_A7Y18LTE is not set
CONFIG_MOTOR_ZH915=y
# CONFIG_SEC_VIB is not set
CONFIG_MOTOR_S2MU004=y
# CONFIG_ISA1000 is not set
CONFIG_FIVE_USE_TRUSTONIC=y
CONFIG_FIVE_TRUSTLET_PATH="five/ffffffff000000000000000000000072.tlbin"

View File

@ -13,10 +13,15 @@ menu "SAMSUNG EXYNOS SoCs Support"
choice
prompt "Exynos device target"
default MACH_EXYNOS7885_NONE
default MACH_EXYNOS7885_JACKPOTLTE_EUR_OPEN
config MACH_EXYNOS7885_NONE
bool "None"
config MACH_EXYNOS7884_A6ELTE_USA
bool "Galaxy A6 (SM-A600P)"
select EXYNOS_DTBTOOL
config MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN
bool "Galaxy A8+ (SM-A730F)"
select EXYNOS_DTBTOOL
config MACH_EXYNOS7885_JACKPOTLTE_CAN_OPEN
bool "Galaxy A8 (SM-A530W)"
@ -34,10 +39,6 @@ config MACH_EXYNOS7885_JACKPOTLTE_KOR
bool "Galaxy A8 (SM-A530N)"
select EXYNOS_DTBTOOL
config MACH_EXYNOS7885_JACKPOT2LTE_EUR_OPEN
bool "Galaxy A8+ (SM-A730F)"
select EXYNOS_DTBTOOL
endchoice
menuconfig EXYNOS_DTBTOOL

31
drivers/battery_v2/Kconfig Normal file → Executable file
View File

@ -64,6 +64,16 @@ config BATTERY_AGE_FORECAST_DETACHABLE
are calculated based on the RIL time to give an estimate
on weeks that the battery has been used to determine aging.
config FUELGAUGE_ASOC_FROM_CYCLES
bool "FUELGAUGE ASOC output based on mapping with battery cycles"
default n
depends on BATTERY_AGE_FORECAST
help
Say Y to enable support for the mapping of ASOC with charging cycles.
Support for this feature implies that a mapping table has been shared
from HW team and big data side. This feature is providing
support for platform required battery health monitoring concepts.
config MULTI_CHARGING
bool "support for multi charger ICs"
help
@ -101,6 +111,17 @@ config BATTERY_CISD
Say Y to include support for cisd
cisd means cell internal short detection
config FG_FULLCAP_FROM_BATTERY
bool "support Fuelgauge FULLCAP measurement concept via battery driver"
default n
depends on BATTERY_CISD
help
Say Y to enable support for Fuelgauge (FG) FULLCAP
measurement directly from the battery driver.
This is an approximation of the remaining battery
capacity. It shouldn't be enabled if the FG
supports this internally.
# Fuel Gauge
config FUELGAUGE_DUMMY
@ -352,6 +373,16 @@ config CHARGER_BQ24260
This driver source code implemented
all functions for BQ24260 charger.
config CHARGER_BQ25898S
tristate "BQ25898S charger driver"
default n
depends on BATTERY_SAMSUNG
help
Say Y to include support
for TI BQ25898S charger driver.
This driver source code implemented
all functions for BQ25898S charger.
config CHARGER_MAX77693
tristate "MAX77693 battery charger support"
depends on MFD_MAX77693 && I2C

5
drivers/battery_v2/Makefile Normal file → Executable file
View File

@ -7,7 +7,10 @@ obj-$(CONFIG_UPDATE_BATTERY_DATA) += sec_battery_data.o
obj-$(CONFIG_BATTERY_NOTIFIER) += battery_notifier.o
obj-$(CONFIG_FUELGAUGE_S2MU004) += s2mu004_fuelgauge.o
obj-$(CONFIG_FUELGAUGE_S2MU005) += s2mu005_fuelgauge.o
obj-$(CONFIG_FUELGAUGE_S2MU005) += s2mu005_fuelgauge.o
obj-$(CONFIG_FUELGAUGE_MAX77865) += max77865_fuelgauge.o
obj-$(CONFIG_CHARGER_S2MU004) += s2mu004_charger.o
obj-$(CONFIG_CHARGER_S2MU005) += s2mu005_charger.o
obj-$(CONFIG_CHARGER_MAX77865) += max77865_charger.o
obj-$(CONFIG_CHARGER_BQ25898S) += bq25898s_charger.o

View File

@ -0,0 +1,722 @@
/*
* bq25898s_charger.c
* Samsung bq25898s Charger Driver
*
* Copyright (C) 2015 Samsung Electronics
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define DEBUG
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include "include/charger/bq25898s_charger.h"
#define ENABLE 1
#define DISABLE 0
/* Intentionally empty: this sub-charger exposes no generic sysfs properties;
 * all queries are routed through bq25898s_chg_get_property(), including the
 * extended (POWER_SUPPLY_EXT_PROP_*) range. */
static enum power_supply_property bq25898s_charger_props[] = {
};
/*
 * Read one 8-bit register over SMBus, serialized by the driver's i2c_lock.
 * On success the byte is stored in *dest and 0 is returned; on failure the
 * negative SMBus error code is returned and *dest is left untouched.
 */
int bq25898s_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
	struct bq25898s_charger *chip = i2c_get_clientdata(i2c);
	int val;

	mutex_lock(&chip->i2c_lock);
	val = i2c_smbus_read_byte_data(i2c, reg);
	mutex_unlock(&chip->i2c_lock);

	if (val < 0) {
		pr_info("%s reg(0x%x), ret(%d)\n", __func__, reg, val);
		return val;
	}

	*dest = val & 0xff;
	return 0;
}
/*
 * Write one 8-bit register over SMBus under i2c_lock.
 * Returns the i2c_smbus_write_byte_data() result (0 on success, negative
 * errno on failure); failures are logged but not retried.
 */
int bq25898s_write_reg(struct i2c_client *i2c, u8 reg, u8 value)
{
	struct bq25898s_charger *chip = i2c_get_clientdata(i2c);
	int rc;

	mutex_lock(&chip->i2c_lock);
	rc = i2c_smbus_write_byte_data(i2c, reg, value);
	mutex_unlock(&chip->i2c_lock);

	if (rc < 0)
		pr_info("%s reg(0x%x), ret(%d)\n", __func__, reg, rc);

	return rc;
}
/*
 * Read-modify-write of one register: bits selected by @mask are replaced
 * with the corresponding bits of @val, all other bits are preserved.
 * The whole read+write sequence is atomic with respect to other register
 * accesses because it holds i2c_lock throughout.
 * Returns 0 on success or a negative SMBus error code.
 */
int bq25898s_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
{
	struct bq25898s_charger *chip = i2c_get_clientdata(i2c);
	int rc;

	mutex_lock(&chip->i2c_lock);
	rc = i2c_smbus_read_byte_data(i2c, reg);
	if (rc >= 0) {
		u8 merged = (val & mask) | ((u8)rc & ~mask);

		rc = i2c_smbus_write_byte_data(i2c, reg, merged);
	}
	mutex_unlock(&chip->i2c_lock);

	return rc;
}
static void bq25898s_test_read(struct bq25898s_charger *charger)
{
u8 reg;
u8 reg_data;
char str[1024]={0,};
for (reg = 0x00; reg <= 0x14; reg++) {
bq25898s_read_reg(charger->i2c, reg, &reg_data);
sprintf(str + strlen(str), "0x%02x:0x%02x,", reg, reg_data);
}
pr_info("%s : %s\n", __func__, str);
}
/*
 * Return the programmed fast-charge current in mA.
 * ICHG lives in bits [5:0] of REG04 at 64 mA per LSB.
 */
static int bq25898s_get_charge_current(struct bq25898s_charger *charger)
{
	u8 reg_val;
	int ichg_ma;

	bq25898s_read_reg(charger->i2c, BQ25898S_CHG_REG_04, &reg_val);
	ichg_ma = 64 * (reg_val & 0x3F);
	pr_info("%s : DATA(0x%02x), current(%d)\n", __func__, reg_val, ichg_ma);
	return ichg_ma;
}
/*
 * Return the programmed charge-termination (float) voltage.
 * VREG is the upper 6 bits of REG06; the driver's unit is 0.1 mV
 * (38400 = 3.840 V base, 160 units = 16 mV per step).
 */
static int bq25898s_get_float_voltage(struct bq25898s_charger *charger)
{
	u8 reg_val;
	int vfloat;

	bq25898s_read_reg(charger->i2c, BQ25898S_CHG_REG_06, &reg_val);
	vfloat = 38400 + (reg_val >> 2) * 160;
	pr_info("%s : DATA(0x%02x) VOLTAGE(%d)\n", __func__, reg_val, vfloat);
	return vfloat;
}
/*
 * Return the programmed input current limit in mA.
 * IINLIM is bits [5:0] of REG00: 100 mA offset, 50 mA per LSB.
 */
static int bq25898s_get_input_current(struct bq25898s_charger *charger)
{
	u8 reg_val;
	int iin_ma;

	bq25898s_read_reg(charger->i2c, BQ25898S_CHG_REG_00, &reg_val);
	iin_ma = 100 + 50 * (reg_val & 0x3F);
	pr_info("%s : DATA(0x%02x), current(%d)\n", __func__, reg_val, iin_ma);
	return iin_ma;
}
/*
 * Map the CHRG_STAT field (bits [4:3] of REG0B) to a power-supply status:
 *   0b00 -> not charging, 0b11 -> charge done (full),
 *   0b01/0b10 -> charging (pre-charge / fast-charge).
 */
static int bq25898s_get_charger_state(struct bq25898s_charger *charger)
{
	u8 chrg_stat;
	int status;

	bq25898s_read_reg(charger->i2c, BQ25898S_CHG_REG_0B, &chrg_stat);
	chrg_stat = (chrg_stat & 0x18) >> 3;

	switch (chrg_stat) {
	case 0x00:
		status = POWER_SUPPLY_STATUS_NOT_CHARGING;
		break;
	case 0x03:
		status = POWER_SUPPLY_STATUS_FULL;
		break;
	default:
		status = POWER_SUPPLY_STATUS_CHARGING;
		break;
	}

	pr_info("%s: DATA(0x%02x), status(%d)\n", __func__, chrg_stat, status);
	return status;
}
/*
 * Program the fast-charge current (mA), 64 mA per LSB in REG04[5:0].
 *
 * Fix: the original wrote charging_current/64 straight into a u8 with no
 * range check. A request above 4032 mA (steps > 0x3F) would be truncated
 * and, worse, any value whose quotient exceeded the 6-bit ICHG field would
 * spill into neighboring register bits through the update mask math.
 * Clamp to the valid field range instead.
 */
static void bq25898s_set_charge_current(struct bq25898s_charger *charger, int charging_current)
{
	int steps = charging_current / 64;
	u8 data;

	if (steps < 0)
		steps = 0;
	if (steps > BQ25898S_CHG_ICHG_MASK)
		steps = BQ25898S_CHG_ICHG_MASK;
	data = steps;

	pr_info("%s: charging_current(%d), 0x%x \n", __func__, charging_current, data);
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_04,
		data, BQ25898S_CHG_ICHG_MASK);
}
/*
 * Program the input current limit (mA): REG00[5:0], 100 mA offset,
 * 50 mA per LSB.
 *
 * Fix: clamp the computed step count to the 6-bit IINLIM field. The
 * original computed (input_current - 100) / 50 unchecked; a request above
 * 3250 mA overflowed the field and would corrupt REG00's upper bits
 * (EN_HIZ / EN_ILIM) through the mask arithmetic.
 */
static void bq25898s_set_input_current(struct bq25898s_charger *charger, int input_current)
{
	int steps;
	u8 data;

	if (input_current < 100)
		steps = 0;
	else
		steps = (input_current - 100) / 50;
	if (steps > BQ25898S_CHG_IINLIM_MASK)
		steps = BQ25898S_CHG_IINLIM_MASK;
	data = steps;

	pr_info ("%s : SET INPUT CURRENT(%d), 0x%x\n", __func__, input_current, data);
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_00,
		data, BQ25898S_CHG_IINLIM_MASK);
}
/*
 * Kick the charger's I2C watchdog by setting bit 6 (0x40) of REG03,
 * then read the register back purely for the debug log.
 */
static void bq25898s_watchdog_reset(struct bq25898s_charger *charger)
{
	u8 reg03;

	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_03, 0x40, 0x40);
	bq25898s_read_reg(charger->i2c, BQ25898S_CHG_REG_03, &reg03);
	pr_info("%s : BQ25898S_CHG_REG_03(0x%02x)\n", __func__, reg03);
}
static void bq25898s_set_watchdog_timer_en(struct bq25898s_charger *charger, int time)
{
bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_07,
time << BQ25898S_CHG_WATCHDOG_SHIFT, BQ25898S_CHG_WATCHDOG_MASK);
}
/*
 * Program the charge-termination (float) voltage.
 * @float_voltage is in 0.1 mV units (e.g. 43000 = 4.30 V); inverse of
 * bq25898s_get_float_voltage(): step = (v - 38400) / 160, placed in
 * REG06[7:2].
 */
static void bq25898s_set_float_voltage(struct bq25898s_charger *charger, int float_voltage)
{
	u8 reg_val = ((float_voltage - 38400) / 160) << 2;

	pr_info("%s: voltage(%d), 0x%x \n", __func__, float_voltage, reg_val);
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_06,
		reg_val, BQ25898S_CHG_VREG_MASK);
}
/*
 * Enable or disable charging (CHG_CONFIG bit in REG03) and manage the
 * watchdog accordingly: 80 s timeout while charging, disabled otherwise.
 * Dumps the register file afterwards for debugging.
 */
static void bq25898s_set_charger_state(struct bq25898s_charger *charger,
	int enable)
{
	int wdt = enable ? WATCHDOG_TIMER_80S : WATCHDOG_TIMER_DISABLE;

	pr_info("%s: CHARGE_EN(%s)\n",__func__, enable > 0 ? "ENABLE" : "DISABLE");
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_03,
		(enable << BQ25898S_CHG_CONFIG_SHIFT), BQ25898S_CHG_CONFIG_MASK);
	bq25898s_set_watchdog_timer_en(charger, wdt);
	bq25898s_test_read(charger);
}
/*
 * Program the charge-termination (top-off) current: REG05 ITERM field,
 * 64 mA offset, 64 mA per LSB, 4-bit field.
 *
 * Fix: the original computed (eoc - 64) / 64 directly into a u8. For
 * eoc < 64 the quotient is negative and wraps to a large u8 value, and
 * values above the 4-bit field leaked into neighboring bits. Clamp to
 * [0, BQ25898S_CHG_ITERM_MASK].
 */
static void bq25898s_set_topoff_current(struct bq25898s_charger *charger, int eoc)
{
	int steps = (eoc - 64) / 64;
	u8 data;

	if (steps < 0)
		steps = 0;
	if (steps > BQ25898S_CHG_ITERM_MASK)
		steps = BQ25898S_CHG_ITERM_MASK;
	data = steps;

	pr_info("%s: eoc(%d), 0x%x \n", __func__, eoc, data);
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_05,
		data, BQ25898S_CHG_ITERM_MASK);
}
/*
 * One-time hardware setup at probe: charging off, conservative current
 * limits, DPDM detection and autonomous termination disabled (the platform
 * battery driver decides end-of-charge), then the DT-provided termination
 * current and float voltage are applied.
 */
static void bq25898s_charger_initialize(struct bq25898s_charger *charger)
{
	/* Start with charging disabled; the framework enables it later
	 * through POWER_SUPPLY_PROP_CHARGING_ENABLED. */
	bq25898s_set_charger_state(charger, DISABLE);
	bq25898s_set_input_current(charger, 2000);
	bq25898s_set_charge_current(charger, 500);
	/* Disable AUTO_DPDM_EN */
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_02, 0x0 << 0, 0x1 << 0);
	/* Disable charging termination */
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_07, 0x0 << 7, 0x1 << 7);
	/* termination current */
	bq25898s_set_topoff_current(charger, charger->full_check_current);
	/* set float voltage */
	bq25898s_set_float_voltage(charger, charger->float_voltage);
	bq25898s_test_read(charger);
}
/*
 * Threaded IRQ handler: reads the fault register (REG0C) and logs it.
 * Reading the register is also what services the interrupt here; no
 * further action is taken on faults.
 */
static irqreturn_t bq25898s_irq_handler(int irq, void *data)
{
	struct bq25898s_charger *charger = data;
	u8 fault;

	bq25898s_read_reg(charger->i2c, BQ25898S_CHG_REG_0C, &fault);
	dev_info(charger->dev, "%s: 0x%x\n", __func__, fault);

	return IRQ_HANDLED;
}
/*
 * power_supply get_property callback.
 *
 * Besides plain reads, PROP_HEALTH doubles as the periodic keep-alive hook:
 * it kicks the I2C watchdog and re-asserts FORCE_VINDPM-related bit 7 of
 * REG02 while charging. Extended properties (psp beyond PROP_MAX) are
 * dispatched through ext_psp. Returns 0, -ENODATA for unsupported USB_HC,
 * or -EINVAL for unknown properties.
 */
static int bq25898s_chg_get_property(struct power_supply *psy,
	enum power_supply_property psp,
	union power_supply_propval *val)
{
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);
	enum power_supply_ext_property ext_psp = psp;

	/* Default answer for the "recognized but no data" cases below. */
	val->intval = 0;
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = charger->cable_type;
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		break;
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = bq25898s_get_charger_state(charger);
		break;
	case POWER_SUPPLY_PROP_CHARGE_TYPE:
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		/* Polled regularly by the battery driver; used as the
		 * watchdog-kick point. Health itself is always GOOD. */
		bq25898s_watchdog_reset(charger);
		if (charger->is_charging == ENABLE) {
			/* Re-assert bit 7 of REG02 while charging. */
			bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_02, 0x80, 0x80);
		}
		bq25898s_test_read(charger);
		val->intval = POWER_SUPPLY_HEALTH_GOOD;
		break;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		val->intval = bq25898s_get_input_current(charger);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
		val->intval = bq25898s_get_charge_current(charger);
		break;
	case POWER_SUPPLY_PROP_CURRENT_AVG:
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		break;
#if defined(CONFIG_BATTERY_SWELLING)
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		val->intval = bq25898s_get_float_voltage(charger);
		break;
#endif
	case POWER_SUPPLY_PROP_USB_HC:
		return -ENODATA;
	case POWER_SUPPLY_PROP_CHARGE_NOW:
		break;
	case POWER_SUPPLY_PROP_MAX ... POWER_SUPPLY_EXT_PROP_MAX:
		switch (ext_psp) {
		case POWER_SUPPLY_EXT_PROP_CHECK_SLAVE_I2C:
		{
			/* Probe bus health via the VBUS ADC register; a
			 * plausible reading (4.5 V ~ 5.5 V window) means the
			 * slave responds. */
			u8 reg_data;
			bq25898s_read_reg(charger->i2c, BQ25898S_CHG_REG_11, &reg_data);
			if((reg_data > 0x93) && (reg_data < 0x9D)) // 4.5V ~ 5.5V
				val->intval = 1;
			else
				val->intval = 0;
			pr_info("%s: reg_data : 0x%02X\n", __func__,reg_data);
		}
			break;
		case POWER_SUPPLY_EXT_PROP_CHECK_MULTI_CHARGE:
			/* Reports the driver's own enable flag, not the
			 * hardware state. */
			val->intval = charger->is_charging ?
				POWER_SUPPLY_STATUS_CHARGING : POWER_SUPPLY_STATUS_DISCHARGING;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * power_supply set_property callback.
 *
 * Caches each requested value in the driver struct and pushes it to the
 * hardware where applicable. VOLTAGE_MAX normalizes two caller unit
 * conventions (mV vs 0.1 mV) before programming. Returns 0, -ENODATA for
 * HEALTH (read-only here), or -EINVAL for unknown properties.
 */
static int bq25898s_chg_set_property(struct power_supply *psy,
	enum power_supply_property psp,
	const union power_supply_propval *val)
{
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);

	switch (psp) {
	/* val->intval : type */
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
		charger->is_charging =
			(val->intval == SEC_BAT_CHG_MODE_CHARGING) ? ENABLE : DISABLE;
		bq25898s_set_charger_state(charger, charger->is_charging);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		charger->charging_current = val->intval;
		bq25898s_set_charge_current(charger, charger->charging_current);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		/* Repurposed by the platform battery code to carry the SIOP
		 * (thermal throttling) level; cached only, no HW write. */
		charger->siop_level = val->intval;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		charger->cable_type = val->intval;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		if (val->intval / 1000 < 10) /* chg_float_voltage_conv = 1 */
			charger->float_voltage = val->intval * 10;
		else /* chg_float_voltage_conv = 10 */
			charger->float_voltage = val->intval;
		bq25898s_set_float_voltage(charger, charger->float_voltage);
		break;
	case POWER_SUPPLY_PROP_STATUS:
	case POWER_SUPPLY_PROP_CURRENT_FULL:
		/* Accepted but intentionally ignored. */
		break;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		charger->input_current = val->intval;
		bq25898s_set_input_current(charger, charger->input_current);
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		return -ENODATA;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * sysfs "addr" store: set the register address used by the data/size
 * debug attributes. Input format: "0x%x".
 *
 * Fix: the original declared `charger` but never initialized it (the
 * drvdata lookup was commented out), then wrote through the wild pointer —
 * undefined behavior on every store. These attributes hang off the
 * power-supply device, so dev's drvdata is the psy and the charger comes
 * from power_supply_get_drvdata().
 */
static ssize_t bq25898s_store_addr(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);
	int x;

	if (sscanf(buf, "0x%x\n", &x) == 1) {
		charger->addr = x;
	}
	return count;
}
/*
 * sysfs "addr" show: print the currently selected debug register address.
 *
 * Fix: same uninitialized-pointer bug as the other debug attributes —
 * `charger` was read without ever being assigned. Resolve it through the
 * power-supply device's drvdata.
 */
static ssize_t bq25898s_show_addr(struct device *dev,
	struct device_attribute *attr,
	char *buf)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);

	return sprintf(buf, "0x%x\n", charger->addr);
}
/*
 * sysfs "size" store: set how many consecutive registers the "data"
 * attribute dumps. Input format: decimal integer.
 *
 * Fix: `charger` was dereferenced while uninitialized (UB); obtain it from
 * the power-supply device's drvdata like the other attributes.
 */
static ssize_t bq25898s_store_size(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);
	int x;

	if (sscanf(buf, "%d\n", &x) == 1) {
		charger->size = x;
	}
	return count;
}
/*
 * sysfs "size" show: print the debug dump length.
 *
 * Fix: uninitialized `charger` pointer dereference, resolved via drvdata.
 * NOTE(review): the store side parses "%d" but this prints "0x%x"; kept
 * as-is to preserve the existing ABI, but the asymmetry looks unintended.
 */
static ssize_t bq25898s_show_size(struct device *dev,
	struct device_attribute *attr,
	char *buf)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);

	return sprintf(buf, "0x%x\n", charger->size);
}
/*
 * sysfs "data" store: write one byte ("0x%x") to the register selected by
 * the "addr" attribute.
 *
 * Fix: `charger` was used completely uninitialized — any write to this
 * attribute dereferenced a wild pointer. Resolve through the power-supply
 * device's drvdata.
 */
static ssize_t bq25898s_store_data(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);
	int x;

	if (sscanf(buf, "0x%x", &x) == 1) {
		u8 data = x;

		if (bq25898s_write_reg(charger->i2c, charger->addr, data) < 0) {
			dev_info(charger->dev,
				"%s: addr: 0x%x write fail\n", __func__, charger->addr);
		}
	}
	return count;
}
/*
 * sysfs "data" show: dump `size`+1 registers starting at `addr`, one
 * "0xRR : 0xVV" line each; failed reads are reported inline.
 *
 * Fixes: (1) `charger` was dereferenced uninitialized (UB) — resolve via
 * drvdata; (2) stray double semicolon removed.
 */
static ssize_t bq25898s_show_data(struct device *dev,
	struct device_attribute *attr,
	char *buf)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	struct bq25898s_charger *charger = power_supply_get_drvdata(psy);
	u8 data;
	int i, count = 0;

	if (charger->size == 0)
		charger->size = 1;
	for (i = 0; i <= charger->size; i++) {
		if (bq25898s_read_reg(charger->i2c, charger->addr + i, &data) < 0) {
			dev_info(charger->dev,
				"%s: read fail\n", __func__);
			count += sprintf(buf + count, "addr: 0x%x read fail\n", charger->addr + i);
			continue;
		}
		count += sprintf(buf + count, "0x%02x : 0x%02x\n", charger->addr + i, data);
	}
	return count;
}
static DEVICE_ATTR(addr, 0644, bq25898s_show_addr, bq25898s_store_addr);
static DEVICE_ATTR(size, 0644, bq25898s_show_size, bq25898s_store_size);
static DEVICE_ATTR(data, 0644, bq25898s_show_data, bq25898s_store_data);
static struct attribute *bq25898s_attributes[] = {
&dev_attr_addr.attr,
&dev_attr_size.attr,
&dev_attr_data.attr,
NULL
};
static const struct attribute_group bq25898s_attr_group = {
.attrs = bq25898s_attributes,
};
#ifdef CONFIG_OF
/*
 * Parse the "bq25898s-charger" device-tree node.
 *
 * Fills pdata->irq_gpio (0 if absent) and, on charger, the float voltage
 * (default 43000 = 4.30 V in 0.1 mV units) and full-check/termination
 * current (default 128 mA). Returns 0 on success, -1 if the node is
 * missing. All individual properties are optional with fallbacks.
 */
static int bq25898s_charger_parse_dt(struct bq25898s_charger *charger,
	struct bq25898s_charger_platform_data *pdata)
{
	struct device_node *np = of_find_node_by_name(NULL, "bq25898s-charger");
	int ret = 0;

	if (!np) {
		pr_err("%s: np is NULL\n", __func__);
		return -1;
	} else {
		/* Optional interrupt GPIO; 0 means "no IRQ" to probe(). */
		ret = of_get_named_gpio_flags(np, "bq25898s-charger,irq-gpio",
			0, NULL);
		if (ret < 0) {
			pr_err("%s: bq25898s-charger,irq-gpio is empty\n", __func__);
			pdata->irq_gpio = 0;
		} else {
			pdata->irq_gpio = ret;
			pr_info("%s: irq-gpio = %d\n", __func__, pdata->irq_gpio);
		}
		ret = of_property_read_u32(np, "bq25898s-charger,chg_float_voltage",
			&pdata->float_voltage);
		if (ret) {
			pr_info("%s: bq25898s-charger,chg_float_voltage is empty\n", __func__);
			charger->float_voltage = 43000;
		} else
			charger->float_voltage = pdata->float_voltage;
		ret = of_property_read_u32(np, "bq25898s-charger,full_check_current",
			&pdata->full_check_current);
		if (ret) {
			pr_info("%s: bq25898s-charger,full_check_current is empty\n", __func__);
			charger->full_check_current = 128;
		} else
			charger->full_check_current = pdata->full_check_current;
	}
	return 0;
}
#endif
/* Power-supply class descriptor for the secondary (slave) charger.
 * TYPE_UNKNOWN and no_thermal keep the core from treating it as a
 * primary supply or attaching a thermal zone. */
static const struct power_supply_desc bq25898s_charger_power_supply_desc = {
	.name = "bq25898s-charger",
	.type = POWER_SUPPLY_TYPE_UNKNOWN,
	.properties = bq25898s_charger_props,
	.num_properties = ARRAY_SIZE(bq25898s_charger_props),
	.get_property = bq25898s_chg_get_property,
	.set_property = bq25898s_chg_set_property,
	.no_thermal = true,
};
static int bq25898s_charger_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct bq25898s_charger *charger;
struct bq25898s_charger_platform_data *pdata = client->dev.platform_data;
struct power_supply_config sub_charger_cfg = {};
int ret = 0;
pr_info("%s: bq25898s Charger Driver Loading\n", __func__);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
return -EIO;
charger = kzalloc(sizeof(*charger), GFP_KERNEL);
if (!charger)
return -ENOMEM;
mutex_init(&charger->i2c_lock);
charger->dev = &client->dev;
charger->i2c = client;
if (client->dev.of_node) {
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&client->dev, "Failed to allocate memory\n");
ret = -ENOMEM;
goto err_parse_dt_nomem;
}
#if defined(CONFIG_OF)
ret = bq25898s_charger_parse_dt(charger, pdata);
if (ret < 0) {
pr_err("%s not found charger dt! ret[%d]\n",
__func__, ret);
goto err_parse_dt;
}
#endif
}
charger->pdata = pdata;
i2c_set_clientdata(client, charger);
/*
charger->psy_chg.name = "bq25898s-charger";
charger->psy_chg.type = POWER_SUPPLY_TYPE_UNKNOWN;
charger->psy_chg.get_property = bq25898s_chg_get_property;
charger->psy_chg.set_property = bq25898s_chg_set_property;
charger->psy_chg.properties = bq25898s_charger_props;
charger->psy_chg.num_properties = ARRAY_SIZE(bq25898s_charger_props);
*/
charger->cable_type = SEC_BATTERY_CABLE_NONE;
bq25898s_charger_initialize(charger);
charger->input_current = bq25898s_get_input_current(charger);
charger->charging_current = bq25898s_get_charge_current(charger);
pr_info("%s: input: %d, charging: %d\n", __func__, charger->input_current, charger->charging_current);
sub_charger_cfg.drv_data = charger;
charger->psy_chg = power_supply_register(charger->dev, &bq25898s_charger_power_supply_desc, &sub_charger_cfg);
if (!charger->psy_chg) {
pr_err("%s: Failed to Register psy_chg\n", __func__);
goto err_data_free;
}
if (pdata->irq_gpio) {
charger->chg_irq = gpio_to_irq(pdata->irq_gpio);
ret = request_threaded_irq(charger->chg_irq, NULL,
bq25898s_irq_handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"bq25898s-irq", charger);
if (ret < 0) {
pr_err("%s: Failed to Request IRQ(%d)\n", __func__, ret);
goto err_req_irq;
}
}
device_init_wakeup(charger->dev, 1);
/*
ret = sysfs_create_group(&charger->psy_chg.dev->kobj, &bq25898s_attr_group);
if (ret) {
dev_info(&client->dev,
"%s: sysfs_create_group failed\n", __func__);
}
*/
charger->size = BQ25898S_CHG_REG_14;
pr_info("%s: bq25898s Charger Driver Loaded\n", __func__);
return 0;
err_req_irq:
power_supply_unregister(charger->psy_chg);
err_data_free:
err_parse_dt:
kfree(pdata);
err_parse_dt_nomem:
mutex_destroy(&charger->i2c_lock);
kfree(charger);
return ret;
}
static const struct i2c_device_id bq25898s_charger_id[] = {
{"bq25898s-charger", 0},
{}
};
#ifdef CONFIG_OF
/* Device-tree match table; referenced from the i2c_driver below. */
static struct of_device_id bq25898s_charger_match_table[] = {
	{.compatible = "ti,bq25898s-charger"},
	{},
};
#else
/* Fix: the fallback must define the SAME identifier the driver struct
 * uses. The original defined "da9155_charger_match_table" (copy-paste
 * leftover from the da9155 driver), breaking the !CONFIG_OF build. */
#define bq25898s_charger_match_table NULL
#endif
/*
 * Shutdown hook: release the IRQ (so no handler fires during reset) and
 * assert the REG_RST bit (bit 7 of REG14) to restore hardware defaults.
 */
static void bq25898s_charger_shutdown(struct i2c_client *client)
{
	struct bq25898s_charger *charger = i2c_get_clientdata(client);

	if (charger->chg_irq)
		free_irq(charger->chg_irq, charger);

	pr_info("%s: bq25898s Charger driver shutdown\n", __func__);

	if (!charger->i2c) {
		pr_err("%s: no bq25898s i2c client\n", __func__);
		return;
	}

	/* reset register */
	bq25898s_update_reg(charger->i2c, BQ25898S_CHG_REG_14, 0x80, 0x80);
}
/*
 * Remove hook: tear down in reverse probe order — IRQ, power supply,
 * mutex, then the allocations. Always returns 0.
 */
static int bq25898s_charger_remove(struct i2c_client *client)
{
	struct bq25898s_charger *charger = i2c_get_clientdata(client);

	if (charger->chg_irq)
		free_irq(charger->chg_irq, charger);

	power_supply_unregister(charger->psy_chg);
	mutex_destroy(&charger->i2c_lock);
	kfree(charger->pdata);
	kfree(charger);

	return 0;
}
#if defined CONFIG_PM
/*
 * PM suspend: arm the IRQ as a wakeup source (if the device may wake the
 * system) and mask it for the duration of suspend.
 */
static int bq25898s_charger_suspend(struct device *dev)
{
	struct bq25898s_charger *charger = dev_get_drvdata(dev);

	if (!charger->chg_irq)
		return 0;

	if (device_may_wakeup(dev))
		enable_irq_wake(charger->chg_irq);
	disable_irq(charger->chg_irq);

	return 0;
}
/*
 * PM resume: undo suspend — drop the wakeup arming and unmask the IRQ.
 */
static int bq25898s_charger_resume(struct device *dev)
{
	struct bq25898s_charger *charger = dev_get_drvdata(dev);

	if (!charger->chg_irq)
		return 0;

	if (device_may_wakeup(dev))
		disable_irq_wake(charger->chg_irq);
	enable_irq(charger->chg_irq);

	return 0;
}
#else
#define bq25898s_charger_suspend NULL
#define bq25898s_charger_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(bq25898s_charger_pm_ops, bq25898s_charger_suspend,
bq25898s_charger_resume);
static struct i2c_driver bq25898s_charger_driver = {
.driver = {
.name = "bq25898s-charger",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &bq25898s_charger_pm_ops,
#endif
.of_match_table = bq25898s_charger_match_table,
},
.probe = bq25898s_charger_probe,
.remove = bq25898s_charger_remove,
.shutdown = bq25898s_charger_shutdown,
.id_table = bq25898s_charger_id,
};
/* Module entry point: register the I2C driver with the core. */
static int __init bq25898s_charger_init(void)
{
	pr_info("%s : \n", __func__);

	return i2c_add_driver(&bq25898s_charger_driver);
}
/* Module exit point: unregister the I2C driver. */
static void __exit bq25898s_charger_exit(void)
{
	i2c_del_driver(&bq25898s_charger_driver);
}
module_init(bq25898s_charger_init);
module_exit(bq25898s_charger_exit);
MODULE_DESCRIPTION("Samsung BQ25898S Charger Driver");
MODULE_AUTHOR("Samsung Electronics");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,87 @@
/*
* bq25898s_charger.h
* Samsung BQ25898S Charger Header
*
* Copyright (C) 2012 Samsung Electronics, Inc.
*
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __BQ25898S_CHARGER_H
#define __BQ25898S_CHARGER_H __FILE__
#include "../sec_charging_common.h"
#define BQ25898S_CHG_REG_00 0x00
#define BQ25898S_CHG_ENABLE_HIZ_MODE_SHIFT 7
#define BQ25898S_CHG_ENABLE_HIZ_MODE_MASK (1 << BQ25898S_CHG_ENABLE_HIZ_MODE_SHIFT)
#define BQ25898S_CHG_IINLIM_MASK 0x3F
#define BQ25898S_CHG_REG_02 0x02
#define BQ25898S_CHG_REG_03 0x03
#define BQ25898S_CHG_CONFIG_SHIFT 4
#define BQ25898S_CHG_CONFIG_MASK (1 << BQ25898S_CHG_CONFIG_SHIFT)
#define BQ25898S_CHG_REG_04 0x04
#define BQ25898S_CHG_ICHG_MASK 0x3F
#define BQ25898S_CHG_REG_05 0x05
#define BQ25898S_CHG_ITERM_MASK 0x0F
#define BQ25898S_CHG_REG_06 0x06
#define BQ25898S_CHG_VREG_MASK 0xFC
#define BQ25898S_CHG_REG_07 0x07
#define BQ25898S_CHG_WATCHDOG_SHIFT 4
#define BQ25898S_CHG_WATCHDOG_MASK (0x3 << BQ25898S_CHG_WATCHDOG_SHIFT)
#define BQ25898S_CHG_REG_0B 0x0B
#define BQ25898S_CHG_REG_0C 0x0C
#define BQ25898S_CHG_REG_11 0x11
#define BQ25898S_CHG_REG_14 0x14
enum bq25898s_watchdog_timer {
WATCHDOG_TIMER_DISABLE = 0,
WATCHDOG_TIMER_40S,
WATCHDOG_TIMER_80S,
WATCHDOG_TIMER_160S,
};
struct bq25898s_charger_platform_data {
int irq_gpio;
unsigned int float_voltage;
unsigned int full_check_current;
};
struct bq25898s_charger {
struct device *dev;
struct i2c_client *i2c;
struct mutex i2c_lock;
struct bq25898s_charger_platform_data *pdata;
struct power_supply *psy_chg;
unsigned int siop_level;
unsigned int chg_irq;
unsigned int is_charging;
unsigned int charging_type;
unsigned int cable_type;
int input_current;
int charging_current;
unsigned int float_voltage;
unsigned int full_check_current;
u8 addr;
int size;
};
#endif /* __BQ25898S_CHARGER_H */

5
drivers/battery_v2/include/charger/max77865_charger.h Normal file → Executable file
View File

@ -251,6 +251,9 @@ struct max77865_charger_data {
int uvlo_attach_flag;
int uvlo_attach_cable_type;
int irq_bat;
int irq_tm;
int irq_bypass;
int irq_batp;
@ -286,6 +289,8 @@ struct max77865_charger_data {
int irq_sysovlo;
struct wake_lock sysovlo_wake_lock;
u8 vsys_ocp;
bool is_mdock;
bool otg_on;

View File

@ -183,6 +183,7 @@ struct max77865_fuelgauge_data {
bool using_hw_vempty;
unsigned int vempty_mode;
int temperature;
bool vempty_init_flag;
int low_temp_limit;

View File

@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __S2MU004_FUELGAUGE_H
@ -27,6 +26,8 @@
#include "../sec_charging_common.h"
extern unsigned int lpcharge;
/* Slave address should be shifted to the right 1bit.
* R/W bit should NOT be included.
*/
@ -79,7 +80,8 @@ struct sec_fg_info {
#if !defined(CONFIG_BATTERY_AGE_FORECAST)
/* copy from platform data /
* DTS or update by shell script */
* DTS or update by shell script
*/
int battery_table1[88]; // evt1
int battery_table2[22]; // evt1
int battery_table3[88]; // evt2
@ -109,6 +111,16 @@ struct fg_age_data_info {
struct fg_age_data_info
#endif
#if defined(CONFIG_FUELGAUGE_ASOC_FROM_CYCLES)
struct sec_cycles_to_asoc {
unsigned int cycle;
unsigned int asoc;
};
#define sec_cycles_to_asoc_t \
struct sec_cycles_to_asoc
#endif
typedef struct s2mu004_fuelgauge_platform_data {
int capacity_max;
int capacity_max_margin;
@ -124,6 +136,11 @@ typedef struct s2mu004_fuelgauge_platform_data {
char *fuelgauge_name;
bool repeated_fuelalert;
#if defined(CONFIG_FUELGAUGE_ASOC_FROM_CYCLES)
int fixed_asoc_levels;
sec_cycles_to_asoc_t *cycles_to_asoc;
#endif
} s2mu004_fuelgauge_platform_data_t;
struct s2mu004_fuelgauge_data {
@ -140,13 +157,13 @@ struct s2mu004_fuelgauge_data {
int mode;
int revision;
/* HW-dedicated fuel guage info structure
* used in individual fuel gauge file only
/* HW-dedicated fuelgauge info structure
* used in individual fuelgauge file only
* (ex. dummy_fuelgauge.c)
*/
struct sec_fg_info info;
#if defined(CONFIG_BATTERY_AGE_FORECAST)
fg_age_data_info_t* age_data_info;
fg_age_data_info_t *age_data_info;
int fg_num_age_step;
int fg_age_step;
int age_reset_status;

44
drivers/battery_v2/include/sec_battery.h Normal file → Executable file
View File

@ -2,7 +2,7 @@
* sec_battery.h
* Samsung Mobile Battery Header
*
* Copyright (C) 2017 Samsung Electronics
* Copyright (C) 2018 Samsung Electronics
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -11,7 +11,7 @@
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
@ -78,10 +78,10 @@
#define SIOP_EVENT_NONE 0x0000
#define SIOP_EVENT_WPC_CALL 0x0001
#if defined(CONFIG_SEC_FACTORY) // SEC_FACTORY
#if defined(CONFIG_SEC_FACTORY) /* SEC_FACTORY */
#define STORE_MODE_CHARGING_MAX 80
#define STORE_MODE_CHARGING_MIN 70
#else // !SEC_FACTORY, STORE MODE
#else /* !SEC_FACTORY, STORE MODE */
#define STORE_MODE_CHARGING_MAX 70
#define STORE_MODE_CHARGING_MIN 60
#define STORE_MODE_CHARGING_MAX_VZW 35
@ -94,10 +94,10 @@
#define DEFAULT_HEALTH_CHECK_COUNT 5
#define TEMP_HIGHLIMIT_DEFAULT 2000
#define SIOP_INPUT_LIMIT_CURRENT 1200
#define SIOP_CHARGING_LIMIT_CURRENT 1000
#define SIOP_WIRELESS_INPUT_LIMIT_CURRENT 530
#define SIOP_WIRELESS_CHARGING_LIMIT_CURRENT 780
#define SIOP_INPUT_LIMIT_CURRENT 1200
#define SIOP_CHARGING_LIMIT_CURRENT 1000
#define SIOP_WIRELESS_INPUT_LIMIT_CURRENT 530
#define SIOP_WIRELESS_CHARGING_LIMIT_CURRENT 780
#define SIOP_HV_WIRELESS_INPUT_LIMIT_CURRENT 700
#define SIOP_HV_WIRELESS_CHARGING_LIMIT_CURRENT 600
#define SIOP_STORE_HV_WIRELESS_CHARGING_LIMIT_CURRENT 450
@ -150,6 +150,24 @@ struct adc_sample_info {
int index;
};
#if defined(CONFIG_FG_FULLCAP_FROM_BATTERY)
enum capacity_measure_state {
CAPACITY_MEASURE_NONE = 0,
CAPACITY_MEASURE_OFF,
CAPACITY_MEASURING,
CAPACITY_MEASURE_UPDATING,
CAPACITY_MEASURE_UPDATED,
};
struct capacity_measure_info {
enum capacity_measure_state status;
int capacity_rep; /* mA * seconds */
int capacity_full; /* mA * seconds */
int design_cap; /* mA * seconds */
int start_soc;
};
#endif
struct sec_battery_info {
struct device *dev;
sec_battery_platform_data_t *pdata;
@ -201,7 +219,7 @@ struct sec_battery_info {
int voltage_avg; /* average voltage (mV) */
int voltage_ocv; /* open circuit voltage (mV) */
int current_now; /* current (mA) */
int inbat_adc; /* inbat adc */
int inbat_adc; /* inbat adc */
int current_avg; /* average current (mA) */
int current_max; /* input current limit (mA) */
int current_adc;
@ -282,7 +300,7 @@ struct sec_battery_info {
int temp_adc;
int temp_ambient_adc;
int usb_temp_adc;
int usb_temp_adc;
int chg_temp_adc;
int wpc_temp_adc;
int coil_temp_adc;
@ -431,6 +449,10 @@ struct sec_battery_info {
int fg_reset;
bool block_water_event;
#if defined(CONFIG_FG_FULLCAP_FROM_BATTERY)
struct capacity_measure_info capacity_info;
#endif
};
ssize_t sec_bat_show_attrs(struct device *dev,
@ -482,7 +504,7 @@ enum {
BATT_TEMP_AVER,
BATT_TEMP_ADC_AVER,
USB_TEMP,
USB_TEMP_ADC,
USB_TEMP_ADC,
CHG_TEMP,
CHG_TEMP_ADC,
SLAVE_CHG_TEMP,

3
drivers/battery_v2/include/sec_charging_common.h Normal file → Executable file
View File

@ -66,6 +66,7 @@ enum power_supply_ext_property {
#if defined(CONFIG_FUELGAUGE_S2MU004) || defined(CONFIG_FUELGAUGE_S2MU005)
POWER_SUPPLY_EXT_PROP_UPDATE_BATTERY_DATA,
#endif
POWER_SUPPLY_EXT_PROP_HV_DISABLE,
};
enum sec_battery_usb_conf {
@ -596,7 +597,7 @@ struct sec_charging_current {
#if defined(CONFIG_BATTERY_AGE_FORECAST)
struct sec_age_data {
int cycle;
unsigned int cycle;
unsigned int float_voltage;
unsigned int recharge_condition_vcell;
unsigned int full_condition_vcell;

323
drivers/battery_v2/max77865_charger.c Normal file → Executable file
View File

@ -587,6 +587,75 @@ static void max77865_set_charger_state(struct max77865_charger_data *charger,
pr_info("%s : CHG_CNFG_00(0x%02x), CHG_CNFG_12(0x%02x)\n", __func__, cnfg_00, cnfg_12);
}
static void max77865_set_otg(struct max77865_charger_data *charger, int enable)
{
union power_supply_propval value;
u8 reg = 0;
static u8 chg_int_state;
pr_info("%s: CHGIN-OTG %s\n", __func__, enable > 0 ? "on" : "off");
if (charger->otg_on == enable || lpcharge)
return;
wake_lock(&charger->otg_wake_lock);
mutex_lock(&charger->charger_mutex);
/* CHGIN-OTG */
value.intval = enable;
if (enable) {
psy_do_property("wireless", set,
POWER_SUPPLY_PROP_CHARGE_OTG_CONTROL, value);
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
&chg_int_state);
/* disable charger interrupt: CHG_I, CHGIN_I */
/* enable charger interrupt: BYP_I */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
MAX77865_CHG_IM | MAX77865_CHGIN_IM,
MAX77865_CHG_IM | MAX77865_CHGIN_IM | MAX77865_BYP_IM);
/* Update CHG_CNFG_11 to 0x16(5.020V) */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_CNFG_11, 0x16);
/* OTG off, boost on */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
CHG_CNFG_00_BOOST_MASK, CHG_CNFG_00_OTG_CTRL);
msleep(100);
/* OTG on, boost on */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
CHG_CNFG_00_OTG_CTRL, CHG_CNFG_00_OTG_CTRL);
} else {
/* OTG off(UNO on), boost off */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
0, CHG_CNFG_00_OTG_CTRL);
/* Update CHG_CNFG_11 to 0x00(3.485V) */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_CNFG_11, 0x00);
mdelay(50);
/* enable charger interrupt */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_INT_MASK, chg_int_state);
psy_do_property("wireless", set,
POWER_SUPPLY_PROP_CHARGE_OTG_CONTROL, value);
}
charger->otg_on = enable;
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
&chg_int_state);
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
&reg);
mutex_unlock(&charger->charger_mutex);
wake_unlock(&charger->otg_wake_lock);
pr_info("%s: INT_MASK(0x%x), CHG_CNFG_00(0x%x)\n",
__func__, chg_int_state, reg);
power_supply_changed(charger->psy_otg);
}
static void max77865_check_slow_charging(struct max77865_charger_data *charger,
int input_current)
{
@ -638,7 +707,11 @@ static void max77865_charger_initialize(struct max77865_charger_data *charger)
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_02, 0xC0, 0xC0);
/* BAT to SYS OCP 4.50A */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_05, 0x04, 0x07);
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_05, charger->vsys_ocp, 0x07);
/* Junction Temperature Thermal Regulation Loop Set point (130'C) */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_07, 0x78, 0x78);
/*
* top off current 150mA
* top off timer 30min
@ -866,7 +939,9 @@ static int max77865_chg_get_property(struct power_supply *psy,
}
break;
case POWER_SUPPLY_PROP_CHARGE_OTG_CONTROL:
mutex_lock(&charger->charger_mutex);
val->intval = charger->otg_on;
mutex_unlock(&charger->charger_mutex);
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
break;
@ -916,9 +991,7 @@ static int max77865_chg_set_property(struct power_supply *psy,
const union power_supply_propval *val)
{
struct max77865_charger_data *charger = power_supply_get_drvdata(psy);
union power_supply_propval value;
u8 reg = 0;
u8 cnt = 0;
static u8 chg_int_state;
int buck_state = ENABLE;
enum power_supply_ext_property ext_psp = psp;
@ -967,7 +1040,8 @@ static int max77865_chg_set_property(struct power_supply *psy,
MAX77865_CHG_REG_INT_MASK, &reg_data);
pr_info("%s : enable aicl : 0x%x\n", __func__, reg_data);
}
} else if (is_hv_wire_type(charger->cable_type)) {
} else if (is_hv_wire_type(charger->cable_type) ||
(charger->cable_type == SEC_BATTERY_CABLE_HV_TA_CHG_LIMIT)) {
/* Disable AICL IRQ */
if (charger->irq_aicl_enabled == 1) {
u8 reg_data;
@ -977,6 +1051,8 @@ static int max77865_chg_set_property(struct power_supply *psy,
max77865_read_reg(charger->i2c,
MAX77865_CHG_REG_INT_MASK, &reg_data);
pr_info("%s : disable aicl : 0x%x\n", __func__, reg_data);
charger->aicl_on = false;
charger->slow_charging = false;
}
}
break;
@ -1039,77 +1115,7 @@ static int max77865_chg_set_property(struct power_supply *psy,
charger->otg_on = false;
break;
case POWER_SUPPLY_PROP_CHARGE_OTG_CONTROL:
pr_info("%s: CHGIN-OTG %s\n", __func__, val->intval > 0 ? "on" : "off");
if (charger->otg_on == val->intval || lpcharge)
return 0;
wake_lock(&charger->otg_wake_lock);
mutex_lock(&charger->charger_mutex);
/* CHGIN-OTG */
if (val->intval) {
if (is_hv_wireless_type(charger->cable_type)) {
pr_info("%s: OTG enabled on HV_WC, set 5V", __func__);
for (cnt = 0; cnt < 5; cnt++) { /* check if wireless vout goes to 5V */
pr_info("%s: cnt(%d)\n", __func__, cnt);
value.intval = WIRELESS_VOUT_5V_OTG;
psy_do_property(charger->pdata->wireless_charger_name, set,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, value);
msleep(100);
psy_do_property(charger->pdata->wireless_charger_name, get,
POWER_SUPPLY_PROP_ENERGY_NOW, value);
if (value.intval <= 6000) {
pr_info("%s: wireless vout goes to 5V Vout.\n", __func__);
break;
}
}
}
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
&chg_int_state);
/* disable charger interrupt: CHG_I, CHGIN_I */
/* enable charger interrupt: BYP_I */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
MAX77865_CHG_IM | MAX77865_CHGIN_IM,
MAX77865_CHG_IM | MAX77865_CHGIN_IM | MAX77865_BYP_IM);
/* OTG on, boost on */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
CHG_CNFG_00_OTG_CTRL, CHG_CNFG_00_OTG_CTRL);
/* Update CHG_CNFG_11 to 0x16(5.020V) */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_CNFG_11, 0x16);
} else {
/* OTG off(UNO on), boost off */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
0, CHG_CNFG_00_OTG_CTRL);
/* Update CHG_CNFG_11 to 0x00(3.485V) */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_CNFG_11, 0x00);
mdelay(50);
/* enable charger interrupt */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_INT_MASK, chg_int_state);
if (is_hv_wireless_type(charger->cable_type)) {
pr_info("%s: OTG disabled on HV_WC, set 9V", __func__);
value.intval = WIRELESS_VOUT_10V_OTG;
psy_do_property(charger->pdata->wireless_charger_name, set,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, value);
}
}
charger->otg_on = val->intval;
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
&chg_int_state);
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
&reg);
mutex_unlock(&charger->charger_mutex);
wake_unlock(&charger->otg_wake_lock);
pr_info("%s: INT_MASK(0x%x), CHG_CNFG_00(0x%x)\n",
__func__, chg_int_state, reg);
power_supply_changed(charger->psy_otg);
max77865_set_otg(charger, val->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_UNO_CONTROL:
pr_info("%s: WCIN-UNO %s\n", __func__, val->intval > 0 ? "on" : "off");
@ -1171,6 +1177,13 @@ static int max77865_chg_set_property(struct power_supply *psy,
check_charger_unlock_state(charger);
}
break;
case POWER_SUPPLY_EXT_PROP_INBAT_VOLTAGE_FGSRC_SWITCHING:
/* if jig attached, change the power source
from the VBATFG to the internal VSYS*/
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_07,
((!val->intval) << CHG_CNFG_07_REG_FGSRC_SHIFT),
CHG_CNFG_07_REG_FGSRC_MASK);
break;
default:
return -EINVAL;
}
@ -1189,7 +1202,9 @@ static int max77865_otg_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
mutex_lock(&charger->charger_mutex);
val->intval = charger->otg_on;
mutex_unlock(&charger->charger_mutex);
break;
default:
return -EINVAL;
@ -1202,94 +1217,15 @@ static int max77865_otg_set_property(struct power_supply *psy,
const union power_supply_propval *val)
{
struct max77865_charger_data *charger = power_supply_get_drvdata(psy);
union power_supply_propval value;
static u8 chg_int_state;
u8 chg_cnfg_00;
u8 cnt = 0;
int ret = 0;
wake_lock(&charger->otg_wake_lock);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
pr_info("%s: CHGIN-OTG %s\n", __func__, val->intval > 0 ? "on" : "off");
if (charger->otg_on == val->intval || lpcharge) {
ret = 0;
goto otg_err;
}
mutex_lock(&charger->charger_mutex);
/* CHGIN-OTG */
if (val->intval) {
if (is_hv_wireless_type(charger->cable_type)) {
pr_info("%s: OTG enabled on HV_WC, set 5V", __func__);
for (cnt = 0; cnt < 5; cnt++) { /* check if wireless vout goes to 5V */
pr_info("%s: cnt(%d)\n", __func__, cnt);
value.intval = WIRELESS_VOUT_5V_OTG;
psy_do_property(charger->pdata->wireless_charger_name, set,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, value);
msleep(100);
psy_do_property(charger->pdata->wireless_charger_name, get,
POWER_SUPPLY_PROP_ENERGY_NOW, value);
if (value.intval <= 6000) {
pr_info("%s: wireless vout goes to 5V Vout.\n", __func__);
break;
}
}
}
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
&chg_int_state);
/* disable charger interrupt: CHG_I, CHGIN_I */
/* enable charger interrupt: BYP_I */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
MAX77865_CHG_IM | MAX77865_CHGIN_IM,
MAX77865_CHG_IM | MAX77865_CHGIN_IM | MAX77865_BYP_IM);
/* OTG on, boost on */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
CHG_CNFG_00_OTG_CTRL, CHG_CNFG_00_OTG_CTRL);
/* Update CHG_CNFG_11 to 0x16(5.020V) */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_CNFG_11, 0x16);
} else {
/* OTG off(UNO on), boost off */
max77865_update_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
0, CHG_CNFG_00_OTG_CTRL);
/* Update CHG_CNFG_11 to 0x00(3.485V) */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_CNFG_11, 0x00);
mdelay(50);
/* enable charger interrupt */
max77865_write_reg(charger->i2c,
MAX77865_CHG_REG_INT_MASK, chg_int_state);
if (is_hv_wireless_type(charger->cable_type)) {
pr_info("%s: OTG disabled on HV_WC, set 9V", __func__);
value.intval = WIRELESS_VOUT_10V_OTG;
psy_do_property(charger->pdata->wireless_charger_name, set,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, value);
}
}
charger->otg_on = val->intval;
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_INT_MASK,
&chg_int_state);
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_CNFG_00,
&chg_cnfg_00);
mutex_unlock(&charger->charger_mutex);
pr_info("%s: INT_MASK(0x%x), CHG_CNFG_00(0x%x)\n",
__func__, chg_int_state, chg_cnfg_00);
power_supply_changed(charger->psy_otg);
max77865_set_otg(charger, val->intval);
break;
default:
ret = -EINVAL;
goto otg_err;
return -EINVAL;
}
otg_err:
wake_unlock(&charger->otg_wake_lock);
return ret;
return 0;
}
static int max77865_debugfs_show(struct seq_file *s, void *data)
@ -1572,10 +1508,10 @@ static void max77865_aicl_isr_work(struct work_struct *work)
value.intval = max77865_get_input_current(charger);
psy_do_property("battery", set,
POWER_SUPPLY_EXT_PROP_AICL_CURRENT, value);
}
if (is_not_wireless_type(charger->cable_type))
max77865_check_slow_charging(charger, charger->input_current);
if (is_not_wireless_type(charger->cable_type))
max77865_check_slow_charging(charger, charger->input_current);
}
max77865_update_reg(charger->i2c,
MAX77865_CHG_REG_INT_MASK, 0, MAX77865_AICL_IM);
@ -1803,6 +1739,40 @@ static irqreturn_t max77865_sysovlo_irq(int irq, void *data)
return IRQ_HANDLED;
}
static irqreturn_t max77865_chg_bat_irq(int irq, void *data)
{
struct max77865_charger_data *charger = data;
u8 bat_dtls;
pr_info("%s: \n", __func__);
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_DETAILS_01, &bat_dtls);
bat_dtls = (bat_dtls & 0x70) >> 4;
pr_info("%s: read bat dtls(0x%2x)\n", __func__, bat_dtls);
if (bat_dtls == 0x6) {
//panic("BATTERY - OCP");
pr_info("%s: BATTERY - OCP\n", __func__);
}
return IRQ_HANDLED;
}
static irqreturn_t max77865_systm_irq(int irq, void *data)
{
struct max77865_charger_data *charger = data;
u8 treg_state;
pr_info("%s \n", __func__);
max77865_read_reg(charger->i2c, MAX77865_CHG_REG_DETAILS_01, &treg_state);
treg_state = treg_state >> 7;;
pr_info("%s: read treg(0x%2x)\n", __func__, treg_state);
if (treg_state) {
//panic("BATTERY - TREG");
pr_info("%s: BATTERY - TREG\n", __func__);
}
return IRQ_HANDLED;
}
#ifdef CONFIG_OF
static int max77865_charger_parse_dt(struct max77865_charger_data *charger)
{
@ -1840,9 +1810,21 @@ static int max77865_charger_parse_dt(struct max77865_charger_data *charger)
pr_info("%s : wireless_cc_cv is Empty\n", __func__);
}
np = of_find_node_by_name(NULL, "max77865-charger");
if (!np) {
pr_err("%s: np(max77865-charger) is NULL\n", __func__);
} else {
ret = of_property_read_u8(np, "charger,vsys_ocp",
&charger->vsys_ocp);
if (ret) {
pr_info("%s : default vsys ocp\n", __func__);
charger->vsys_ocp = 0x04;
}
}
np = of_find_node_by_name(NULL, "max77865-fuelgauge");
if (!np) {
pr_err("%s: np NULL\n", __func__);
pr_err("%s: np(max77865_fuelgauge) is NULL\n", __func__);
} else {
charger->jig_low_active = of_property_read_bool(np,
"fuelgauge,jig_low_active");
@ -1911,6 +1893,7 @@ static int max77865_charger_probe(struct platform_device *pdev)
charger->otg_on = false;
charger->max77865_pdata = pdata;
charger->wc_pre_current = WC_CURRENT_START;
charger->vsys_ocp = 0x04;
#if defined(CONFIG_OF)
ret = max77865_charger_parse_dt(charger);
@ -1960,6 +1943,8 @@ static int max77865_charger_probe(struct platform_device *pdev)
wake_lock_init(&charger->wc_current_wake_lock, WAKE_LOCK_SUSPEND,
"charger->wc-current");
INIT_DELAYED_WORK(&charger->wc_current_work, max77865_wc_current_work);
wake_lock_init(&charger->otg_wake_lock, WAKE_LOCK_SUSPEND,
"otg-path");
charger_cfg.drv_data = charger;
@ -2050,6 +2035,22 @@ static int max77865_charger_probe(struct platform_device *pdev)
enable_irq_wake(charger->irq_sysovlo);
}
charger->irq_bat = pdata->irq_base + MAX77865_CHG_IRQ_BAT_I;
ret = request_threaded_irq(charger->irq_bat, NULL,
max77865_chg_bat_irq, 0,
"bat-irq", charger);
if (ret < 0)
pr_err("%s: fail to request battery IRQ: %d: %d\n",
__func__, charger->irq_bat, ret);
charger->irq_tm = pdata->irq_base + MAX77865_SYSTEM_IRQ_TM_INT;
ret = request_threaded_irq(charger->irq_tm, NULL,
max77865_systm_irq, 0,
"sys-tm", charger);
if (ret < 0)
pr_err("%s: fail to request tm IRQ: %d: %d\n",
__func__, charger->irq_tm, ret);
ret = max77865_chg_create_attrs(&charger->psy_chg->dev);
if (ret) {
dev_err(charger->dev,

64
drivers/battery_v2/max77865_fuelgauge.c Normal file → Executable file
View File

@ -248,6 +248,8 @@ static int max77865_fg_write_temp(struct max77865_fuelgauge_data *fuelgauge,
__func__, temperature, data[1], data[0]);
fuelgauge->temperature = temperature;
if (!fuelgauge->vempty_init_flag)
fuelgauge->vempty_init_flag = true;
return temperature;
}
@ -695,6 +697,8 @@ int max77865_fg_reset_capacity_by_jig_connection(struct max77865_fuelgauge_data
val.intval = 1;
psy_do_property("max77865-charger", set,
POWER_SUPPLY_PROP_ENERGY_NOW, val);
psy_do_property("battery", set,
POWER_SUPPLY_PROP_ENERGY_NOW, val);
pr_info("%s: DesignCap = Capacity - 1 (Jig Connection)\n", __func__);
return max77865_write_word(fuelgauge->i2c, DESIGNCAP_REG,
@ -1126,6 +1130,19 @@ bool max77865_fg_reset(struct max77865_fuelgauge_data *fuelgauge)
return false;
}
static int max77865_fg_check_capacity_max(
struct max77865_fuelgauge_data *fuelgauge, int capacity_max)
{
int cap_max, cap_min;
cap_max = fuelgauge->pdata->capacity_max;
cap_min = (fuelgauge->pdata->capacity_max -
fuelgauge->pdata->capacity_max_margin);
return (capacity_max < cap_min) ? cap_min :
((capacity_max >= cap_max) ? cap_max : capacity_max);
}
#define CAPACITY_MAX_CONTROL_THRESHOLD 300
static void max77865_fg_get_scaled_capacity(
struct max77865_fuelgauge_data *fuelgauge,
@ -1216,7 +1233,8 @@ static void max77865_fg_get_scaled_capacity(
pr_info("%s : TEMP(%d) SAMPLE(%d) CAPACITY_MAX(%d)\n",
__func__, temp, sample, fuelgauge->capacity_max);
fuelgauge->capacity_max = max_temp;
fuelgauge->capacity_max =
max77865_fg_check_capacity_max(fuelgauge, max_temp);
}
} else {
cnt = 0;
@ -1282,12 +1300,23 @@ static int max77865_fg_calculate_dynamic_scale(
raw_soc_val.intval = max77865_get_fuelgauge_value(fuelgauge,
FG_RAW_SOC) / 10;
fuelgauge->capacity_max =
(raw_soc_val.intval * 100 / (capacity + 1));
fuelgauge->capacity_old = capacity;
if (raw_soc_val.intval <
fuelgauge->pdata->capacity_max -
fuelgauge->pdata->capacity_max_margin) {
pr_info("%s: raw soc(%d) is very low, skip routine\n",
__func__, raw_soc_val.intval);
} else {
fuelgauge->capacity_max =
(raw_soc_val.intval * 100 / (capacity + 1));
fuelgauge->capacity_old = capacity;
pr_info("%s: %d is used for capacity_max, capacity(%d)\n",
__func__, fuelgauge->capacity_max, capacity);
fuelgauge->capacity_max =
max77865_fg_check_capacity_max(fuelgauge,
fuelgauge->capacity_max);
pr_info("%s: %d is used for capacity_max, capacity(%d)\n",
__func__, fuelgauge->capacity_max, capacity);
}
return fuelgauge->capacity_max;
}
@ -1568,7 +1597,7 @@ static int max77865_fg_get_property(struct power_supply *psy,
val->intval /= 10;
/* SW/HW V Empty setting */
if (fuelgauge->using_hw_vempty) {
if (fuelgauge->using_hw_vempty && fuelgauge->vempty_init_flag) {
if (fuelgauge->temperature <= (int)fuelgauge->low_temp_limit) {
if (fuelgauge->raw_capacity <= 50) {
if (fuelgauge->vempty_mode != VEMPTY_MODE_HW) {
@ -1660,6 +1689,10 @@ static int max77865_fg_get_property(struct power_supply *psy,
val->intval = data[1] << 8 | data[0];
pr_debug("%s: FilterCFG=0x%04X\n", __func__, data[1] << 8 | data[0]);
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
val->intval = fuelgauge->battery_data->Capacity * fuelgauge->raw_capacity;
pr_info("%s: Remaining Capacity=%d uAh\n", __func__, val->intval);
break;
#if defined(CONFIG_BATTERY_SBM_DATA)
case POWER_SUPPLY_PROP_MAX ... POWER_SUPPLY_EXT_PROP_MAX:
switch (ext_psp) {
@ -1763,7 +1796,7 @@ static int max77865_fg_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
pr_info("%s: capacity_max changed, %d -> %d\n",
__func__, fuelgauge->capacity_max, val->intval);
fuelgauge->capacity_max = val->intval;
fuelgauge->capacity_max = max77865_fg_check_capacity_max(fuelgauge, val->intval);
fuelgauge->initial_update_of_soc = true;
break;
case POWER_SUPPLY_PROP_CHARGE_ENABLED:
@ -1922,6 +1955,13 @@ static int max77865_fuelgauge_parse_dt(struct max77865_fuelgauge_data *fuelgauge
fuelgauge->pdata->capacity_max_hv = fuelgauge->pdata->capacity_max;
}
ret = of_property_read_u32(np, "fuelgauge,capacity_max_margin",
&pdata->capacity_max_margin);
if (ret < 0) {
pr_err("%s error reading capacity_max_margin %d\n", __func__, ret);
pdata->capacity_max_margin = 300;
}
ret = of_property_read_u32(np, "fuelgauge,capacity_min",
&pdata->capacity_min);
if (ret < 0)
@ -2141,13 +2181,13 @@ static int max77865_fuelgauge_parse_dt(struct max77865_fuelgauge_data *fuelgauge
pr_info("%s thermal: %d, fg_irq: %d, capacity_max: %d\n"
"qrtable20: 0x%x, qrtable30 : 0x%x\n"
"capacity_min: %d\n"
"capacity_max_margin: %d, capacity_min: %d\n"
"calculation_type: 0x%x, fuel_alert_soc: %d,\n"
"repeated_fuelalert: %d\n",
__func__, pdata->thermal_source, pdata->fg_irq, pdata->capacity_max,
fuelgauge->battery_data->QResidual20,
fuelgauge->battery_data->QResidual30,
pdata->capacity_min,
pdata->capacity_max_margin, pdata->capacity_min,
pdata->capacity_calculation_type, pdata->fuel_alert_soc,
pdata->repeated_fuelalert);
}
@ -2253,9 +2293,9 @@ static int max77865_fuelgauge_probe(struct platform_device *pdev)
}
/* SW/HW init code. SW/HW V Empty mode must be opposite ! */
fuelgauge->temperature = 300; /* default value */
fuelgauge->vempty_init_flag = false; /* default value */
pr_info("%s: SW/HW V empty init \n", __func__);
max77865_fg_set_vempty(fuelgauge, VEMPTY_MODE_HW);
max77865_fg_set_vempty(fuelgauge, VEMPTY_MODE_SW);
fuelgauge_cfg.drv_data = fuelgauge;

51
drivers/battery_v2/s2mu004_fuelgauge.c Normal file → Executable file
View File

@ -1519,6 +1519,27 @@ static int s2mu004_fg_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
val->intval = s2mu004_get_temperature(fuelgauge);
break;
case POWER_SUPPLY_PROP_ENERGY_FULL:
#if defined(CONFIG_FUELGAUGE_ASOC_FROM_CYCLES)
{
int calc_step = 0;
if (!(fuelgauge->pdata->fixed_asoc_levels <= 0 || val->intval < 0)) {
for (calc_step = fuelgauge->pdata->fixed_asoc_levels - 1; calc_step >= 0; calc_step--) {
if (fuelgauge->pdata->cycles_to_asoc[calc_step].cycle <= val->intval)
break;
}
pr_info("%s: Battery Cycles = %d, ASOC step = %d\n",
__func__, val->intval, calc_step);
val->intval = fuelgauge->pdata->cycles_to_asoc[calc_step].asoc;
}
}
#else
return -1;
#endif
break;
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
val->intval = fuelgauge->capacity_max;
break;
@ -1702,6 +1723,9 @@ static int s2mu004_fuelgauge_parse_dt(struct s2mu004_fuelgauge_data *fuelgauge)
int ret;
#if defined(CONFIG_BATTERY_AGE_FORECAST)
int len, i;
#if defined(CONFIG_FUELGAUGE_ASOC_FROM_CYCLES)
const u32 *p;
#endif
#endif
/* reset, irq gpio info */
@ -1839,6 +1863,33 @@ static int s2mu004_fuelgauge_parse_dt(struct s2mu004_fuelgauge_data *fuelgauge)
fuelgauge->age_data_info[i].soc_arr_val[0],
fuelgauge->age_data_info[i].ocv_arr_val[0]);
}
#if defined(CONFIG_FUELGAUGE_ASOC_FROM_CYCLES)
p = of_get_property(np, "battery,cycles_to_asoc_mapping", &len);
if (p) {
fuelgauge->pdata->fixed_asoc_levels = len / sizeof(sec_cycles_to_asoc_t);
fuelgauge->pdata->cycles_to_asoc = kzalloc(len, GFP_KERNEL);
ret = of_property_read_u32_array(np, "battery,cycles_to_asoc_mapping",
(u32 *)fuelgauge->pdata->cycles_to_asoc, len/sizeof(u32));
if (ret) {
pr_err("%s: failed to read fuelgauge->pdata->cycles_to_asoc: %d\n",
__func__, ret);
kfree(fuelgauge->pdata->cycles_to_asoc);
fuelgauge->pdata->cycles_to_asoc = NULL;
fuelgauge->pdata->fixed_asoc_levels = 0;
}
pr_err("%s: fixed_asoc_levels : %d\n", __func__, fuelgauge->pdata->fixed_asoc_levels);
for (len = 0; len < fuelgauge->pdata->fixed_asoc_levels; ++len) {
pr_err("[%d/%d]cycle:%d, asoc:%d\n",
len, fuelgauge->pdata->fixed_asoc_levels-1,
fuelgauge->pdata->cycles_to_asoc[len].cycle,
fuelgauge->pdata->cycles_to_asoc[len].asoc);
}
} else {
fuelgauge->pdata->fixed_asoc_levels = 0;
pr_err("%s: Cycles to ASOC mapping not defined\n", __func__);
}
#endif
#endif
}
}

122
drivers/battery_v2/sec_battery.c Normal file → Executable file
View File

@ -1321,9 +1321,11 @@ static bool sec_bat_battery_cable_check(struct sec_battery_info *battery)
}
if (battery->pdata->recovery_cable) {
if (!battery->slate_mode && (battery->cable_type == SEC_BATTERY_CABLE_NONE &&
battery->wire_status != SEC_BATTERY_CABLE_NONE)) {
pr_info("%s: Recover from abnormal condition\n",__func__);
if (!battery->slate_mode && ((battery->cable_type == SEC_BATTERY_CABLE_NONE &&
battery->wire_status != SEC_BATTERY_CABLE_NONE) ||
(battery->cable_type != SEC_BATTERY_CABLE_NONE &&
battery->wire_status == SEC_BATTERY_CABLE_NONE))) {
pr_info("%s: Recover from abnormal condition\n",__func__);
wake_lock(&battery->cable_wake_lock);
queue_delayed_work(battery->monitor_wqueue, &battery->cable_work, 0);
}
@ -3202,13 +3204,17 @@ static void sec_bat_check_slowcharging_work(struct work_struct *work)
#if defined(CONFIG_CCIC_NOTIFIER)
if (battery->pdic_info.sink_status.rp_currentlvl == RP_CURRENT_LEVEL_DEFAULT &&
battery->cable_type == SEC_BATTERY_CABLE_USB) {
#else
if (battery->cable_type == SEC_BATTERY_CABLE_USB) {
#endif
if (!get_usb_enumeration_state() &&
(battery->current_event & SEC_BAT_CURRENT_EVENT_USB_100MA)) {
sec_bat_set_misc_event(battery, BATT_MISC_EVENT_TIMEOUT_OPEN_TYPE, 0);
battery->max_charge_power = battery->input_voltage * battery->current_max;
}
}
#endif
dev_info(battery->dev, "%s:\n", __func__);
}
@ -3564,6 +3570,95 @@ safety_time_end:
__func__, battery->expired_time, battery->cal_safety_time);
}
#if defined(CONFIG_FG_FULLCAP_FROM_BATTERY)
static void sec_bat_measure_capacity(struct sec_battery_info *battery, struct timespec c_ts)
{
static struct timespec old_ts = {0, };
struct capacity_measure_info * info = &(battery->capacity_info);
unsigned long time_diff = c_ts.tv_sec - old_ts.tv_sec;
char *state[] = {
"None",
"Off",
"Measuring",
"Updating",
"Updated",
};
char data[512];
static int design_cap = 0;
if (!design_cap) {
union power_supply_propval val;
val.intval = SEC_BATTERY_CAPACITY_FULL;
psy_do_property(battery->pdata->fuelgauge_name, get,
POWER_SUPPLY_PROP_ENERGY_NOW, val);
design_cap = val.intval;
}
if (battery->status == POWER_SUPPLY_STATUS_DISCHARGING ||
battery->status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
if (info->status == CAPACITY_MEASURE_UPDATING) {
int current_capacity;
current_capacity = design_cap * (info->start_soc * 3600 / 100);
info->capacity_full = info->capacity_rep + current_capacity;
info->status = CAPACITY_MEASURE_UPDATED;
sprintf(data, "full capacity: %dmAs, %dmAh, charged cap: %dmAs, start_soc: %d, asoc: %d\n",
info->capacity_full, info->capacity_full/3600, info->capacity_rep, info->start_soc,
(info->capacity_rep / 3600 * 100 * 100/((100-info->start_soc) * design_cap)));
pr_info("%s : %s\n", __func__, data);
}
if (info->status != CAPACITY_MEASURE_NONE) {
info->status = CAPACITY_MEASURE_NONE;
sprintf(data, "charger removed, battery status: %s\n", sec_bat_status_str[battery->status]);
pr_info("%s : %s\n", __func__, data);
}
return;
}
if (info->status == CAPACITY_MEASURE_OFF || info->status == CAPACITY_MEASURE_UPDATED)
return;
if (info->status == CAPACITY_MEASURE_NONE &&
battery->status == POWER_SUPPLY_STATUS_CHARGING) {
if (battery->capacity > 30) {
sprintf(data, "Do not count due to battery level(%d) high\n", battery->capacity);
pr_info("%s : %s\n", __func__, data);
info->status = CAPACITY_MEASURE_OFF;
} else {
info->status = CAPACITY_MEASURING;
info->capacity_rep = 0;
info->start_soc = battery->capacity;
old_ts = c_ts;
sprintf(data, "current_cap: %dmAs, start_soc: %d%%, cable: %s, Design Capacity: %d\n",
info->capacity_rep, info->start_soc, sec_cable_type[battery->cable_type], design_cap);
pr_info("%s : %s, time: %ld\n", __func__, data, c_ts.tv_sec);
}
return;
}
if (info->status == CAPACITY_MEASURE_NONE || time_diff == 0)
return;
info->capacity_rep += time_diff * battery->current_avg;
if (battery->status == POWER_SUPPLY_STATUS_FULL) {
info->status = CAPACITY_MEASURE_UPDATING;
if (battery->charging_mode == SEC_BATTERY_CHARGING_NONE) {
int current_capacity;
current_capacity = design_cap * (info->start_soc * 3600 / 100);
info->capacity_full = info->capacity_rep + current_capacity;
info->status = CAPACITY_MEASURE_UPDATED;
sprintf(data, "Full capacity: %dmAs, %dmAh, charged cap: %dmAs, start_soc: %d, asoc: %d\n",
info->capacity_full, info->capacity_full / 3600, info->capacity_rep, info->start_soc,
(info->capacity_rep / 3600 * 100 * 100/((100-info->start_soc) * design_cap)));
pr_info("%s : %s\n", __func__, data);
sprintf(data, "charging stopped, battery status: %s\n", sec_bat_status_str[battery->status]);
pr_info("%s : %s\n", __func__, data);
}
}
old_ts = c_ts;
pr_info("%s : current_cap: %d, status: %s, time_diff: %ld\n",
__func__, info->capacity_rep, state[info->status], time_diff);
}
#endif
static void sec_bat_monitor_work(
struct work_struct *work)
{
@ -3625,6 +3720,11 @@ static void sec_bat_monitor_work(
old_ts = c_ts;
sec_bat_get_battery_info(battery);
#if defined(CONFIG_FG_FULLCAP_FROM_BATTERY)
sec_bat_measure_capacity(battery, c_ts);
#endif
#if defined(CONFIG_BATTERY_CISD)
sec_bat_cisd_check(battery);
#endif
@ -4458,6 +4558,9 @@ ssize_t sec_bat_show_attrs(struct device *dev,
__func__, battery->pdata->fuelgauge_name);
} else {
if (psy_fg->desc->get_property != NULL) {
#if defined(CONFIG_FUELGAUGE_ASOC_FROM_CYCLES)
value.intval = battery->batt_cycle;
#endif
ret = psy_fg->desc->get_property(psy_fg,
POWER_SUPPLY_PROP_ENERGY_FULL, &value);
if (ret < 0) {
@ -4977,11 +5080,15 @@ ssize_t sec_bat_show_attrs(struct device *dev,
case CISD_FULLCAPREP_MAX:
{
union power_supply_propval fullcaprep_val;
#if defined(CONFIG_FG_FULLCAP_FROM_BATTERY)
struct capacity_measure_info * info = &(battery->capacity_info);
fullcaprep_val.intval = info->capacity_full / 3600;
#else
fullcaprep_val.intval = SEC_BATTERY_CAPACITY_FULL;
psy_do_property(battery->pdata->fuelgauge_name, get,
POWER_SUPPLY_PROP_ENERGY_NOW, fullcaprep_val);
#endif
i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
fullcaprep_val.intval);
}
@ -6696,6 +6803,8 @@ static int sec_bat_set_property(struct power_supply *psy,
}
sec_bat_set_charging_current(battery);
break;
case POWER_SUPPLY_EXT_PROP_HV_DISABLE:
break;
default:
return -EINVAL;
}
@ -7909,6 +8018,9 @@ static int batt_handle_notification(struct notifier_block *nb,
}
}
/* clear timeout event */
sec_bat_set_misc_event(battery, BATT_MISC_EVENT_TIMEOUT_OPEN_TYPE, true);
#if defined(CONFIG_CCIC_NOTIFIER)
/* If PD cable is already attached, return this function */
if (battery->pdic_attach) {

27
drivers/battery_v2/sec_cisd.c Normal file → Executable file
View File

@ -163,9 +163,16 @@ bool sec_bat_cisd_check(struct sec_battery_info *battery)
}
}
#if defined(CONFIG_FG_FULLCAP_FROM_BATTERY)
{
struct capacity_measure_info * info = &(battery->capacity_info);
capcurr_val.intval = info->capacity_full / 3600;
}
#else
capcurr_val.intval = SEC_BATTERY_CAPACITY_FULL;
psy_do_property(battery->pdata->fuelgauge_name, get,
POWER_SUPPLY_PROP_ENERGY_NOW, capcurr_val);
#endif
if (capcurr_val.intval == -1) {
dev_info(battery->dev, "%s: [CISD] FG I2C fail. skip cisd check \n", __func__);
return ret;
@ -173,12 +180,14 @@ bool sec_bat_cisd_check(struct sec_battery_info *battery)
if (capcurr_val.intval > pcisd->data[CISD_DATA_CAP_MAX])
pcisd->data[CISD_DATA_CAP_MAX] = capcurr_val.intval;
if (capcurr_val.intval < pcisd->data[CISD_DATA_CAP_MIN])
if ((capcurr_val.intval < pcisd->data[CISD_DATA_CAP_MIN]) &&
(capcurr_val.intval != 0))
pcisd->data[CISD_DATA_CAP_MIN] = capcurr_val.intval;
if (capcurr_val.intval > pcisd->data[CISD_DATA_CAP_MAX_PER_DAY])
pcisd->data[CISD_DATA_CAP_MAX_PER_DAY] = capcurr_val.intval;
if (capcurr_val.intval < pcisd->data[CISD_DATA_CAP_MIN_PER_DAY])
if ((capcurr_val.intval < pcisd->data[CISD_DATA_CAP_MIN_PER_DAY]) &&
(capcurr_val.intval != 0))
pcisd->data[CISD_DATA_CAP_MIN_PER_DAY] = capcurr_val.intval;
}
@ -235,9 +244,17 @@ void sec_battery_cisd_init(struct sec_battery_info *battery)
battery->cisd.diff_volt_now = 40;
battery->cisd.diff_cap_now = 5;
capfull_val.intval = SEC_BATTERY_CAPACITY_FULL;
psy_do_property(battery->pdata->fuelgauge_name, get,
POWER_SUPPLY_PROP_ENERGY_NOW, capfull_val);
#if defined(CONFIG_FG_FULLCAP_FROM_BATTERY)
{
struct capacity_measure_info * info = &(battery->capacity_info);
capfull_val.intval = info->capacity_full / 3600;
}
#else
capfull_val.intval = SEC_BATTERY_CAPACITY_FULL;
psy_do_property(battery->pdata->fuelgauge_name, get,
POWER_SUPPLY_PROP_ENERGY_NOW, capfull_val);
#endif
battery->cisd.curr_cap_max = capfull_val.intval;
battery->cisd.err_cap_high_thr = battery->pdata->cisd_cap_high_thr;
battery->cisd.err_cap_low_thr = battery->pdata->cisd_cap_low_thr;

1341
drivers/ccic/ccic_alternate.c Normal file → Executable file

File diff suppressed because it is too large Load Diff

51
drivers/ccic/ccic_misc.h Executable file
View File

@ -0,0 +1,51 @@
/*
* driver/ccic/ccic_misc.h - S2MM005 CCIC MISC driver
*
* Copyright (C) 2017 Samsung Electronics
* Author: Wookwang Lee <wookwang.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>.
*
*/
enum uvdm_data_type {
TYPE_SHORT = 0,
TYPE_LONG,
};
enum uvdm_direction_type {
DIR_OUT = 0,
DIR_IN,
};
struct uvdm_data {
unsigned short pid; /* Product ID */
char type; /* uvdm_data_type */
char dir; /* uvdm_direction_type */
unsigned int size; /* data size */
void __user *pData; /* data pointer */
};
struct ccic_misc_dev {
struct uvdm_data u_data;
atomic_t open_excl;
atomic_t ioctl_excl;
int (*uvdm_write)(void *data, int size);
int (*uvdm_read)(void *data, int size);
};
extern ssize_t samsung_uvdm_out_request_message(void *data, size_t size);
extern int samsung_uvdm_in_request_message(void *data);
extern int samsung_uvdm_ready(void);
extern void samsung_uvdm_close(void);

10
drivers/ccic/ccic_notifier.c Normal file → Executable file
View File

@ -234,10 +234,20 @@ int ccic_notifier_notify(CC_NOTI_TYPEDEF *p_noti, void *pd, int pdic_attach)
((CC_NOTI_ATTACH_TYPEDEF *)p_noti)->id,
((CC_NOTI_ATTACH_TYPEDEF *)p_noti)->attach);
ccic_uevent_work(CCIC_NOTIFY_ID_WATER, ((CC_NOTI_ATTACH_TYPEDEF *)p_noti)->attach);
#ifdef CONFIG_SEC_FACTORY
return 0;
#endif
break;
case CCIC_NOTIFY_ID_VCONN:
ccic_uevent_work(CCIC_NOTIFY_ID_VCONN, 0);
break;
case CCIC_NOTIFY_ID_ROLE_SWAP:
pr_info("%s: src:%01x dest:%01x id:%02x sub1:%02x\n", __func__,
((CC_NOTI_ATTACH_TYPEDEF *)p_noti)->src,
((CC_NOTI_ATTACH_TYPEDEF *)p_noti)->dest,
((CC_NOTI_ATTACH_TYPEDEF *)p_noti)->id,
((CC_NOTI_ATTACH_TYPEDEF *)p_noti)->attach);
break;
default:
pr_info("%s: src:%01x dest:%01x id:%02x "
"sub1:%d sub2:%02x sub3:%02x\n", __func__,

419
drivers/ccic/ccic_sysfs.c Normal file → Executable file
View File

@ -24,6 +24,12 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ccic/ccic_sysfs.h>
#ifdef CONFIG_CCIC_S2MM005
#include <linux/ccic/s2mm005.h>
#include <linux/ccic/s2mm005_ext.h>
#include <linux/ccic/s2mm005_fw.h>
#include <linux/regulator/consumer.h>
#endif
#ifdef CONFIG_CCIC_S2MU004
#include <linux/ccic/usbpd.h>
#include <linux/ccic/usbpd-s2mu004.h>
@ -66,7 +72,8 @@ static ssize_t ccic_src_ver_show(struct device *dev,
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
struct s2mm005_version fw_swver;
s2mm005_get_fw_version(&fw_swver, usbpd_data->firm_ver[3], usbpd_data->hw_rev);
s2mm005_get_fw_version(usbpd_data->s2mm005_fw_product_id,
&fw_swver, usbpd_data->firm_ver[3], usbpd_data->hw_rev);
return sprintf(buf, "%02X %02X %02X %02X\n",
fw_swver.main[2], fw_swver.main[1], fw_swver.main[0], fw_swver.boot);
#else
@ -110,41 +117,33 @@ static ssize_t ccic_store_manual_lpm_mode(struct device *dev,
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
int mode;
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
sscanf(buf, "%d", &mode);
pr_info("usb: %s mode=%d\n", __func__, mode);
switch (mode) {
switch(mode){
case 0:
/* Disable Low Power Mode for App (JIGON Low + LP Off) */
s2mm005_manual_LPM(usbpd_data, 0x3);
/* Disable Low Power Mode for App (SW JIGON Disable) */
s2mm005_manual_JIGON(usbpd_data, 0);
usbpd_data->manual_lpm_mode = 0;
break;
case 1:
/* Enable Low Power Mode (JIGON High + Force LP On) */
/* Enable Low Power Mode for App (SW JIGON Enable) */
s2mm005_manual_JIGON(usbpd_data, 1);
s2mm005_manual_LPM(usbpd_data, 0x8); /* force Low power mode */
usbpd_data->manual_lpm_mode = 1;
break;
case 2:
/* Enable Low Power Mode (Normal LP On) */
/* SW JIGON Enable */
s2mm005_manual_JIGON(usbpd_data, 1);
s2mm005_manual_LPM(usbpd_data, 0x1); /* normal power mode */
// s2mm005_manual_LPM(usbpd_data, 0x1);
usbpd_data->manual_lpm_mode = 1;
break;
case 3:
/* Disable Low Power Mode (LP Off) */
s2mm005_manual_LPM(usbpd_data, 0x3);
usbpd_data->manual_lpm_mode = 0;
break;
default:
/* Disable Low Power Mode (JIGON Low + LP Off) */
s2mm005_manual_LPM(usbpd_data, 0x3);
/* SW JIGON Disable */
s2mm005_manual_JIGON(usbpd_data, 0);
usbpd_data->manual_lpm_mode = 0;
break;
@ -316,36 +315,21 @@ static int ccic_firmware_update_built_in(struct device *dev)
s2mm005_get_chip_swversion(usbpd_data, &chip_swver);
pr_err("%s CHIP SWversion %2x %2x %2x %2x - before\n", __func__,
chip_swver.main[2] , chip_swver.main[1], chip_swver.main[0], chip_swver.boot);
s2mm005_get_fw_version(&fw_swver, chip_swver.boot, usbpd_data->hw_rev);
pr_err("%s SRC SWversion:%2x, %2x, %2x, %2x\n", __func__,
s2mm005_get_fw_version(usbpd_data->s2mm005_fw_product_id,
&fw_swver, chip_swver.boot, usbpd_data->hw_rev);
pr_err("%s SRC SWversion:%2x,%2x,%2x,%2x\n",__func__,
fw_swver.main[2], fw_swver.main[1], fw_swver.main[0], fw_swver.boot);
pr_err("%s: FW UPDATE boot:%01d hw_rev:%02d\n", __func__, chip_swver.boot, usbpd_data->hw_rev);
if (chip_swver.main[0] == fw_swver.main[0]) {
pr_err("%s: FW version is same. Stop FW update. src:%2x chip:%2x\n",
if(chip_swver.main[0] == fw_swver.main[0]) {
pr_err("%s: FW version is same. Stop FW update. src:%2x chip:%2x\n",
__func__, chip_swver.main[0], fw_swver.main[0]);
goto done;
}
if (chip_swver.boot == 4) {
if (usbpd_data->hw_rev >= 9)
s2mm005_flash_fw(usbpd_data, FLASH_WRITE);
else
s2mm005_flash_fw(usbpd_data, FLASH_WRITE4);
} else if (chip_swver.boot == 5) {
if (usbpd_data->hw_rev >= 9)
s2mm005_flash_fw(usbpd_data, FLASH_WRITE5);
else
s2mm005_flash_fw(usbpd_data, FLASH_WRITE5_NODPDM);
} else if (chip_swver.boot == 6) {
if (usbpd_data->hw_rev >= 9)
s2mm005_flash_fw(usbpd_data, FLASH_WRITE6);
} else {
pr_err("%s: Didn't have same FW boot version. Stop FW update. src_boot:%2x chip_boot:%2x\n",
__func__, chip_swver.boot, fw_swver.boot);
return -1;
}
s2mm005_flash_fw(usbpd_data, chip_swver.boot);
done:
return 0;
}
@ -477,8 +461,88 @@ static ssize_t ccic_store_firmware_update(struct device *dev,
return size;
}
static DEVICE_ATTR(fw_update, 0220, NULL, ccic_store_firmware_update);
static ssize_t ccic_store_sink_pdo_update(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
uint32_t data = 0;
uint16_t REG_ADD;
uint8_t MSG_BUF[32] = {0,};
SINK_VAR_SUPPLY_Typedef *pSINK_MSG;
MSG_HEADER_Typedef *pMSG_HEADER;
uint32_t * MSG_DATA;
uint8_t cnt;
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
sscanf(buf, "%x\n", &data);
if (data == 0)
data = 0x8F019032; // 5V~12V, 500mA
pr_info("%s data=0x%x\n", __func__, data);
/* update Sink PDO */
REG_ADD = REG_TX_SINK_CAPA_MSG;
s2mm005_read_byte(usbpd_data->i2c, REG_ADD, MSG_BUF, 32);
MSG_DATA = (uint32_t *)&MSG_BUF[0];
pr_err("--- Read Data on TX_SNK_CAPA_MSG(0x220)\n");
for(cnt = 0; cnt < 8; cnt++) {
pr_err(" 0x%08X\n", MSG_DATA[cnt]);
}
pMSG_HEADER = (MSG_HEADER_Typedef *)&MSG_BUF[0];
pMSG_HEADER->BITS.Number_of_obj += 1;
pSINK_MSG = (SINK_VAR_SUPPLY_Typedef *)&MSG_BUF[8];
pSINK_MSG->DATA = data;
pr_err("--- Write DATA\n");
for (cnt = 0; cnt < 8; cnt++) {
pr_err(" 0x%08X\n", MSG_DATA[cnt]);
}
s2mm005_write_byte(usbpd_data->i2c, REG_ADD, &MSG_BUF[0], 32);
for (cnt = 0; cnt < 32; cnt++) {
MSG_BUF[cnt] = 0;
}
for (cnt = 0; cnt < 8; cnt++) {
pr_err(" 0x%08X\n", MSG_DATA[cnt]);
}
s2mm005_read_byte(usbpd_data->i2c, REG_ADD, MSG_BUF, 32);
pr_err("--- Read 2 new Data on TX_SNK_CAPA_MSG(0x220)\n");
for(cnt = 0; cnt < 8; cnt++) {
pr_err(" 0x%08X\n", MSG_DATA[cnt]);
}
return size;
}
static DEVICE_ATTR(sink_pdo_update, 0220, NULL, ccic_store_sink_pdo_update);
#endif
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
static ssize_t ccic_send_samsung_uVDM_message(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
int ret = 0;
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
ret = send_samsung_unstructured_vdm_message(usbpd_data, buf, size);
if( ret < 0 )
return ret;
else
return size;
}
static DEVICE_ATTR(samsung_uvdm, 0220, NULL, ccic_send_samsung_uVDM_message);
static ssize_t ccic_send_uVDM_message(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
@ -498,6 +562,47 @@ static ssize_t ccic_send_uVDM_message(struct device *dev,
return size;
}
static DEVICE_ATTR(uvdm, 0220, NULL, ccic_send_uVDM_message);
static ssize_t ccic_send_dna_audio_uVDM_message(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
int cmd = 0;
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
sscanf(buf, "%d", &cmd);
pr_info("%s cmd=%d\n", __func__, cmd);
send_dna_audio_unstructured_vdm_message(usbpd_data, cmd);
return size;
}
static DEVICE_ATTR(dna_audio_uvdm, 0220, NULL, ccic_send_dna_audio_uVDM_message);
static ssize_t ccic_send_dex_fan_uVDM_message(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
int cmd = 0;
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
sscanf(buf, "%d", &cmd);
pr_info("%s cmd=%d\n", __func__, cmd);
send_dex_fan_unstructured_vdm_message(usbpd_data, cmd);
return size;
}
static DEVICE_ATTR(dex_fan_uvdm, 0220, NULL, ccic_send_dex_fan_uVDM_message);
static ssize_t ccic_send_attention_message(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
@ -517,6 +622,7 @@ static ssize_t ccic_send_attention_message(struct device *dev,
return size;
}
static DEVICE_ATTR(attention, 0220, NULL, ccic_send_attention_message);
static ssize_t ccic_send_role_swap_message(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
@ -536,11 +642,10 @@ static ssize_t ccic_send_role_swap_message(struct device *dev,
return size;
}
static DEVICE_ATTR(role_swap, 0220, NULL, ccic_send_role_swap_message);
#endif
static ssize_t ccic_acc_device_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
#ifdef CONFIG_CCIC_S2MM005
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
if (!usbpd_data) {
@ -550,10 +655,166 @@ static ssize_t ccic_acc_device_version_show(struct device *dev,
pr_info("%s 0x%04x\n", __func__, usbpd_data->Device_Version);
return sprintf(buf, "%04x\n", usbpd_data->Device_Version);
}
static DEVICE_ATTR(acc_device_version, 0444, ccic_acc_device_version_show,NULL);
#endif
#ifdef CONFIG_CCIC_S2MM005
static ssize_t ccic_set_gpio(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
int mode;
u8 W_DATA[2];
u8 REG_ADD;
u8 R_DATA;
int i;
struct device_node *np = NULL;
const char *ss_vdd;
int ret = 0;
struct regulator *vdd085_usb;
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
sscanf(buf, "%d", &mode);
pr_info("usb: %s mode=%d\n", __func__, mode);
/* VDD_USB_3P0_AP is on for DP SWITCH */
np = of_find_compatible_node(NULL, NULL, "samsung,usb-notifier");
if (!np) {
pr_err("%s: failed to get the battery device node\n", __func__);
return 0;
} else {
if(of_property_read_string(np, "hs-regulator", (char const **)&ss_vdd) < 0) {
pr_err("%s - get ss_vdd error\n", __func__);
}
vdd085_usb = regulator_get(NULL, ss_vdd);
if (IS_ERR(vdd085_usb) || vdd085_usb == NULL) {
pr_err("%s - vdd085_usb regulator_get fail\n", __func__);
return 0;
}
}
/* for Wake up*/
for(i=0; i<5; i++){
R_DATA = 0x00;
REG_ADD = 0x8;
s2mm005_read_byte(usbpd_data->i2c, REG_ADD, &R_DATA, 1); //dummy read
}
udelay(10);
switch(mode){
case 0:
if (!regulator_is_enabled(vdd085_usb)) {
ret = regulator_enable(vdd085_usb);
if (ret) {
pr_err("%s - enable vdd085_usb ldo enable failed, ret=%d\n",
__func__, ret);
regulator_put(vdd085_usb);
return 0;
}
}
regulator_put(vdd085_usb);
/* SBU1/SBU2 set as open-drain status*/
// SBU1/2 Open command ON
REG_ADD = 0x10;
W_DATA[0] = 0x03;
W_DATA[1] = 0x85;
s2mm005_write_byte(usbpd_data->i2c, REG_ADD, &W_DATA[0], 2);
break;
case 1:
/* SBU1/SBU2 set as default status */
// SBU1/2 Open command OFF
REG_ADD = 0x10;
W_DATA[0] = 0x03;
W_DATA[1] = 0x86;
s2mm005_write_byte(usbpd_data->i2c, REG_ADD, &W_DATA[0], 2);
if (regulator_is_enabled(vdd085_usb)) {
ret = regulator_disable(vdd085_usb);
if (ret) {
pr_err("%s - enable vdd085_usb ldo enable failed, ret=%d\n",
__func__, ret);
regulator_put(vdd085_usb);
return 0;
}
}
regulator_put(vdd085_usb);
break;
default:
break;
}
return size;
}
static ssize_t ccic_get_gpio(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
u8 W_DATA[4];
u8 REG_ADD;
u8 R_DATA;
int i;
if (!usbpd_data) {
pr_err("usbpd_data is NULL\n");
return -ENODEV;
}
/* for Wake up*/
for(i=0; i<5; i++){
R_DATA = 0x00;
REG_ADD = 0x8;
s2mm005_read_byte(usbpd_data->i2c, REG_ADD, &R_DATA, 1); //dummy read
}
udelay(10);
W_DATA[0] =0x2;
W_DATA[1] =0x10;
W_DATA[2] =0x84;
W_DATA[3] =0x10;
s2mm005_write_byte(usbpd_data->i2c, 0x10, &W_DATA[0], 4);
s2mm005_read_byte(usbpd_data->i2c, 0x14, &R_DATA, 1);
pr_err("%s SBU1 status = %2x , SBU2 status = %2x \n", __func__,
(R_DATA & 0x10) >> 4,(R_DATA & 0x20) >> 5);
return sprintf(buf, "%d %d\n", (R_DATA & 0x10) >> 4,(R_DATA & 0x20) >> 5);
}
static DEVICE_ATTR(control_gpio, 0664, ccic_get_gpio, ccic_set_gpio);
#endif
static ssize_t ccic_usbpd_ids_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
#ifdef CONFIG_CCIC_S2MM005
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
int retval = 0;
if (!usbpd_data) {
pr_err("%s usbpd_data is null!!\n", __func__);
return -ENODEV;
}
retval = sprintf(buf, "%04x:%04x\n",
le16_to_cpu(usbpd_data->Vendor_ID),
le16_to_cpu(usbpd_data->Product_ID));
pr_info("usb: %s : %s",
__func__, buf);
#else
struct s2mu004_usbpd_data *pdic_data = dev_get_drvdata(dev);
struct usbpd_data *pd_data;
struct usbpd_manager_data *manager;
int retval = 0;
if (!pdic_data) {
pr_err("%s s2mu004_data is null!!\n", __func__);
@ -567,35 +828,6 @@ static ssize_t ccic_acc_device_version_show(struct device *dev,
}
manager = &pd_data->manager;
if (!manager) {
pr_err("%s manager_data is null!!\n", __func__);
return -ENODEV;
}
pr_info("%s 0x%04x\n", __func__, manager->Device_Version);
return sprintf(buf, "%04x\n", manager->Device_Version);
#endif
}
static DEVICE_ATTR(acc_device_version, 0444, ccic_acc_device_version_show,NULL);
static ssize_t ccic_usbpd_ids_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s2mu004_usbpd_data *pdic_data = dev_get_drvdata(dev);
struct usbpd_data *pd_data = dev_get_drvdata(pdic_data->dev);
struct usbpd_manager_data *manager = &pd_data->manager;
int retval = 0;
if (!pdic_data) {
pr_err("%s s2mu004_data is null!!\n", __func__);
return -ENODEV;
}
if (!pd_data) {
pr_err("%s usbpd_data is null!!\n", __func__);
return -ENODEV;
}
if (!manager) {
pr_err("%s manager_data is null!!\n", __func__);
return -ENODEV;
@ -606,7 +838,7 @@ static ssize_t ccic_usbpd_ids_show(struct device *dev,
le16_to_cpu(manager->Product_ID));
pr_info("usb: %s : %s",
__func__, buf);
#endif
return retval;
}
static DEVICE_ATTR(usbpd_ids, 0444, ccic_usbpd_ids_show, NULL);
@ -614,9 +846,21 @@ static DEVICE_ATTR(usbpd_ids, 0444, ccic_usbpd_ids_show, NULL);
static ssize_t ccic_usbpd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
#ifdef CONFIG_CCIC_S2MM005
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
int retval = 0;
if (!usbpd_data) {
pr_err("%s usbpd_data is null!!\n", __func__);
return -ENODEV;
}
retval = sprintf(buf, "%d\n", usbpd_data->acc_type);
pr_info("usb: %s : %d",
__func__, usbpd_data->acc_type);
#else
struct s2mu004_usbpd_data *pdic_data = dev_get_drvdata(dev);
struct usbpd_data *pd_data = dev_get_drvdata(pdic_data->dev);
struct usbpd_manager_data *manager = &pd_data->manager;
struct usbpd_data *pd_data;
struct usbpd_manager_data *manager;
int retval = 0;
if (!pdic_data) {
@ -624,11 +868,13 @@ static ssize_t ccic_usbpd_type_show(struct device *dev,
return -ENODEV;
}
pd_data = dev_get_drvdata(pdic_data->dev);
if (!pd_data) {
pr_err("%s usbpd_data is null!!\n", __func__);
return -ENODEV;
}
manager = &pd_data->manager;
if (!manager) {
pr_err("%s manager_data is null!!\n", __func__);
return -ENODEV;
@ -636,6 +882,7 @@ static ssize_t ccic_usbpd_type_show(struct device *dev,
retval = sprintf(buf, "%d\n", manager->acc_type);
pr_info("usb: %s : %d",
__func__, manager->acc_type);
#endif
return retval;
}
static DEVICE_ATTR(usbpd_type, 0444, ccic_usbpd_type_show, NULL);
@ -643,6 +890,18 @@ static DEVICE_ATTR(usbpd_type, 0444, ccic_usbpd_type_show, NULL);
static ssize_t ccic_water_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
#ifdef CONFIG_CCIC_S2MM005
struct s2mm005_data *usbpd_data = dev_get_drvdata(dev);
if(!usbpd_data) {
pr_err("%s usbpd_data is null!!\n", __func__);
return -ENODEV;
}
pr_info("%s water=%d, run_dry=%d\n", __func__,
usbpd_data->water_det, usbpd_data->run_dry);
return sprintf(buf, "%d\n", (usbpd_data->water_det | !usbpd_data->run_dry));
#else
struct s2mu004_usbpd_data *usbpd_data = dev_get_drvdata(dev);
if(!usbpd_data) {
@ -653,6 +912,7 @@ static ssize_t ccic_water_show(struct device *dev,
(int)usbpd_data->is_water_detect);
return sprintf(buf, "%d\n", usbpd_data->is_water_detect);
#endif
}
static DEVICE_ATTR(water, 0444, ccic_water_show, NULL);
@ -669,15 +929,22 @@ static struct attribute *ccic_attributes[] = {
#ifdef CONFIG_CCIC_S2MM005
&dev_attr_fw_update.attr,
&dev_attr_fw_update_status.attr,
&dev_attr_sink_pdo_update.attr,
#endif
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
&dev_attr_uvdm.attr,
&dev_attr_attention.attr,
&dev_attr_role_swap.attr,
#endif
&dev_attr_samsung_uvdm.attr,
&dev_attr_dna_audio_uvdm.attr,
&dev_attr_dex_fan_uvdm.attr,
&dev_attr_acc_device_version.attr,
#endif
&dev_attr_usbpd_ids.attr,
&dev_attr_usbpd_type.attr,
#ifdef CONFIG_CCIC_S2MM005
&dev_attr_control_gpio.attr,
#endif
&dev_attr_water.attr,
NULL
};

View File

@ -22,9 +22,15 @@
#include <linux/ccic/s2mm005_ext.h>
#include <linux/ccic/s2mm005_fw.h>
#include <linux/usb_notify.h>
#include <linux/ccic/ccic_sysfs.h>
extern struct device *ccic_device;
extern struct pdic_notifier_struct pd_noti;
#if defined(CONFIG_BATTERY_SAMSUNG)
extern unsigned int lpcharge;
#endif
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
static enum dual_role_property fusb_drp_properties[] = {
DUAL_ROLE_PROP_MODE,
@ -45,6 +51,9 @@ void s2mm005_rprd_mode_change(struct s2mm005_data *usbpd_data, u8 mode);
void s2mm005_manual_JIGON(struct s2mm005_data *usbpd_data, int mode);
void s2mm005_manual_LPM(struct s2mm005_data *usbpd_data, int cmd);
void s2mm005_control_option_command(struct s2mm005_data *usbpd_data, int cmd);
int s2mm005_fw_ver_check(void * data);
int ccic_misc_init(void);
void ccic_misc_exit(void);
////////////////////////////////////////////////////////////////////////////////
//status machine of s2mm005 ccic
////////////////////////////////////////////////////////////////////////////////
@ -60,11 +69,15 @@ void s2mm005_control_option_command(struct s2mm005_data *usbpd_data, int cmd);
int s2mm005_read_byte(const struct i2c_client *i2c, u16 reg, u8 *val, u16 size)
{
int ret; u8 wbuf[2];
int ret, i2c_retry; u8 wbuf[2];
struct i2c_msg msg[2];
struct s2mm005_data *usbpd_data = i2c_get_clientdata(i2c);
#if defined(CONFIG_USB_HW_PARAM)
struct otg_notify *o_notify = get_otg_notify();
#endif
mutex_lock(&usbpd_data->i2c_mutex);
i2c_retry = 0;
msg[0].addr = i2c->addr;
msg[0].flags = i2c->flags;
msg[0].len = 2;
@ -77,10 +90,17 @@ int s2mm005_read_byte(const struct i2c_client *i2c, u16 reg, u8 *val, u16 size)
wbuf[0] = (reg & 0xFF00) >> 8;
wbuf[1] = (reg & 0xFF);
ret = i2c_transfer(i2c->adapter, msg, ARRAY_SIZE(msg));
if (ret < 0)
dev_err(&i2c->dev, "i2c read16 fail reg:0x%x error %d\n",
reg, ret);
do {
ret = i2c_transfer(i2c->adapter, msg, ARRAY_SIZE(msg));
} while (ret < 0 && i2c_retry++ < 5);
if (ret < 0) {
#if defined(CONFIG_USB_HW_PARAM)
if (o_notify)
inc_hw_param(o_notify, USB_CCIC_I2C_ERROR_COUNT);
#endif
dev_err(&i2c->dev, "i2c read16 fail reg:0x%x error %d\n", reg, ret);
}
mutex_unlock(&usbpd_data->i2c_mutex);
return ret;
@ -91,6 +111,9 @@ int s2mm005_read_byte_flash(const struct i2c_client *i2c, u16 reg, u8 *val, u16
int ret; u8 wbuf[2];
struct i2c_msg msg[2];
struct s2mm005_data *usbpd_data = i2c_get_clientdata(i2c);
#if defined(CONFIG_USB_HW_PARAM)
struct otg_notify *o_notify = get_otg_notify();
#endif
u8 W_DATA[1];
udelay(20);
@ -112,9 +135,13 @@ int s2mm005_read_byte_flash(const struct i2c_client *i2c, u16 reg, u8 *val, u16
wbuf[1] = (reg & 0xFF);
ret = i2c_transfer(i2c->adapter, msg, ARRAY_SIZE(msg));
if (ret < 0)
dev_err(&i2c->dev, "i2c read16 fail reg:0x%x error %d\n",
reg, ret);
if (ret < 0) {
#if defined(CONFIG_USB_HW_PARAM)
if (o_notify)
inc_hw_param(o_notify, USB_CCIC_I2C_ERROR_COUNT);
#endif
dev_err(&i2c->dev, "i2c read16 fail reg:0x%x error %d\n", reg, ret);
}
mutex_unlock(&usbpd_data->i2c_mutex);
return ret;
@ -122,9 +149,12 @@ int s2mm005_read_byte_flash(const struct i2c_client *i2c, u16 reg, u8 *val, u16
int s2mm005_write_byte(const struct i2c_client *i2c, u16 reg, u8 *val, u16 size)
{
int ret = 0; u8 buf[258] = {0,};
int ret, i2c_retry; u8 buf[258] = {0,};
struct i2c_msg msg[1];
struct s2mm005_data *usbpd_data = i2c_get_clientdata(i2c);
#if defined(CONFIG_USB_HW_PARAM)
struct otg_notify *o_notify = get_otg_notify();
#endif
if (size > 256)
{
@ -133,6 +163,7 @@ int s2mm005_write_byte(const struct i2c_client *i2c, u16 reg, u8 *val, u16 size)
}
mutex_lock(&usbpd_data->i2c_mutex);
i2c_retry = 0;
msg[0].addr = i2c->addr;
msg[0].flags = 0;
msg[0].len = size+2;
@ -142,9 +173,17 @@ int s2mm005_write_byte(const struct i2c_client *i2c, u16 reg, u8 *val, u16 size)
buf[1] = (reg & 0xFF);
memcpy(&buf[2], val, size);
ret = i2c_transfer(i2c->adapter, msg, 1);
if (ret < 0)
do {
ret = i2c_transfer(i2c->adapter, msg, 1);
} while (ret < 0 && i2c_retry++ < 5);
if (ret < 0) {
#if defined(CONFIG_USB_HW_PARAM)
if (o_notify)
inc_hw_param(o_notify, USB_CCIC_I2C_ERROR_COUNT);
#endif
dev_err(&i2c->dev, "i2c write fail reg:0x%x error %d\n", reg, ret);
}
mutex_unlock(&usbpd_data->i2c_mutex);
return ret;
@ -155,6 +194,9 @@ int s2mm005_read_byte_16(const struct i2c_client *i2c, u16 reg, u8 *val)
int ret; u8 wbuf[2], rbuf;
struct i2c_msg msg[2];
struct s2mm005_data *usbpd_data = i2c_get_clientdata(i2c);
#if defined(CONFIG_USB_HW_PARAM)
struct otg_notify *o_notify = get_otg_notify();
#endif
mutex_lock(&usbpd_data->i2c_mutex);
msg[0].addr = i2c->addr;
@ -170,9 +212,13 @@ int s2mm005_read_byte_16(const struct i2c_client *i2c, u16 reg, u8 *val)
wbuf[1] = (reg & 0xFF);
ret = i2c_transfer(i2c->adapter, msg, 2);
if (ret < 0)
dev_err(&i2c->dev, "i2c read16 fail reg(0x%x), error %d\n",
reg, ret);
if (ret < 0) {
#if defined(CONFIG_USB_HW_PARAM)
if (o_notify)
inc_hw_param(o_notify, USB_CCIC_I2C_ERROR_COUNT);
#endif
dev_err(&i2c->dev, "i2c read16 fail reg(0x%x), error %d\n", reg, ret);
}
mutex_unlock(&usbpd_data->i2c_mutex);
*val = rbuf;
@ -184,6 +230,9 @@ int s2mm005_write_byte_16(const struct i2c_client *i2c, u16 reg, u8 val)
int ret = 0; u8 wbuf[3];
struct i2c_msg msg[1];
struct s2mm005_data *usbpd_data = i2c_get_clientdata(i2c);
#if defined(CONFIG_USB_HW_PARAM)
struct otg_notify *o_notify = get_otg_notify();
#endif
mutex_lock(&usbpd_data->i2c_mutex);
msg[0].addr = i2c->addr;
@ -196,9 +245,13 @@ int s2mm005_write_byte_16(const struct i2c_client *i2c, u16 reg, u8 val)
wbuf[2] = (val & 0xFF);
ret = i2c_transfer(i2c->adapter, msg, 1);
if (ret < 0)
dev_err(&i2c->dev, "i2c write fail reg(0x%x:%x), error %d\n",
reg, val, ret);
if (ret < 0) {
#if defined(CONFIG_USB_HW_PARAM)
if (o_notify)
inc_hw_param(o_notify, USB_CCIC_I2C_ERROR_COUNT);
#endif
dev_err(&i2c->dev, "i2c write fail reg(0x%x:%x), error %d\n", reg, val, ret);
}
mutex_unlock(&usbpd_data->i2c_mutex);
return ret;
@ -207,7 +260,7 @@ int s2mm005_write_byte_16(const struct i2c_client *i2c, u16 reg, u8 val)
void s2mm005_int_clear(struct s2mm005_data *usbpd_data)
{
struct i2c_client *i2c = usbpd_data->i2c;
pr_info("%s : -- clear clear -- \n", __func__);
s2mm005_write_byte_16(i2c, 0x10, 0x1);
}
@ -219,6 +272,7 @@ void s2mm005_reset(struct s2mm005_data *usbpd_data)
u8 R_DATA[1];
int i;
pr_info("%s\n", __func__);
/* for Wake up*/
for(i=0; i<5; i++){
R_DATA[0] = 0x00;
@ -235,6 +289,8 @@ void s2mm005_reset(struct s2mm005_data *usbpd_data)
W_DATA[4] = 0x01;
REG_ADD = 0x10;
s2mm005_write_byte(i2c, REG_ADD, &W_DATA[0], 5);
/* reset stable time */
msleep(100);
}
void s2mm005_reset_enable(struct s2mm005_data *usbpd_data)
@ -291,7 +347,7 @@ void s2mm005_hard_reset(struct s2mm005_data *usbpd_data)
pr_err("could not set reset pins\n");
printk("hard_reset: %04d %1d %01d\n", __LINE__, gpio_get_value(usbpd_data->s2mm005_sda), gpio_get_value(usbpd_data->s2mm005_scl));
usleep_range(1 * 1000, 1 * 1000);
usleep_range(10 * 1000, 10 * 1000);
i2c_pinctrl = devm_pinctrl_get_select(i2c_dev, "default");
if (IS_ERR(i2c_pinctrl))
pr_err("could not set default pins\n");
@ -349,7 +405,7 @@ void s2mm005_manual_JIGON(struct s2mm005_data *usbpd_data, int mode)
if(mode) W_DATA[1] = 0x5; // JIGON High
else W_DATA[1] = 0x4; // JIGON Low
REG_ADD = 0x10;
s2mm005_write_byte(i2c, REG_ADD, &W_DATA[0], 2);
s2mm005_write_byte(i2c, REG_ADD, &W_DATA[0], 2);
}
@ -397,12 +453,21 @@ void s2mm005_control_option_command(struct s2mm005_data *usbpd_data, int cmd)
// 0x82 : Vconn control option command OFF
// 0x83 : Water Detect option command ON
// 0x84 : Water Detect option command OFF
#if defined(CONFIG_SEC_FACTORY)
if((cmd&0xF) == 0x3)
usbpd_data->fac_water_enable = 1;
else if ((cmd&0xF) == 0x4)
usbpd_data->fac_water_enable = 0;
#endif
REG_ADD = 0x10;
W_DATA[0] = 0x03;
W_DATA[1] = 0x80 | (cmd&0xF);
s2mm005_write_byte(i2c, REG_ADD, &W_DATA[0], 2);
}
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
static void s2mm005_new_toggling_control(struct s2mm005_data *usbpd_data, u8 mode)
{
struct i2c_client *i2c = usbpd_data->i2c;
@ -435,6 +500,54 @@ static void s2mm005_toggling_control(struct s2mm005_data *usbpd_data, u8 mode)
REG_ADD = 0x10;
s2mm005_write_byte(i2c, REG_ADD, &W_DATA[0], 5);
}
#endif
int s2mm005_fw_ver_check(void * data)
{
struct s2mm005_data *usbpd_data = data;
struct s2mm005_version chip_swver, hwver;
if ((usbpd_data->firm_ver[1] == 0xFF && usbpd_data->firm_ver[2] == 0xFF)
|| (usbpd_data->firm_ver[1] == 0x00 && usbpd_data->firm_ver[2] == 0x00)) {
s2mm005_get_chip_hwversion(usbpd_data, &hwver);
pr_err("%s CHIP HWversion %2x %2x %2x %2x\n", __func__,
hwver.main[2] , hwver.main[1], hwver.main[0], hwver.boot);
s2mm005_get_chip_swversion(usbpd_data, &chip_swver);
pr_err("%s CHIP SWversion %2x %2x %2x %2x\n", __func__,
chip_swver.main[2] , chip_swver.main[1], chip_swver.main[0], chip_swver.boot);
if ((chip_swver.main[0] == 0xFF && chip_swver.main[1] == 0xFF)
|| (chip_swver.main[0] == 0x00 && chip_swver.main[1] == 0x00)) {
pr_err("%s Invalid FW version\n", __func__);
return CCIC_FW_VERSION_INVALID;
}
store_ccic_version(&hwver.main[0], &chip_swver.main[0], &chip_swver.boot);
usbpd_data->firm_ver[0] = chip_swver.main[2];
usbpd_data->firm_ver[1] = chip_swver.main[1];
usbpd_data->firm_ver[2] = chip_swver.main[0];
usbpd_data->firm_ver[3] = chip_swver.boot;
}
return 0;
}
void s2mm005_set_upsm_mode(void)
{
struct s2mm005_data *usbpd_data;
u8 W_DATA[2];
if(!ccic_device)
return;
usbpd_data = dev_get_drvdata(ccic_device);
if(!usbpd_data)
return;
W_DATA[0] =0x3;
W_DATA[1] =0x40;
s2mm005_write_byte(usbpd_data->i2c, 0x10, &W_DATA[0], 2);
pr_info("%s : current status is upsm! \n",__func__);
}
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
void s2mm005_rprd_mode_change(struct s2mm005_data *usbpd_data, u8 mode)
@ -472,16 +585,27 @@ static irqreturn_t s2mm005_usbpd_irq_thread(int irq, void *data)
MSG_IRQ_STATUS_Type MSG_IRQ_State;
dev_info(&i2c->dev, "%d times\n", ++usbpd_data->wq_times);
if (usbpd_data->ccic_check_at_booting) {
usbpd_data->ccic_check_at_booting = 0;
cancel_delayed_work_sync(&usbpd_data->ccic_init_work);
}
// Function State
irq_gpio_status[0] = gpio_get_value(usbpd_data->irq_gpio);
dev_info(&i2c->dev, "IRQ0:%02d\n", irq_gpio_status[0]);
wake_lock_timeout(&usbpd_data->wlock, HZ);
// Send attach event
process_cc_attach(usbpd_data,&plug_attach_done);
if (s2mm005_fw_ver_check(usbpd_data) == CCIC_FW_VERSION_INVALID) {
goto ver_err;
}
if(usbpd_data->water_det){
// Send attach event
process_cc_attach(usbpd_data, &plug_attach_done);
if (usbpd_data->s2mm005_i2c_err < 0)
goto i2cErr;
if(usbpd_data->water_det || !usbpd_data->run_dry || !usbpd_data->booting_run_dry){
process_cc_water_det(usbpd_data);
goto water;
}
@ -495,6 +619,8 @@ static irqreturn_t s2mm005_usbpd_irq_thread(int irq, void *data)
// RID processing
process_cc_rid(usbpd_data);
i2cErr:
ver_err:
water:
/* ========================================== */
// s2mm005_int_clear(usbpd_data);
@ -517,6 +643,12 @@ static int of_s2mm005_usbpd_dt(struct device *dev,
usbpd_data->s2mm005_om = of_get_named_gpio(np, "usbpd,s2mm005_om", 0);
usbpd_data->s2mm005_sda = of_get_named_gpio(np, "usbpd,s2mm005_sda", 0);
usbpd_data->s2mm005_scl = of_get_named_gpio(np, "usbpd,s2mm005_scl", 0);
if (of_property_read_u32(np, "usbpd,water_detect_support", &usbpd_data->water_detect_support)) {
usbpd_data->water_detect_support = 1;
}
if (of_property_read_u32(np, "usbpd,s2mm005_fw_product_id", &usbpd_data->s2mm005_fw_product_id)) {
usbpd_data->s2mm005_fw_product_id = 0x01;
}
np = of_find_all_nodes(NULL);
ret = of_property_read_u32(np, "model_info-hw_rev", &usbpd_data->hw_rev);
@ -526,15 +658,84 @@ static int of_s2mm005_usbpd_dt(struct device *dev,
}
dev_err(dev, "hw_rev:%02d usbpd_irq = %d redriver_en = %d s2mm005_om = %d\n"
"s2mm005_sda = %d, s2mm005_scl = %d\n",
"s2mm005_sda = %d, s2mm005_scl = %d, fw_product_id=0x%02X\n",
usbpd_data->hw_rev,
usbpd_data->irq_gpio, usbpd_data->redriver_en, usbpd_data->s2mm005_om,
usbpd_data->s2mm005_sda, usbpd_data->s2mm005_scl);
usbpd_data->s2mm005_sda, usbpd_data->s2mm005_scl,
usbpd_data->s2mm005_fw_product_id);
return 0;
}
#endif /* CONFIG_OF */
void ccic_state_check_work(struct work_struct *wk)
{
struct s2mm005_data *usbpd_data =
container_of(wk, struct s2mm005_data, ccic_init_work.work);
pr_info("%s - check state=%d\n", __func__, usbpd_data->ccic_check_at_booting);
if(usbpd_data->ccic_check_at_booting) {
usbpd_data->ccic_check_at_booting = 0;
s2mm005_usbpd_irq_thread(usbpd_data->irq, usbpd_data);
}
}
static int pdic_handle_usb_external_notifier_notification(struct notifier_block *nb,
unsigned long action, void *data)
{
struct s2mm005_data *usbpd_data = dev_get_drvdata(ccic_device);
int ret = 0;
int enable = *(int *)data;
pr_info("%s : action=%lu , enable=%d\n",__func__,action,enable);
switch (action) {
case EXTERNAL_NOTIFY_HOSTBLOCK_PRE:
if(enable) {
set_enable_alternate_mode(ALTERNATE_MODE_STOP);
if(usbpd_data->dp_is_connect)
dp_detach(usbpd_data);
} else {
if(usbpd_data->dp_is_connect)
dp_detach(usbpd_data);
}
break;
case EXTERNAL_NOTIFY_HOSTBLOCK_POST:
if(enable) {
} else {
set_enable_alternate_mode(ALTERNATE_MODE_START);
}
break;
default:
break;
}
return ret;
}
static void delayed_external_notifier_init(struct work_struct *work)
{
int ret = 0;
static int retry_count = 1;
int max_retry_count = 5;
struct s2mm005_data *usbpd_data = dev_get_drvdata(ccic_device);
pr_info("%s : %d = times!\n",__func__,retry_count);
// Register ccic handler to ccic notifier block list
ret = usb_external_notify_register(&usbpd_data->usb_external_notifier_nb,
pdic_handle_usb_external_notifier_notification,EXTERNAL_NOTIFY_DEV_PDIC);
if(ret < 0) {
pr_err("Manager notifier init time is %d.\n",retry_count);
if(retry_count++ != max_retry_count)
schedule_delayed_work(&usbpd_data->usb_external_notifier_register_work, msecs_to_jiffies(2000));
else
pr_err("fail to init external notifier\n");
} else
pr_info("%s : external notifier register done!\n",__func__);
}
static int s2mm005_usbpd_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@ -544,15 +745,26 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
#ifdef CONFIG_CCIC_LPM_ENABLE
u8 check[8] = {0,};
#endif
uint16_t REG_ADD;
uint8_t MSG_BUF[32] = {0,};
SINK_VAR_SUPPLY_Typedef *pSINK_MSG;
MSG_HEADER_Typedef *pMSG_HEADER;
#if defined(CONFIG_SEC_FACTORY)
LP_STATE_Type Lp_DATA;
#endif
uint32_t * MSG_DATA;
uint8_t cnt;
u8 W_DATA[8];
u8 R_DATA[4];
u8 temp, ftrim;
int i;
struct s2mm005_version chip_swver, fw_swver, hwver;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
struct dual_role_phy_desc *desc;
struct dual_role_phy_instance *dual_role;
#endif
#if defined(CONFIG_USB_HOST_NOTIFY)
struct otg_notify *o_notify = get_otg_notify();
#endif
pr_info("%s\n", __func__);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@ -568,8 +780,10 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
#if defined(CONFIG_OF)
if (i2c->dev.of_node)
of_s2mm005_usbpd_dt(&i2c->dev, usbpd_data);
else
else {
dev_err(&i2c->dev, "not found ccic dt! ret:%d\n", ret);
return -ENODEV;
}
#endif
ret = gpio_request(usbpd_data->irq_gpio, "s2mm005_irq");
if (ret)
@ -593,6 +807,7 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
usbpd_data->dev = &i2c->dev;
usbpd_data->i2c = i2c;
i2c_set_clientdata(i2c, usbpd_data);
if (ccic_device == NULL) ccic_notifier_init(); // temp
dev_set_drvdata(ccic_device, usbpd_data);
device_init_wakeup(usbpd_data->dev, 1);
pd_noti.pusbpd = usbpd_data;
@ -611,7 +826,15 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
usbpd_data->is_client = 0;
usbpd_data->manual_lpm_mode = 0;
usbpd_data->water_det = 0;
usbpd_data->run_dry = 1;
usbpd_data->booting_run_dry = 1;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->try_state_change = 0;
#endif
#if defined(CONFIG_SEC_FACTORY)
usbpd_data->fac_water_enable = 0;
#endif
wake_lock_init(&usbpd_data->wlock, WAKE_LOCK_SUSPEND,
"s2mm005-intr");
@ -628,9 +851,30 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
dev_err(&i2c->dev, "probed, irq %d\n", usbpd_data->irq_gpio);
for (cnt = 0; cnt < 32; cnt++) {
MSG_BUF[cnt] = 0;
}
REG_ADD = REG_TX_SINK_CAPA_MSG;
ret = s2mm005_read_byte(i2c, REG_ADD, MSG_BUF, 32);
if (ret < 0) {
s2mm005_hard_reset(usbpd_data);
msleep(1000);
ret = s2mm005_read_byte(i2c, REG_ADD, MSG_BUF, 32);
if (ret < 0) {
/* to check wrong ccic chipsets, It will be removed after PRA */
// panic("Intentional Panic - ccic i2c error\n");
dev_err(&i2c->dev, "%s has i2c read error.\n", __func__);
// goto err_init_irq;
}
}
s2mm005_get_chip_hwversion(usbpd_data, &hwver);
pr_err("%s CHIP HWversion %2x %2x %2x %2x\n", __func__,
pr_err("%s CHIP HWversion %2x %2x %2x %2x\n", __func__,
hwver.main[2] , hwver.main[1], hwver.main[0], hwver.boot);
pr_err("%s CHIP HWversion2 %2x %2x %2x %2x\n", __func__,
hwver.ver2[3], hwver.ver2[2], hwver.ver2[1], hwver.ver2[0]);
if (hwver.boot <= 2) {
W_DATA[0] =0x02; W_DATA[1] =0x40; W_DATA[2] =0x04; W_DATA[3] =0x11;
s2mm005_write_byte(i2c, 0x10, &W_DATA[0], 4);
@ -653,35 +897,61 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
}
for (i=0; i<2; i++) {
s2mm005_get_chip_swversion(usbpd_data, &chip_swver);
pr_err("%s CHIP SWversion %2x %2x %2x %2x\n", __func__,
chip_swver.main[2] , chip_swver.main[1], chip_swver.main[0], chip_swver.boot);
if(chip_swver.main[0] && (chip_swver.main[0] != 0xff))
break;
}
s2mm005_get_fw_version(&fw_swver, chip_swver.boot, usbpd_data->hw_rev);
pr_err("%s SRC SWversion:%2x,%2x,%2x,%2x\n",__func__,
s2mm005_get_chip_swversion(usbpd_data, &chip_swver);
pr_err("%s CHIP SWversion %2x %2x %2x %2x\n", __func__,
chip_swver.main[2], chip_swver.main[1], chip_swver.main[0], chip_swver.boot);
pr_err("%s CHIP SWversion2 %2x %2x %2x %2x\n", __func__,
chip_swver.ver2[3], chip_swver.ver2[2], chip_swver.ver2[1], chip_swver.ver2[0]);
s2mm005_get_fw_version(usbpd_data->s2mm005_fw_product_id,
&fw_swver, chip_swver.boot, usbpd_data->hw_rev);
pr_err("%s SRC SWversion: %2x,%2x,%2x,%2x\n", __func__,
fw_swver.main[2], fw_swver.main[1], fw_swver.main[0], fw_swver.boot);
pr_err("%s: FW UPDATE boot:%01d hw_rev:%02d\n", __func__,
chip_swver.boot, usbpd_data->hw_rev);
pr_err("%s: FW UPDATE boot:%01d hw_rev:%02d\n", __func__, chip_swver.boot, usbpd_data->hw_rev);
usbpd_data->fw_product_id = fw_swver.main[2];
usbpd_data->fw_product_num = fw_swver.main[2];
#if defined(CONFIG_SEC_FACTORY)
s2mm005_read_byte(i2c, 0x60, Lp_DATA.BYTE, 4);
pr_err("%s: WATER reg:0x%02X BOOTING_RUN_DRY=%d\n", __func__,
Lp_DATA.BYTE[0], Lp_DATA.BITS.BOOTING_RUN_DRY);
#ifdef CONFIG_SEC_FACTORY
if (chip_swver.main[0] != fw_swver.main[0])
s2mm005_flash_fw(usbpd_data,chip_swver.boot);
#else
if (chip_swver.main[0] < fw_swver.main[0])
s2mm005_flash_fw(usbpd_data,chip_swver.boot);
else if ((((chip_swver.main[2] == 0xff) && (chip_swver.main[1] == 0xa5) && (chip_swver.main[0] == 0xa7)) || chip_swver.main[2] == 0x00) &&
fw_swver.main[2] != 0x0) //extra case, factory or old version (for dream)
s2mm005_flash_fw(usbpd_data,chip_swver.boot);
usbpd_data->fac_booting_dry_check = Lp_DATA.BITS.BOOTING_RUN_DRY;
#endif
s2mm005_get_chip_swversion(usbpd_data, &chip_swver);
pr_err("%s CHIP SWversion %2x %2x %2x %2x\n", __func__,
chip_swver.main[2] , chip_swver.main[1], chip_swver.main[0], chip_swver.boot);
if (chip_swver.boot == 0x8) {
#ifdef CONFIG_SEC_FACTORY
if ((chip_swver.main[0] != fw_swver.main[0]) /* main version */
|| (chip_swver.main[1] != fw_swver.main[1]) /* sub version */
|| (chip_swver.main[2] != fw_swver.main[2])) /* product id */
{
if(s2mm005_flash_fw(usbpd_data,chip_swver.boot) < 0)
{
pr_err("%s: s2mm005_flash_fw 1st fail, try again \n", __func__);
if(s2mm005_flash_fw(usbpd_data,chip_swver.boot) < 0)
{
pr_err("%s: s2mm005_flash_fw 2st fail, panic \n", __func__);
panic("infinite write fail!\n");
}
}
}
#else
if ((chip_swver.main[0] < fw_swver.main[0])
|| ((chip_swver.main[0] == fw_swver.main[0]) && (chip_swver.main[1] < fw_swver.main[1]))
|| (chip_swver.main[2] != fw_swver.main[2]))
s2mm005_flash_fw(usbpd_data,chip_swver.boot);
else if ((((chip_swver.main[2] == 0xff) && (chip_swver.main[1] == 0xa5)) || chip_swver.main[2] == 0x00) &&
fw_swver.main[2] != 0x0) //extra case, factory or old version (for dream)
s2mm005_flash_fw(usbpd_data,chip_swver.boot);
#endif
s2mm005_get_chip_swversion(usbpd_data, &chip_swver);
pr_err("%s CHIP SWversion %2x %2x %2x %2x\n", __func__,
chip_swver.main[2], chip_swver.main[1], chip_swver.main[0], chip_swver.boot);
pr_err("%s CHIP SWversion2 %2x %2x %2x %2x\n", __func__,
chip_swver.ver2[3], chip_swver.ver2[2], chip_swver.ver2[1], chip_swver.ver2[0]);
}
store_ccic_version(&hwver.main[0], &chip_swver.main[0], &chip_swver.boot);
@ -690,14 +960,44 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
usbpd_data->firm_ver[2] = chip_swver.main[0];
usbpd_data->firm_ver[3] = chip_swver.boot;
#ifdef CONFIG_CCIC_LPM_ENABLE
if (chip_swver.main[0] >= 0xE) {
pr_err("LPM_ENABLE\n");
MSG_DATA = (uint32_t *)&MSG_BUF[0];
dev_info(&i2c->dev, "--- Read Data on TX_SNK_CAPA_MSG(0x220)\n\r");
for(cnt = 0; cnt < 8; cnt++) {
dev_info(&i2c->dev, " 0x%08X\n\r", MSG_DATA[cnt]);
}
pMSG_HEADER = (MSG_HEADER_Typedef *)&MSG_BUF[0];
pMSG_HEADER->BITS.Number_of_obj += 1;
pSINK_MSG = (SINK_VAR_SUPPLY_Typedef *)&MSG_BUF[8];
pSINK_MSG->DATA = 0x8F019032; // 5V~12V, 500mA
dev_info(&i2c->dev, "--- Write DATA\n\r");
for (cnt = 0; cnt < 8; cnt++) {
dev_info(&i2c->dev, " 0x%08X\n\r", MSG_DATA[cnt]);
}
/* default value is written by CCIC FW. If you need others, overwrite it.*/
//s2mm005_write_byte(i2c, REG_ADD, &MSG_BUF[0], 32);
for (cnt = 0; cnt < 32; cnt++) {
MSG_BUF[cnt] = 0;
}
for (cnt = 0; cnt < 8; cnt++) {
dev_info(&i2c->dev, " 0x%08X\n\r", MSG_DATA[cnt]);
}
ret = s2mm005_read_byte(i2c, REG_ADD, MSG_BUF, 32);
dev_info(&i2c->dev, "--- Read 2 new Data on TX_SNK_CAPA_MSG(0x220)\n\r");
for(cnt = 0; cnt < 8; cnt++) {
dev_info(&i2c->dev, " 0x%08X\n\r", MSG_DATA[cnt]);
}
#ifdef CONFIG_CCIC_LPM_ENABLE
pr_err("LPM_ENABLE\n");
check[0] = 0x0F;
check[1] = 0x06;
s2mm005_write_byte(i2c, 0x10, &check[0], 2);
}
#endif
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
@ -722,15 +1022,45 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
usbpd_data->dual_role = dual_role;
usbpd_data->desc = desc;
init_completion(&usbpd_data->reverse_completion);
init_completion(&usbpd_data->uvdm_out_wait);
init_completion(&usbpd_data->uvdm_longpacket_in_wait);
usbpd_data->power_role = DUAL_ROLE_PROP_PR_NONE;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
INIT_DELAYED_WORK(&usbpd_data->role_swap_work, role_swap_check);
#endif
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
usbpd_data->alternate_state = 0;
usbpd_data->acc_type = 0;
usbpd_data->dp_is_connect = 0;
usbpd_data->dp_hs_connect = 0;
usbpd_data->dp_selected_pin = 0;
usbpd_data->pin_assignment = 0;
usbpd_data->is_samsung_accessory_enter_mode = 0;
usbpd_data->Vendor_ID = 0;
usbpd_data->Product_ID = 0;
usbpd_data->Device_Version = 0;
ccic_register_switch_device(1);
INIT_DELAYED_WORK(&usbpd_data->acc_detach_work, acc_detach_check);
init_waitqueue_head(&usbpd_data->host_turn_on_wait_q);
set_host_turn_on_event(0);
usbpd_data->host_turn_on_wait_time = 2;
ret = ccic_misc_init();
if (ret) {
dev_err(&i2c->dev, "ccic misc register is failed, error %d\n", ret);
goto err_init_irq;
}
#endif
s2mm005_int_clear(usbpd_data);
fp_select_pdo = s2mm005_select_pdo;
usbpd_data->ccic_check_at_booting = 1;
INIT_DELAYED_WORK(&usbpd_data->ccic_init_work, ccic_state_check_work);
schedule_delayed_work(&usbpd_data->ccic_init_work, msecs_to_jiffies(200));
ret = request_threaded_irq(usbpd_data->irq, NULL, s2mm005_usbpd_irq_thread,
(IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND | IRQF_ONESHOT), "s2mm005-usbpd", usbpd_data);
if (ret) {
@ -738,6 +1068,33 @@ static int s2mm005_usbpd_probe(struct i2c_client *i2c,
goto err_init_irq;
}
#if defined(CONFIG_BATTERY_SAMSUNG)
if(usbpd_data->s2mm005_fw_product_id == PRODUCT_NUM_DREAM)
{
u8 W_CHG_INFO[3]={0,};
W_CHG_INFO[0] = 0x0f;
W_CHG_INFO[1] = 0x0c;
if (lpcharge)
W_CHG_INFO[2] = 0x1; // lpcharge
else
W_CHG_INFO[2] = 0x0; // normal
s2mm005_write_byte(usbpd_data->i2c, 0x10, &W_CHG_INFO[0], 3); // send info to ccic
}
#endif
INIT_DELAYED_WORK(&usbpd_data->usb_external_notifier_register_work,
delayed_external_notifier_init);
// Register ccic handler to ccic notifier block list
ret = usb_external_notify_register(&usbpd_data->usb_external_notifier_nb,
pdic_handle_usb_external_notifier_notification,EXTERNAL_NOTIFY_DEV_PDIC);
if(ret < 0)
schedule_delayed_work(&usbpd_data->usb_external_notifier_register_work, msecs_to_jiffies(2000));
else
pr_info("%s : external notifier register done!\n",__func__);
s2mm005_int_clear(usbpd_data);
return ret;
@ -751,36 +1108,76 @@ err_free_redriver_gpio:
err_free_irq_gpio:
wake_lock_destroy(&usbpd_data->wlock);
gpio_free(usbpd_data->irq_gpio);
kfree(usbpd_data);
return ret;
}
static int s2mm005_usbpd_remove(struct i2c_client *i2c)
{
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
struct s2mm005_data *usbpd_data = dev_get_drvdata(ccic_device);
process_cc_detach(usbpd_data);
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
devm_dual_role_instance_unregister(usbpd_data->dev, usbpd_data->dual_role);
devm_kfree(usbpd_data->dev, usbpd_data->desc);
#endif
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
ccic_register_switch_device(0);
#endif
wake_lock_destroy(&usbpd_data->wlock);
sysfs_remove_group(&ccic_device->kobj, &ccic_sysfs_group);
if (usbpd_data->irq) {
free_irq(usbpd_data->irq, usbpd_data);
usbpd_data->irq = 0;
}
if (usbpd_data->i2c) {
disable_irq_wake(usbpd_data->i2c->irq);
free_irq(usbpd_data->i2c->irq, usbpd_data);
mutex_destroy(&usbpd_data->i2c_mutex);
i2c_set_clientdata(usbpd_data->i2c, NULL);
}
wake_lock_destroy(&usbpd_data->wlock);
ccic_misc_exit();
return 0;
}
static void s2mm005_usbpd_shutdown(struct i2c_client *i2c)
{
struct s2mm005_data *usbpd_data = i2c_get_clientdata(i2c);
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
struct device_node *np;
int gpio_dp_sw_oe;
#endif
disable_irq(usbpd_data->irq);
if ((usbpd_data->cur_rid != RID_523K) &&
(usbpd_data->cur_rid != RID_619K) &&
(!usbpd_data->manual_lpm_mode))
s2mm005_reset(usbpd_data);
(usbpd_data->cur_rid != RID_619K) &&
(!usbpd_data->manual_lpm_mode)) {
pr_info("%s: pd_state=%d, water=%d, dry=%d\n", __func__,
usbpd_data->pd_state, usbpd_data->water_det, usbpd_data->run_dry);
if (usbpd_data->water_det) {
s2mm005_hard_reset(usbpd_data);
} else {
if (usbpd_data->pd_state) {
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
if (usbpd_data->dp_is_connect) {
pr_info("aux_sw_oe pin set to high\n");
np = of_find_node_by_name(NULL, "displayport");
gpio_dp_sw_oe = of_get_named_gpio(np, "dp,aux_sw_oe", 0);
gpio_direction_output(gpio_dp_sw_oe, 1);
}
#endif
s2mm005_manual_LPM(usbpd_data, 0xB);
mdelay(110);
}
s2mm005_reset(usbpd_data);
}
}
}
#if defined(CONFIG_PM)

487
drivers/ccic/s2mm005_cc.c Normal file → Executable file
View File

@ -27,15 +27,28 @@
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
#include <linux/ccic/ccic_alternate.h>
#endif
#if defined(CONFIG_USB_HOST_NOTIFY)
#include <linux/usb_notify.h>
#endif
#if defined(CONFIG_COMBO_REDRIVER)
#include <linux/combo_redriver/ptn36502.h>
#endif
#if defined(CONFIG_BATTERY_SAMSUNG)
extern unsigned int lpcharge;
#endif
extern struct pdic_notifier_struct pd_noti;
////////////////////////////////////////////////////////////////////////////////
// function definition
////////////////////////////////////////////////////////////////////////////////
void process_cc_water(void * data, LP_STATE_Type *Lp_DATA);
void process_cc_attach(void * data, u8 *plug_attach_done);
void process_cc_detach(void * data);
void process_cc_get_int_status(void *data, uint32_t *pPRT_MSG, MSG_IRQ_STATUS_Type *MSG_IRQ_State);
void process_cc_rid(void * data);
void ccic_event_work(void *data, int dest, int id, int attach, int event);
void ccic_event_work(void *data, int dest, int id, int attach, int event, int
sub);
void process_cc_water_det(void * data);
////////////////////////////////////////////////////////////////////////////////
@ -108,7 +121,7 @@ static void ccic_event_notifier(struct work_struct *data)
ccic_noti.id = event_work->id;
ccic_noti.sub1 = event_work->attach;
ccic_noti.sub2 = event_work->event;
ccic_noti.sub3 = 0;
ccic_noti.sub3 = event_work->sub;
#ifdef CONFIG_USB_TYPEC_MANAGER_NOTIFIER
ccic_noti.pd = &pd_noti;
#endif
@ -117,19 +130,24 @@ static void ccic_event_notifier(struct work_struct *data)
kfree(event_work);
}
void ccic_event_work(void *data, int dest, int id, int attach, int event)
void ccic_event_work(void *data, int dest, int id, int attach, int event, int sub)
{
struct s2mm005_data *usbpd_data = data;
struct ccic_state_work * event_work;
pr_info("usb: %s\n", __func__);
event_work = kmalloc(sizeof(struct ccic_state_work), GFP_ATOMIC);
if (!event_work) {
pr_err("%s: failed to alloc for event_work\n", __func__);
return;
}
INIT_WORK(&event_work->ccic_work, ccic_event_notifier);
event_work->dest = dest;
event_work->id = id;
event_work->attach = attach;
event_work->event = event;
event_work->sub = sub;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
if (id == CCIC_NOTIFY_ID_USB) {
@ -148,6 +166,10 @@ void ccic_event_work(void *data, int dest, int id, int attach, int event)
complete(&usbpd_data->reverse_completion);
}
}
else if (id == CCIC_NOTIFY_ID_ROLE_SWAP ) {
if (usbpd_data->dual_role != NULL)
dual_role_instance_changed(usbpd_data->dual_role);
}
#endif
queue_work(usbpd_data->ccic_wq, &event_work->ccic_work);
@ -229,7 +251,8 @@ static int ccic_set_dual_role(struct dual_role_phy_instance *dual_role,
#if defined(CONFIG_CCIC_NOTIFIER)
/* muic */
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 0/*attach*/, 0/*rprd*/);
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH,
0/*attach*/, 0/*rprd*/, 0);
#endif
/* exit from Disabled state and set mode to UFP */
mode = TYPE_C_ATTACH_UFP;
@ -357,57 +380,209 @@ void process_cc_water_det(void * data)
pr_info("%s\n",__func__);
s2mm005_int_clear(usbpd_data); // interrupt clear
s2mm005_manual_LPM(usbpd_data, 0x9);
#if defined(CONFIG_SEC_FACTORY)
if(!usbpd_data->fac_water_enable)
#endif
{
if(usbpd_data->water_det)
s2mm005_manual_LPM(usbpd_data, 0x9);
}
}
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
void dp_detach(void *data)
{
struct s2mm005_data *usbpd_data = data;
pr_info("%s: dp_is_connect %d\n",__func__, usbpd_data->dp_is_connect);
ccic_event_work(usbpd_data, CCIC_NOTIFY_DEV_USB_DP,
CCIC_NOTIFY_ID_USB_DP, 0/*attach*/,usbpd_data->dp_hs_connect/*drp*/, 0);
ccic_event_work(usbpd_data, CCIC_NOTIFY_DEV_DP,
CCIC_NOTIFY_ID_DP_CONNECT, 0/*attach*/, 0/*drp*/, 0);
usbpd_data->dp_is_connect = 0;
usbpd_data->dp_hs_connect = 0;
usbpd_data->is_sent_pin_configuration = 0;
return;
}
#endif
//////////////////////////////////////////// ////////////////////////////////////
// Moisture detection processing
////////////////////////////////////////////////////////////////////////////////
void process_cc_water(void * data, LP_STATE_Type *Lp_DATA)
{
struct s2mm005_data *usbpd_data = data;
struct i2c_client *i2c = usbpd_data->i2c;
uint32_t R_len;
uint16_t REG_ADD;
pr_info("%s\n",__func__);
/* read reg for water and dry state */
REG_ADD = 0x60;
R_len = 4;
usbpd_data->s2mm005_i2c_err = s2mm005_read_byte(i2c, REG_ADD, Lp_DATA->BYTE, R_len);
dev_info(&i2c->dev, "%s: WATER reg:0x%02X WATER=%d DRY=%d\n", __func__,
Lp_DATA->BYTE[0],
Lp_DATA->BITS.WATER_DET,
Lp_DATA->BITS.RUN_DRY);
if (!usbpd_data->water_detect_support) {
dev_info(&i2c->dev, "%s: It does not support water detection\n", __func__);
return;
}
if (usbpd_data->s2mm005_i2c_err < 0) {
pr_err("%s : i2c err : ret = %d\n", __func__, usbpd_data->s2mm005_i2c_err);
Lp_DATA->BITS.WATER_DET = 0;
Lp_DATA->BITS.RUN_DRY = 1;
Lp_DATA->BITS.BOOTING_RUN_DRY = usbpd_data->booting_run_dry;
} else {
if (Lp_DATA->BITS.WATER_DET == 0
&& Lp_DATA->BITS.RUN_DRY == 0
&& Lp_DATA->BITS.BOOTING_RUN_DRY == 0) {
usbpd_data->s2mm005_i2c_err = s2mm005_read_byte(i2c, REG_ADD, Lp_DATA->BYTE, R_len);
dev_info(&i2c->dev, "Re: %s: WATER reg:0x%02X WATER=%d DRY=%d\n", __func__,
Lp_DATA->BYTE[0],
Lp_DATA->BITS.WATER_DET,
Lp_DATA->BITS.RUN_DRY);
if (!Lp_DATA->BITS.AUTO_LP_ENABLE_BIT) {
if (Lp_DATA->BITS.WATER_DET == 1 || Lp_DATA->BITS.RUN_DRY == 0)
usbpd_data->s2mm005_i2c_err = -CCIC_I2C_VALUE_INVALID;
}
if (usbpd_data->s2mm005_i2c_err < 0) {
pr_err("%s : i2c err : ret = %d\n", __func__, usbpd_data->s2mm005_i2c_err);
Lp_DATA->BITS.WATER_DET = 0;
Lp_DATA->BITS.RUN_DRY = 1;
Lp_DATA->BITS.BOOTING_RUN_DRY = usbpd_data->booting_run_dry;
}
}
}
#if defined(CONFIG_BATTERY_SAMSUNG)
if (lpcharge) {
dev_info(&i2c->dev, "%s: BOOTING_RUN_DRY=%d\n", __func__,
Lp_DATA->BITS.BOOTING_RUN_DRY);
usbpd_data->booting_run_dry = Lp_DATA->BITS.BOOTING_RUN_DRY;
}
#endif
#if defined(CONFIG_SEC_FACTORY)
if (!Lp_DATA->BITS.WATER_DET) {
Lp_DATA->BITS.RUN_DRY = 1;
}
#endif
/* check for dry case */
if (Lp_DATA->BITS.RUN_DRY && !usbpd_data->run_dry) {
dev_info(&i2c->dev, "== WATER RUN-DRY DETECT ==\n");
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_BATTERY, CCIC_NOTIFY_ID_WATER,
0/*attach*/, 0, 0);
}
usbpd_data->run_dry = Lp_DATA->BITS.RUN_DRY;
/* check for water case */
if ((Lp_DATA->BITS.WATER_DET & !usbpd_data->water_det)) {
dev_info(&i2c->dev, "== WATER DETECT ==\n");
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_BATTERY, CCIC_NOTIFY_ID_WATER,
1/*attach*/, 0, 0);
}
usbpd_data->water_det = Lp_DATA->BITS.WATER_DET;
}
////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////// ////////////////////////////////////
// ATTACH processing
////////////////////////////////////////////////////////////////////////////////
void process_cc_attach(void * data,u8 *plug_attach_done)
{
struct s2mm005_data *usbpd_data = data;
struct i2c_client *i2c = usbpd_data->i2c;
uint8_t R_DATA[4];
LP_STATE_Type Lp_DATA;
FUNC_STATE_Type Func_DATA;
uint32_t R_len;
uint16_t REG_ADD;
#if defined(CONFIG_USB_HOST_NOTIFY)
struct otg_notify *o_notify = get_otg_notify();
#endif
printk("%s\n",__func__);
pr_info("%s\n",__func__);
if (usbpd_data->hw_rev >= 9) {
R_DATA[0] = 0x00;
R_DATA[1] = 0x00;
R_DATA[2] = 0x00;
R_DATA[3] = 0x00;
REG_ADD = 0x60;
R_len = 4;
s2mm005_read_byte(i2c, REG_ADD, R_DATA, R_len);
usbpd_data->water_det = R_DATA[0] & (0x1 << 3);
dev_info(&i2c->dev, "%s: WATER reg:0x%02X WATER=%d\n", __func__, R_DATA[0], usbpd_data->water_det);
}
// Check for moisture
process_cc_water(usbpd_data, &Lp_DATA);
if (usbpd_data->water_det) {
dev_info(&i2c->dev, "== WATER DETECT ==\n");
/* Moisture detection is only handled in the disconnected state(LPM). */
return;
} else if(!usbpd_data->run_dry || !usbpd_data->booting_run_dry) {
dev_info(&i2c->dev, " Water? No Dry\n");
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_BATTERY, CCIC_NOTIFY_ID_WATER, 1/*attach*/, 0);
usbpd_data->pd_state = 0;
} else {
R_DATA[0] = 0x00;
R_DATA[1] = 0x00;
R_DATA[2] = 0x00;
R_DATA[3] = 0x00;
REG_ADD = 0x20;
R_len = 4;
CCIC_NOTIFY_DEV_BATTERY, CCIC_NOTIFY_ID_WATER,
1/*attach*/, 0, 0);
s2mm005_read_byte(i2c, REG_ADD, R_DATA, R_len);
dev_info(&i2c->dev, "Rsvd_H:0x%02X PD_Nxt_State:0x%02X Rsvd_L:0x%02X PD_State:%02d\n", R_DATA[3], R_DATA[2], R_DATA[1], R_DATA[0]);
usbpd_data->pd_state = R_DATA[0];
memcpy(&usbpd_data->func_state, &R_DATA, 4);
if (usbpd_data->s2mm005_i2c_err >= 0) {
REG_ADD = 0x20;
R_len = 4;
s2mm005_read_byte(i2c, REG_ADD, Func_DATA.BYTE, R_len);
dev_info(&i2c->dev, "Rsvd_H:0x%02X PD_Nxt_State:0x%02X Rsvd_L:0x%02X PD_State:%02d\n",
Func_DATA.BYTES.RSP_BYTE2,
Func_DATA.BYTES.PD_Next_State,
Func_DATA.BYTES.RSP_BYTE1,
Func_DATA.BYTES.PD_State);
}
return;
} else {
if (usbpd_data->s2mm005_i2c_err < 0) {
if (usbpd_data->pd_state == State_PE_Initial_detach)
return;
Func_DATA.DATA = 0;
} else {
REG_ADD = 0x20;
R_len = 4;
s2mm005_read_byte(i2c, REG_ADD, Func_DATA.BYTE, R_len);
dev_info(&i2c->dev, "Rsvd_H:0x%02X PD_Nxt_State:0x%02X Rsvd_L:0x%02X PD_State:%02d\n",
Func_DATA.BYTES.RSP_BYTE2,
Func_DATA.BYTES.PD_Next_State,
Func_DATA.BYTES.RSP_BYTE1,
Func_DATA.BYTES.PD_State);
}
#if defined(CONFIG_USB_HW_PARAM)
if (!usbpd_data->pd_state && Func_DATA.BYTES.PD_State && Func_DATA.BITS.VBUS_CC_Short)
inc_hw_param(o_notify, USB_CCIC_VBUS_CC_SHORT_COUNT);
#endif
usbpd_data->pd_state = Func_DATA.BYTES.PD_State;
usbpd_data->func_state = Func_DATA.DATA;
dev_info(&i2c->dev, "func_state :0x%X, is_dfp : %d, is_src : %d\n", usbpd_data->func_state, \
(usbpd_data->func_state & (0x1 << 26) ? 1 : 0), (usbpd_data->func_state & (0x1 << 25) ? 1 : 0));
if (Func_DATA.BITS.RESET) {
dev_info(&i2c->dev, "ccic reset detected\n");
if (!Lp_DATA.BITS.AUTO_LP_ENABLE_BIT) {
/* AUTO LPM Enable */
s2mm005_manual_LPM(usbpd_data, 6);
}
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
set_enable_alternate_mode(ALTERNATE_MODE_START);
#endif
}
if(usbpd_data->pd_state == State_PE_SRC_Wait_New_Capabilities && Lp_DATA.BITS.Sleep_Cable_Detect)
{
s2mm005_manual_LPM(usbpd_data, 0x0D);
return;
}
}
#ifdef CONFIG_USB_NOTIFY_PROC_LOG
store_usblog_notify(NOTIFY_FUNCSTATE, (void*)&usbpd_data->pd_state, NULL);
@ -435,29 +610,47 @@ void process_cc_attach(void * data,u8 *plug_attach_done)
dev_info(&i2c->dev, "%s %d: pd_state:%02d, turn off client\n",
__func__, __LINE__, usbpd_data->pd_state);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 0/*attach*/, 0/*rprd*/);
CCIC_NOTIFY_DEV_MUIC,
CCIC_NOTIFY_ID_ATTACH,
0/*attach*/, 0/*rprd*/,0);
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = DUAL_ROLE_PROP_PR_NONE;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
#endif
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/);
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/,0);
usbpd_data->is_client = CLIENT_OFF;
msleep(300);
}
if (usbpd_data->is_host == HOST_OFF) {
/* muic */
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 1/*attach*/, 1/*rprd*/);
CCIC_NOTIFY_DEV_MUIC,
CCIC_NOTIFY_ID_ATTACH, 1/*attach*/, 1/*rprd*/,0);
/* otg */
usbpd_data->is_host = HOST_ON_BY_RD;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = DUAL_ROLE_PROP_PR_SRC;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 1);
#endif
#endif
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 1/*attach*/, USB_STATUS_NOTIFY_ATTACH_DFP/*drp*/);
msleep(100);
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
1/*attach*/, USB_STATUS_NOTIFY_ATTACH_DFP/*drp*/, 0);
#if defined(CONFIG_COMBO_REDRIVER)
mdelay(8);
ptn36502_config(USB3_ONLY_MODE, DFP);
#endif
/* add to turn on external 5V */
if (!is_blocked(o_notify, NOTIFY_BLOCK_TYPE_HOST))
#if defined(CONFIG_USB_HOST_NOTIFY)
if (is_blocked(o_notify, NOTIFY_BLOCK_TYPE_HOST))
s2mm005_set_upsm_mode();
else
#endif
vbus_turn_on_ctrl(1);
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
// only start alternate mode at DFP state
@ -479,31 +672,66 @@ void process_cc_attach(void * data,u8 *plug_attach_done)
if (usbpd_data->is_host == HOST_ON_BY_RD) {
dev_info(&i2c->dev, "%s %d: pd_state:%02d, turn off host\n",
__func__, __LINE__, usbpd_data->pd_state);
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
if (usbpd_data->dp_is_connect == 1) {
dp_detach(usbpd_data);
}
#endif
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 0/*attach*/, 1/*rprd*/);
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 0/*attach*/, 1/*rprd*/, 0);
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = DUAL_ROLE_PROP_PR_NONE;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
#endif
/* add to turn off external 5V */
vbus_turn_on_ctrl(0);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/);
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/, 0);
usbpd_data->is_host = HOST_OFF;
msleep(300);
}
/* muic */
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 1/*attach*/, 0/*rprd*/);
if (Lp_DATA.BITS.PDSTATE29_SBU_DONE) {
dev_info(&i2c->dev, "%s SBU check done\n", __func__);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH,
1/*attach*/, 0/*rprd*/,
(Func_DATA.BITS.VBUS_CC_Short || Func_DATA.BITS.VBUS_SBU_Short) ? Rp_Abnormal:Func_DATA.BITS.RP_CurrentLvl);
} else {
/* muic */
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH,
1/*attach*/, 0/*rprd*/, Rp_Sbu_check);
}
if (usbpd_data->is_client == CLIENT_OFF && usbpd_data->is_host == HOST_OFF) {
/* usb */
usbpd_data->is_client = CLIENT_ON;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = DUAL_ROLE_PROP_PR_SNK;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
#endif
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 1/*attach*/, USB_STATUS_NOTIFY_ATTACH_UFP/*drp*/);
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
1/*attach*/, USB_STATUS_NOTIFY_ATTACH_UFP/*drp*/, 0);
#if defined(CONFIG_COMBO_REDRIVER)
ptn36502_config(USB3_ONLY_MODE, UFP);
#endif
}
break;
case State_PE_PRS_SRC_SNK_Transition_to_off:
dev_info(&i2c->dev, "%s State_PE_PRS_SRC_SNK_Transition_to_off! \n", __func__);
vbus_turn_on_ctrl(0);
break;
case State_PE_PRS_SNK_SRC_Source_on:
dev_info(&i2c->dev,"%s State_PE_PRS_SNK_SRC_Source_on! \n", __func__);
vbus_turn_on_ctrl(1);
break;
default :
break;
}
@ -513,12 +741,29 @@ void process_cc_attach(void * data,u8 *plug_attach_done)
usbpd_data->plug_rprd_sel = 0;
usbpd_data->is_dr_swap = 0;
usbpd_data->is_pr_swap = 0;
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
if (usbpd_data->dp_is_connect == 1) {
dp_detach(usbpd_data);
}
if ( usbpd_data->acc_type != CCIC_DOCK_DETACHED ) {
pr_info("%s: schedule_delayed_work - pd_state : %d\n", __func__, usbpd_data->pd_state);
if (usbpd_data->acc_type == CCIC_DOCK_HMT ) {
schedule_delayed_work(&usbpd_data->acc_detach_work, msecs_to_jiffies(GEAR_VR_DETACH_WAIT_MS));
} else {
schedule_delayed_work(&usbpd_data->acc_detach_work, msecs_to_jiffies(0));
}
}
#endif
#if defined(CONFIG_CCIC_NOTIFIER)
/* muic */
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 0/*attach*/, 0/*rprd*/);
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH,
0/*attach*/, 0/*rprd*/, 0);
#if defined(CONFIG_COMBO_REDRIVER)
ptn36502_config(INIT_MODE, 0);
#endif
if(usbpd_data->is_host > HOST_OFF || usbpd_data->is_client > CLIENT_OFF) {
if(usbpd_data->is_host > HOST_OFF)
if(usbpd_data->is_host > HOST_OFF || usbpd_data->power_role == DUAL_ROLE_PROP_PR_SRC)
vbus_turn_on_ctrl(0);
/* usb or otg */
dev_info(&i2c->dev, "%s %d: pd_state:%02d, is_host = %d, is_client = %d\n",
@ -527,30 +772,53 @@ void process_cc_attach(void * data,u8 *plug_attach_done)
usbpd_data->is_client = CLIENT_OFF;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = DUAL_ROLE_PROP_PR_NONE;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
#endif
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/);
msleep(300);
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/, 0);
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
if (!usbpd_data->try_state_change)
s2mm005_rprd_mode_change(usbpd_data, TYPE_C_ATTACH_DRP);
#endif
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
if ( usbpd_data->acc_type != CCIC_DOCK_DETACHED )
{
pr_info("%s: schedule_delayed_work - pd_state : %d\n", __func__, usbpd_data->pd_state);
if (usbpd_data->acc_type == CCIC_DOCK_HMT ) {
schedule_delayed_work(&usbpd_data->acc_detach_work, msecs_to_jiffies(GEAR_VR_DETACH_WAIT_MS));
} else {
schedule_delayed_work(&usbpd_data->acc_detach_work, msecs_to_jiffies(0));
}
}
#endif
}
#endif
}
}
//////////////////////////////////////////// ////////////////////////////////////
// Detach processing
// 1. Used when the s2mm005 unbind case
////////////////////////////////////////////////////////////////////////////////
void process_cc_detach(void * data)
{
struct s2mm005_data *usbpd_data = data;
#if defined(CONFIG_USB_HOST_NOTIFY)
struct otg_notify *o_notify = get_otg_notify();
#endif
if (usbpd_data->pd_state) {
usbpd_data->pd_state = State_PE_Initial_detach;
#if defined(CONFIG_CCIC_NOTIFIER)
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH, 0/*attach*/, 0/*rprd*/, 0);
#endif
if(usbpd_data->is_host > HOST_OFF)
vbus_turn_on_ctrl(0);
usbpd_data->is_host = HOST_OFF;
usbpd_data->is_client = CLIENT_OFF;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = DUAL_ROLE_PROP_PR_NONE;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
#endif
}
}
////////////////////////////////////////////////////////////////////////////////
// Get staus interrupt register
////////////////////////////////////////////////////////////////////////////////
@ -564,6 +832,7 @@ void process_cc_get_int_status(void *data, uint32_t *pPRT_MSG, MSG_IRQ_STATUS_Ty
uint32_t IrqPrint;
VDM_MSG_IRQ_STATUS_Type VDM_MSG_IRQ_State;
SSM_MSG_IRQ_STATUS_Type SSM_MSG_IRQ_State;
AP_REQ_GET_STATUS_Type AP_REQ_GET_State;
pr_info("%s\n",__func__);
for(cnt = 0;cnt < 48;cnt++)
@ -583,7 +852,7 @@ void process_cc_get_int_status(void *data, uint32_t *pPRT_MSG, MSG_IRQ_STATUS_Ty
dev_info(&i2c->dev, "MSG IRQ Status = 0x%08X\n",pPRT_MSG[4]);
dev_info(&i2c->dev, "VDM IRQ Status = 0x%08X\n",pPRT_MSG[5]);
dev_info(&i2c->dev, "SSM_MSG IRQ Status = 0x%08X\n",pPRT_MSG[6]);
dev_info(&i2c->dev, "DBG_VDM IRQ Status = 0x%08X\n",pPRT_MSG[7]);
dev_info(&i2c->dev, "AP REQ GET Status = 0x%08X\n",pPRT_MSG[7]);
dev_info(&i2c->dev, "0x50 IRQ Status = 0x%08X\n",pPRT_MSG[8]);
dev_info(&i2c->dev, "0x54 IRQ Status = 0x%08X\n",pPRT_MSG[9]);
@ -591,6 +860,19 @@ void process_cc_get_int_status(void *data, uint32_t *pPRT_MSG, MSG_IRQ_STATUS_Ty
MSG_IRQ_State->DATA = pPRT_MSG[4];
VDM_MSG_IRQ_State.DATA = pPRT_MSG[5];
SSM_MSG_IRQ_State.DATA = pPRT_MSG[6];
AP_REQ_GET_State.DATA = pPRT_MSG[7];
#if defined(CONFIG_SEC_FACTORY)
if((AP_REQ_GET_State.BYTES[0] >> 5) > 0) {
dev_info(&i2c->dev, "FAC: Repeat_State:%d, Repeat_RID:%d, RID0:%d\n",
AP_REQ_GET_State.BITS.FAC_Abnormal_Repeat_State,
AP_REQ_GET_State.BITS.FAC_Abnormal_Repeat_RID,
AP_REQ_GET_State.BITS.FAC_Abnormal_RID0);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_CCIC, CCIC_NOTIFY_ID_FAC,
AP_REQ_GET_State.BYTES[0] >> 5, 0, 0); // b5~b7
}
#endif
IrqPrint = 1;
for(cnt=0;cnt<32;cnt++)
@ -605,30 +887,54 @@ void process_cc_get_int_status(void *data, uint32_t *pPRT_MSG, MSG_IRQ_STATUS_Ty
{
usbpd_data->is_dr_swap++;
dev_info(&i2c->dev, "is_dr_swap count : 0x%x\n", usbpd_data->is_dr_swap);
if (usbpd_data->is_host == HOST_ON_BY_RD) {
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/);
msleep(300);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 1/*attach*/, USB_STATUS_NOTIFY_ATTACH_UFP/*drp*/);
usbpd_data->is_host = HOST_OFF;
usbpd_data->is_client = CLIENT_ON;
} else if (usbpd_data->is_client == CLIENT_ON) {
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/);
msleep(300);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 1/*attach*/, USB_STATUS_NOTIFY_ATTACH_DFP/*drp*/);
usbpd_data->is_host = HOST_ON_BY_RD;
usbpd_data->is_client = CLIENT_OFF;
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
if (usbpd_data->dp_is_connect)
{
dev_info(&i2c->dev, "dr_swap is skiped, current status is dp mode !!\n");
}
else
{
#endif
if (usbpd_data->is_host == HOST_ON_BY_RD) {
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/, 0);
msleep(300);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_ATTACH,
1/*attach*/, 0/*rprd*/,0);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
1/*attach*/, USB_STATUS_NOTIFY_ATTACH_UFP/*drp*/, 0);
usbpd_data->is_host = HOST_OFF;
usbpd_data->is_client = CLIENT_ON;
} else if (usbpd_data->is_client == CLIENT_ON) {
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/, 0);
msleep(300);
ccic_event_work(usbpd_data, CCIC_NOTIFY_DEV_MUIC,
CCIC_NOTIFY_ID_ATTACH, 1/*attach*/, 1/*rprd*/,0);
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
1/*attach*/,
USB_STATUS_NOTIFY_ATTACH_DFP/*drp*/, 0);
usbpd_data->is_host = HOST_ON_BY_RD;
usbpd_data->is_client = CLIENT_OFF;
}
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
}
#endif
}
#if defined(CONFIG_CCIC_ALTERNATE_MODE)
if(VDM_MSG_IRQ_State.DATA)
if(VDM_MSG_IRQ_State.DATA && usbpd_data->is_host == HOST_ON_BY_RD)
receive_alternate_message(usbpd_data, &VDM_MSG_IRQ_State);
if(SSM_MSG_IRQ_State.BITS.Ssm_Flag_Unstructured_Data)
if(SSM_MSG_IRQ_State.BITS.Ssm_Flag_Unstructured_Data
&& usbpd_data->is_host == HOST_ON_BY_RD)
receive_unstructured_vdm_message(usbpd_data, &SSM_MSG_IRQ_State);
if(!AP_REQ_GET_State.BITS.Alt_Mode_By_I2C)
set_enable_alternate_mode(ALTERNATE_MODE_RESET);
#endif
}
@ -641,6 +947,9 @@ void process_cc_rid(void *data)
struct i2c_client *i2c = usbpd_data->i2c;
static int prev_rid = RID_OPEN;
u8 rid;
#if defined(CONFIG_USB_HOST_NOTIFY)
struct otg_notify *o_notify = get_otg_notify();
#endif
pr_info("%s\n",__func__);
s2mm005_read_byte_16(i2c, 0x50, &rid); // fundtion read , 0x20 , 0x0:detach , not 0x0 attach : source 3,6,7 / sink 16:17:21:29(decimanl)
@ -653,21 +962,21 @@ void process_cc_rid(void *data)
#if defined(CONFIG_CCIC_NOTIFIER)
/* rid */
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_RID, rid/*rid*/, 0);
CCIC_NOTIFY_DEV_MUIC, CCIC_NOTIFY_ID_RID,
rid/*rid*/, 0, 0);
if(rid == RID_OPEN || rid == RID_UNDEFINED || rid == RID_523K || rid == RID_619K) {
if (prev_rid == RID_000K) {
/* add to turn off external 5V */
vbus_turn_on_ctrl(0);
}
usbpd_data->is_host = HOST_OFF;
if( rid == RID_523K || rid == RID_619K) {
usbpd_data->is_client = CLIENT_OFF;
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = DUAL_ROLE_PROP_PR_NONE;
#if defined(CONFIG_USB_HOST_NOTIFY)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
#endif
/* usb or otg */
ccic_event_work(usbpd_data,
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB, 0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/);
CCIC_NOTIFY_DEV_USB, CCIC_NOTIFY_ID_USB,
0/*attach*/, USB_STATUS_NOTIFY_DETACH/*drp*/, 0);
}
#endif
}

View File

@ -4,17 +4,16 @@
#include <linux/ccic/s2mm005_ext.h>
#include <linux/ccic/s2mm005_fw.h>
#include <linux/ccic/ccic_sysfs.h>
#include <linux/ccic/BOOT_FLASH_FW.h>
#include <linux/ccic/BOOT_FLASH_FW_BOOT3.h>
#include <linux/ccic/BOOT_FLASH_FW_BOOT4.h>
#include <linux/ccic/BOOT_FLASH_FW_BOOT5.h>
#include <linux/ccic/BOOT_FLASH_FW_BOOT5_NODPDM.h>
#include <linux/ccic/BOOT_FLASH_FW_BOOT6.h>
#include <linux/ccic/BOOT_FLASH_FW_BOOT7.h>
#include <linux/ccic/BOOT_SRAM_FW.h>
#include <linux/ccic/BOOT_FLASH_FW_0x0D_BOOT8.h>
#define S2MM005_FIRMWARE_PATH "usbpd/s2mm005.bin"
#define FW_CHECK_RETRY 5
#define VALID_FW_BOOT_VERSION(fw_boot) (fw_boot == 0x8)
#define VALID_FW_MAIN_VERSION(fw_main) \
(!((fw_main[0] == 0xff) && (fw_main[1] == 0xff)) \
&& !((fw_main[0] == 0x00) && (fw_main[1] == 0x00)))
const char *flashmode_to_string(u32 mode)
{
switch (mode) {
@ -26,27 +25,6 @@ const char *flashmode_to_string(u32 mode)
return "?";
}
int s2mm005_sram_write(const struct i2c_client *i2c)
{
int ret = 0;
struct i2c_msg msg[1];
struct s2mm005_data *usbpd_data = i2c_get_clientdata(i2c);
pr_err("%s size:%d\n", __func__, BOOT_SRAM_FW_SIZE);
mutex_lock(&usbpd_data->i2c_mutex);
msg[0].addr = 0x3B; /* Slave addr 0x76 */
msg[0].flags = 0;
msg[0].len = BOOT_SRAM_FW_SIZE;
msg[0].buf = (u8 *)&BOOT_SRAM_FW[0];
ret = i2c_transfer(i2c->adapter, msg, 1);
if (ret < 0)
dev_err(&i2c->dev, "i2c write fail error %d\n", ret);
mutex_unlock(&usbpd_data->i2c_mutex);
return ret;
}
void s2mm005_write_flash(const struct i2c_client *i2c,
unsigned int fAddr, unsigned int fData) {
u8 data[8];
@ -158,12 +136,15 @@ static int s2mm005_flash_write(struct s2mm005_data *usbpd_data, unsigned char *f
pFlash_FWCS = (uint32_t *)fw_data;
fw_hd = (struct s2mm005_fw*)fw_data;
size = fw_hd -> size;
if(fw_hd -> boot < 6)
sLopCnt = 0x1000/4;
else if (fw_hd -> boot == 6)
sLopCnt = 0x8000/4;
else if (fw_hd -> boot >= 7)
sLopCnt = 0x7000/4;
sLopCnt = 0x1000/4;
else if (fw_hd -> boot == 6)
sLopCnt = 0x8000/4;
else if (fw_hd -> boot == 7)
sLopCnt = 0x7000/4;
else if (fw_hd -> boot >= 8)
sLopCnt = 0x5000/4;
/* Flash write */
for (LopCnt = sLopCnt; LopCnt < (size/4); LopCnt++) {
@ -278,47 +259,69 @@ static int s2mm005_flash_write(struct s2mm005_data *usbpd_data, unsigned char *f
return 0;
}
void s2mm005_flash_ready(struct s2mm005_data *usbpd_data)
{
struct i2c_client *i2c = usbpd_data->i2c;
u8 W_DATA[5];
/* FLASH_READY */
W_DATA[0] = 0x02;
W_DATA[1] = 0x01;
W_DATA[2] = 0x30;
W_DATA[3] = 0x50;
W_DATA[4] = 0x01;
s2mm005_write_byte(i2c, CMD_MODE_0x10, &W_DATA[0], 5);
}
int s2mm005_flash(struct s2mm005_data *usbpd_data, unsigned int input)
{
struct i2c_client *i2c = usbpd_data->i2c;
u8 val, reg;
u8 W_DATA[5];
int ret = 0;
static int retry = 0;
uint32_t *pFlash_FW;
uint32_t LopCnt, fAddr, fData, fRData;
int retry = 0;
struct s2mm005_fw *fw_hd;
struct file *fp;
mm_segment_t old_fs;
long fw_size, nread;
int irq_gpio_status;
FLASH_STATE_Type Flash_DATA;
switch (input) {
case FLASH_MODE_ENTER: { /* enter flash mode */
/* FLASH_READY */
s2mm005_flash_ready(usbpd_data);
do {
/* FLASH_READY */
W_DATA[0] = 0x02;
W_DATA[1] = 0x01;
W_DATA[2] = 0x30;
W_DATA[3] = 0x50;
W_DATA[4] = 0x01;
s2mm005_write_byte(i2c, CMD_MODE_0x10, &W_DATA[0], 5);
/* FLASH_MODE */
reg = FLASH_MODE_ENTER_0x10;
s2mm005_write_byte(i2c, CMD_MODE_0x10, &reg, 1);
usleep_range(10 * 1000, 10 * 1000);
s2mm005_read_byte_flash(i2c, FLASH_STATUS_0x24, &val, 1);
pr_err("flash mode : %s retry %d\n", flashmode_to_string(val), retry);
usleep_range(10 * 1000, 10 * 1000);
retry++;
if(retry == 15) {
s2mm005_reset(usbpd_data);
msleep(3000);
} else if (retry == 30) {
panic("Flash mode change fail!\n");
usleep_range(50 * 1000, 50 * 1000);
/* If irq status is not clear, CCIC can not enter flash mode. */
irq_gpio_status = gpio_get_value(usbpd_data->irq_gpio);
dev_info(&i2c->dev, "%s IRQ0:%02d\n", __func__, irq_gpio_status);
if(!irq_gpio_status) {
s2mm005_int_clear(usbpd_data); // interrupt clear
usleep_range(10 * 1000, 10 * 1000);
}
s2mm005_read_byte_flash(i2c, FLASH_STATUS_0x24, &val, 1);
pr_err("%s %s retry %d\n", __func__, flashmode_to_string(val), retry);
usleep_range(50*1000, 50*1000);
s2mm005_read_byte(i2c, 0x24, Flash_DATA.BYTE, 4);
dev_info(&i2c->dev, "Flash_State:0x%02X Reserved:0x%06X\n",
Flash_DATA.BITS.Flash_State, Flash_DATA.BITS.Reserved);
if(val != FLASH_MODE_FLASH) {
retry++;
if(retry == 10) {
/* RESET */
s2mm005_reset(usbpd_data);
msleep(3000);
/* FLASH_READY */
s2mm005_flash_ready(usbpd_data);
} else if (retry == 20) {
panic("Flash mode change fail!\n");
}
}
} while (val != FLASH_MODE_FLASH);
break;
}
@ -331,28 +334,14 @@ int s2mm005_flash(struct s2mm005_data *usbpd_data, unsigned int input)
pr_err("flash mode : %s\n", flashmode_to_string(val));
break;
}
case FLASH_WRITE: { /* write flash & verify */
ret = s2mm005_flash_write(usbpd_data, (unsigned char*)&BOOT_FLASH_FW[0]);
break;
}
case FLASH_WRITE3: { /* write flash & verify */
ret = s2mm005_flash_write(usbpd_data, (unsigned char*)&BOOT_FLASH_FW_BOOT3[0]);
break;
}
case FLASH_WRITE4: { /* write flash & verify */
ret = s2mm005_flash_write(usbpd_data, (unsigned char*)&BOOT_FLASH_FW_BOOT4[0]);
break;
}
case FLASH_WRITE5: { /* write flash & verify */
ret = s2mm005_flash_write(usbpd_data, (unsigned char*)&BOOT_FLASH_FW_BOOT5[0]);
break;
}
case FLASH_WRITE6: { /* write flash & verify */
ret = s2mm005_flash_write(usbpd_data, (unsigned char*)&BOOT_FLASH_FW_BOOT6[0]);
break;
}
case FLASH_WRITE7: { /* write flash & verify */
ret = s2mm005_flash_write(usbpd_data, (unsigned char*)&BOOT_FLASH_FW_BOOT7[0]);
case FLASH_WRITE8: { /* write flash & verify */
switch (usbpd_data->s2mm005_fw_product_id) {
case PRODUCT_NUM_VIEW2:
ret = s2mm005_flash_write(usbpd_data, (unsigned char*)&BOOT_FLASH_FW_0x0D_BOOT8[0]);
break;
default:
break;
}
break;
}
case FLASH_WRITE_UMS: {
@ -363,7 +352,8 @@ int s2mm005_flash(struct s2mm005_data *usbpd_data, unsigned int input)
pr_err("%s: failed to open %s.\n", __func__,
CCIC_DEFAULT_UMS_FW);
ret = -ENOENT;
goto done;
set_fs(old_fs);
return ret;
}
fw_size = fp->f_path.dentry->d_inode->i_size;
@ -395,38 +385,6 @@ int s2mm005_flash(struct s2mm005_data *usbpd_data, unsigned int input)
set_fs(old_fs);
break;
}
case FLASH_SRAM: { /* write flash & verify */
fw_hd = (struct s2mm005_fw*)&BOOT_FLASH_FW_BOOT4;
reg = FLASH_WRITE_0x42;
s2mm005_write_byte(i2c, CMD_MODE_0x10, &reg, 1);
reg = FLASH_WRITING_BYTE_SIZE_0x4;
s2mm005_write_byte(i2c, CMD_HOST_0x11, &reg, 1);
s2mm005_read_byte_flash(i2c, FLASH_STATUS_0x24, &val, 1);
pFlash_FW = (uint32_t *)&BOOT_FLASH_FW_BOOT4[0];
fAddr = 0x00000000;
for ((LopCnt = 0); LopCnt < (fw_hd->size/4); LopCnt++) {
fAddr = LopCnt*4;
fData = pFlash_FW[LopCnt];
s2mm005_write_flash(i2c, fAddr, fData);
s2mm005_verify_flash(i2c, fAddr, &fRData);
if (fData != fRData) {
pr_err("Verify Error Address = 0x%08X WData = 0x%08X VData = 0x%08X\n", fAddr, fData, fRData);
return -EFLASH_VERIFY;
}
}
if (LopCnt >= (fw_hd->size/4)) {
fAddr = 0xeFFC;
fData = 0x1;
s2mm005_write_flash(i2c, fAddr, fData);
s2mm005_verify_flash(i2c, fAddr, &fRData);
if (fData != fRData) {
pr_err("Verify Error Address = 0x%08X WData = 0x%08X VData = 0x%08X\n", fAddr, fData, fRData);
return -EFLASH_VERIFY;
}
}
break;
}
case FLASH_MODE_EXIT: { /* exit flash mode */
reg = FLASH_MODE_EXIT_0x20;
s2mm005_write_byte(i2c, CMD_MODE_0x10, &reg, 1);
@ -440,26 +398,19 @@ int s2mm005_flash(struct s2mm005_data *usbpd_data, unsigned int input)
}
}
done:
return ret;
}
void s2mm005_get_fw_version(struct s2mm005_version *version, u8 boot_version, u32 hw_rev)
void s2mm005_get_fw_version(int product_id,
struct s2mm005_version *version, u8 boot_version, u32 hw_rev)
{
struct s2mm005_fw *fw_hd;
switch (boot_version) {
case 5:
fw_hd = (struct s2mm005_fw*) BOOT_FLASH_FW_BOOT5;
break;
case 6:
fw_hd = (struct s2mm005_fw*) BOOT_FLASH_FW_BOOT6;
break;
case 7:
fw_hd = (struct s2mm005_fw*) BOOT_FLASH_FW_BOOT7;
break;
default:
fw_hd = (struct s2mm005_fw*) BOOT_FLASH_FW_BOOT7;
break;
switch (product_id) {
case PRODUCT_NUM_VIEW2:
default:
fw_hd = (struct s2mm005_fw*) BOOT_FLASH_FW_0x0D_BOOT8;
break;
}
version->boot = fw_hd->boot;
version->main[0] = fw_hd->main[0];
@ -474,15 +425,27 @@ void s2mm005_get_chip_hwversion(struct s2mm005_data *usbpd_data,
s2mm005_read_byte_flash(i2c, 0x0, (u8 *)&version->boot, 1);
s2mm005_read_byte_flash(i2c, 0x1, (u8 *)&version->main, 3);
s2mm005_read_byte_flash(i2c, 0x4, (u8 *)&version->ver2, 4);
}
void s2mm005_get_chip_swversion(struct s2mm005_data *usbpd_data,
struct s2mm005_version *version)
{
struct i2c_client *i2c = usbpd_data->i2c;
int i;
s2mm005_read_byte_flash(i2c, 0x8, (u8 *)&version->boot, 1);
s2mm005_read_byte_flash(i2c, 0x9, (u8 *)&version->main, 3);
for(i=0; i < FW_CHECK_RETRY; i++) {
s2mm005_read_byte_flash(i2c, 0x8, (u8 *)&version->boot, 1);
if(VALID_FW_BOOT_VERSION(version->boot))
break;
}
for(i=0; i < FW_CHECK_RETRY; i++) {
s2mm005_read_byte_flash(i2c, 0x9, (u8 *)&version->main, 3);
if(VALID_FW_MAIN_VERSION(version->main))
break;
}
for (i = 0; i < FW_CHECK_RETRY; i++)
s2mm005_read_byte_flash(i2c, 0xc, (u8 *)&version->ver2, 4);
}
int s2mm005_check_version(struct s2mm005_version *version1,
@ -500,42 +463,32 @@ int s2mm005_check_version(struct s2mm005_version *version1,
int s2mm005_flash_fw(struct s2mm005_data *usbpd_data, unsigned int input)
{
struct i2c_client *i2c = usbpd_data->i2c;
int ret = 0;
u8 val = 0;
u8 check[4];
struct device *i2c_dev = i2c->dev.parent->parent;
struct pinctrl *i2c_pinctrl;
if( usbpd_data->fw_product_num != PRODUCT_NUM)
if( usbpd_data->fw_product_id != usbpd_data->s2mm005_fw_product_id)
{
pr_err("FW_UPDATE fail, product number is different (%d)(%d) \n", usbpd_data->fw_product_num,PRODUCT_NUM);
pr_err("FW_UPDATE fail, product number is different (%d)(%d) \n", usbpd_data->fw_product_id,usbpd_data->s2mm005_fw_product_id);
return 0;
}
pr_err("FW_UPDATE %d\n", input);
switch (input) {
case FLASH_WRITE3:
case FLASH_WRITE4:
case FLASH_WRITE5:
case FLASH_WRITE6:
case FLASH_WRITE7:
case FLASH_WRITE: {
case FLASH_WRITE8:
s2mm005_flash(usbpd_data, FLASH_MODE_ENTER);
usleep_range(10 * 1000, 10 * 1000);
s2mm005_flash(usbpd_data, FLASH_ERASE);
msleep(200);
ret = s2mm005_flash(usbpd_data, input);
if (ret < 0)
panic("infinite write fail!\n");
return ret;
usleep_range(10 * 1000, 10 * 1000);
s2mm005_flash(usbpd_data, FLASH_MODE_EXIT);
usleep_range(10 * 1000, 10 * 1000);
s2mm005_reset(usbpd_data);
usleep_range(10 * 1000, 10 * 1000);
break;
}
case FLASH_WRITE_UMS: {
case FLASH_WRITE_UMS:
s2mm005_read_byte_flash(usbpd_data->i2c, FLASH_STATUS_0x24, &val, 1);
if(val != FLASH_MODE_NORMAL) {
pr_err("Can't CCIC FW update: cause by %s\n", flashmode_to_string(val));
@ -558,50 +511,9 @@ int s2mm005_flash_fw(struct s2mm005_data *usbpd_data, unsigned int input)
s2mm005_manual_LPM(usbpd_data, 0x6); // LP On
enable_irq(usbpd_data->irq);
break;
}
case FLASH_SRAM: {
s2mm005_system_reset(usbpd_data);
s2mm005_reset_enable(usbpd_data);
s2mm005_sram_reset(usbpd_data);
i2c_pinctrl = devm_pinctrl_get_select(i2c_dev, "om_high");
if (IS_ERR(i2c_pinctrl))
pr_err("could not set om high pins\n");
s2mm005_hard_reset(usbpd_data);
s2mm005_sram_write(i2c);
usleep_range(1 * 1000, 1 * 1000);
check[0] = 0x02;
check[1] = 0x40; /* long read */
check[2] = 0x00;
check[3] = 0x20;
s2mm005_write_byte(i2c, 0x10, &check[0], 4);
s2mm005_read_byte_flash(i2c, 0x14, &check[0], 4);
pr_err("%s sram write size:%2x,%2x,%2x,%2x\n",__func__,check[3],check[2],check[1],check[0]);
ret = s2mm005_read_byte_flash(i2c, 0xC, &check[0], 4);
pr_err("%s sram check :%2x,%2x,%2x,%2x\n",__func__,check[3],check[2],check[1],check[0]);
s2mm005_flash(usbpd_data, FLASH_MODE_ENTER);
usleep_range(10 * 1000, 10 * 1000);
s2mm005_flash(usbpd_data, FLASH_ERASE);
ret = s2mm005_flash(usbpd_data, input);
if (ret < 0)
panic("infinite write fail!\n");
usleep_range(10 * 1000, 10 * 1000);
s2mm005_flash(usbpd_data, FLASH_MODE_EXIT);
i2c_pinctrl = devm_pinctrl_get_select(i2c_dev, "om_input");
if (IS_ERR(i2c_pinctrl))
pr_err("could not set reset pins\n");
s2mm005_hard_reset(usbpd_data);
usleep_range(10 * 1000, 10 * 1000);
default:
break;
}
default: {
break;
}
}
return 0;
}

163
drivers/ccic/s2mm005_pd.c Normal file → Executable file
View File

@ -23,6 +23,9 @@
#if defined(CONFIG_BATTERY_NOTIFIER)
#include <linux/battery/battery_notifier.h>
#endif
#if defined(CONFIG_USB_HOST_NOTIFY)
#include <linux/usb_notify.h>
#endif
struct pdic_notifier_struct pd_noti;
@ -102,88 +105,92 @@ void vbus_turn_on_ctrl(bool enable)
}
static int s2mm005_src_capacity_information(const struct i2c_client *i2c, uint32_t *RX_SRC_CAPA_MSG,
PDIC_SINK_STATUS * pd_sink_status)
PDIC_SINK_STATUS * pd_sink_status, uint8_t *do_power_nego)
{
uint32_t RdCnt;
uint32_t PDO_cnt;
uint32_t PDO_sel;
int available_pdo_num = 0;
int num_of_obj = 0;
MSG_HEADER_Type *MSG_HDR;
SRC_FIXED_SUPPLY_Typedef *MSG_FIXED_SUPPLY;
SRC_VAR_SUPPLY_Typedef *MSG_VAR_SUPPLY;
SRC_BAT_SUPPLY_Typedef *MSG_BAT_SUPPLY;
dev_info(&i2c->dev, "\n\r");
dev_info(&i2c->dev, "\n");
for(RdCnt=0;RdCnt<8;RdCnt++)
{
dev_info(&i2c->dev, "Rd_SRC_CAPA_%d : 0x%X\n\r", RdCnt, RX_SRC_CAPA_MSG[RdCnt]);
dev_info(&i2c->dev, "Rd_SRC_CAPA_%d : 0x%X\n", RdCnt, RX_SRC_CAPA_MSG[RdCnt]);
}
MSG_HDR = (MSG_HEADER_Type *)&RX_SRC_CAPA_MSG[0];
dev_info(&i2c->dev, "\n\r");
dev_info(&i2c->dev, "=======================================\n\r");
dev_info(&i2c->dev, " MSG Header\n\r");
dev_info(&i2c->dev, "=======================================\n");
dev_info(&i2c->dev, " MSG Header\n");
dev_info(&i2c->dev, " Rsvd_msg_header : %d\n\r",MSG_HDR->Rsvd_msg_header );
dev_info(&i2c->dev, " Number_of_obj : %d\n\r",MSG_HDR->Number_of_obj );
dev_info(&i2c->dev, " Message_ID : %d\n\r",MSG_HDR->Message_ID );
dev_info(&i2c->dev, " Port_Power_Role : %d\n\r",MSG_HDR->Port_Power_Role );
dev_info(&i2c->dev, " Specification_Revision : %d\n\r",MSG_HDR->Specification_Revision );
dev_info(&i2c->dev, " Port_Data_Role : %d\n\r",MSG_HDR->Port_Data_Role );
dev_info(&i2c->dev, " Rsvd2_msg_header : %d\n\r",MSG_HDR->Rsvd2_msg_header );
dev_info(&i2c->dev, " Message_Type : %d\n\r",MSG_HDR->Message_Type );
dev_info(&i2c->dev, " Rsvd_msg_header : %d\n",MSG_HDR->Rsvd_msg_header );
dev_info(&i2c->dev, " Number_of_obj : %d\n",MSG_HDR->Number_of_obj );
dev_info(&i2c->dev, " Message_ID : %d\n",MSG_HDR->Message_ID );
dev_info(&i2c->dev, " Port_Power_Role : %d\n",MSG_HDR->Port_Power_Role );
dev_info(&i2c->dev, " Specification_Revision : %d\n",MSG_HDR->Specification_Revision );
dev_info(&i2c->dev, " Port_Data_Role : %d\n",MSG_HDR->Port_Data_Role );
dev_info(&i2c->dev, " Rsvd2_msg_header : %d\n",MSG_HDR->Rsvd2_msg_header );
dev_info(&i2c->dev, " Message_Type : %d\n",MSG_HDR->Message_Type );
for(PDO_cnt = 0;PDO_cnt < MSG_HDR->Number_of_obj;PDO_cnt++)
num_of_obj = MSG_HDR->Number_of_obj > MAX_PDO_NUM ? MAX_PDO_NUM : MSG_HDR->Number_of_obj;
for(PDO_cnt = 0;PDO_cnt < num_of_obj;PDO_cnt++)
{
PDO_sel = (RX_SRC_CAPA_MSG[PDO_cnt + 1] >> 30) & 0x3;
dev_info(&i2c->dev, " =================\n\r");
dev_info(&i2c->dev, " PDO_Num : %d\n\r", (PDO_cnt + 1));
dev_info(&i2c->dev, " =================\n");
dev_info(&i2c->dev, " PDO_Num : %d\n", (PDO_cnt + 1));
if(PDO_sel == 0) // *MSG_FIXED_SUPPLY
{
MSG_FIXED_SUPPLY = (SRC_FIXED_SUPPLY_Typedef *)&RX_SRC_CAPA_MSG[PDO_cnt + 1];
if(MSG_FIXED_SUPPLY->Voltage_Unit <= (AVAILABLE_VOLTAGE/UNIT_FOR_VOLTAGE))
available_pdo_num = PDO_cnt + 1;
if (!(*do_power_nego) &&
(pd_sink_status->power_list[PDO_cnt+1].max_voltage != MSG_FIXED_SUPPLY->Voltage_Unit * UNIT_FOR_VOLTAGE ||
pd_sink_status->power_list[PDO_cnt+1].max_current != MSG_FIXED_SUPPLY->Maximum_Current * UNIT_FOR_CURRENT))
*do_power_nego = 1;
pd_sink_status->power_list[PDO_cnt+1].max_voltage = MSG_FIXED_SUPPLY->Voltage_Unit * UNIT_FOR_VOLTAGE;
pd_sink_status->power_list[PDO_cnt+1].max_current = MSG_FIXED_SUPPLY->Maximum_Current * UNIT_FOR_CURRENT;
dev_info(&i2c->dev, " PDO_Parameter(FIXED_SUPPLY) : %d\n\r",MSG_FIXED_SUPPLY->PDO_Parameter );
dev_info(&i2c->dev, " Dual_Role_Power : %d\n\r",MSG_FIXED_SUPPLY->Dual_Role_Power );
dev_info(&i2c->dev, " USB_Suspend_Support : %d\n\r",MSG_FIXED_SUPPLY->USB_Suspend_Support );
dev_info(&i2c->dev, " Externally_POW : %d\n\r",MSG_FIXED_SUPPLY->Externally_POW );
dev_info(&i2c->dev, " USB_Comm_Capable : %d\n\r",MSG_FIXED_SUPPLY->USB_Comm_Capable );
dev_info(&i2c->dev, " Data_Role_Swap : %d\n\r",MSG_FIXED_SUPPLY->Data_Role_Swap );
dev_info(&i2c->dev, " Reserved : %d\n\r",MSG_FIXED_SUPPLY->Reserved );
dev_info(&i2c->dev, " Peak_Current : %d\n\r",MSG_FIXED_SUPPLY->Peak_Current );
dev_info(&i2c->dev, " Voltage_Unit : %d\n\r",MSG_FIXED_SUPPLY->Voltage_Unit );
dev_info(&i2c->dev, " Maximum_Current : %d\n\r",MSG_FIXED_SUPPLY->Maximum_Current );
dev_info(&i2c->dev, " PDO_Parameter(FIXED_SUPPLY) : %d\n",MSG_FIXED_SUPPLY->PDO_Parameter );
dev_info(&i2c->dev, " Dual_Role_Power : %d\n",MSG_FIXED_SUPPLY->Dual_Role_Power );
dev_info(&i2c->dev, " USB_Suspend_Support : %d\n",MSG_FIXED_SUPPLY->USB_Suspend_Support );
dev_info(&i2c->dev, " Externally_POW : %d\n",MSG_FIXED_SUPPLY->Externally_POW );
dev_info(&i2c->dev, " USB_Comm_Capable : %d\n",MSG_FIXED_SUPPLY->USB_Comm_Capable );
dev_info(&i2c->dev, " Data_Role_Swap : %d\n",MSG_FIXED_SUPPLY->Data_Role_Swap );
dev_info(&i2c->dev, " Reserved : %d\n",MSG_FIXED_SUPPLY->Reserved );
dev_info(&i2c->dev, " Peak_Current : %d\n",MSG_FIXED_SUPPLY->Peak_Current );
dev_info(&i2c->dev, " Voltage_Unit : %d\n",MSG_FIXED_SUPPLY->Voltage_Unit );
dev_info(&i2c->dev, " Maximum_Current : %d\n",MSG_FIXED_SUPPLY->Maximum_Current );
}
else if(PDO_sel == 2) // *MSG_VAR_SUPPLY
{
MSG_VAR_SUPPLY = (SRC_VAR_SUPPLY_Typedef *)&RX_SRC_CAPA_MSG[PDO_cnt + 1];
dev_info(&i2c->dev, " PDO_Parameter(VAR_SUPPLY) : %d\n\r",MSG_VAR_SUPPLY->PDO_Parameter );
dev_info(&i2c->dev, " Maximum_Voltage : %d\n\r",MSG_VAR_SUPPLY->Maximum_Voltage );
dev_info(&i2c->dev, " Minimum_Voltage : %d\n\r",MSG_VAR_SUPPLY->Minimum_Voltage );
dev_info(&i2c->dev, " Maximum_Current : %d\n\r",MSG_VAR_SUPPLY->Maximum_Current );
dev_info(&i2c->dev, " PDO_Parameter(VAR_SUPPLY) : %d\n",MSG_VAR_SUPPLY->PDO_Parameter );
dev_info(&i2c->dev, " Maximum_Voltage : %d\n",MSG_VAR_SUPPLY->Maximum_Voltage );
dev_info(&i2c->dev, " Minimum_Voltage : %d\n",MSG_VAR_SUPPLY->Minimum_Voltage );
dev_info(&i2c->dev, " Maximum_Current : %d\n",MSG_VAR_SUPPLY->Maximum_Current );
}
else if(PDO_sel == 1) // *MSG_BAT_SUPPLY
{
MSG_BAT_SUPPLY = (SRC_BAT_SUPPLY_Typedef *)&RX_SRC_CAPA_MSG[PDO_cnt + 1];
dev_info(&i2c->dev, " PDO_Parameter(BAT_SUPPLY) : %d\n\r",MSG_BAT_SUPPLY->PDO_Parameter );
dev_info(&i2c->dev, " Maximum_Voltage : %d\n\r",MSG_BAT_SUPPLY->Maximum_Voltage );
dev_info(&i2c->dev, " Minimum_Voltage : %d\n\r",MSG_BAT_SUPPLY->Minimum_Voltage );
dev_info(&i2c->dev, " Maximum_Allow_Power : %d\n\r",MSG_BAT_SUPPLY->Maximum_Allow_Power );
dev_info(&i2c->dev, " PDO_Parameter(BAT_SUPPLY) : %d\n",MSG_BAT_SUPPLY->PDO_Parameter );
dev_info(&i2c->dev, " Maximum_Voltage : %d\n",MSG_BAT_SUPPLY->Maximum_Voltage );
dev_info(&i2c->dev, " Minimum_Voltage : %d\n",MSG_BAT_SUPPLY->Minimum_Voltage );
dev_info(&i2c->dev, " Maximum_Allow_Power : %d\n",MSG_BAT_SUPPLY->Maximum_Allow_Power );
}
}
/* the number of available pdo list */
pd_sink_status->available_pdo_num = available_pdo_num;
dev_info(&i2c->dev, "=======================================\n\r");
dev_info(&i2c->dev, "\n\r");
dev_info(&i2c->dev, "=======================================\n");
return available_pdo_num;
}
@ -192,25 +199,34 @@ void process_pd(void *data, u8 plug_attach_done, u8 *pdic_attach, MSG_IRQ_STATUS
struct s2mm005_data *usbpd_data = data;
struct i2c_client *i2c = usbpd_data->i2c;
uint16_t REG_ADD;
uint8_t rp_currentlvl, is_src;
uint8_t rp_currentlvl, is_src, i;
REQUEST_FIXED_SUPPLY_STRUCT_Typedef *request_power_number;
#ifdef CONFIG_USB_TYPEC_MANAGER_NOTIFIER
CC_NOTI_ATTACH_TYPEDEF pd_notifier;
#if defined(CONFIG_USB_HOST_NOTIFY)
struct otg_notify *o_notify = get_otg_notify();
#endif
printk("%s\n",__func__);
rp_currentlvl = (usbpd_data->func_state >> 3) & 0x3;
is_src = (usbpd_data->func_state >> 1) & 0x1;
rp_currentlvl = ((usbpd_data->func_state >> 27) & 0x3);
is_src = (usbpd_data->func_state & (0x1 << 25) ? 1 : 0);
dev_info(&i2c->dev, "rp_currentlvl:0x%02X, is_source:0x%02X\n", rp_currentlvl, is_src);
if (MSG_IRQ_State->BITS.Ctrl_Flag_PR_Swap)
{
usbpd_data->is_pr_swap++;
dev_info(&i2c->dev, "PR_Swap requested to %s\n", is_src ? "SOURCE" : "SINK");
if (is_src && (usbpd_data->power_role == DUAL_ROLE_PROP_PR_SNK)) {
ccic_event_work(usbpd_data, CCIC_NOTIFY_DEV_BATTERY, CCIC_NOTIFY_ID_ATTACH, 0, 0, 0);
}
vbus_turn_on_ctrl(is_src);
#if defined(CONFIG_DUAL_ROLE_USB_INTF)
usbpd_data->power_role = is_src ? DUAL_ROLE_PROP_PR_SRC : DUAL_ROLE_PROP_PR_SNK;
usbpd_data->power_role = is_src ? DUAL_ROLE_PROP_PR_SRC : DUAL_ROLE_PROP_PR_SNK;
#if defined(CONFIG_USB_HOST_NOTIFY)
if( usbpd_data->power_role == DUAL_ROLE_PROP_PR_SRC)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 1);
else if( usbpd_data->power_role == DUAL_ROLE_PROP_PR_SNK)
send_otg_notify(o_notify, NOTIFY_EVENT_POWER_SOURCE, 0);
#endif
ccic_event_work(usbpd_data, CCIC_NOTIFY_DEV_PDIC, CCIC_NOTIFY_ID_ROLE_SWAP, 0, 0, 0);
#endif
}
@ -218,10 +234,12 @@ void process_pd(void *data, u8 plug_attach_done, u8 *pdic_attach, MSG_IRQ_STATUS
{
uint8_t ReadMSG[32];
int available_pdo_num;
uint8_t do_power_nego = 0;
pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK;
REG_ADD = REG_RX_SRC_CAPA_MSG;
s2mm005_read_byte(i2c, REG_ADD, ReadMSG, 32);
available_pdo_num = s2mm005_src_capacity_information(i2c, (uint32_t *)ReadMSG, &pd_noti.sink_status);
available_pdo_num = s2mm005_src_capacity_information(i2c, (uint32_t *)ReadMSG, &pd_noti.sink_status, &do_power_nego);
REG_ADD = REG_TX_REQUEST_MSG;
s2mm005_read_byte(i2c, REG_ADD, ReadMSG, 32);
@ -241,8 +259,15 @@ void process_pd(void *data, u8 plug_attach_done, u8 *pdic_attach, MSG_IRQ_STATUS
pd_noti.sink_status.selected_pdo_num = pd_noti.sink_status.current_pdo_num;
}
} else {
pr_info(" %s : PDO(%d) is selected, but same with previous list, so skip\n",
if (do_power_nego) {
pr_info(" %s : PDO(%d) is selected, but power negotiation is requested\n",
__func__, pd_noti.sink_status.selected_pdo_num);
pd_noti.sink_status.selected_pdo_num = 0;
pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK_CAP;
} else {
pr_info(" %s : PDO(%d) is selected, but same with previous list, so skip\n",
__func__, pd_noti.sink_status.selected_pdo_num);
}
}
*pdic_attach = 1;
} else {
@ -250,21 +275,49 @@ void process_pd(void *data, u8 plug_attach_done, u8 *pdic_attach, MSG_IRQ_STATUS
}
}
if (MSG_IRQ_State->BITS.Ctrl_Flag_Get_Sink_Cap)
{
pr_info(" %s : SRC requested SINK Cap\n", __func__);
}
/* notify to battery */
#ifdef CONFIG_USB_TYPEC_MANAGER_NOTIFIER
if (plug_attach_done) {
if (*pdic_attach) {
pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK;
pd_notifier.src = CCIC_NOTIFY_DEV_CCIC;
pd_notifier.dest = CCIC_NOTIFY_DEV_BATTERY;
pd_notifier.id = CCIC_NOTIFY_ID_POWER_STATUS;
pd_notifier.attach = *pdic_attach;
ccic_notifier_notify((CC_NOTI_TYPEDEF*)&pd_notifier, &pd_noti, *pdic_attach);
}
else
pd_noti.event = PDIC_NOTIFY_EVENT_CCIC_ATTACH;
/* PD charger is detected by PDIC */
} else if (!is_src && (usbpd_data->pd_state == State_PE_SNK_Wait_for_Capabilities ||
usbpd_data->pd_state == State_ErrorRecovery) &&
rp_currentlvl != pd_noti.sink_status.rp_currentlvl &&
rp_currentlvl >= RP_CURRENT_LEVEL_DEFAULT) {
if (rp_currentlvl == RP_CURRENT_LEVEL3) {
/* 5V/3A RP charger is detected by CCIC */
pd_noti.sink_status.rp_currentlvl = RP_CURRENT_LEVEL3;
pd_noti.event = PDIC_NOTIFY_EVENT_CCIC_ATTACH;
} else if (rp_currentlvl == RP_CURRENT_LEVEL2) {
/* 5V/1.5A RP charger is detected by CCIC */
pd_noti.sink_status.rp_currentlvl = RP_CURRENT_LEVEL2;
pd_noti.event = PDIC_NOTIFY_EVENT_CCIC_ATTACH;
} else if (rp_currentlvl == RP_CURRENT_LEVEL_DEFAULT) {
/* 5V/0.5A RP charger is detected by CCIC */
pd_noti.sink_status.rp_currentlvl = RP_CURRENT_LEVEL_DEFAULT;
pd_noti.event = PDIC_NOTIFY_EVENT_CCIC_ATTACH;
} else
return;
} else
return;
#ifdef CONFIG_SEC_FACTORY
pr_info(" %s : debug pdic_attach(%d) event(%d)\n", __func__, *pdic_attach, pd_noti.event);
#endif
ccic_event_work(usbpd_data, CCIC_NOTIFY_DEV_BATTERY, CCIC_NOTIFY_ID_POWER_STATUS, *pdic_attach, 0, 0);
} else {
for (i = 0; i < MAX_PDO_NUM + 1; i++) {
pd_noti.sink_status.power_list[i].max_current = 0;
pd_noti.sink_status.power_list[i].max_voltage = 0;
}
pd_noti.sink_status.rp_currentlvl = RP_CURRENT_LEVEL_NONE;
pd_noti.sink_status.available_pdo_num = 0;
pd_noti.sink_status.selected_pdo_num = 0;
pd_noti.sink_status.current_pdo_num = 0;
pd_noti.event = PDIC_NOTIFY_EVENT_DETACH;
}
#else

View File

@ -1194,6 +1194,8 @@ static int etspi_type_check(struct etspi_data *etspi)
* ET510C : 0X00 / 0X66 / 0X00 / 0X33
* ET510D : 0x03 / 0x0A / 0x05
* ET516B : 0x01 or 0x02 / 0x10 / 0x05
* ET520 : 0x03 / 0x14 / 0x05
* ET523 : 0x00 / 0x17 / 0x05
*/
if (((buf1 == 0x01) || (buf1 == 0x02))
&& (buf2 == 0x10) && (buf3 == 0x05)) {
@ -1202,6 +1204,12 @@ static int etspi_type_check(struct etspi_data *etspi)
} else if ((buf1 == 0x03) && (buf2 == 0x0A) && (buf3 == 0x05)) {
etspi->sensortype = SENSOR_EGIS;
pr_info("%s sensor type is EGIS ET510D sensor\n", __func__);
} else if ((buf1 == 0x03) && (buf2 == 0x14) && (buf3 == 0x05)) {
etspi->sensortype = SENSOR_EGIS;
pr_info("%s sensor type is EGIS ET520 sensor\n", __func__);
} else if((buf1 == 0x00) && (buf2 == 0x17) && (buf3 == 0x05)) {
etspi->sensortype = SENSOR_EGIS;
pr_info("%s sensor type is EGIS ET523 sensor\n", __func__);
} else {
if ((buf4 == 0x00) && (buf5 == 0x66)
&& (buf6 == 0x00) && (buf7 == 0x33)) {

7
drivers/five/five_tee_driver/Kconfig Normal file → Executable file
View File

@ -42,3 +42,10 @@ config FIVE_TRUSTLET_PATH
default "five/ffffffff000000000000000000000072.tlbin"
---help---
This option defines FIVE trustlet path which will be built-in kernel
config FIVE_EARLY_LOAD_TRUSTED_APP
bool "Load trusted application in early boot"
depends on FIVE_TEE_DRIVER
default n
help
Enable loading of the trusted application during driver initialization

46
drivers/five/five_tee_driver/five_tee_driver.c Normal file → Executable file
View File

@ -19,8 +19,10 @@
#include <linux/slab.h>
#include <linux/kthread.h>
#include <five_tee_driver.h>
#include <linux/task_integrity.h>
#include "tee_client_api.h"
#include "five_ta_uuid.h"
#include "../../../security/integrity/five/five_audit.h"
#ifdef CONFIG_TEE_DRIVER_DEBUG
#include <linux/uaccess.h>
@ -88,7 +90,7 @@ static int initialize_trusted_app(void)
wait_for_completion(&ta_loaded);
ret = kthread_stop(tsk);
is_initialized = ret == 0 ? 1 : 0;
pr_info("FIVE: Initialized trusted app ret: %d\n", ret);
pr_info("FIVE: Initialize trusted app ret: %d\n", ret);
return ret;
}
@ -178,8 +180,17 @@ static int send_cmd(unsigned int cmd,
mutex_unlock(&itee_driver_lock);
if (rc == TEEC_SUCCESS && origin != TEEC_ORIGIN_TRUSTED_APP)
rc = -EIO;
if (rc == TEEC_SUCCESS) {
if (origin != TEEC_ORIGIN_TRUSTED_APP) {
rc = -EIO;
five_audit_tee_msg("send_cmd",
"TEEC_InvokeCommand is failed", rc, origin);
}
} else {
five_audit_tee_msg("send_cmd", "TEEC_InvokeCommand is failed.",
rc, origin);
}
if (rc == TEEC_SUCCESS && cmd == CMD_SIGN) {
memcpy(signature, msg->signature, sig_len);
@ -257,7 +268,8 @@ static int load_trusted_app(void)
rc = TEEC_InitializeContext(NULL, context);
if (rc) {
pr_err("FIVE: Can't initialize context rc=0x%x\n", rc);
five_audit_tee_msg("load_trusted_app", "Can't initialize context",
rc, 0);
goto error;
}
@ -270,8 +282,8 @@ static int load_trusted_app(void)
rc = TEEC_OpenSession(context, session,
&five_ta_uuid, 0, NULL, NULL, &origin);
if (rc) {
pr_err("FIVE: Can't open session rc=0x%x origin=0x%x\n",
rc, origin);
five_audit_tee_msg("load_trusted_app", "Can't open session",
rc, origin);
goto error;
}
@ -391,9 +403,27 @@ static inline int __init init_fs(void)
static int __init tee_driver_init(void)
{
int rc = 0;
mutex_init(&itee_driver_lock);
register_tee_driver();
return init_fs();
#ifdef CONFIG_FIVE_EARLY_LOAD_TRUSTED_APP
rc = load_trusted_app();
pr_info("FIVE: Initialize trusted app in early boot ret: %d\n", rc);
#endif
rc = register_tee_driver();
if (rc) {
pr_err("FIVE: Can't register tee_driver\n");
goto out;
}
rc = init_fs();
if (rc) {
pr_err("FIVE: Can't initialize debug FS\n");
goto out;
}
out:
return rc;
}
static void __exit tee_driver_exit(void)

7
drivers/gpio/Kconfig Normal file → Executable file
View File

@ -1040,4 +1040,11 @@ config GPIO_VIPERBOARD
endmenu
config SEC_CONNECTOR_DETECTOR
tristate "Setting Samsung Connector Detector feature"
depends on SEC_FACTORY
help
Enable this feature to detect whether a connector is attached.
This feature should be enabled for factory mode.
endif

1
drivers/gpio/Makefile Normal file → Executable file
View File

@ -119,3 +119,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
obj-$(CONFIG_SEC_CONNECTOR_DETECTOR) += sec-detect-conn.o

496
drivers/gpio/sec-detect-conn.c Executable file
View File

@ -0,0 +1,496 @@
/**
* sec-detect-conn.c
*
* Copyright (C) 2017 Samsung Electronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* Compile-time switch for the verbose SEC_CONN_PRINT debug logging below. */
#define DEBUG_FOR_SECDETECT
#include <linux/sec-detect-conn.h>

/* Bitmask of connectors currently monitored: bit i set => pin i enabled
 * (see store_detect_conn_enabled, which does `|= (1 << i)`). */
static int detect_conn_enabled;

/* Singleton driver state, published at probe time so the sysfs
 * store/show handlers and the IRQ init path can reach it. */
struct detect_conn_info *gpinfo;

/* Common log prefix for every message from this driver. */
#define SEC_CONN_PRINT(format, ...) pr_info("[SEC_Detect_Conn] " format, ##__VA_ARGS__)

/* The whole driver body below is compiled for factory builds only. */
#if defined(CONFIG_SEC_FACTORY)
#ifdef CONFIG_OF
/* Device-tree match table: binds to "samsung,sec_detect_conn" nodes. */
static const struct of_device_id sec_detect_conn_dt_match[] = {
	{ .compatible = "samsung,sec_detect_conn" },
	{ }
};
#endif
/* PM suspend callback — intentionally a no-op stub. */
static int sec_detect_conn_pm_suspend(struct device *dev)
{
	return 0;
}

/* PM resume callback — intentionally a no-op stub. */
static int sec_detect_conn_pm_resume(struct device *dev)
{
	return 0;
}

/* Platform-device removal — nothing is torn down here (resources are
 * device-managed or live for the life of the module). */
static int sec_detect_conn_remove(struct platform_device *pdev)
{
	return 0;
}

/* Wire the (stub) suspend/resume callbacks into the PM core. */
static const struct dev_pm_ops sec_detect_conn_pm = {
	.suspend = sec_detect_conn_pm_suspend,
	.resume = sec_detect_conn_pm_resume,
};
/**
 * send_uevent_irq() - report a connector state change from the IRQ handler.
 * @irq:   IRQ number that fired.
 * @pinfo: driver state holding the per-pin name/irq tables.
 * @type:  IRQ trigger type (IRQ_TYPE_EDGE_RISING/FALLING/BOTH).
 *
 * Looks up which monitored pin owns @irq and, when that pin currently
 * reads high, emits a KOBJ_CHANGE uevent carrying CONNECTOR_NAME and a
 * CONNECTOR_TYPE string matching the trigger edge.  Unknown trigger
 * types are logged and dropped.
 *
 * Fix: sprintf() replaced with snprintf() — the connector names come
 * from the device tree and must not be able to overflow the fixed-size
 * UEVENT_CONN_MAX_DEV_NAME buffers.
 */
void send_uevent_irq(int irq, struct detect_conn_info *pinfo, int type)
{
	char *uevent_conn_str[3] = {"", "", NULL};
	char uevent_dev_str[UEVENT_CONN_MAX_DEV_NAME];
	char uevent_dev_type_str[UEVENT_CONN_MAX_DEV_NAME];
	int i;

	/*Send Uevent Data*/
	for (i = 0; i < pinfo->pdata->gpio_cnt; i++) {
		if (irq != pinfo->pdata->irq_number[i])
			continue;
		/* NOTE(review): only a high pin level is reported — a pin
		 * reading low at IRQ time produces no uevent, even for
		 * FALLING_EDGE triggers; confirm this is intended. */
		if (!gpio_get_value(pinfo->pdata->irq_gpio[i]))
			continue;

		SEC_CONN_PRINT("%s status changed.\n", pinfo->pdata->name[i]);
		snprintf(uevent_dev_str, sizeof(uevent_dev_str),
			 "CONNECTOR_NAME=%s", pinfo->pdata->name[i]);

		if (type == IRQ_TYPE_EDGE_RISING) {
			snprintf(uevent_dev_type_str, sizeof(uevent_dev_type_str),
				 "CONNECTOR_TYPE=RISING_EDGE");
			SEC_CONN_PRINT("send uevent irq[%d]:CONNECTOR_NAME=%s,CONNECTOR_TYPE=RISING_EDGE.\n"
				       , irq, pinfo->pdata->name[i]);
		} else if (type == IRQ_TYPE_EDGE_FALLING) {
			snprintf(uevent_dev_type_str, sizeof(uevent_dev_type_str),
				 "CONNECTOR_TYPE=FALLING_EDGE");
			SEC_CONN_PRINT("send uevent irq[%d]:CONNECTOR_NAME=%s,CONNECTOR_TYPE=FALLING_EDGE.\n"
				       , irq, pinfo->pdata->name[i]);
		} else if (type == IRQ_TYPE_EDGE_BOTH) {
			snprintf(uevent_dev_type_str, sizeof(uevent_dev_type_str),
				 "CONNECTOR_TYPE=EDGE_BOTH");
			SEC_CONN_PRINT("send uevent irq[%d]:CONNECTOR_NAME=%s,CONNECTOR_TYPE=ALL_EDGE.\n"
				       , irq, pinfo->pdata->name[i]);
		} else {
			SEC_CONN_PRINT("Err:Unknown type irq : irq[%d]:CONNECTOR_NAME=%s,CONNECTOR_TYPE=%d.\n"
				       , irq, pinfo->pdata->name[i], type);
			return;
		}

		uevent_conn_str[0] = uevent_dev_str;
		uevent_conn_str[1] = uevent_dev_type_str;
		kobject_uevent_env(&pinfo->dev->kobj, KOBJ_CHANGE, uevent_conn_str);
	}
}
/**
 * send_uevent_by_num() - emit the current level of connector @num as a uevent.
 * @num:   index into the per-pin connector tables.
 * @pinfo: driver state.
 * @level: 1 to report HIGH_LEVEL, 0 to report LOW_LEVEL.
 *
 * Fix: the original left uevent_dev_type_str uninitialized — and passed
 * it to kobject_uevent_env() — for any @level other than 0 or 1 (reading
 * an uninitialized buffer is undefined behavior).  Such calls are now
 * rejected up front.  sprintf() replaced with bounded snprintf().
 */
void send_uevent_by_num(int num, struct detect_conn_info *pinfo, int level)
{
	char *uevent_conn_str[3] = {"", "", NULL};
	char uevent_dev_str[UEVENT_CONN_MAX_DEV_NAME];
	char uevent_dev_type_str[UEVENT_CONN_MAX_DEV_NAME];

	/* Callers only pass 0 or 1; anything else would previously have
	 * sent an uninitialized buffer to userspace. */
	if (level != 0 && level != 1)
		return;

	/*Send Uevent Data*/
	snprintf(uevent_dev_str, sizeof(uevent_dev_str),
		 "CONNECTOR_NAME=%s", pinfo->pdata->name[num]);
	uevent_conn_str[0] = uevent_dev_str;

	if (level == 1)
		snprintf(uevent_dev_type_str, sizeof(uevent_dev_type_str),
			 "CONNECTOR_TYPE=HIGH_LEVEL");
	else
		snprintf(uevent_dev_type_str, sizeof(uevent_dev_type_str),
			 "CONNECTOR_TYPE=LOW_LEVEL");
	uevent_conn_str[1] = uevent_dev_type_str;

	kobject_uevent_env(&pinfo->dev->kobj, KOBJ_CHANGE, uevent_conn_str);

	if (level == 1)
		SEC_CONN_PRINT("send uevent pin[%d]:CONNECTOR_NAME=%s,CONNECTOR_TYPE=HIGH_LEVEL.\n"
			       , num, pinfo->pdata->name[num]);
	else
		SEC_CONN_PRINT("send uevent pin[%d]:CONNECTOR_NAME=%s,CONNECTOR_TYPE=LOW_LEVEL.\n"
			       , num, pinfo->pdata->name[num]);
}
/**
 * detect_conn_interrupt_handler() - threaded IRQ handler for connector pins.
 *
 * Ignores the interrupt entirely while no connector is enabled; otherwise
 * forwards the event (with its trigger type) to send_uevent_irq().
 */
static irqreturn_t detect_conn_interrupt_handler(int irq, void *handle)
{
	struct detect_conn_info *pinfo = handle;
	int trigger;

	/* Nothing enabled from userspace yet — swallow the interrupt. */
	if (detect_conn_enabled == 0)
		return IRQ_HANDLED;

	SEC_CONN_PRINT("%s\n", __func__);
	trigger = irq_get_trigger_type(irq);
	send_uevent_irq(irq, pinfo, trigger);

	return IRQ_HANDLED;
}
/**
 * detect_conn_irq_enable() - unmask one pin's IRQ, or mask all of them.
 * @pinfo:  driver state.
 * @enable: true to enable the IRQ of @pin, false to disable every
 *          currently-enabled IRQ (@pin is ignored in that case).
 * @pin:    pin index to enable.
 *
 * Returns 0 in all cases (kept for caller compatibility).
 */
int detect_conn_irq_enable(struct detect_conn_info *pinfo, bool enable, int pin)
{
	int idx;

	if (enable) {
		/*enable IRQ*/
		enable_irq(pinfo->pdata->irq_number[pin]);
		pinfo->irq_enabled[pin] = true;
		return 0;
	}

	/* Disable path: walk every pin and mask the ones still live. */
	for (idx = 0; idx < pinfo->pdata->gpio_cnt; idx++) {
		if (!pinfo->irq_enabled[idx])
			continue;
		disable_irq(pinfo->pdata->irq_number[idx]);
		pinfo->irq_enabled[idx] = false;
	}

	return 0;
}
/**
* Triggered when "enabled" node is set.
* When enabling this node, check and send an uevent if the pin level is high.
* And then gpio pin interrupt is enabled.
*/
static ssize_t store_detect_conn_enabled(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct detect_conn_info *pinfo;
struct sec_det_conn_p_data *pdata;
int ret;
int i;
int bufLength;
int pinNameLength;
if (gpinfo == 0)
return -1;
pinfo = gpinfo;
pdata = pinfo->pdata;
bufLength = strlen(buf);
#if defined(DEBUG_FOR_SECDETECT)
SEC_CONN_PRINT("buf = %s\n", buf);
SEC_CONN_PRINT("bufLength = %d\n", bufLength);
#endif
/*Disable irq when "enabled" value set to 0*/
if (!strncmp(buf, "0", 1)) {
SEC_CONN_PRINT("SEC Detect connector driver disable.\n");
detect_conn_enabled = 0;
ret = detect_conn_irq_enable(pinfo, false, 0);
if (ret) {
SEC_CONN_PRINT("Interrupt not disabled.\n");
return ret;
}
} else {
for (i = 0; i < pdata->gpio_cnt; i++) {
pinNameLength = strlen(pdata->name[i]);
#if defined(DEBUG_FOR_SECDETECT)
SEC_CONN_PRINT("pinName = %s\n", pdata->name[i]);
SEC_CONN_PRINT("pinNameLength = %d\n", pinNameLength);
#endif
if (pinNameLength == bufLength) {
if (!strncmp(buf, pdata->name[i], bufLength)) {
SEC_CONN_PRINT("%s driver enabled.\n", buf);
detect_conn_enabled |= (1 << i);
#if defined(DEBUG_FOR_SECDETECT)
SEC_CONN_PRINT("gpio level [%d] = %d\n", pdata->irq_gpio[i],
gpio_get_value(pdata->irq_gpio[i]));
#endif
/*get level value of the gpio pin.*/
/*if there's gpio low pin, send uevent*/
if (gpio_get_value(pdata->irq_gpio[i]))
send_uevent_by_num(i, pinfo, 1);
else
send_uevent_by_num(i, pinfo, 0);
/*Enable interrupt.*/
ret = detect_conn_irq_enable(pinfo, true, i);
if (ret < 0) {
SEC_CONN_PRINT("%s Interrupt not enabled.\n", buf);
return ret;
}
}
}
/* For ALL_CONNECT input, enable all nodes except already enabled node. */
if (bufLength == 11) {
if (!strncmp(buf, "ALL_CONNECT", bufLength)) {
if (!(detect_conn_enabled & (1 << i))) {
SEC_CONN_PRINT("%s driver enabled.\n", buf);
detect_conn_enabled |= (1 << i);
#if defined(DEBUG_FOR_SECDETECT)
SEC_CONN_PRINT("gpio level [%d] = %d\n", pdata->irq_gpio[i],
gpio_get_value(pdata->irq_gpio[i]));
#endif
/*get level value of the gpio pin.*/
/*if there's gpio low pin, send uevent*/
if (gpio_get_value(pdata->irq_gpio[i]))
send_uevent_by_num(i, pinfo, 1);
else
send_uevent_by_num(i, pinfo, 0);
/*Enable interrupt.*/
ret = detect_conn_irq_enable(pinfo, true, i);
if (ret < 0) {
SEC_CONN_PRINT("%s Interrupt not enabled.\n", buf);
return ret;
}
}
}
}
}
}
return count;
}
static ssize_t show_detect_conn_enabled(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", detect_conn_enabled);
}
static DEVICE_ATTR(enabled, 0644, show_detect_conn_enabled, store_detect_conn_enabled);
#ifdef CONFIG_OF
/**
 * Parse the device tree and get gpio number, irq type.
 * Request gpio
 *
 * Reads "sec,det_conn_gpios", "sec,det_conn_name" and
 * "sec,det_conn_irq_type" from the device node and fills
 * pdata->irq_gpio[], pdata->name[], pdata->irq_number[] and
 * pdata->irq_type[].  Returns 0 on success, -EINVAL if any GPIO is
 * invalid or the irq_type property is missing.
 *
 * NOTE(review): the of_property_read_string_index() return value is not
 * checked — a missing name entry leaves pdata->name[i] unset; confirm
 * the DT always provides one name per GPIO.
 */
static int detect_conn_parse_dt(struct device *dev)
{
	struct sec_det_conn_p_data *pdata = dev->platform_data;
	struct device_node *np = dev->of_node;
	int i;

	/* One table entry per GPIO listed in the DT node. */
	pdata->gpio_cnt = of_gpio_named_count(np, "sec,det_conn_gpios");
	for (i = 0; i < pdata->gpio_cnt; i++) {
		/*Get connector name*/
		of_property_read_string_index(np, "sec,det_conn_name", i, &(pdata->name[i]));
		/*Get connector gpio number*/
		pdata->irq_gpio[i] = of_get_named_gpio(np, "sec,det_conn_gpios", i);
		if (gpio_is_valid(pdata->irq_gpio[i])) {
#if defined(DEBUG_FOR_SECDETECT)
			SEC_CONN_PRINT("i = [%d] gpio level [%d] = %d\n", i, pdata->irq_gpio[i],
				gpio_get_value(pdata->irq_gpio[i]));
			SEC_CONN_PRINT("gpio irq gpio = [%d], irq = [%d]\n", pdata->irq_gpio[i],
				gpio_to_irq(pdata->irq_gpio[i]));
#endif
			/*Filling the irq_number from this gpio.*/
			pdata->irq_number[i] = gpio_to_irq(pdata->irq_gpio[i]);
		} else {
			dev_err(dev, "%s: Failed to get irq gpio.\n", __func__);
			return -EINVAL;
		}
	}
	/*Get type of gpio irq*/
	if (of_property_read_u32_array(np, "sec,det_conn_irq_type", pdata->irq_type, pdata->gpio_cnt)) {
		dev_err(dev, "%s, Failed to get irq_type property.\n", __func__);
		return -EINVAL;
	}
	return 0;
}
#endif
/**
 * detect_conn_init_irq() - request a threaded IRQ for every connector pin.
 *
 * Each IRQ is requested with the type from DT plus IRQF_ONESHOT, then
 * immediately disabled; the sysfs "enabled" store re-enables per pin.
 *
 * Fix: on a mid-loop request_threaded_irq() failure the original
 * returned while leaking every IRQ requested on earlier iterations;
 * they are now released with free_irq() before returning the error.
 *
 * Returns 0 on success, or the request_threaded_irq() error code.
 */
static int detect_conn_init_irq(void)
{
	struct detect_conn_info *pinfo;
	struct sec_det_conn_p_data *pdata;
	int retval = 0;
	int i;

	if (gpinfo == 0)
		return -1;
	pinfo = gpinfo;
	pdata = pinfo->pdata;

	for (i = 0; i < pinfo->pdata->gpio_cnt; i++) {
		retval = request_threaded_irq(pinfo->pdata->irq_number[i], NULL,
					      detect_conn_interrupt_handler,
					      pinfo->pdata->irq_type[i] | IRQF_ONESHOT,
					      pinfo->pdata->name[i], pinfo);
		if (retval) {
			SEC_CONN_PRINT("%s: Failed to request threaded irq %d.\n",
				       __func__, retval);
			goto err_free_irq;
		}
#if defined(DEBUG_FOR_SECDETECT)
		SEC_CONN_PRINT("%s: Succeeded to request threaded irq %d: irq_num[%d], type[%x],name[%s].\n",
			       __func__, retval, pinfo->pdata->irq_number[i],
			       pinfo->pdata->irq_type[i], pinfo->pdata->name[i]);
#endif
		/*disable irq init*/
		disable_irq(pinfo->pdata->irq_number[i]);
	}
	return 0;

err_free_irq:
	/* Release the IRQs successfully requested before the failure. */
	while (--i >= 0)
		free_irq(pinfo->pdata->irq_number[i], pinfo);
	return retval;
}
/**
 * sec_detect_conn_probe() - platform probe: parse DT, create the sysfs
 * device and node, and set up (but leave masked) the connector IRQs.
 *
 * Fixes:
 *  - pdata and pinfo come from devm_kzalloc(); the original error path
 *    called kfree() on them, which double-frees once the device core
 *    releases the managed allocations.  The kfree() calls are removed.
 *  - "#if CONFIG_OF" changed to "#ifdef CONFIG_OF": CONFIG_OF is a
 *    define-or-absent Kconfig symbol, matching the #ifdef used at the
 *    detect_conn_parse_dt() definition.
 */
static int sec_detect_conn_probe(struct platform_device *pdev)
{
	struct sec_det_conn_p_data *pdata;
	struct detect_conn_info *pinfo;
	int ret;

	SEC_CONN_PRINT("%s\n", __func__);

	/* First Get the GPIO pins; if it fails, we'll defer the probe. */
	if (pdev->dev.of_node) {
		pdata = devm_kzalloc(&pdev->dev,
				     sizeof(struct sec_det_conn_p_data), GFP_KERNEL);
		if (!pdata) {
			dev_err(&pdev->dev, "Failed to allocate platform data.\n");
			return -ENOMEM;
		}
		pdev->dev.platform_data = pdata;
#ifdef CONFIG_OF
		ret = detect_conn_parse_dt(&pdev->dev);
#else
		ret = 0;
#endif
		if (ret) {
			dev_err(&pdev->dev, "Failed to parse dt data.\n");
			return ret;
		}
		pr_info("%s: parse dt done.\n", __func__);
	} else {
		pdata = pdev->dev.platform_data;
	}

	if (!pdata) {
		dev_err(&pdev->dev, "There are no platform data.\n");
		return -EINVAL;
	}

	pinfo = devm_kzalloc(&pdev->dev, sizeof(struct detect_conn_info), GFP_KERNEL);
	if (!pinfo) {
		SEC_CONN_PRINT("pinfo : failed to allocate pinfo.\n");
		return -ENOMEM;
	}

	/* Create sys device /sys/class/sec/sec_detect_conn */
	pinfo->dev = sec_device_create(pinfo, "sec_detect_conn");
	if (unlikely(IS_ERR(pinfo->dev))) {
		pr_err("%s Failed to create device(sec_detect_conn).\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	/* Create sys node /sys/class/sec/sec_detect_conn/enabled */
	ret = device_create_file(pinfo->dev, &dev_attr_enabled);
	if (ret) {
		dev_err(&pdev->dev, "%s: Failed to create device file.\n", __func__);
		goto err_create_detect_conn_sysfs;
	}

	/*save pinfo data to pdata to interrupt enable*/
	pdata->pinfo = pinfo;
	/*save pdata data to pinfo for enable node*/
	pinfo->pdata = pdata;
	/* save pinfo to gpinfo to enabled node*/
	gpinfo = pinfo;

	/* detect_conn_init_irq thread create*/
	ret = detect_conn_init_irq();
	return ret;

err_create_detect_conn_sysfs:
	sec_device_destroy(pinfo->dev->devt);
out:
	gpinfo = 0;
	/*
	 * pinfo/pdata are devm-managed: the device core frees them when
	 * probe fails, so no kfree() here (the original double-freed).
	 */
	return ret;
}
/* Platform driver definition; DT matching via sec_detect_conn_dt_match. */
static struct platform_driver sec_detect_conn_driver = {
	.probe = sec_detect_conn_probe,
	.remove = sec_detect_conn_remove,
	.driver = {
		.name = "sec_detect_conn",
		.owner = THIS_MODULE,
#if defined(CONFIG_PM)
		.pm = &sec_detect_conn_pm,
#endif
/* Fix: was "#if CONFIG_OF" — the symbol is defined-or-absent, and the
 * match table above is guarded with #ifdef, so this must match. */
#ifdef CONFIG_OF
		.of_match_table = of_match_ptr(sec_detect_conn_dt_match),
#endif
	},
};
#endif
/* Module init: register the platform driver on factory builds only;
 * non-factory builds just log that the feature is unsupported. */
static int __init sec_detect_conn_init(void)
{
#if defined(CONFIG_SEC_FACTORY)
	int rc;

	SEC_CONN_PRINT("%s\n", __func__);
	rc = platform_driver_register(&sec_detect_conn_driver);
	return rc;
#else
	SEC_CONN_PRINT("Not support Sec_Detect_Conn.\n");
	return 0;
#endif
}
/* Module exit: unregister the platform driver (factory builds only). */
static void __exit sec_detect_conn_exit(void)
{
#if defined(CONFIG_SEC_FACTORY)
	/*
	 * Fix: platform_driver_unregister() returns void, and a return
	 * statement with an expression is a constraint violation in a
	 * void function (C11 6.8.6.4) — the "return" keyword is dropped.
	 */
	platform_driver_unregister(&sec_detect_conn_driver);
#endif
}
module_init(sec_detect_conn_init);
module_exit(sec_detect_conn_exit);
MODULE_DESCRIPTION("Samsung Detecting Connector Driver");
MODULE_AUTHOR("Samsung Electronics");
MODULE_LICENSE("GPL");

View File

@ -25,4 +25,7 @@ ifeq ($(CONFIG_MALI_TMIX),y)
ifeq ($(CONFIG_MALI_TMIX_R9P0),y)
obj-y += tMIx/r9p0/
endif
ifeq ($(CONFIG_MALI_TMIX_R10P0),y)
obj-y += tMIx/r10p0/
endif
endif

View File

@ -32,7 +32,8 @@ choice
prompt "Version Configuration"
depends on MALI_TMIX
default MALI_TMIX_R3P0 if DDK_VERSION_OS = "n"
default MALI_TMIX_R9P0 if DDK_VERSION_OS = "o"
default MALI_TMIX_R9P0 if ANDROID_VERSION = "80000"
default MALI_TMIX_R10P0 if ANDROID_VERSION = "80100"
help
Select the gpu support version.
@ -47,6 +48,10 @@ config MALI_TMIX_R8P0
config MALI_TMIX_R9P0
depends on MALI_TMIX
bool "mimir r9p0 driver"
config MALI_TMIX_R10P0
depends on MALI_TMIX
bool "mimir r10p0 driver"
endchoice
if MALI_TMIX_R3P0

186
drivers/gpu/arm/tMIx/r10p0/Kbuild Executable file
View File

@ -0,0 +1,186 @@
#
# (C) COPYRIGHT 2012-2016, 2017 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
# Foundation, and any use by you of this program is subject to the terms
# of such GNU licence.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# SPDX-License-Identifier: GPL-2.0
#
#
# Driver version string which is returned to userspace via an ioctl
MALI_RELEASE_NAME ?= "r10p0-01rel0"
# Paths required for build
KBASE_PATH = $(src)
KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy
UMP_PATH = $(src)/../../../base
ifeq ($(CONFIG_MALI_ERROR_INJECT),y)
MALI_ERROR_INJECT_ON = 1
endif
# Set up defaults if not defined by build system
MALI_CUSTOMER_RELEASE ?= 1
MALI_UNIT_TEST ?= 0
MALI_KERNEL_TEST_API ?= 0
MALI_ERROR_INJECT_ON ?= 0
MALI_MOCK_TEST ?= 0
MALI_COVERAGE ?= 0
CONFIG_MALI_PLATFORM_NAME ?= "devicetree"
# This workaround is for what seems to be a compiler bug we observed in
# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling
# the "_Pragma" syntax, where an error message is returned:
#
# "internal compiler error: unspellable token PRAGMA"
#
# This regression has thus far only been seen on the GCC 4.7 compiler bundled
# with AOSP 4.3.0. So this makefile, intended for in-tree kernel builds
# which are not known to be used with AOSP, is hardcoded to disable the
# workaround, i.e. set the define to 0.
MALI_GCC_WORKAROUND_MIDCOM_4598 ?= 0
# Set up our defines, which will be passed to gcc
DEFINES = \
-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
-DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
-DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \
-DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \
-DMALI_COVERAGE=$(MALI_COVERAGE) \
-DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
-DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598)
# MALI_SEC_INTEGRATION : rename CONFIG_MALI_PLATFORM_NAME to CONFIG_MALI_PLATFORM_THIRDPARTY_NAME
ifeq ($(KBUILD_EXTMOD),)
# in-tree
DEFINES +=-DMALI_KBASE_PLATFORM_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
else
# out-of-tree
DEFINES +=-DMALI_KBASE_PLATFORM_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
endif
DEFINES += -I$(srctree)/drivers/staging/android
# Use our defines when compiling
ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(OSK_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
SRC := \
mali_kbase_device.c \
mali_kbase_cache_policy.c \
mali_kbase_mem.c \
mali_kbase_mmu.c \
mali_kbase_ctx_sched.c \
mali_kbase_jd.c \
mali_kbase_jd_debugfs.c \
mali_kbase_jm.c \
mali_kbase_gpuprops.c \
mali_kbase_js.c \
mali_kbase_js_ctx_attr.c \
mali_kbase_event.c \
mali_kbase_context.c \
mali_kbase_pm.c \
mali_kbase_config.c \
mali_kbase_vinstr.c \
mali_kbase_softjobs.c \
mali_kbase_10969_workaround.c \
mali_kbase_hw.c \
mali_kbase_utility.c \
mali_kbase_debug.c \
mali_kbase_trace_timeline.c \
mali_kbase_gpu_memory_debugfs.c \
mali_kbase_mem_linux.c \
mali_kbase_core_linux.c \
mali_kbase_replay.c \
mali_kbase_mem_profile_debugfs.c \
mali_kbase_mmu_mode_lpae.c \
mali_kbase_mmu_mode_aarch64.c \
mali_kbase_disjoint_events.c \
mali_kbase_gator_api.c \
mali_kbase_debug_mem_view.c \
mali_kbase_debug_job_fault.c \
mali_kbase_smc.c \
mali_kbase_mem_pool.c \
mali_kbase_mem_pool_debugfs.c \
mali_kbase_tlstream.c \
mali_kbase_strings.c \
mali_kbase_as_fault_debugfs.c \
mali_kbase_regs_history_debugfs.c \
thirdparty/mali_kbase_mmap.c
ifeq ($(CONFIG_MALI_JOB_DUMP),y)
SRC += mali_kbase_gwt.c
endif
ifeq ($(MALI_UNIT_TEST),1)
SRC += mali_kbase_tlstream_test.c
endif
ifeq ($(MALI_CUSTOMER_RELEASE),0)
SRC += mali_kbase_regs_dump_debugfs.c
endif
ccflags-y += -I$(KBASE_PATH)
# Tell the Linux build system from which .o file to create the kernel module
obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
# Tell the Linux build system to enable building of our .c files
mali_kbase-y := $(SRC:.c=.o)
# Kconfig passes in the name with quotes for in-tree builds - remove them.
# MALI_SEC_INTEGRATION : rename CONFIG_MALI_PLATFORM_NAME to CONFIG_MALI_PLATFORM_THIRDPARTY_NAME
platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
MALI_PLATFORM_DIR := platform/$(platform_name)
ccflags-y += -I$(src)/$(MALI_PLATFORM_DIR)
#include $(src)/$(MALI_PLATFORM_DIR)/Kbuild
obj-$(CONFIG_MALI_MIDGARD) += platform/
#mali_kbase-y += $(PLATFORM_THIRDPARTY:.c=.o)
ifeq ($(CONFIG_MALI_DEVFREQ),y)
ifeq ($(CONFIG_DEVFREQ_THERMAL),y)
include $(src)/ipa/Kbuild
endif
endif
mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
mali_kbase_dma_fence.o \
mali_kbase_fence.o
mali_kbase-$(CONFIG_SYNC) += \
mali_kbase_sync_android.o \
mali_kbase_sync_common.o
mali_kbase-$(CONFIG_SYNC_FILE) += \
mali_kbase_sync_file.o \
mali_kbase_sync_common.o \
mali_kbase_fence.o
ifeq ($(MALI_MOCK_TEST),1)
# Test functionality
mali_kbase-y += tests/internal/src/mock/mali_kbase_pm_driver_mock.o
endif
include $(src)/backend/gpu/Kbuild
mali_kbase-y += $(BACKEND:.c=.o)
ccflags-y += -I$(src)/backend/gpu
subdir-ccflags-y += -I$(src)/backend/gpu
# For kutf and mali_kutf_irq_latency_test
obj-$(CONFIG_MALI_KUTF) += tests/

View File

@ -0,0 +1,221 @@
#
# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
# Foundation, and any use by you of this program is subject to the terms
# of such GNU licence.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# SPDX-License-Identifier: GPL-2.0
#
#
menuconfig MALI_MIDGARD
tristate "Mali Midgard series support"
select GPU_TRACEPOINTS if ANDROID
default n
help
Enable this option to build support for a ARM Mali Midgard GPU.
To compile this driver as a module, choose M here:
this will generate a single module, called mali_kbase.
config MALI_GATOR_SUPPORT
bool "Streamline support via Gator"
depends on MALI_MIDGARD
default n
help
Adds diagnostic support for use with the ARM Streamline Performance Analyzer.
You will need the Gator device driver already loaded before loading this driver when enabling
Streamline debug support.
This is a legacy interface required by older versions of Streamline.
config MALI_MIDGARD_DVFS
bool "Enable legacy DVFS"
depends on MALI_MIDGARD && !MALI_DEVFREQ
default n
help
Choose this option to enable legacy DVFS in the Mali Midgard DDK.
config MALI_MIDGARD_ENABLE_TRACE
bool "Enable kbase tracing"
depends on MALI_MIDGARD
default n
help
Enables tracing in kbase. Trace log available through
the "mali_trace" debugfs file, when the CONFIG_DEBUG_FS is enabled
config MALI_DEVFREQ
bool "devfreq support for Mali"
depends on MALI_MIDGARD && PM_DEVFREQ
help
Support devfreq for Mali.
Using the devfreq framework and, by default, the simpleondemand
governor, the frequency of Mali will be dynamically selected from the
available OPPs.
config MALI_DMA_FENCE
bool "DMA_BUF fence support for Mali"
depends on MALI_MIDGARD && !KDS
default n
help
Support DMA_BUF fences for Mali.
This option should only be enabled if KDS is not present and
the Linux Kernel has built in support for DMA_BUF fences.
# MALI_SEC_INTEGRATION
config MALI_PLATFORM_THIRDPARTY
default y
bool "Third Party Platform"
config MALI_PLATFORM_THIRDPARTY_NAME
depends on MALI_MIDGARD
string "Platform name"
default "exynos"
help
Enter the name of the desired platform configuration directory to
include in the build. 'platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)/Kbuild' must
exist.
# MALI_EXPERT configuration options
menuconfig MALI_EXPERT
depends on MALI_MIDGARD
bool "Enable Expert Settings"
default n
help
Enabling this option and modifying the default settings may produce a driver with performance or
other limitations.
config MALI_CORESTACK
bool "Support controlling power to the GPU core stack"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Enabling this feature on supported GPUs will let the driver powering
on/off the GPU core stack independently without involving the Power
Domain Controller. This should only be enabled on platforms which
integration of the PDC to the Mali GPU is known to be problematic.
This feature is currently only supported on t-Six and t-HEx GPUs.
If unsure, say N.
config MALI_PRFCNT_SET_SECONDARY
bool "Use secondary set of performance counters"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Select this option to use secondary set of performance counters. Kernel
features that depend on an access to the primary set of counters may
become unavailable. Enabling this option will prevent power management
from working optimally and may cause instrumentation tools to return
bogus results.
If unsure, say N.
config MALI_DEBUG
bool "Debug build"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Select this option for increased checking and reporting of errors.
config MALI_FENCE_DEBUG
bool "Debug sync fence usage"
depends on MALI_MIDGARD && MALI_EXPERT && (SYNC || SYNC_FILE)
default n
help
Select this option to enable additional checking and reporting on the
use of sync fences in the Mali driver.
This will add a 3s timeout to all sync fence waits in the Mali
driver, so that when work for Mali has been waiting on a sync fence
for a long time a debug message will be printed, detailing what fence
is causing the block, and which dependent Mali atoms are blocked as a
result of this.
The timeout can be changed at runtime through the js_soft_timeout
device attribute, where the timeout is specified in milliseconds.
config MALI_NO_MALI
bool "No Mali"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
This can be used to test the driver in a simulated environment
whereby the hardware is not physically present. If the hardware is physically
present it will not be used. This can be used to test the majority of the
driver without needing actual hardware or for software benchmarking.
All calls to the simulated hardware will complete immediately as if the hardware
completed the task.
config MALI_ERROR_INJECT
bool "Error injection"
depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI
default n
help
Enables insertion of errors to test module failure and recovery mechanisms.
config MALI_TRACE_TIMELINE
bool "Timeline tracing"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Enables timeline tracing through the kernel tracepoint system.
config MALI_SYSTEM_TRACE
bool "Enable system event tracing support"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Choose this option to enable system trace events for each
kbase event. This is typically used for debugging but has
minimal overhead when not in use. Enable only if you know what
you are doing.
config MALI_JOB_DUMPING
bool "Enable system level support needed for job dumping"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Choose this option to enable system level support needed for
job dumping. This is typically used for instrumentation but has
minimal overhead when not in use. Enable only if you know what
you are doing.
config MALI_2MB_ALLOC
bool "Attempt to allocate 2MB pages"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Rather than allocating all GPU memory page-by-page, attempt to
allocate 2MB pages from the kernel. This reduces TLB pressure and
helps to prevent memory fragmentation.
If in doubt, say N
config MALI_PWRSOFT_765
bool "PWRSOFT-765 ticket"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
PWRSOFT-765 fixes devfreq cooling devices issues. The fix was merged
in kernel v4.10, however if backported into the kernel then this
option must be manually selected.
If using kernel >= v4.10 then say N, otherwise if devfreq cooling
changes have been backported say Y to avoid compilation errors.
source "drivers/gpu/arm/tMIx/r10p0/platform/Kconfig"

View File

@ -0,0 +1,44 @@
#
# (C) COPYRIGHT 2010-2016, 2017 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
# Foundation, and any use by you of this program is subject to the terms
# of such GNU licence.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# SPDX-License-Identifier: GPL-2.0
#
#
KDIR ?= /lib/modules/$(shell uname -r)/build
BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
UMP_PATH_RELATIVE = $(CURDIR)/../../../base/ump
KBASE_PATH_RELATIVE = $(CURDIR)
EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers
ifeq ($(MALI_UNIT_TEST), 1)
EXTRA_SYMBOLS += $(KBASE_PATH_RELATIVE)/tests/internal/src/kernel_assert_module/linux/Module.symvers
endif
ifeq ($(CONFIG_MALI_FPGA_BUS_LOGGER),y)
#Add bus logger symbols
EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
endif
# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
all:
$(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
clean:
$(MAKE) -C $(KDIR) M=$(CURDIR) clean

View File

@ -0,0 +1,23 @@
#
# (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
# Foundation, and any use by you of this program is subject to the terms
# of such GNU licence.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# SPDX-License-Identifier: GPL-2.0
#
#
EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH) -I$(OSK_PATH)/src/linux/include -I$(KBASE_PATH)/platform_$(PLATFORM)

View File

@ -0,0 +1,207 @@
#
# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
# Foundation, and any use by you of this program is subject to the terms
# of such GNU licence.
#
# A copy of the licence is included with the program, and can also be obtained
# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
#
menuconfig MALI_MIDGARD
bool "Mali Midgard series support"
default y
help
Enable this option to build support for a ARM Mali Midgard GPU.
To compile this driver as a module, choose M here:
this will generate a single module, called mali_kbase.
config MALI_GATOR_SUPPORT
bool "Streamline support via Gator"
depends on MALI_MIDGARD
default y if INSTRUMENTATION_STREAMLINE_OLD
default n
help
Adds diagnostic support for use with the ARM Streamline Performance Analyzer.
You will need the Gator device driver already loaded before loading this driver when enabling
Streamline debug support.
This is a legacy interface required by older versions of Streamline.
config MALI_MIDGARD_DVFS
bool "Enable legacy DVFS"
depends on MALI_MIDGARD && !MALI_DEVFREQ
default n
help
Choose this option to enable legacy DVFS in the Mali Midgard DDK.
config MALI_MIDGARD_ENABLE_TRACE
bool "Enable kbase tracing"
depends on MALI_MIDGARD
default n
help
Enables tracing in kbase. The trace log is available through
the "mali_trace" debugfs file when CONFIG_DEBUG_FS is enabled.
config MALI_DEVFREQ
bool "devfreq support for Mali"
depends on MALI_MIDGARD
default y if PLATFORM_JUNO
default y if PLATFORM_CUSTOM
help
Support devfreq for Mali.
Using the devfreq framework and, by default, the simpleondemand
governor, the frequency of Mali will be dynamically selected from the
available OPPs.
config MALI_DMA_FENCE
bool "DMA_BUF fence support for Mali"
depends on MALI_MIDGARD
default n
help
Support DMA_BUF fences for Mali.
This option should only be enabled if the Linux Kernel has built in
support for DMA_BUF fences.
config MALI_PLATFORM_NAME
depends on MALI_MIDGARD
string "Platform name"
default "arndale" if PLATFORM_ARNDALE
default "arndale_octa" if PLATFORM_ARNDALE_OCTA
default "rk" if PLATFORM_FIREFLY
default "hisilicon" if PLATFORM_HIKEY960
default "vexpress" if PLATFORM_VEXPRESS
default "devicetree"
help
Enter the name of the desired platform configuration directory to
include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
exist.
config MALI_MOCK_TEST
bool
depends on MALI_MIDGARD && !RELEASE
default y
# MALI_EXPERT configuration options
menuconfig MALI_EXPERT
depends on MALI_MIDGARD
bool "Enable Expert Settings"
default y
help
Enabling this option and modifying the default settings may produce a driver with performance or
other limitations.
config MALI_CORESTACK
bool "Support controlling power to the GPU core stack"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Enabling this feature on supported GPUs will let the driver power
the GPU core stack on and off independently, without involving the Power
Domain Controller. This should only be enabled on platforms for which
integration of the PDC with the Mali GPU is known to be problematic.
This feature is currently only supported on t-Six and t-HEx GPUs.
If unsure, say N.
config MALI_PRFCNT_SET_SECONDARY
bool "Use secondary set of performance counters"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Select this option to use secondary set of performance counters. Kernel
features that depend on an access to the primary set of counters may
become unavailable. Enabling this option will prevent power management
from working optimally and may cause instrumentation tools to return
bogus results.
If unsure, say N.
config MALI_DEBUG
bool "Debug build"
depends on MALI_MIDGARD && MALI_EXPERT
default y if DEBUG
default n
help
Select this option for increased checking and reporting of errors.
config MALI_FENCE_DEBUG
bool "Debug sync fence usage"
depends on MALI_MIDGARD && MALI_EXPERT
default y if MALI_DEBUG
help
Select this option to enable additional checking and reporting on the
use of sync fences in the Mali driver.
This will add a 3s timeout to all sync fence waits in the Mali
driver, so that when work for Mali has been waiting on a sync fence
for a long time a debug message will be printed, detailing what fence
is causing the block, and which dependent Mali atoms are blocked as a
result of this.
The timeout can be changed at runtime through the js_soft_timeout
device attribute, where the timeout is specified in milliseconds.
config MALI_ERROR_INJECT
bool "Error injection"
depends on MALI_MIDGARD && MALI_EXPERT && NO_MALI
default n
help
Enables insertion of errors to test module failure and recovery mechanisms.
config MALI_ERROR_INJECT_RANDOM
bool "Random error injection"
depends on MALI_MIDGARD && MALI_EXPERT && NO_MALI && MALI_ERROR_INJECT
default n
help
Injected errors are random, rather than user-driven.
config MALI_TRACE_TIMELINE
bool "Timeline tracing"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Enables timeline tracing through the kernel tracepoint system.
config MALI_SYSTEM_TRACE
bool "Enable system event tracing support"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Choose this option to enable system trace events for each
kbase event. This is typically used for debugging but has
minimal overhead when not in use. Enable only if you know what
you are doing.
config MALI_2MB_ALLOC
bool "Attempt to allocate 2MB pages"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
Rather than allocating all GPU memory page-by-page, attempt to
allocate 2MB pages from the kernel. This reduces TLB pressure and
helps to prevent memory fragmentation.
If in doubt, say N
config MALI_FPGA_BUS_LOGGER
bool "Enable bus log integration"
depends on MALI_MIDGARD && MALI_EXPERT
default n
config MALI_PWRSOFT_765
bool "PWRSOFT-765 ticket"
depends on MALI_MIDGARD && MALI_EXPERT
default n
help
PWRSOFT-765 fixes devfreq cooling devices issues. However, they are
not merged in mainline kernel yet. So this define helps to guard those
parts of the code.

View File

@ -0,0 +1,66 @@
#
# (C) COPYRIGHT 2014,2017 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
# Foundation, and any use by you of this program is subject to the terms
# of such GNU licence.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# SPDX-License-Identifier: GPL-2.0
#
#
BACKEND += \
backend/gpu/mali_kbase_cache_policy_backend.c \
backend/gpu/mali_kbase_device_hw.c \
backend/gpu/mali_kbase_gpu.c \
backend/gpu/mali_kbase_gpuprops_backend.c \
backend/gpu/mali_kbase_debug_job_fault_backend.c \
backend/gpu/mali_kbase_irq_linux.c \
backend/gpu/mali_kbase_instr_backend.c \
backend/gpu/mali_kbase_jm_as.c \
backend/gpu/mali_kbase_jm_hw.c \
backend/gpu/mali_kbase_jm_rb.c \
backend/gpu/mali_kbase_js_affinity.c \
backend/gpu/mali_kbase_js_backend.c \
backend/gpu/mali_kbase_mmu_hw_direct.c \
backend/gpu/mali_kbase_pm_backend.c \
backend/gpu/mali_kbase_pm_driver.c \
backend/gpu/mali_kbase_pm_metrics.c \
backend/gpu/mali_kbase_pm_ca.c \
backend/gpu/mali_kbase_pm_ca_fixed.c \
backend/gpu/mali_kbase_pm_always_on.c \
backend/gpu/mali_kbase_pm_coarse_demand.c \
backend/gpu/mali_kbase_pm_demand.c \
backend/gpu/mali_kbase_pm_policy.c \
backend/gpu/mali_kbase_time.c
ifeq ($(MALI_CUSTOMER_RELEASE),0)
BACKEND += \
backend/gpu/mali_kbase_pm_ca_random.c \
backend/gpu/mali_kbase_pm_demand_always_powered.c \
backend/gpu/mali_kbase_pm_fast_start.c
endif
ifeq ($(CONFIG_MALI_DEVFREQ),y)
BACKEND += \
backend/gpu/mali_kbase_devfreq.c \
backend/gpu/mali_kbase_pm_ca_devfreq.c
endif
ifeq ($(CONFIG_MALI_NO_MALI),y)
# Dummy model
BACKEND += backend/gpu/mali_kbase_model_dummy.c
BACKEND += backend/gpu/mali_kbase_model_linux.c
# HW error simulation
BACKEND += backend/gpu/mali_kbase_model_error_generator.c
endif

View File

@ -0,0 +1,34 @@
/*
*
* (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Backend specific configuration
*/
#ifndef _KBASE_BACKEND_CONFIG_H_
#define _KBASE_BACKEND_CONFIG_H_
/* Enable GPU reset API */
#define KBASE_GPU_RESET_EN 1
#endif /* _KBASE_BACKEND_CONFIG_H_ */

View File

@ -0,0 +1,34 @@
/*
*
* (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
#include "backend/gpu/mali_kbase_cache_policy_backend.h"
#include <backend/gpu/mali_kbase_device_internal.h>
/*
 * kbase_cache_set_coherency_mode - Record and apply the GPU coherency mode.
 * @kbdev: Device pointer
 * @mode:  Coherency mode (e.g. COHERENCY_ACE / COHERENCY_ACE_LITE)
 *
 * The chosen mode is always cached on the device struct; it is written to
 * the COHERENCY_ENABLE register only when the hardware exposes that
 * register (BASE_HW_FEATURE_COHERENCY_REG).
 */
void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
		u32 mode)
{
	kbdev->current_gpu_coherency_mode = mode;

	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG))
		return;

	kbase_reg_write(kbdev, COHERENCY_ENABLE, mode, NULL);
}

View File

@ -0,0 +1,39 @@
/*
*
* (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
 * Backend cache-policy interface: declares the entry point used to program
 * the GPU's system coherency mode.
 */
#ifndef _KBASE_CACHE_POLICY_BACKEND_H_
#define _KBASE_CACHE_POLICY_BACKEND_H_
#include "mali_kbase.h"
#include "mali_base_kernel.h"
/**
 * kbase_cache_set_coherency_mode() - Sets the system coherency mode
 * in the GPU.
 * @kbdev: Device pointer
 * @mode: Coherency mode. COHERENCY_ACE/ACE_LITE
 */
void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
u32 mode);
#endif /* _KBASE_CACHE_POLICY_BACKEND_H_ */

View File

@ -0,0 +1,162 @@
/*
*
* (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
#include <mali_kbase.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include "mali_kbase_debug_job_fault.h"
#ifdef CONFIG_DEBUG_FS
/*GPU_CONTROL_REG(r)*/
static int gpu_control_reg_snapshot[] = {
GPU_ID,
SHADER_READY_LO,
SHADER_READY_HI,
TILER_READY_LO,
TILER_READY_HI,
L2_READY_LO,
L2_READY_HI
};
/* JOB_CONTROL_REG(r) */
static int job_control_reg_snapshot[] = {
JOB_IRQ_MASK,
JOB_IRQ_STATUS
};
/* JOB_SLOT_REG(n,r) */
static int job_slot_reg_snapshot[] = {
JS_HEAD_LO,
JS_HEAD_HI,
JS_TAIL_LO,
JS_TAIL_HI,
JS_AFFINITY_LO,
JS_AFFINITY_HI,
JS_CONFIG,
JS_STATUS,
JS_HEAD_NEXT_LO,
JS_HEAD_NEXT_HI,
JS_AFFINITY_NEXT_LO,
JS_AFFINITY_NEXT_HI,
JS_CONFIG_NEXT
};
/*MMU_REG(r)*/
static int mmu_reg_snapshot[] = {
MMU_IRQ_MASK,
MMU_IRQ_STATUS
};
/* MMU_AS_REG(n,r) */
static int as_reg_snapshot[] = {
AS_TRANSTAB_LO,
AS_TRANSTAB_HI,
AS_MEMATTR_LO,
AS_MEMATTR_HI,
AS_FAULTSTATUS,
AS_FAULTADDRESS_LO,
AS_FAULTADDRESS_HI,
AS_STATUS
};
/*
 * kbase_debug_job_fault_reg_snapshot_init - Record the register addresses to
 * capture when a job fault is dumped.
 * @kctx:      Context whose reg_dump buffer is populated
 * @reg_range: Capacity of the reg_dump buffer, as sized by the caller
 *
 * reg_dump is a flat array of (address, value) pairs; only the address words
 * are written here, the value words are filled in later by
 * kbase_job_fault_get_reg_snapshot().  The list is terminated by a pair of
 * REGISTER_DUMP_TERMINATION_FLAG words.
 *
 * Fix: element counts were computed as sizeof(array)/4, silently assuming
 * sizeof(int) == 4; use sizeof(array)/sizeof(array[0]) instead.
 *
 * Return: false if reg_dump was never allocated, true otherwise.
 */
bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
		int reg_range)
{
	int i, j;
	int offset = 0;
	int slot_number;
	int as_number;

	if (kctx->reg_dump == NULL)
		return false;

	slot_number = kctx->kbdev->gpu_props.num_job_slots;
	as_number = kctx->kbdev->gpu_props.num_address_spaces;

	/* get the GPU control registers */
	for (i = 0; i < sizeof(gpu_control_reg_snapshot) /
			sizeof(gpu_control_reg_snapshot[0]); i++) {
		kctx->reg_dump[offset] =
				GPU_CONTROL_REG(gpu_control_reg_snapshot[i]);
		offset += 2;
	}

	/* get the Job control registers */
	for (i = 0; i < sizeof(job_control_reg_snapshot) /
			sizeof(job_control_reg_snapshot[0]); i++) {
		kctx->reg_dump[offset] =
				JOB_CONTROL_REG(job_control_reg_snapshot[i]);
		offset += 2;
	}

	/* get the Job Slot registers */
	for (j = 0; j < slot_number; j++) {
		for (i = 0; i < sizeof(job_slot_reg_snapshot) /
				sizeof(job_slot_reg_snapshot[0]); i++) {
			kctx->reg_dump[offset] =
					JOB_SLOT_REG(j, job_slot_reg_snapshot[i]);
			offset += 2;
		}
	}

	/* get the MMU registers */
	for (i = 0; i < sizeof(mmu_reg_snapshot) /
			sizeof(mmu_reg_snapshot[0]); i++) {
		kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]);
		offset += 2;
	}

	/* get the Address space registers */
	for (j = 0; j < as_number; j++) {
		for (i = 0; i < sizeof(as_reg_snapshot) /
				sizeof(as_reg_snapshot[0]); i++) {
			kctx->reg_dump[offset] =
					MMU_AS_REG(j, as_reg_snapshot[i]);
			offset += 2;
		}
	}

	/* NOTE(review): reg_range appears to be in bytes while offset counts
	 * u32 words — TODO confirm the units against the caller */
	WARN_ON(offset >= (reg_range*2/4));

	/* set the termination flag */
	kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG;
	kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG;

	dev_dbg(kctx->kbdev->dev, "kbase_job_fault_reg_snapshot_init:%d\n",
			offset);

	return true;
}
/*
 * kbase_job_fault_get_reg_snapshot - Read back the value of every register
 * whose address was recorded by kbase_debug_job_fault_reg_snapshot_init().
 * @kctx: Context whose reg_dump buffer is read and filled in
 *
 * Return: false if no dump buffer is allocated, true otherwise.
 */
bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx)
{
	int idx;

	if (kctx->reg_dump == NULL)
		return false;

	/* Entries are (address, value) pairs, terminated by a flag word */
	for (idx = 0;
	     kctx->reg_dump[idx] != REGISTER_DUMP_TERMINATION_FLAG;
	     idx += 2)
		kctx->reg_dump[idx + 1] = kbase_reg_read(kctx->kbdev,
				kctx->reg_dump[idx], NULL);

	return true;
}
#endif

View File

@ -0,0 +1,427 @@
/*
*
* (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
#include <mali_kbase.h>
#include <mali_kbase_tlstream.h>
#include <mali_kbase_config_defaults.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#ifdef CONFIG_DEVFREQ_THERMAL
#include <linux/devfreq_cooling.h>
#endif
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
#include <linux/pm_opp.h>
#else /* Linux >= 3.13 */
/* In 3.13 the OPP include header file, types, and functions were all
* renamed. Use the old filename for the include, and define the new names to
* the old, when an old kernel is detected.
*/
#include <linux/opp.h>
#define dev_pm_opp opp
#define dev_pm_opp_get_voltage opp_get_voltage
#define dev_pm_opp_get_opp_count opp_get_opp_count
#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
#define dev_pm_opp_find_freq_floor opp_find_freq_floor
#endif /* Linux >= 3.13 */
/**
 * opp_translate - Translate nominal OPP frequency from devicetree into real
 * frequency and core mask
 * @kbdev: Device pointer
 * @freq: Nominal frequency
 * @core_mask: Pointer to u64 to store core mask to
 *
 * Return: Real target frequency
 *
 * This function will only perform translation if an operating-points-v2-mali
 * table is present in devicetree. If one is not present then it will return an
 * untranslated frequency and all cores enabled.
 */
static unsigned long opp_translate(struct kbase_device *kbdev,
		unsigned long freq, u64 *core_mask)
{
	const struct kbase_devfreq_opp *entry = kbdev->opp_table;
	const struct kbase_devfreq_opp *end = entry + kbdev->num_opps;

	for (; entry < end; entry++) {
		if (entry->opp_freq == freq) {
			*core_mask = entry->core_mask;
			return entry->real_freq;
		}
	}

	/* Failed to find OPP - return all cores enabled & nominal frequency */
	*core_mask = kbdev->gpu_props.props.raw_props.shader_present;

	return freq;
}
/*
 * kbase_devfreq_target - devfreq 'target' callback: switch the GPU to the
 * closest supported OPP for the requested frequency.
 * @dev:         Device pointer
 * @target_freq: In: requested frequency; out: nominal frequency selected
 * @flags:       Devfreq flags forwarded to devfreq_recommended_opp()
 *
 * Voltage is raised before the clock is increased and lowered only after the
 * clock is decreased, so the rail is always sufficient for the programmed
 * frequency.
 *
 * Return: 0 on success or a negative error code.
 */
static int
kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	unsigned long nominal_freq;
	unsigned long freq = 0;
	unsigned long voltage;
	int err;
	u64 core_mask;

	freq = *target_freq;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, &freq, flags);
	/* Fix: validate the OPP *before* dereferencing it — the previous code
	 * read the voltage from a potential ERR_PTR/NULL pointer. */
	if (IS_ERR_OR_NULL(opp)) {
		rcu_read_unlock();
		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
		/* PTR_ERR(NULL) is 0, which would read as success */
		return IS_ERR(opp) ? PTR_ERR(opp) : -ENODEV;
	}
	voltage = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	nominal_freq = freq;

	/*
	 * Only update if there is a change of frequency
	 */
	if (kbdev->current_nominal_freq == nominal_freq) {
		*target_freq = nominal_freq;
		return 0;
	}

	freq = opp_translate(kbdev, nominal_freq, &core_mask);

#ifdef CONFIG_REGULATOR
	/* Raise the voltage first when moving to a faster OPP */
	if (kbdev->regulator && kbdev->current_voltage != voltage
			&& kbdev->current_freq < freq) {
		err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
		if (err) {
			dev_err(dev, "Failed to increase voltage (%d)\n", err);
			return err;
		}
	}
#endif

	err = clk_set_rate(kbdev->clock, freq);
	if (err) {
		dev_err(dev, "Failed to set clock %lu (target %lu)\n",
				freq, *target_freq);
		return err;
	}

#ifdef CONFIG_REGULATOR
	/* Lower the voltage only after the clock has been reduced */
	if (kbdev->regulator && kbdev->current_voltage != voltage
			&& kbdev->current_freq > freq) {
		err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
		if (err) {
			dev_err(dev, "Failed to decrease voltage (%d)\n", err);
			return err;
		}
	}
#endif

	if (kbdev->pm.backend.ca_current_policy->id ==
			KBASE_PM_CA_POLICY_ID_DEVFREQ)
		kbase_devfreq_set_core_mask(kbdev, core_mask);

	*target_freq = nominal_freq;
	kbdev->current_voltage = voltage;
	kbdev->current_nominal_freq = nominal_freq;
	kbdev->current_freq = freq;
	kbdev->current_core_mask = core_mask;

	KBASE_TLSTREAM_AUX_DEVFREQ_TARGET((u64)nominal_freq);

	kbase_pm_reset_dvfs_utilisation(kbdev);

	return err;
}
static int
kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq)
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
*freq = kbdev->current_nominal_freq;
return 0;
}
/* devfreq get_dev_status callback: report the current nominal frequency and
 * the busy/total time accumulated by the power-management backend. */
static int
kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);

	stat->private_data = NULL;
	stat->current_frequency = kbdev->current_nominal_freq;

	kbase_pm_get_dvfs_utilisation(kbdev, &stat->total_time,
			&stat->busy_time);

	return 0;
}
/*
 * kbase_devfreq_init_freq_table - Populate the devfreq profile's frequency
 * table from the OPPs registered for the device, highest frequency first.
 * @kbdev: Device pointer
 * @dp:    Profile whose freq_table and max_state are filled in
 *
 * Fix: the "Unable to enumerate" warning had an unbalanced '(' in its
 * format string.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the negative
 * error from dev_pm_opp_get_opp_count().
 */
static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
		struct devfreq_dev_profile *dp)
{
	int count;
	int i = 0;
	unsigned long freq;
	struct dev_pm_opp *opp;

	rcu_read_lock();
	count = dev_pm_opp_get_opp_count(kbdev->dev);
	if (count < 0) {
		rcu_read_unlock();
		return count;
	}
	rcu_read_unlock();

	dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
				GFP_KERNEL);
	if (!dp->freq_table)
		return -ENOMEM;

	rcu_read_lock();
	/* Walk OPPs from the top down; freq-- makes the next
	 * find_freq_floor call skip the OPP just returned. */
	for (i = 0, freq = ULONG_MAX; i < count; i++, freq--) {
		opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
		if (IS_ERR(opp))
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
		/* find_freq_floor takes a reference from 4.11 onwards */
		dev_pm_opp_put(opp);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */

		dp->freq_table[i] = freq;
	}
	rcu_read_unlock();

	if (count != i)
		dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d!=%d)\n",
				count, i);

	dp->max_state = i;

	return 0;
}
/* Free the devfreq frequency table. Dereferences kbdev->devfreq, so it is
 * only valid once a devfreq device has been successfully registered. */
static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev)
{
	kfree(kbdev->devfreq->profile->freq_table);
}
/* devfreq exit callback: release the frequency table owned by the profile */
static void kbase_devfreq_exit(struct device *dev)
{
	kbase_devfreq_term_freq_table(dev_get_drvdata(dev));
}
/*
 * kbase_devfreq_init_core_mask_table - Build the nominal-frequency to
 * (real frequency, core mask) translation table from the
 * operating-points-v2-mali devicetree table.
 * @kbdev: Device pointer
 *
 * Leaves kbdev->num_opps at 0 (no translation) when no mali OPP table is
 * present.  kbdev->opp_table is freed by kbase_devfreq_term().
 *
 * Fixes: the opp_node reference from of_parse_phandle() was leaked on every
 * path, the child-node reference was leaked on error paths, and a negative
 * OPP count was passed straight to kmalloc_array().
 *
 * Return: 0 on success or a negative error code.
 */
static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
{
	struct device_node *opp_node = of_parse_phandle(kbdev->dev->of_node,
			"operating-points-v2", 0);
	struct device_node *node;
	int i = 0;
	int count;
	int err = 0;

	if (!opp_node)
		return 0;
	if (!of_device_is_compatible(opp_node, "operating-points-v2-mali"))
		goto out_put;

	count = dev_pm_opp_get_opp_count(kbdev->dev);
	if (count < 0) {
		err = count;
		goto out_put;
	}

	kbdev->opp_table = kmalloc_array(count,
			sizeof(struct kbase_devfreq_opp), GFP_KERNEL);
	if (!kbdev->opp_table) {
		err = -ENOMEM;
		goto out_put;
	}

	for_each_available_child_of_node(opp_node, node) {
		u64 core_mask;
		u64 opp_freq, real_freq;
		const void *core_count_p;

		if (of_property_read_u64(node, "opp-hz", &opp_freq)) {
			dev_warn(kbdev->dev, "OPP is missing required opp-hz property\n");
			continue;
		}
		if (of_property_read_u64(node, "opp-hz-real", &real_freq))
			real_freq = opp_freq;
		if (of_property_read_u64(node, "opp-core-mask", &core_mask))
			core_mask =
				kbdev->gpu_props.props.raw_props.shader_present;
		core_count_p = of_get_property(node, "opp-core-count", NULL);
		if (core_count_p) {
			u64 remaining_core_mask =
				kbdev->gpu_props.props.raw_props.shader_present;
			int core_count = be32_to_cpup(core_count_p);

			core_mask = 0;

			/* Select the lowest core_count cores still present */
			for (; core_count > 0; core_count--) {
				int core = ffs(remaining_core_mask);

				if (!core) {
					dev_err(kbdev->dev, "OPP has more cores than GPU\n");
					of_node_put(node);
					err = -ENODEV;
					goto out_put;
				}

				core_mask |= (1ull << (core-1));
				remaining_core_mask &= ~(1ull << (core-1));
			}
		}

		if (!core_mask) {
			dev_err(kbdev->dev, "OPP has invalid core mask of 0\n");
			of_node_put(node);
			err = -ENODEV;
			goto out_put;
		}

		kbdev->opp_table[i].opp_freq = opp_freq;
		kbdev->opp_table[i].real_freq = real_freq;
		kbdev->opp_table[i].core_mask = core_mask;

		dev_info(kbdev->dev, "OPP %d : opp_freq=%llu real_freq=%llu core_mask=%llx\n",
				i, opp_freq, real_freq, core_mask);

		i++;
	}

	kbdev->num_opps = i;

out_put:
	of_node_put(opp_node);
	return err;
}
/*
 * kbase_devfreq_init - Register the GPU with the devfreq framework.
 * @kbdev: Device pointer
 *
 * Sets up the devfreq profile (target/status/cur_freq/exit callbacks),
 * builds the frequency and core-mask tables, adds the devfreq device with
 * the simple_ondemand governor, hooks the OPP notifier and, when thermal
 * support is enabled, registers the IPA-based cooling device.
 *
 * Fixes: the frequency table was leaked when core-mask table init failed;
 * kbase_devfreq_term_freq_table() was called while kbdev->devfreq held an
 * ERR_PTR (dereference of an error pointer); stray ';' after an if-block.
 *
 * Return: 0 on success or a negative error code.
 */
int kbase_devfreq_init(struct kbase_device *kbdev)
{
	struct devfreq_dev_profile *dp;
	int err;

	if (!kbdev->clock) {
		dev_err(kbdev->dev, "Clock not available for devfreq\n");
		return -ENODEV;
	}

	kbdev->current_freq = clk_get_rate(kbdev->clock);
	kbdev->current_nominal_freq = kbdev->current_freq;

	dp = &kbdev->devfreq_profile;

	dp->initial_freq = kbdev->current_freq;
	dp->polling_ms = 100;
	dp->target = kbase_devfreq_target;
	dp->get_dev_status = kbase_devfreq_status;
	dp->get_cur_freq = kbase_devfreq_cur_freq;
	dp->exit = kbase_devfreq_exit;

	if (kbase_devfreq_init_freq_table(kbdev, dp))
		return -EFAULT;

	if (dp->max_state > 0) {
		/* Record the maximum frequency possible */
		kbdev->gpu_props.props.core_props.gpu_freq_khz_max =
				dp->freq_table[0] / 1000;
	}

	err = kbase_devfreq_init_core_mask_table(kbdev);
	if (err) {
		kfree(dp->freq_table);
		return err;
	}

	kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
				"simple_ondemand", NULL);
	if (IS_ERR(kbdev->devfreq)) {
		err = PTR_ERR(kbdev->devfreq);
		/* Don't call kbase_devfreq_term_freq_table() here: it would
		 * dereference the ERR_PTR stored in kbdev->devfreq. */
		kbdev->devfreq = NULL;
		kfree(dp->freq_table);
		kfree(kbdev->opp_table);
		kbdev->opp_table = NULL;
		return err;
	}

	/* devfreq_add_device only copies a few of kbdev->dev's fields, so
	 * set drvdata explicitly so IPA models can access kbdev. */
	dev_set_drvdata(&kbdev->devfreq->dev, kbdev);

	err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
	if (err) {
		dev_err(kbdev->dev,
			"Failed to register OPP notifier (%d)\n", err);
		goto opp_notifier_failed;
	}

#ifdef CONFIG_DEVFREQ_THERMAL
	err = kbase_ipa_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "IPA initialization failed\n");
		goto cooling_failed;
	}

	kbdev->devfreq_cooling = of_devfreq_cooling_register_power(
			kbdev->dev->of_node,
			kbdev->devfreq,
			&kbase_ipa_power_model_ops);
	if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
		err = PTR_ERR(kbdev->devfreq_cooling);
		dev_err(kbdev->dev,
			"Failed to register cooling device (%d)\n",
			err);
		goto cooling_failed;
	}
#endif

	return 0;

#ifdef CONFIG_DEVFREQ_THERMAL
cooling_failed:
	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
#endif /* CONFIG_DEVFREQ_THERMAL */
opp_notifier_failed:
	/* devfreq_remove_device() invokes the exit callback, which frees the
	 * frequency table */
	if (devfreq_remove_device(kbdev->devfreq))
		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
	else
		kbdev->devfreq = NULL;

	return err;
}
/*
 * kbase_devfreq_term - Tear down devfreq integration for the device.
 * @kbdev: Device pointer
 *
 * Unregisters the cooling device and IPA model (when thermal support is
 * built in), detaches the OPP notifier, removes the devfreq device, then
 * releases the OPP translation table.
 */
void kbase_devfreq_term(struct kbase_device *kbdev)
{
	int ret;

	dev_dbg(kbdev->dev, "Term Mali devfreq\n");

#ifdef CONFIG_DEVFREQ_THERMAL
	if (kbdev->devfreq_cooling)
		devfreq_cooling_unregister(kbdev->devfreq_cooling);

	kbase_ipa_term(kbdev);
#endif

	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);

	ret = devfreq_remove_device(kbdev->devfreq);
	if (ret)
		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", ret);
	else
		kbdev->devfreq = NULL;

	kfree(kbdev->opp_table);
}

View File

@ -0,0 +1,29 @@
/*
*
* (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
#ifndef _BASE_DEVFREQ_H_
#define _BASE_DEVFREQ_H_
int kbase_devfreq_init(struct kbase_device *kbdev);
void kbase_devfreq_term(struct kbase_device *kbdev);
#endif /* _BASE_DEVFREQ_H_ */

View File

@ -0,0 +1,264 @@
/*
*
* (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
*
*/
#include <mali_kbase.h>
#include <backend/gpu/mali_kbase_instr_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#if !defined(CONFIG_MALI_NO_MALI)
#ifdef CONFIG_DEBUG_FS
/*
 * kbase_io_history_resize - Replace the register-access history buffer with
 * a freshly allocated one of @new_size entries.
 * @h:        History structure to resize
 * @new_size: Requested capacity in entries; must be non-zero
 *
 * Old entries are intentionally discarded: the dump logic uses 'count' both
 * as a counter and as an index into the buffer, which would not survive a
 * copy into a different-sized array.
 *
 * Return: 0 on success, -1 on failure (zero size or allocation failure).
 */
int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
{
	struct kbase_io_access *fresh;
	struct kbase_io_access *stale;
	unsigned long flags;

	if (!new_size)
		return -1; /* The new size must not be 0 */

	fresh = vmalloc(new_size * sizeof(*h->buf));
	if (!fresh)
		return -1;

	spin_lock_irqsave(&h->lock, flags);

	stale = h->buf;
	h->count = 0;
	h->size = new_size;
	h->buf = fresh;

	spin_unlock_irqrestore(&h->lock, flags);

	vfree(stale);

	return 0;
}
/**
 * kbase_io_history_init - Set up a register access history.
 * @h: history structure to initialise
 * @n: initial capacity, in entries
 *
 * The history starts out disabled and empty; storage is obtained via
 * kbase_io_history_resize().
 *
 * Return: 0 on success, -1 if the initial buffer could not be allocated.
 */
int kbase_io_history_init(struct kbase_io_history *h, u16 n)
{
	spin_lock_init(&h->lock);
	h->enabled = false;
	h->buf = NULL;
	h->size = 0;
	h->count = 0;

	return kbase_io_history_resize(h, n) ? -1 : 0;
}
/**
 * kbase_io_history_term - Release a register access history's storage.
 * @h: history structure to tear down
 *
 * The buffer pointer is cleared so a stale reference cannot be reused.
 */
void kbase_io_history_term(struct kbase_io_history *h)
{
	struct kbase_io_access *buf = h->buf;

	h->buf = NULL;
	vfree(buf);
}
/* kbase_io_history_add - add new entry to the register access history
 *
 * @h: Pointer to the history data structure
 * @addr: Register address
 * @value: The value that is either read from or written to the register
 * @write: 1 if it's a register write, 0 if it's a read
 */
static void kbase_io_history_add(struct kbase_io_history *h,
		void __iomem const *addr, u32 value, u8 write)
{
	struct kbase_io_access *io;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);

	/* The buffer is a ring: count modulo size selects the slot. */
	io = &h->buf[h->count % h->size];
	/* Bit 0 of the stored address encodes the read/write direction
	 * (recovered in the dump with '& 1' / '& ~0x1'); assumes register
	 * addresses always have bit 0 clear -- TODO confirm.
	 */
	io->addr = (uintptr_t)addr | write;
	io->value = value;
	++h->count;
	/* If count overflows, move the index by the buffer size so the entire
	 * buffer will still be dumped later */
	if (unlikely(!h->count))
		h->count = h->size;

	spin_unlock_irqrestore(&h->lock, flags);
}
/**
 * kbase_io_history_dump - Print the most recent register accesses.
 * @kbdev: Kbase device pointer
 *
 * Emits up to one full ring's worth of entries via dev_err(), oldest first.
 * A no-op unless the history is enabled.
 */
void kbase_io_history_dump(struct kbase_device *kbdev)
{
	struct kbase_io_history *const h = &kbdev->io_history;
	u16 i;
	size_t iters;
	unsigned long flags;

	if (!unlikely(h->enabled))
		return;

	spin_lock_irqsave(&h->lock, flags);

	dev_err(kbdev->dev, "Register IO History:");
	/* Dump at most the number of entries the ring actually retains. */
	iters = (h->size > h->count) ? h->count : h->size;
	/* NOTE(review): %zu expects size_t -- confirm h->count is size_t and
	 * not a fixed-width integer, otherwise this is a format mismatch.
	 */
	dev_err(kbdev->dev, "Last %zu register accesses of %zu total:\n", iters,
			h->count);
	for (i = 0; i < iters; ++i) {
		/* Walk forward from the oldest retained entry. */
		struct kbase_io_access *io =
			&h->buf[(h->count - iters + i) % h->size];
		/* Bit 0 of the stored address encodes the access direction */
		char const access = (io->addr & 1) ? 'w' : 'r';

		dev_err(kbdev->dev, "%6i: %c: reg 0x%p val %08x\n", i, access,
				(void *)(io->addr & ~0x1), io->value);
	}

	spin_unlock_irqrestore(&h->lock, flags);
}
#endif /* CONFIG_DEBUG_FS */
/**
 * kbase_reg_write - Write a 32-bit value to a GPU register.
 * @kbdev:  Kbase device pointer; the GPU must be powered
 * @offset: Register offset from the start of the register bank
 * @value:  Value to write
 * @kctx:   Context issuing the access, or NULL; if non-NULL it must be
 *          scheduled onto an address space
 *
 * Besides the raw writel(), the access is optionally mirrored into the
 * debugfs register access history and the context's trace buffer.
 */
void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
						struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
	KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);

	writel(value, kbdev->reg + offset);

#ifdef CONFIG_DEBUG_FS
	/* Record the access in the debugfs history, if enabled */
	if (unlikely(kbdev->io_history.enabled))
		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
				value, 1);
#endif /* CONFIG_DEBUG_FS */
	dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);

	/* Mirror the write into the per-context trace buffer, if set up */
	if (kctx && kctx->jctx.tb)
		kbase_device_trace_register_access(kctx, REG_WRITE, offset,
				value);
}
KBASE_EXPORT_TEST_API(kbase_reg_write);
/**
 * kbase_reg_read - Read a 32-bit value from a GPU register.
 * @kbdev:  Kbase device pointer; the GPU must be powered
 * @offset: Register offset from the start of the register bank
 * @kctx:   Context issuing the access, or NULL; if non-NULL it must be
 *          scheduled onto an address space
 *
 * Mirrors kbase_reg_write(): the access is optionally recorded in the
 * debugfs register access history and the context's trace buffer.
 *
 * Return: the value read from the register.
 */
u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
						struct kbase_context *kctx)
{
	u32 val;

	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
	KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);

	val = readl(kbdev->reg + offset);

#ifdef CONFIG_DEBUG_FS
	/* Record the access in the debugfs history, if enabled */
	if (unlikely(kbdev->io_history.enabled))
		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
				val, 0);
#endif /* CONFIG_DEBUG_FS */
	dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);

	/* Mirror the read into the per-context trace buffer, if set up */
	if (kctx && kctx->jctx.tb)
		kbase_device_trace_register_access(kctx, REG_READ, offset, val);

	return val;
}
KBASE_EXPORT_TEST_API(kbase_reg_read);
#endif /* !defined(CONFIG_MALI_NO_MALI) */
/**
 * kbase_report_gpu_fault - Report a GPU fault.
 * @kbdev: Kbase device pointer
 * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS
 * was also set
 *
 * This function is called from the interrupt handler when a GPU fault occurs.
 * It reports the details of the fault using dev_warn().
 */
static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
{
	u32 status;
	u64 address;

	status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
	/* The 64-bit fault address is split across a HI/LO register pair */
	address = (u64) kbase_reg_read(kbdev,
			GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
	address |= kbase_reg_read(kbdev,
			GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);

	/* MALI_SEC_INTEGRATION: forward the fault status to the vendor hook */
	if (kbdev->vendor_callbacks->update_status)
		kbdev->vendor_callbacks->update_status(kbdev, "completion_code", status);

	/* The low byte of the status register is the exception code */
	dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
			status & 0xFF,
			kbase_exception_name(kbdev, status),
			address);
	if (multiple)
		dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
}
/**
 * kbase_gpu_interrupt - Top-level GPU IRQ dispatcher.
 * @kbdev: Kbase device pointer
 * @val:   GPU IRQ status bits that triggered this call
 *
 * Dispatches each raised status bit to its handler, acknowledges the
 * interrupt by writing @val to GPU_IRQ_CLEAR, and only then processes
 * power-state changes (see the inline comment on ordering).
 */
void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
{
	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
	if (val & GPU_FAULT)
		kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);

	if (val & RESET_COMPLETED)
		kbase_pm_reset_done(kbdev);

	if (val & PRFCNT_SAMPLE_COMPLETED)
		kbase_instr_hwcnt_sample_done(kbdev);

	if (val & CLEAN_CACHES_COMPLETED)
		kbase_clean_caches_done(kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);

	/* kbase_pm_check_transitions must be called after the IRQ has been
	 * cleared. This is because it might trigger further power transitions
	 * and we don't want to miss the interrupt raised to notify us that
	 * these further transitions have finished.
	 */
	if (val & POWER_CHANGED_ALL)
		kbase_pm_power_changed(kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
}

View File

@ -0,0 +1,72 @@
/*
*
* (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Backend-specific HW access device APIs
*/
#ifndef _KBASE_DEVICE_INTERNAL_H_
#define _KBASE_DEVICE_INTERNAL_H_
/**
* kbase_reg_write - write to GPU register
* @kbdev: Kbase device pointer
* @offset: Offset of register
* @value: Value to write
* @kctx: Kbase context pointer. May be NULL
*
* Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
* @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
* != KBASEP_AS_NR_INVALID).
*/
void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
struct kbase_context *kctx);
/**
* kbase_reg_read - read from GPU register
* @kbdev: Kbase device pointer
* @offset: Offset of register
* @kctx: Kbase context pointer. May be NULL
*
* Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
* @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
* != KBASEP_AS_NR_INVALID).
*
* Return: Value in desired register
*/
u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
struct kbase_context *kctx);
/**
* kbase_gpu_interrupt - GPU interrupt handler
* @kbdev: Kbase device pointer
* @val: The value of the GPU IRQ status register which triggered the call
*
* This function is called from the interrupt handler when a GPU irq is to be
* handled.
*/
void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
#endif /* _KBASE_DEVICE_INTERNAL_H_ */

View File

@ -0,0 +1,135 @@
/*
*
* (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Register-based HW access backend APIs
*/
#include <mali_kbase.h>
#include <mali_kbase_hwaccess_backend.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/**
 * kbase_backend_early_init - First-stage backend initialisation.
 * @kbdev: Kbase device pointer
 *
 * Brings up the platform glue and runtime PM, snapshots the GPU property
 * registers, installs the interrupt handlers and initialises power
 * management. On failure, everything already initialised is torn down in
 * reverse order via the goto ladder.
 *
 * Return: 0 on success, error code otherwise.
 */
int kbase_backend_early_init(struct kbase_device *kbdev)
{
	int err;

	err = kbasep_platform_device_init(kbdev);
	if (err)
		return err;

	err = kbase_pm_runtime_init(kbdev);
	if (err)
		goto fail_runtime_pm;

	/* Ensure we can access the GPU registers */
	kbase_pm_register_access_enable(kbdev);

	/* Find out GPU properties based on the GPU feature registers */
	kbase_gpuprops_set(kbdev);

	/* We're done accessing the GPU registers for now. */
	kbase_pm_register_access_disable(kbdev);

	err = kbase_install_interrupts(kbdev);
	if (err)
		goto fail_interrupts;

	err = kbase_hwaccess_pm_init(kbdev);
	if (err)
		goto fail_pm;

	return 0;

fail_pm:
	kbase_release_interrupts(kbdev);
fail_interrupts:
	kbase_pm_runtime_term(kbdev);
fail_runtime_pm:
	kbasep_platform_device_term(kbdev);

	return err;
}
/**
 * kbase_backend_early_term - Undo kbase_backend_early_init().
 * @kbdev: Kbase device pointer
 *
 * Tears down in the reverse order of initialisation.
 */
void kbase_backend_early_term(struct kbase_device *kbdev)
{
	kbase_hwaccess_pm_term(kbdev);
	kbase_release_interrupts(kbdev);
	kbase_pm_runtime_term(kbdev);
	kbasep_platform_device_term(kbdev);
}
/**
 * kbase_backend_late_init - Second-stage backend initialisation.
 * @kbdev: Kbase device pointer
 *
 * Powers up the GPU, starts the backend timer, optionally sanity-checks the
 * interrupt handler wiring (debug builds on real hardware only) and
 * initialises the job slots. On failure, everything already initialised is
 * torn down in reverse order.
 *
 * Fix: corrected the misspelled error message ("assigment" -> "assignment");
 * no functional change otherwise.
 *
 * Return: 0 on success, error code otherwise.
 */
int kbase_backend_late_init(struct kbase_device *kbdev)
{
	int err;

	err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
	if (err)
		return err;

	err = kbase_backend_timer_init(kbdev);
	if (err)
		goto fail_timer;

#ifdef CONFIG_MALI_DEBUG
#ifndef CONFIG_MALI_NO_MALI
	if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
		dev_err(kbdev->dev, "Interrupt assignment check failed.\n");
		err = -EINVAL;
		goto fail_interrupt_test;
	}
#endif /* !CONFIG_MALI_NO_MALI */
#endif /* CONFIG_MALI_DEBUG */

	err = kbase_job_slot_init(kbdev);
	if (err)
		goto fail_job_slot;

	init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);

	return 0;

fail_job_slot:
#ifdef CONFIG_MALI_DEBUG
#ifndef CONFIG_MALI_NO_MALI
fail_interrupt_test:
#endif /* !CONFIG_MALI_NO_MALI */
#endif /* CONFIG_MALI_DEBUG */
	kbase_backend_timer_term(kbdev);
fail_timer:
	kbase_hwaccess_pm_halt(kbdev);

	return err;
}
/**
 * kbase_backend_late_term - Undo kbase_backend_late_init().
 * @kbdev: Kbase device pointer
 *
 * Halts and tears down the job slots, stops the backend timer and halts
 * power management.
 */
void kbase_backend_late_term(struct kbase_device *kbdev)
{
	kbase_job_slot_halt(kbdev);
	kbase_job_slot_term(kbdev);
	kbase_backend_timer_term(kbdev);
	kbase_hwaccess_pm_halt(kbdev);
}

View File

@ -0,0 +1,115 @@
/*
*
* (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Base kernel property query backend APIs
*/
#include <mali_kbase.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <mali_kbase_hwaccess_gpuprops.h>
/**
 * kbase_backend_gpuprops_get - Snapshot the GPU property registers.
 * @kbdev:   Kbase device pointer; caller must have register access enabled
 * @regdump: Structure receiving the raw register values
 *
 * Reads the feature words, job-slot and texture feature banks, thread
 * limits, and the shader/tiler/L2/stack core presence masks.
 */
void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
					struct kbase_gpuprops_regdump *regdump)
{
	int i;

	/* Fill regdump with the content of the relevant registers */
	regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL);

	regdump->l2_features = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(L2_FEATURES), NULL);
	regdump->suspend_size = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(SUSPEND_SIZE), NULL);
	regdump->tiler_features = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(TILER_FEATURES), NULL);
	regdump->mem_features = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(MEM_FEATURES), NULL);
	regdump->mmu_features = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(MMU_FEATURES), NULL);
	regdump->as_present = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(AS_PRESENT), NULL);
	regdump->js_present = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(JS_PRESENT), NULL);

	/* Per-job-slot and per-word texture feature register banks */
	for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
		regdump->js_features[i] = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(JS_FEATURES_REG(i)), NULL);

	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
		regdump->texture_features[i] = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)), NULL);

	regdump->thread_max_threads = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(THREAD_MAX_THREADS), NULL);
	regdump->thread_max_workgroup_size = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE),
				NULL);
	regdump->thread_max_barrier_size = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE), NULL);
	regdump->thread_features = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(THREAD_FEATURES), NULL);

	/* Core presence masks are split across _LO/_HI register pairs */
	regdump->shader_present_lo = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(SHADER_PRESENT_LO), NULL);
	regdump->shader_present_hi = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(SHADER_PRESENT_HI), NULL);

	regdump->tiler_present_lo = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(TILER_PRESENT_LO), NULL);
	regdump->tiler_present_hi = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(TILER_PRESENT_HI), NULL);

	regdump->l2_present_lo = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(L2_PRESENT_LO), NULL);
	regdump->l2_present_hi = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(L2_PRESENT_HI), NULL);

	regdump->stack_present_lo = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(STACK_PRESENT_LO), NULL);
	regdump->stack_present_hi = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(STACK_PRESENT_HI), NULL);
}
/**
 * kbase_backend_gpuprops_get_features - Read the coherency feature register.
 * @kbdev:   Kbase device pointer
 * @regdump: Structure receiving the coherency_features value
 *
 * On GPUs without the COHERENCY_FEATURES register, a fixed value
 * advertising the modes all such GPUs support (none / ACE-Lite) is
 * reported instead.
 */
void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
					struct kbase_gpuprops_regdump *regdump)
{
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) {
		/* Ensure we can access the GPU registers */
		kbase_pm_register_access_enable(kbdev);

		regdump->coherency_features = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(COHERENCY_FEATURES), NULL);

		/* We're done accessing the GPU registers for now. */
		kbase_pm_register_access_disable(kbdev);
	} else {
		/* Pre COHERENCY_FEATURES we only supported ACE_LITE */
		regdump->coherency_features =
				COHERENCY_FEATURE_BIT(COHERENCY_NONE) |
				COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
	}
}

View File

@ -0,0 +1,499 @@
/*
*
* (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* GPU backend instrumentation APIs.
*/
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_hwaccess_instr.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_instr_internal.h>
/**
 * kbasep_instr_hwcnt_cacheclean - Issue Cache Clean & Invalidate command to
 * hardware
 *
 * @kbdev: Kbase device
 *
 * Must be entered in the REQUEST_CLEAN state; unmasks the
 * CLEAN_CACHES_COMPLETED interrupt, issues the command and transitions to
 * CLEANING. Completion is handled in kbase_clean_caches_done().
 */
static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
{
	unsigned long flags;
	unsigned long pm_flags;
	u32 irq_mask;

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
					KBASE_INSTR_STATE_REQUEST_CLEAN);

	/* Enable interrupt */
	/* hwaccess_lock nests inside hwcnt.lock here */
	spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
				irq_mask | CLEAN_CACHES_COMPLETED, NULL);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);

	/* clean&invalidate the caches so we're sure the mmu tables for the dump
	 * buffer is valid */
	KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
					GPU_COMMAND_CLEAN_INV_CACHES, NULL);
	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANING;

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
}
/**
 * kbase_instr_hwcnt_enable_internal - Enable HW counter collection.
 * @kbdev: Kbase device
 * @kctx:  Context that will own the instrumentation
 * @setup: Dump buffer address and per-block counter enable bitmaps
 *
 * Requests the shader cores, performs a cache clean/invalidate (waiting for
 * it to complete), then programs the performance counter configuration
 * registers in manual mode. Fails if the dump buffer is NULL or not
 * 2048-byte aligned, or if instrumentation is already enabled.
 *
 * Return: 0 on success, -EINVAL otherwise.
 */
int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
					struct kbase_context *kctx,
					struct kbase_uk_hwcnt_setup *setup)
{
	unsigned long flags, pm_flags;
	int err = -EINVAL;
	u32 irq_mask;
	int ret;
	u64 shader_cores_needed;
	u32 prfcnt_config;

	shader_cores_needed = kbase_pm_get_present_cores(kbdev,
							KBASE_PM_CORE_SHADER);

	/* alignment failure: buffer must be non-NULL and 2048-byte aligned */
	if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
		goto out_err;

	/* Override core availability policy to ensure all cores are available
	 */
	kbase_pm_ca_instr_enable(kbdev);

	/* Request the cores early on synchronously - we'll release them on any
	 * errors (e.g. instrumentation already active) */
	kbase_pm_request_cores_sync(kbdev, true, shader_cores_needed);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
		/* Instrumentation is already enabled */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		goto out_unrequest_cores;
	}

	/* Enable interrupt */
	spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask |
						PRFCNT_SAMPLE_COMPLETED, NULL);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);

	/* In use, this context is the owner */
	kbdev->hwcnt.kctx = kctx;
	/* Remember the dump address so we can reprogram it later */
	kbdev->hwcnt.addr = setup->dump_buffer;

	/* Request the clean */
	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
	kbdev->hwcnt.backend.triggered = 0;

	/* Clean&invalidate the caches so we're sure the mmu tables for the dump
	 * buffer is valid */
	ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
					&kbdev->hwcnt.backend.cache_clean_work);
	KBASE_DEBUG_ASSERT(ret);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	/* Wait for cacheclean to complete */
	wait_event(kbdev->hwcnt.backend.wait,
			kbdev->hwcnt.backend.triggered != 0);

	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
							KBASE_INSTR_STATE_IDLE);

	kbase_pm_request_l2_caches(kbdev);

	/* Configure: route the dump to this context's address space */
	prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;

#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
	{
		u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
		u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
			>> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
		int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);

		if (arch_v6)
			prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
	}
#endif

	/* Keep counters off while the remaining registers are programmed */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
			prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx);

	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
					setup->dump_buffer & 0xFFFFFFFF, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
					setup->dump_buffer >> 32, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
					setup->jm_bm, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
					setup->shader_bm, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
					setup->mmu_l2_bm, kctx);

	/* Due to PRLAM-8186 we need to disable the Tiler before we enable the
	 * HW counter dump. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0,
								kctx);
	else
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
							setup->tiler_bm, kctx);

	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
			prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL, kctx);

	/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump
	 */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
							setup->tiler_bm, kctx);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
	kbdev->hwcnt.backend.triggered = 1;
	wake_up(&kbdev->hwcnt.backend.wait);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	err = 0;

	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
	return err;

out_unrequest_cores:
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_unrequest_cores(kbdev, true, shader_cores_needed);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
out_err:
	return err;
}
/**
 * kbase_instr_hwcnt_disable_internal - Disable HW counter collection.
 * @kctx: Context that currently owns the instrumentation
 *
 * Waits for any in-flight dump/setup to finish, then masks the sample
 * interrupt, turns the counters off and releases the cores and L2 that were
 * requested at enable time.
 *
 * Return: 0 on success, -EINVAL if instrumentation was not enabled or is
 * owned by a different context.
 */
int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
{
	unsigned long flags, pm_flags;
	int err = -EINVAL;
	u32 irq_mask;
	struct kbase_device *kbdev = kctx->kbdev;

	/* Loop until the state machine is IDLE, re-checking ownership each
	 * time the lock is re-acquired after sleeping.
	 */
	while (1) {
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) {
			/* Instrumentation is not enabled */
			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
			goto out;
		}

		if (kbdev->hwcnt.kctx != kctx) {
			/* Instrumentation has been setup for another context */
			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
			goto out;
		}

		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE)
			break;

		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

		/* Ongoing dump/setup - wait for its completion */
		wait_event(kbdev->hwcnt.backend.wait,
					kbdev->hwcnt.backend.triggered != 0);
	}

	/* hwcnt.lock is still held here, from the loop's final iteration */
	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
	kbdev->hwcnt.backend.triggered = 0;

	/* Disable interrupt */
	spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
				irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);

	/* Disable the counters */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);

	kbdev->hwcnt.kctx = NULL;
	kbdev->hwcnt.addr = 0ULL;

	kbase_pm_ca_instr_disable(kbdev);

	kbase_pm_unrequest_cores(kbdev, true,
		kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));

	kbase_pm_release_l2_caches(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p",
									kctx);

	err = 0;

out:
	return err;
}
/**
 * kbase_instr_hwcnt_request_dump - Trigger an asynchronous HW counter dump.
 * @kctx: Context that owns the instrumentation
 *
 * Reprograms the dump address and issues the PRFCNT_SAMPLE command.
 * Completion is signalled later via the PRFCNT_SAMPLE_COMPLETED interrupt
 * (kbase_instr_hwcnt_sample_done()); use wait_for_dump()/dump_complete()
 * to observe it.
 *
 * Return: 0 if the dump was requested, -EINVAL if @kctx does not own the
 * instrumentation or the state machine is not IDLE.
 */
int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
{
	unsigned long flags;
	int err = -EINVAL;
	struct kbase_device *kbdev = kctx->kbdev;

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.kctx != kctx) {
		/* The instrumentation has been setup for another context */
		goto unlock;
	}

	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) {
		/* HW counters are disabled or another dump is ongoing, or we're
		 * resetting */
		goto unlock;
	}

	kbdev->hwcnt.backend.triggered = 0;

	/* Mark that we're dumping - the PF handler can signal that we faulted
	 */
	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;

	/* Reconfigure the dump address */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
					kbdev->hwcnt.addr & 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
					kbdev->hwcnt.addr >> 32, NULL);

	/* Start dumping */
	KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL,
					kbdev->hwcnt.addr, 0);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
					GPU_COMMAND_PRFCNT_SAMPLE, kctx);

	/* NOTE(review): at this point the dump has only been requested --
	 * completion arrives via interrupt -- so the "done" wording below is
	 * misleading; consider rewording to "requested".
	 */
	dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx);

	err = 0;

unlock:
	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
	return err;
}
KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
/**
 * kbase_instr_hwcnt_dump_complete - Poll whether a requested dump finished.
 * @kctx:    Context that owns the instrumentation
 * @success: Set to true if the dump completed OK, false if it faulted
 *
 * A FAULT state is consumed here: it is reset back to IDLE once reported.
 *
 * Return: true if no dump is in progress (completed or faulted), false if
 * one is still ongoing.
 */
bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
						bool * const success)
{
	unsigned long flags;
	bool complete = false;
	struct kbase_device *kbdev = kctx->kbdev;

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) {
		*success = true;
		complete = true;
	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
		*success = false;
		complete = true;
		/* Consume the fault so a new dump can be requested */
		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
	}

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	return complete;
}
KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete);
/**
 * kbasep_cache_clean_worker - Workqueue routine performing a cache clean.
 * @data: the hwcnt backend's cache_clean_work member
 *
 * Issues the clean&invalidate command, sleeps until the IRQ path moves the
 * state machine out of CLEANING (to CLEANED), then marks the backend IDLE
 * and wakes any waiters. Serialised against other cleans by
 * cacheclean_lock.
 */
void kbasep_cache_clean_worker(struct work_struct *data)
{
	struct kbase_device *kbdev;
	unsigned long flags;

	kbdev = container_of(data, struct kbase_device,
						hwcnt.backend.cache_clean_work);

	mutex_lock(&kbdev->cacheclean_lock);
	kbasep_instr_hwcnt_cacheclean(kbdev);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	/* Wait for our condition, and any reset to complete */
	while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
		/* Drop the lock while sleeping; re-check under it */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		wait_event(kbdev->hwcnt.backend.cache_clean_wait,
				kbdev->hwcnt.backend.state !=
						KBASE_INSTR_STATE_CLEANING);
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}
	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
						KBASE_INSTR_STATE_CLEANED);

	/* All finished and idle */
	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
	kbdev->hwcnt.backend.triggered = 1;
	wake_up(&kbdev->hwcnt.backend.wait);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
	mutex_unlock(&kbdev->cacheclean_lock);
}
/**
 * kbase_instr_hwcnt_sample_done - Handle the PRFCNT_SAMPLE_COMPLETED IRQ.
 * @kbdev: Kbase device
 *
 * On a successful dump, queues a cache clean so the dump buffer contents
 * become visible; if the dump faulted, just wakes the waiter.
 */
void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
		kbdev->hwcnt.backend.triggered = 1;
		wake_up(&kbdev->hwcnt.backend.wait);
	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) {
		int ret;
		/* Always clean and invalidate the cache after a successful dump
		 */
		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
		ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
					&kbdev->hwcnt.backend.cache_clean_work);
		KBASE_DEBUG_ASSERT(ret);
	}

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
}
/**
 * kbase_clean_caches_done - Handle the CLEAN_CACHES_COMPLETED IRQ.
 * @kbdev: Kbase device
 *
 * Masks the clean-complete interrupt again and, if the instrumentation was
 * waiting on the clean, advances the state machine to CLEANED and wakes the
 * cache-clean worker.
 */
void kbase_clean_caches_done(struct kbase_device *kbdev)
{
	u32 irq_mask;

	/* Unlocked pre-check for a cheap early-out when instrumentation is
	 * off; the state is examined again under hwcnt.lock before acting.
	 */
	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
		unsigned long flags;
		unsigned long pm_flags;

		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
		/* Disable interrupt */
		spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
		irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
									NULL);
		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
				irq_mask & ~CLEAN_CACHES_COMPLETED, NULL);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);

		/* Wakeup... */
		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
			/* Only wake if we weren't resetting */
			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
			wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
		}

		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
	}
}
/**
 * kbase_instr_hwcnt_wait_for_dump - Sleep until a requested dump finishes.
 * @kctx: Context that owns the instrumentation
 *
 * Blocks until the dump (including its follow-up cache clean) has completed
 * or faulted. A FAULT state is consumed here and reset to IDLE.
 *
 * Return: 0 if the dump completed successfully, -EINVAL if the hardware
 * reported a fault while dumping.
 */
int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	unsigned long flags;
	int err;

	/* Wait for dump & cacheclean to complete */
	wait_event(kbdev->hwcnt.backend.wait,
			kbdev->hwcnt.backend.triggered != 0);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
		err = -EINVAL;
		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
	} else {
		/* Dump done */
		KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
							KBASE_INSTR_STATE_IDLE);
		err = 0;
	}

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	return err;
}
/**
 * kbase_instr_hwcnt_clear - Zero the hardware counters.
 * @kctx: Context that owns the instrumentation
 *
 * Return: 0 on success, -EINVAL if @kctx does not own the instrumentation
 * or a dump is currently in progress.
 */
int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
{
	unsigned long flags;
	int err = -EINVAL;
	struct kbase_device *kbdev = kctx->kbdev;

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	/* Check it's the context previously set up and we're not already
	 * dumping */
	if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
							KBASE_INSTR_STATE_IDLE)
		goto out;

	/* Clear the counters */
	KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
						GPU_COMMAND_PRFCNT_CLEAR, kctx);

	err = 0;

out:
	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
	return err;
}
KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear);
/**
 * kbase_instr_backend_init - Initialise the instrumentation backend.
 * @kbdev: Kbase device pointer
 *
 * Sets up the hardware counter state machine, its wait queues and the
 * single-threaded workqueue used to run cache clean/invalidate requests.
 *
 * Return: 0 on success, -EINVAL if the workqueue could not be allocated.
 */
int kbase_instr_backend_init(struct kbase_device *kbdev)
{
	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;

	init_waitqueue_head(&kbdev->hwcnt.backend.wait);
	init_waitqueue_head(&kbdev->hwcnt.backend.cache_clean_wait);
	INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work,
						kbasep_cache_clean_worker);
	kbdev->hwcnt.backend.triggered = 0;

	/* MALI_SEC_INTEGRATION */
	/* Cache cleans must be serialised, so use an ordered workqueue.
	 * alloc_ordered_workqueue() is the documented equivalent of
	 * WQ_UNBOUND | __WQ_ORDERED with max_active of 1, without spelling
	 * out the internal (double-underscore) flag by hand.
	 */
	kbdev->hwcnt.backend.cache_clean_wq =
		alloc_ordered_workqueue("Mali cache cleaning workqueue", 0);
	if (!kbdev->hwcnt.backend.cache_clean_wq)
		return -EINVAL;

	return 0;
}
/**
 * kbase_instr_backend_term - Tear down the instrumentation backend.
 * @kbdev: Kbase device pointer
 *
 * Flushes and destroys the cache-clean workqueue created at init time.
 */
void kbase_instr_backend_term(struct kbase_device *kbdev)
{
	destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq);
}

View File

@ -0,0 +1,63 @@
/*
*
* (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Backend-specific instrumentation definitions
*/
#ifndef _KBASE_INSTR_DEFS_H_
#define _KBASE_INSTR_DEFS_H_
/*
 * Instrumentation State Machine States
 *
 * Transitions are driven by the enable/disable/dump entry points and by the
 * PRFCNT_SAMPLE_COMPLETED / CLEAN_CACHES_COMPLETED interrupt handlers.
 */
enum kbase_instr_state {
	/* State where instrumentation is not active */
	KBASE_INSTR_STATE_DISABLED = 0,
	/* State machine is active and ready for a command. */
	KBASE_INSTR_STATE_IDLE,
	/* Hardware is currently dumping a frame. */
	KBASE_INSTR_STATE_DUMPING,
	/* We've requested a clean to occur on a workqueue */
	KBASE_INSTR_STATE_REQUEST_CLEAN,
	/* Hardware is currently cleaning and invalidating caches. */
	KBASE_INSTR_STATE_CLEANING,
	/* Cache clean completed, and either a) a dump is complete, or
	 * b) instrumentation can now be setup. */
	KBASE_INSTR_STATE_CLEANED,
	/* An error has occurred during DUMPING (page fault). */
	KBASE_INSTR_STATE_FAULT
};
/* Structure used for instrumentation and HW counters dumping */
struct kbase_instr_backend {
	wait_queue_head_t wait;          /* woken when 'triggered' becomes non-zero */
	int triggered;                   /* non-zero once a dump/clean has finished */

	enum kbase_instr_state state;    /* current state-machine state */
	wait_queue_head_t cache_clean_wait; /* woken when a cache clean completes */
	struct workqueue_struct *cache_clean_wq; /* runs cache_clean_work */
	struct work_struct  cache_clean_work; /* deferred cache clean&invalidate */
};
#endif /* _KBASE_INSTR_DEFS_H_ */

View File

@ -0,0 +1,50 @@
/*
*
* (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Backend-specific HW access instrumentation APIs
*/
#ifndef _KBASE_INSTR_INTERNAL_H_
#define _KBASE_INSTR_INTERNAL_H_
/**
* kbasep_cache_clean_worker() - Workqueue for handling cache cleaning
* @data: a &struct work_struct
*/
void kbasep_cache_clean_worker(struct work_struct *data);
/**
* kbase_clean_caches_done() - Cache clean interrupt received
* @kbdev: Kbase device
*/
void kbase_clean_caches_done(struct kbase_device *kbdev);
/**
* kbase_instr_hwcnt_sample_done() - Dump complete interrupt received
* @kbdev: Kbase device
*/
void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev);
#endif /* _KBASE_INSTR_INTERNAL_H_ */

View File

@ -0,0 +1,44 @@
/*
*
* (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Backend specific IRQ APIs
*/
#ifndef _KBASE_IRQ_INTERNAL_H_
#define _KBASE_IRQ_INTERNAL_H_
int kbase_install_interrupts(struct kbase_device *kbdev);
void kbase_release_interrupts(struct kbase_device *kbdev);
/**
* kbase_synchronize_irqs - Ensure that all IRQ handlers have completed
* execution
* @kbdev: The kbase device
*/
void kbase_synchronize_irqs(struct kbase_device *kbdev);
int kbasep_common_test_interrupt_handlers(
struct kbase_device * const kbdev);
#endif /* _KBASE_IRQ_INTERNAL_H_ */

View File

@ -0,0 +1,474 @@
/*
*
* (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
#include <mali_kbase.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <linux/interrupt.h>
#if !defined(CONFIG_MALI_NO_MALI)
/* GPU IRQ Tags */
#define JOB_IRQ_TAG 0
#define MMU_IRQ_TAG 1
#define GPU_IRQ_TAG 2
/* Pack an IRQ tag (0..2, see *_IRQ_TAG) into the low bits of a pointer.
 * The result is the cookie passed to request_irq()/free_irq(). */
static void *kbase_tag(void *ptr, u32 tag)
{
	uintptr_t addr = (uintptr_t)ptr;

	return (void *)(addr | tag);
}
/* Strip the IRQ tag from a pointer produced by kbase_tag(), recovering
 * the original (4-byte-aligned) device pointer. */
static void *kbase_untag(void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;

	/* The low two bits hold the tag; mask them off */
	return (void *)(addr & ~(uintptr_t)3);
}
/**
 * kbase_job_irq_handler - Top-half handler for the Job IRQ line
 * @irq:  IRQ number
 * @data: Device pointer tagged with JOB_IRQ_TAG (see kbase_tag())
 *
 * Return: IRQ_HANDLED if pending job interrupts were processed, IRQ_NONE
 * if the GPU is powered off or no status bits were set.
 */
static irqreturn_t kbase_job_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	/* Hold gpu_powered_lock while reading the power state and the
	 * status register, so the GPU cannot be powered down under us */
	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
								flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.backend.driver_ready_for_irqs)
		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val);
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_job_done(kbdev, val);

	return IRQ_HANDLED;
}

KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
/**
 * kbase_mmu_irq_handler - Top-half handler for the MMU IRQ line
 * @irq:  IRQ number
 * @data: Device pointer tagged with MMU_IRQ_TAG (see kbase_tag())
 *
 * Return: IRQ_HANDLED if pending MMU interrupts were processed, IRQ_NONE
 * if the GPU is powered off or no status bits were set.
 */
static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
									flags);
		return IRQ_NONE;
	}

	/* Balanced by atomic_dec() below; presumably signals that MMU
	 * fault processing is in flight - confirm against waiters on
	 * faults_pending elsewhere in the driver */
	atomic_inc(&kbdev->faults_pending);

	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.backend.driver_ready_for_irqs)
		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val);
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val) {
		atomic_dec(&kbdev->faults_pending);
		return IRQ_NONE;
	}

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_mmu_interrupt(kbdev, val);

	atomic_dec(&kbdev->faults_pending);

	return IRQ_HANDLED;
}
/**
 * kbase_gpu_irq_handler - Top-half handler for the GPU IRQ line
 * @irq:  IRQ number
 * @data: Device pointer tagged with GPU_IRQ_TAG (see kbase_tag())
 *
 * Return: IRQ_HANDLED if pending GPU interrupts were processed, IRQ_NONE
 * if the GPU is powered off or no status bits were set.
 */
static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
									flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	/* Note: dev_dbg here (not dev_warn as in the job/MMU handlers) -
	 * GPU IRQs can legitimately fire during PM init */
	if (!kbdev->pm.backend.driver_ready_for_irqs)
		dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val);
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_gpu_interrupt(kbdev, val);

	return IRQ_HANDLED;
}

KBASE_EXPORT_TEST_API(kbase_gpu_irq_handler);
/* Default handlers, indexed by the *_IRQ_TAG values that are also packed
 * into the cookie passed to request_irq() */
static irq_handler_t kbase_handler_table[] = {
	[JOB_IRQ_TAG] = kbase_job_irq_handler,
	[MMU_IRQ_TAG] = kbase_mmu_irq_handler,
	[GPU_IRQ_TAG] = kbase_gpu_irq_handler,
};
#ifdef CONFIG_MALI_DEBUG
#define JOB_IRQ_HANDLER JOB_IRQ_TAG
#define MMU_IRQ_HANDLER MMU_IRQ_TAG
#define GPU_IRQ_HANDLER GPU_IRQ_TAG
/**
* kbase_set_custom_irq_handler - Set a custom IRQ handler
* @kbdev: Device for which the handler is to be registered
* @custom_handler: Handler to be registered
* @irq_type: Interrupt type
*
* Registers given interrupt handler for requested interrupt type
* In the case where irq handler is not specified, the default handler shall be
* registered
*
* Return: 0 case success, error code otherwise
*/
int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
					irq_handler_t custom_handler,
					int irq_type)
{
	int result = 0;
	irq_handler_t requested_irq_handler = NULL;

	KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
						(GPU_IRQ_HANDLER >= irq_type));

	/* Release previous handler */
	if (kbdev->irqs[irq_type].irq)
		free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));

	/* Fall back to the default table entry when no custom handler given */
	requested_irq_handler = (NULL != custom_handler) ? custom_handler :
						kbase_handler_table[irq_type];

	if (0 != request_irq(kbdev->irqs[irq_type].irq,
			requested_irq_handler,
			kbdev->irqs[irq_type].flags | IRQF_SHARED,
			dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
		/* NOTE: the old handler has already been freed at this point,
		 * so on failure the line is left without any handler */
		result = -EINVAL;
		dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
					kbdev->irqs[irq_type].irq, irq_type);
#ifdef CONFIG_SPARSE_IRQ
		dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
#endif /* CONFIG_SPARSE_IRQ */
	}

	return result;
}

KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
/* test correct interrupt assignment and reception by cpu */
struct kbasep_irq_test {
	struct hrtimer timer;	/* timeout timer armed while waiting */
	wait_queue_head_t wait;	/* woken by the test handler or on timeout */
	int triggered;		/* set once the IRQ or the timeout fired */
	u32 timeout;		/* non-zero if the timer expired first */
};

/* Shared state for the (serialised) interrupt self-test */
static struct kbasep_irq_test kbasep_irq_test_data;

/* Time (ms) to wait for a test interrupt before declaring failure */
#define IRQ_TEST_TIMEOUT    500
/* Test-mode Job IRQ handler: records that the interrupt reached the CPU,
 * wakes the waiting self-test, and clears the raised status bits. */
static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
									flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);

	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbasep_irq_test_data.triggered = 1;
	wake_up(&kbasep_irq_test_data.wait);

	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);

	return IRQ_HANDLED;
}
/* Test-mode MMU IRQ handler: records that the interrupt reached the CPU,
 * wakes the waiting self-test, and clears the raised status bits. */
static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
									flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);

	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbasep_irq_test_data.triggered = 1;
	wake_up(&kbasep_irq_test_data.wait);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);

	return IRQ_HANDLED;
}
/* hrtimer callback: the expected test interrupt did not arrive in time.
 * Marks both @timeout and @triggered so the waiter wakes and sees failure. */
static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
{
	struct kbasep_irq_test *test_data = container_of(timer,
						struct kbasep_irq_test, timer);

	test_data->timeout = 1;
	test_data->triggered = 1;
	wake_up(&test_data->wait);

	return HRTIMER_NORESTART;
}
/**
 * kbasep_common_test_interrupt - Check one GPU IRQ line reaches the CPU
 * @kbdev: Device pointer
 * @tag:   JOB_IRQ_TAG or MMU_IRQ_TAG; GPU_IRQ_TAG (and anything else)
 *         returns 0 immediately, as it is already tested by the PM driver
 *
 * Temporarily swaps in a test handler, raises the interrupt by writing the
 * RAWSTAT register, and waits (bounded by IRQ_TEST_TIMEOUT) for the handler
 * to fire. The original handler and IRQ mask are restored before returning.
 *
 * Return: 0 on success, -EINVAL if the handler swap/restore failed or the
 * interrupt never arrived.
 */
static int kbasep_common_test_interrupt(
				struct kbase_device * const kbdev, u32 tag)
{
	int err = 0;
	irq_handler_t test_handler;

	u32 old_mask_val;
	u16 mask_offset;
	u16 rawstat_offset;

	switch (tag) {
	case JOB_IRQ_TAG:
		test_handler = kbase_job_irq_test_handler;
		rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
		mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
		break;
	case MMU_IRQ_TAG:
		test_handler = kbase_mmu_irq_test_handler;
		rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
		mask_offset = MMU_REG(MMU_IRQ_MASK);
		break;
	case GPU_IRQ_TAG:
		/* already tested by pm_driver - bail out */
	default:
		return 0;
	}

	/* store old mask */
	old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);

	/* mask interrupts */
	kbase_reg_write(kbdev, mask_offset, 0x0, NULL);

	if (kbdev->irqs[tag].irq) {
		/* release original handler and install test handler */
		if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) {
			err = -EINVAL;
		} else {
			kbasep_irq_test_data.timeout = 0;
			hrtimer_init(&kbasep_irq_test_data.timer,
					CLOCK_MONOTONIC, HRTIMER_MODE_REL);
			kbasep_irq_test_data.timer.function =
						kbasep_test_interrupt_timeout;

			/* trigger interrupt: unmask bit 0 then raise it
			 * via the RAWSTAT register */
			kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
			kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);

			hrtimer_start(&kbasep_irq_test_data.timer,
					HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
					HRTIMER_MODE_REL);

			/* woken either by the test handler or by the
			 * timeout timer setting @triggered */
			wait_event(kbasep_irq_test_data.wait,
					kbasep_irq_test_data.triggered != 0);

			if (kbasep_irq_test_data.timeout != 0) {
				dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n",
						kbdev->irqs[tag].irq, tag);
				err = -EINVAL;
			} else {
				dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n",
						kbdev->irqs[tag].irq, tag);
			}

			hrtimer_cancel(&kbasep_irq_test_data.timer);
			kbasep_irq_test_data.triggered = 0;

			/* mask interrupts */
			kbase_reg_write(kbdev, mask_offset, 0x0, NULL);

			/* release test handler */
			free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
		}

		/* restore original interrupt */
		if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag],
				kbdev->irqs[tag].flags | IRQF_SHARED,
				dev_name(kbdev->dev), kbase_tag(kbdev, tag))) {
			dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n",
						kbdev->irqs[tag].irq, tag);
			err = -EINVAL;
		}
	}
	/* restore old mask */
	kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);

	return err;
}
/* Run the JOB and MMU interrupt self-tests (GPU IRQ is covered by the PM
 * driver). Holds a PM active reference for the duration so the GPU stays
 * powered. Returns 0 on success or the first failing test's error code. */
int kbasep_common_test_interrupt_handlers(
					struct kbase_device * const kbdev)
{
	int err;

	init_waitqueue_head(&kbasep_irq_test_data.wait);
	kbasep_irq_test_data.triggered = 0;

	/* A suspend won't happen during startup/insmod */
	kbase_pm_context_active(kbdev);

	err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
	if (err) {
		dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
		goto out;
	}

	err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
	if (err) {
		dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
		goto out;
	}

	dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");

 out:
	kbase_pm_context_idle(kbdev);

	return err;
}
#endif /* CONFIG_MALI_DEBUG */
/* Request all GPU IRQ lines (job/MMU/GPU), tagging the device pointer so
 * each shared handler can tell which line fired. On any failure, all
 * already-requested lines are freed again. Returns 0 or request_irq()'s
 * error code. */
int kbase_install_interrupts(struct kbase_device *kbdev)
{
	u32 nr = ARRAY_SIZE(kbase_handler_table);
	int err;
	u32 i;

	for (i = 0; i < nr; i++) {
		err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i],
				kbdev->irqs[i].flags | IRQF_SHARED,
				dev_name(kbdev->dev),
				kbase_tag(kbdev, i));
		if (err) {
			dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
							kbdev->irqs[i].irq, i);
#ifdef CONFIG_SPARSE_IRQ
			dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
#endif /* CONFIG_SPARSE_IRQ */
			goto release;
		}
	}

	return 0;

 release:
	/* unwind: free only the IRQs requested before the failure */
	while (i-- > 0)
		free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));

	return err;
}
/* Undo kbase_install_interrupts(): free every IRQ line that was
 * successfully requested (an irq number of 0 means "not installed"). */
void kbase_release_interrupts(struct kbase_device *kbdev)
{
	const u32 count = ARRAY_SIZE(kbase_handler_table);
	u32 tag;

	for (tag = 0; tag < count; tag++) {
		if (!kbdev->irqs[tag].irq)
			continue;
		free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
	}
}
/* Wait for any in-flight execution of each installed IRQ handler to
 * finish (see synchronize_irq()). Skips lines that were never requested. */
void kbase_synchronize_irqs(struct kbase_device *kbdev)
{
	const u32 count = ARRAY_SIZE(kbase_handler_table);
	u32 tag;

	for (tag = 0; tag < count; tag++) {
		if (!kbdev->irqs[tag].irq)
			continue;
		synchronize_irq(kbdev->irqs[tag].irq);
	}
}
#endif /* !defined(CONFIG_MALI_NO_MALI) */

View File

@ -0,0 +1,240 @@
/*
*
* (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Register backend context / address space management
*/
#include <mali_kbase.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_ctx_sched.h>
/**
* assign_and_activate_kctx_addr_space - Assign an AS to a context
* @kbdev: Kbase device
* @kctx: Kbase context
* @current_as: Address Space to assign
*
* Assign an Address Space (AS) to a context, and add the context to the Policy.
*
* This includes
* setting up the global runpool_irq structure and the context on the AS,
* Activating the MMU on the AS,
* Allowing jobs to be submitted on the AS.
*
* Context:
* kbasep_js_kctx_info.jsctx_mutex held,
* kbasep_js_device_data.runpool_mutex held,
* AS transaction mutex held,
* Runpool IRQ lock held
*/
static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						struct kbase_as *current_as)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	/* Caller must hold all of the locks listed in the kernel-doc above */
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	lockdep_assert_held(&js_devdata->runpool_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	/* Attribute handling */
	kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);

	/* Allow it to run jobs */
	kbasep_js_set_submit_allowed(js_devdata, kctx);

	kbase_js_runpool_inc_context_count(kbdev, kctx);
	/* NOTE(review): @current_as is unused in this body; the "Activating
	 * the MMU on the AS" step mentioned in the kernel-doc presumably
	 * happens elsewhere - confirm against callers. */
}
/* Return true if @kctx is the active context or already holds one of the
 * HW address spaces; false if it has no AS assigned. */
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
					struct kbase_context *kctx)
{
	int i;

	if (kbdev->hwaccess.active_kctx == kctx) {
		/* Context is already active */
		return true;
	}

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		if (kbdev->as_to_kctx[i] == kctx) {
			/* Context already has ASID - mark as active */
			return true;
		}
	}

	/* Context does not have address space assigned */
	return false;
}
/* Release @kctx's address space from IRQ-safe context. The context must
 * hold exactly one reference (the scheduler's) and a valid AS, otherwise
 * this warns and bails without touching anything. Caller must hold the
 * hwaccess_lock. */
void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
						struct kbase_context *kctx)
{
	int as_nr = kctx->as_nr;

	if (as_nr == KBASEP_AS_NR_INVALID) {
		WARN(1, "Attempting to release context without ASID\n");
		return;
	}

	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (atomic_read(&kctx->refcount) != 1) {
		WARN(1, "Attempting to release active ASID\n");
		return;
	}

	kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);

	kbase_ctx_sched_release_ctx(kctx);
	kbase_js_runpool_dec_context_count(kbdev, kctx);
}
/* Intentionally empty: this backend has no release work that must happen
 * outside IRQ context (everything is done in
 * kbase_backend_release_ctx_irq()). */
void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
						struct kbase_context *kctx)
{
}
/**
 * kbase_backend_find_and_release_free_address_space - Free up an address
 * space by releasing an idle, non-privileged context.
 * @kbdev: Device pointer
 * @kctx:  Context that needs an address space
 *
 * Scans all HW address spaces for a context holding exactly one reference
 * (the one taken by kbasep_js_schedule_ctx()) that is not privileged,
 * releases it (requeue-or-kill), and returns the index of the AS it held.
 * The lock drop/retake dance is required to take the victim context's
 * jsctx_mutex without violating lock ordering.
 *
 * Return: index of the freed address space, or KBASEP_AS_NR_INVALID if no
 * context could be released.
 */
int kbase_backend_find_and_release_free_address_space(
		struct kbase_device *kbdev, struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	unsigned long flags;
	int i;

	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_lock(&js_devdata->runpool_mutex);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		struct kbasep_js_kctx_info *as_js_kctx_info;
		struct kbase_context *as_kctx;

		as_kctx = kbdev->as_to_kctx[i];

		/* Don't release privileged or active contexts, or contexts with
		 * jobs running.
		 * Note that a context will have at least 1 reference (which
		 * was previously taken by kbasep_js_schedule_ctx()) until
		 * descheduled.
		 */
		if (as_kctx && !kbase_ctx_flag(as_kctx, KCTX_PRIVILEGED) &&
			atomic_read(&as_kctx->refcount) == 1) {
			/* Fix: only compute this once as_kctx is known to be
			 * non-NULL (was previously derived before the check,
			 * performing member-address arithmetic on a possibly
			 * NULL pointer). */
			as_js_kctx_info = &as_kctx->jctx.sched_info;

			if (!kbasep_js_runpool_retain_ctx_nolock(kbdev,
								as_kctx)) {
				WARN(1, "Failed to retain active context\n");

				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
						flags);
				mutex_unlock(&js_devdata->runpool_mutex);
				mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

				return KBASEP_AS_NR_INVALID;
			}

			kbasep_js_clear_submit_allowed(js_devdata, as_kctx);

			/* Drop and retake locks to take the jsctx_mutex on the
			 * context we're about to release without violating lock
			 * ordering
			 */
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
			mutex_unlock(&js_devdata->runpool_mutex);
			mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

			/* Release context from address space */
			mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
			mutex_lock(&js_devdata->runpool_mutex);

			kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx);

			if (!kbase_ctx_flag(as_kctx, KCTX_SCHEDULED)) {
				kbasep_js_runpool_requeue_or_kill_ctx(kbdev,
								as_kctx,
								true);

				mutex_unlock(&js_devdata->runpool_mutex);
				mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);

				return i;
			}

			/* Context was retained while locks were dropped,
			 * continue looking for free AS */
			mutex_unlock(&js_devdata->runpool_mutex);
			mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);

			mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
			mutex_lock(&js_devdata->runpool_mutex);
			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		}
	}

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	mutex_unlock(&js_devdata->runpool_mutex);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	return KBASEP_AS_NR_INVALID;
}
/* Bind @kctx to address space @as_nr and allow it to submit jobs.
 * Privileged contexts additionally take an extra runpool reference so the
 * AS stays retained. Returns false (with a WARN) if the context is
 * already scheduled in, true otherwise. Caller must hold runpool_mutex,
 * mmu_hw_mutex and hwaccess_lock. */
bool kbase_backend_use_ctx(struct kbase_device *kbdev,
				struct kbase_context *kctx,
				int as_nr)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbase_as *new_address_space = NULL;

	js_devdata = &kbdev->js_data;

	if (kbdev->hwaccess.active_kctx == kctx) {
		WARN(1, "Context is already scheduled in\n");
		return false;
	}

	new_address_space = &kbdev->as[as_nr];

	lockdep_assert_held(&js_devdata->runpool_mutex);
	lockdep_assert_held(&kbdev->mmu_hw_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space);

	if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
		/* We need to retain it to keep the corresponding address space
		 */
		kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
	}

	return true;
}

View File

@ -0,0 +1,128 @@
/*
*
* (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Register-based HW access backend specific definitions
*/
#ifndef _KBASE_HWACCESS_GPU_DEFS_H_
#define _KBASE_HWACCESS_GPU_DEFS_H_
/* SLOT_RB_SIZE must be < 256 */
#define SLOT_RB_SIZE 2
#define SLOT_RB_MASK (SLOT_RB_SIZE - 1)
/**
* struct rb_entry - Ringbuffer entry
* @katom: Atom associated with this entry
*/
struct rb_entry {
struct kbase_jd_atom *katom;
};
/**
* struct slot_rb - Slot ringbuffer
* @entries: Ringbuffer entries
* @last_context: The last context to submit a job on this slot
* @read_idx: Current read index of buffer
* @write_idx: Current write index of buffer
* @job_chain_flag: Flag used to implement jobchain disambiguation
*/
struct slot_rb {
struct rb_entry entries[SLOT_RB_SIZE];
struct kbase_context *last_context;
u8 read_idx;
u8 write_idx;
u8 job_chain_flag;
};
/**
* struct kbase_backend_data - GPU backend specific data for HW access layer
* @slot_rb: Slot ringbuffers
* @rmu_workaround_flag: When PRLAM-8987 is present, this flag determines
* whether slots 0/1 or slot 2 are currently being
* pulled from
* @scheduling_timer: The timer tick used for rescheduling jobs
* @timer_running: Is the timer running? The runpool_mutex must be
* held whilst modifying this.
* @suspend_timer: Is the timer suspended? Set when a suspend
* occurs and cleared on resume. The runpool_mutex
* must be held whilst modifying this.
* @reset_gpu: Set to a KBASE_RESET_xxx value (see comments)
* @reset_workq: Work queue for performing the reset
* @reset_work: Work item for performing the reset
* @reset_wait: Wait event signalled when the reset is complete
* @reset_timer: Timeout for soft-stops before the reset
* @timeouts_updated: Have timeout values just been updated?
*
* The hwaccess_lock (a spinlock) must be held when accessing this structure
*/
struct kbase_backend_data {
	struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS];

	bool rmu_workaround_flag;

	struct hrtimer scheduling_timer;

	bool timer_running;
	bool suspend_timer;

	atomic_t reset_gpu;

/* Values taken by @reset_gpu (state machine for GPU reset): */
/* The GPU reset isn't pending */
#define KBASE_RESET_GPU_NOT_PENDING     0
/* kbase_prepare_to_reset_gpu has been called */
#define KBASE_RESET_GPU_PREPARED        1
/* kbase_reset_gpu has been called - the reset will now definitely happen
 * within the timeout period */
#define KBASE_RESET_GPU_COMMITTED       2
/* The GPU reset process is currently occurring (timeout has expired or
 * kbasep_try_reset_gpu_early was called) */
#define KBASE_RESET_GPU_HAPPENING       3
/* Reset the GPU silently, used when resetting the GPU as part of normal
 * behavior (e.g. when exiting protected mode). */
#define KBASE_RESET_GPU_SILENT          4
	struct workqueue_struct *reset_workq;
	struct work_struct reset_work;
	wait_queue_head_t reset_wait;
	struct hrtimer reset_timer;

	bool timeouts_updated;
};
/**
* struct kbase_jd_atom_backend - GPU backend specific katom data
*/
struct kbase_jd_atom_backend {
};
/**
* struct kbase_context_backend - GPU backend specific context data
*/
struct kbase_context_backend {
};
#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,169 @@
/*
*
* (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Job Manager backend-specific low-level APIs.
*/
#ifndef _KBASE_JM_HWACCESS_H_
#define _KBASE_JM_HWACCESS_H_
#include <mali_kbase_hw.h>
#include <mali_kbase_debug.h>
#include <linux/atomic.h>
#include <backend/gpu/mali_kbase_jm_rb.h>
/**
* kbase_job_submit_nolock() - Submit a job to a certain job-slot
* @kbdev: Device pointer
* @katom: Atom to submit
* @js: Job slot to submit on
*
* The caller must check kbasep_jm_is_submit_slots_free() != false before
* calling this.
*
* The following locking conditions are made on the caller:
* - it must hold the hwaccess_lock
*/
void kbase_job_submit_nolock(struct kbase_device *kbdev,
struct kbase_jd_atom *katom, int js);
/**
* kbase_job_done_slot() - Complete the head job on a particular job-slot
* @kbdev: Device pointer
* @s: Job slot
* @completion_code: Completion code of job reported by GPU
* @job_tail: Job tail address reported by GPU
* @end_timestamp: Timestamp of job completion
*/
void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code,
u64 job_tail, ktime_t *end_timestamp);
#ifdef CONFIG_GPU_TRACEPOINTS
/* Render the tracepoint name "job_slot_<js>" into the caller-provided
 * buffer and return that buffer (NUL-terminated, truncated if needed). */
static inline char *kbasep_make_job_slot_string(int js, char *js_string,
						size_t js_size)
{
	(void)snprintf(js_string, js_size, "job_slot_%i", js);

	return js_string;
}
#endif
/**
* kbase_job_hw_submit() - Submit a job to the GPU
* @kbdev: Device pointer
* @katom: Atom to submit
* @js: Job slot to submit on
*
* The caller must check kbasep_jm_is_submit_slots_free() != false before
* calling this.
*
* The following locking conditions are made on the caller:
* - it must hold the hwaccess_lock
*/
void kbase_job_hw_submit(struct kbase_device *kbdev,
struct kbase_jd_atom *katom,
int js);
/**
* kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop
* on the specified atom
* @kbdev: Device pointer
* @js: Job slot to stop on
* @action: The action to perform, either JSn_COMMAND_HARD_STOP or
* JSn_COMMAND_SOFT_STOP
* @core_reqs: Core requirements of atom to stop
* @target_katom: Atom to stop
*
* The following locking conditions are made on the caller:
* - it must hold the hwaccess_lock
*/
void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
int js,
u32 action,
base_jd_core_req core_reqs,
struct kbase_jd_atom *target_katom);
/**
* kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job
* slot belonging to a given context.
* @kbdev: Device pointer
* @kctx: Context pointer. May be NULL
* @katom: Specific atom to stop. May be NULL
* @js: Job slot to hard stop
* @action: The action to perform, either JSn_COMMAND_HARD_STOP or
* JSn_COMMAND_SOFT_STOP
*
* If no context is provided then all jobs on the slot will be soft or hard
* stopped.
*
* If a katom is provided then only that specific atom will be stopped. In this
* case the kctx parameter is ignored.
*
* Jobs that are on the slot but are not yet on the GPU will be unpulled and
* returned to the job scheduler.
*
* Return: true if an atom was stopped, false otherwise
*/
bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js,
struct kbase_jd_atom *katom,
u32 action);
/**
* kbase_job_slot_init - Initialise job slot framework
* @kbdev: Device pointer
*
* Called on driver initialisation
*
* Return: 0 on success
*/
int kbase_job_slot_init(struct kbase_device *kbdev);
/**
* kbase_job_slot_halt - Halt the job slot framework
* @kbdev: Device pointer
*
* Should prevent any further job slot processing
*/
void kbase_job_slot_halt(struct kbase_device *kbdev);
/**
* kbase_job_slot_term - Terminate job slot framework
* @kbdev: Device pointer
*
* Called on driver termination
*/
void kbase_job_slot_term(struct kbase_device *kbdev);
/**
* kbase_gpu_cacheclean - Cause a GPU cache clean & flush
* @kbdev: Device pointer
*
* Caller must not be in IRQ context
*/
void kbase_gpu_cacheclean(struct kbase_device *kbdev);
#endif /* _KBASE_JM_HWACCESS_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,81 @@
/*
*
* (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Register-based HW access backend specific APIs
*/
#ifndef _KBASE_HWACCESS_GPU_H_
#define _KBASE_HWACCESS_GPU_H_
#include <backend/gpu/mali_kbase_pm_internal.h>
/**
* kbase_gpu_irq_evict - Evict an atom from a NEXT slot
*
* @kbdev: Device pointer
* @js: Job slot to evict from
*
* Evict the atom in the NEXT slot for the specified job slot. This function is
* called from the job complete IRQ handler when the previous job has failed.
*
* Return: true if job evicted from NEXT registers, false otherwise
*/
bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js);
/**
* kbase_gpu_complete_hw - Complete an atom on job slot js
*
* @kbdev: Device pointer
* @js: Job slot that has completed
* @completion_code: Event code from job that has completed
* @job_tail: The tail address from the hardware if the job has partially
* completed
* @end_timestamp: Time of completion
*/
void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
u32 completion_code,
u64 job_tail,
ktime_t *end_timestamp);
/**
* kbase_gpu_inspect - Inspect the contents of the HW access ringbuffer
*
* @kbdev: Device pointer
* @js: Job slot to inspect
* @idx: Index into ringbuffer. 0 is the job currently running on
* the slot, 1 is the job waiting, all other values are invalid.
* Return: The atom at that position in the ringbuffer
* or NULL if no atom present
*/
struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
int idx);
/**
* kbase_gpu_dump_slots - Print the contents of the slot ringbuffers
*
* @kbdev: Device pointer
*/
void kbase_gpu_dump_slots(struct kbase_device *kbdev);
#endif /* _KBASE_HWACCESS_GPU_H_ */

View File

@ -0,0 +1,308 @@
/*
*
* (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Base kernel affinity manager APIs
*/
#include <mali_kbase.h>
#include "mali_kbase_js_affinity.h"
#include "mali_kbase_hw.h"
#include <backend/gpu/mali_kbase_pm_internal.h>
/**
 * kbase_js_can_run_job_on_slot_no_lock - Decide whether job slot @js may
 * be used in the current scheduling situation.
 * @kbdev: Device pointer
 * @js:    Job slot number
 *
 * Slots other than 2 are always usable; slot 2 only under the conditions
 * detailed in the comment below.
 *
 * Return: true if jobs may be run on slot @js, false otherwise.
 */
bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev,
									int js)
{
	/*
	 * Here are the reasons for using job slot 2:
	 * - BASE_HW_ISSUE_8987 (which is entirely used for that purpose)
	 * - In absence of the above, then:
	 *  - Atoms with BASE_JD_REQ_COHERENT_GROUP
	 *  - But, only when there aren't contexts with
	 *  KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, because the atoms that run on
	 *  all cores on slot 1 could be blocked by those using a coherent group
	 *  on slot 2
	 *  - And, only when you actually have 2 or more coregroups - if you
	 *  only have 1 coregroup, then having jobs for slot 2 implies they'd
	 *  also be for slot 1, meaning you'll get interference from them. Jobs
	 *  able to run on slot 2 could also block jobs that can only run on
	 *  slot 1 (tiler jobs)
	 */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
		return true;

	if (js != 2)
		return true;

	/* Only deal with js==2 now: */
	if (kbdev->gpu_props.num_core_groups > 1) {
		/* Only use slot 2 in the 2+ coregroup case */
		if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev,
					KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) ==
								false) {
			/* ...But only when we *don't* have atoms that run on
			 * all cores */

			/* No specific check for BASE_JD_REQ_COHERENT_GROUP
			 * atoms - the policy will sort that out */
			return true;
		}
	}

	/* Above checks failed mean we shouldn't use slot 2 */
	return false;
}
/*
* As long as it has been decided to have a deeper modification of
* what job scheduler, power manager and affinity manager will
* implement, this function is just an intermediate step that
* assumes:
* - all working cores will be powered on when this is called.
* - largest current configuration is 2 core groups.
* - It has been decided not to have hardcoded values so the low
 * and high cores in a core split will be evenly distributed.
* - Odd combinations of core requirements have been filtered out
* and do not get to this function (e.g. CS+T+NSS is not
* supported here).
* - This function is frequently called and can be optimized,
 * (see notes in loops), but as the functionality will likely
* be modified, optimization has not been addressed.
*/
/*
 * Compute the shader-core affinity mask for @katom about to run on slot @js
 * and store it in *@affinity.
 *
 * The mask is the intersection of the core availability policy's current
 * mask, the per-slot debug core mask, and (for coherent-group atoms) the
 * chosen coherency group's core mask.
 *
 * Return: true when a usable (non-zero, or tiler-only) affinity was chosen,
 * false when no cores are currently available.
 *
 * Caller must hold kbdev->hwaccess_lock (asserted below).
 */
bool kbase_js_choose_affinity(u64 * const affinity,
					struct kbase_device *kbdev,
					struct kbase_jd_atom *katom, int js)
{
	base_jd_core_req core_req = katom->core_req;
	unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
	u64 core_availability_mask;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	core_availability_mask = kbase_pm_ca_get_core_mask(kbdev);

	/*
	 * If no cores are currently available (core availability policy is
	 * transitioning) then fail.
	 */
	if (0 == core_availability_mask) {
		*affinity = 0;
		return false;
	}

	KBASE_DEBUG_ASSERT(js >= 0);

	/* Tiler-only atom (T requested, neither FS nor CS) */
	if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
								BASE_JD_REQ_T) {
		/* If the hardware supports XAFFINITY then we'll only enable
		 * the tiler (which is the default so this is a no-op),
		 * otherwise enable shader core 0. */
		if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
			*affinity = 1;
		else
			*affinity = 0;

		return true;
	}

	if (1 == kbdev->gpu_props.num_cores) {
		/* trivial case: only one core, nothing to do */
		*affinity = core_availability_mask &
				kbdev->pm.debug_core_mask[js];
	} else {
		if ((core_req & (BASE_JD_REQ_COHERENT_GROUP |
					BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
			if (js == 0 || num_core_groups == 1) {
				/* js[0] and single-core-group systems just get
				 * the first core group */
				*affinity =
				kbdev->gpu_props.props.coherency_info.group[0].core_mask
						& core_availability_mask &
						kbdev->pm.debug_core_mask[js];
			} else {
				/* js[1], js[2] use core groups 0, 1 for
				 * dual-core-group systems */
				u32 core_group_idx = ((u32) js) - 1;

				KBASE_DEBUG_ASSERT(core_group_idx <
							num_core_groups);
				*affinity =
				kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask
						& core_availability_mask &
						kbdev->pm.debug_core_mask[js];

				/* If the job is specifically targeting core
				 * group 1 and the core availability policy is
				 * keeping that core group off, then fail */
				if (*affinity == 0 && core_group_idx == 1 &&
						kbdev->pm.backend.cg1_disabled
								== true)
					katom->event_code =
							BASE_JD_EVENT_PM_EVENT;
			}
		} else {
			/* All cores are available when no core split is
			 * required */
			*affinity = core_availability_mask &
					kbdev->pm.debug_core_mask[js];
		}
	}

	/*
	 * If no cores are currently available in the desired core group(s)
	 * (core availability policy is transitioning) then fail.
	 */
	if (*affinity == 0)
		return false;

	/* Enable core 0 if tiler required for hardware without XAFFINITY
	 * support (notes above) */
	if (core_req & BASE_JD_REQ_T) {
		if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
			*affinity = *affinity | 1;
	}

	return true;
}
/*
 * Check whether the tracked affinities of job slots 1 and 2 overlap.
 *
 * Due to micro-architectural issues, a job in one of these slots targeting
 * cores used by the other could stall it until completion, so any shared
 * bit between the two slot masks counts as a violation.
 */
static inline bool kbase_js_affinity_is_violating(
					struct kbase_device *kbdev,
					u64 *affinities)
{
	u64 slot1_mask;
	u64 slot2_mask;

	KBASE_DEBUG_ASSERT(affinities != NULL);

	slot1_mask = affinities[1];
	slot2_mask = affinities[2];

	/* Violation iff any core appears in both slot masks */
	return (slot1_mask & slot2_mask) != 0;
}
/*
 * Test whether adding @affinity to slot @js would break the affinity
 * restrictions, without modifying the tracked state.
 *
 * Works on a scratch copy of the per-slot affinity masks so the live
 * runpool_irq.slot_affinities array is left untouched.
 */
bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
								u64 affinity)
{
	u64 proposed[BASE_JM_MAX_NR_SLOTS];
	struct kbasep_js_device_data *js_devdata;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
	js_devdata = &kbdev->js_data;

	memcpy(proposed, js_devdata->runpool_irq.slot_affinities,
			sizeof(js_devdata->runpool_irq.slot_affinities));

	/* Apply the proposed addition to the copy only */
	proposed[js] |= affinity;

	return kbase_js_affinity_is_violating(kbdev, proposed);
}
/*
 * Retain every core in @affinity for slot @js: bump each core's per-slot
 * refcount and, on the first reference, set the core's bit in the slot's
 * tracked affinity mask.
 *
 * The caller must already have verified (asserted here) that the retain
 * would not violate the affinity restrictions.
 */
void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
								u64 affinity)
{
	struct kbasep_js_device_data *js_devdata;
	u64 remaining;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
	js_devdata = &kbdev->js_data;

	KBASE_DEBUG_ASSERT(kbase_js_affinity_would_violate(kbdev, js, affinity)
			== false);

	/* Peel cores off @remaining, highest set bit first */
	for (remaining = affinity; remaining != 0; ) {
		int core = fls64(remaining) - 1;
		u64 core_bit = 1ULL << core;

		/* First retainer of this core publishes it in the mask */
		if (++(js_devdata->runpool_irq.slot_affinity_refcount[js][core])
				== 1)
			js_devdata->runpool_irq.slot_affinities[js] |= core_bit;

		remaining &= ~core_bit;
	}
}
/*
 * Release every core in @affinity for slot @js: drop each core's per-slot
 * refcount and, when it reaches zero, clear the core's bit in the slot's
 * tracked affinity mask. Mirror image of
 * kbase_js_affinity_retain_slot_cores().
 */
void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
								u64 affinity)
{
	struct kbasep_js_device_data *js_devdata;
	u64 remaining;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
	js_devdata = &kbdev->js_data;

	/* Peel cores off @remaining, highest set bit first */
	for (remaining = affinity; remaining != 0; ) {
		int core = fls64(remaining) - 1;
		u64 core_bit = 1ULL << core;

		/* Refcount must not underflow: every release is paired with
		 * an earlier retain */
		KBASE_DEBUG_ASSERT(
		js_devdata->runpool_irq.slot_affinity_refcount[js][core] > 0);

		/* Last releaser of this core removes it from the mask */
		if (--(js_devdata->runpool_irq.slot_affinity_refcount[js][core])
				== 0)
			js_devdata->runpool_irq.slot_affinities[js] &=
					~core_bit;

		remaining &= ~core_bit;
	}
}
#if KBASE_TRACE_ENABLE
/*
 * Dump the currently tracked per-slot affinity masks to the trace log,
 * one JS_AFFINITY_CURRENT entry per job slot.
 *
 * Fix: iterate with BASE_JM_MAX_NR_SLOTS - the bound used for the
 * slot_affinities arrays throughout this file - instead of the magic
 * constant 3, keeping the loop consistent with the array size.
 */
void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata;
	int slot_nr;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	js_devdata = &kbdev->js_data;

	for (slot_nr = 0; slot_nr < BASE_JM_MAX_NR_SLOTS; ++slot_nr)
		KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL,
					NULL, 0u, slot_nr,
			(u32) js_devdata->runpool_irq.slot_affinities[slot_nr]);
}
#endif /* KBASE_TRACE_ENABLE */

View File

@ -0,0 +1,134 @@
/*
*
* (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Affinity Manager internal APIs.
*/
#ifndef _KBASE_JS_AFFINITY_H_
#define _KBASE_JS_AFFINITY_H_
/**
* kbase_js_can_run_job_on_slot_no_lock - Decide whether it is possible to
* submit a job to a particular job slot in the current status
*
* @kbdev: The kbase device structure of the device
* @js: Job slot number to check for allowance
*
* Will check if submitting to the given job slot is allowed in the current
* status. For example using job slot 2 while in soft-stoppable state and only
* having 1 coregroup is not allowed by the policy. This function should be
* called prior to submitting a job to a slot to make sure policy rules are not
* violated.
*
* The following locking conditions are made on the caller
* - it must hold hwaccess_lock
*/
bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, int js);
/**
* kbase_js_choose_affinity - Compute affinity for a given job.
*
* @affinity: Affinity bitmap computed
* @kbdev: The kbase device structure of the device
* @katom: Job chain of which affinity is going to be found
* @js: Slot the job chain is being submitted
*
* Currently assumes an all-on/all-off power management policy.
* Also assumes there is at least one core with tiler available.
*
* Returns true if a valid affinity was chosen, false if
* no cores were available.
*/
bool kbase_js_choose_affinity(u64 * const affinity,
struct kbase_device *kbdev,
struct kbase_jd_atom *katom,
int js);
/**
* kbase_js_affinity_would_violate - Determine whether a proposed affinity on
* job slot @js would cause a violation of affinity restrictions.
*
* @kbdev: Kbase device structure
* @js: The job slot to test
* @affinity: The affinity mask to test
*
* The following locks must be held by the caller
* - hwaccess_lock
*
* Return: true if the affinity would violate the restrictions
*/
bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
u64 affinity);
/**
* kbase_js_affinity_retain_slot_cores - Affinity tracking: retain cores used by
* a slot
*
* @kbdev: Kbase device structure
* @js: The job slot retaining the cores
* @affinity: The cores to retain
*
* The following locks must be held by the caller
* - hwaccess_lock
*/
void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
u64 affinity);
/**
* kbase_js_affinity_release_slot_cores - Affinity tracking: release cores used
* by a slot
*
* @kbdev: Kbase device structure
* @js: Job slot
* @affinity: Bit mask of core to be released
*
* Cores must be released as soon as a job is dequeued from a slot's 'submit
* slots', and before another job is submitted to those slots. Otherwise, the
* refcount could exceed the maximum number submittable to a slot,
* %BASE_JM_SUBMIT_SLOTS.
*
* The following locks must be held by the caller
* - hwaccess_lock
*/
void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
u64 affinity);
/**
* kbase_js_debug_log_current_affinities - log the current affinities
*
* @kbdev: Kbase device structure
*
* Output to the Trace log the current tracked affinities on all slots
*/
#if KBASE_TRACE_ENABLE
void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev);
#else /* KBASE_TRACE_ENABLE */
static inline void
kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
{
}
#endif /* KBASE_TRACE_ENABLE */
#endif /* _KBASE_JS_AFFINITY_H_ */

View File

@ -0,0 +1,358 @@
/*
*
* (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Register-based HW access backend specific job scheduler APIs
*/
#include <mali_kbase.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_js_internal.h>
/*
 * Decide whether the JS scheduling timer should currently be running.
 *
 * Caller must hold kbdev->js_data.runpool_mutex (the locking in the caller
 * provides the barrier that makes nr_contexts_runnable safe to read here).
 */
static inline bool timer_callback_should_run(struct kbase_device *kbdev)
{
	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
	s8 nr_running_ctxs;
	s8 nr_compute_ctxs;
	s8 nr_noncompute_ctxs;

	lockdep_assert_held(&kbdev->js_data.runpool_mutex);

	/* The timer must stop while a suspend is in progress */
	if (backend->suspend_timer)
		return false;

	nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable);

#ifdef CONFIG_MALI_DEBUG
	/* Debug support for allowing soft-stop on a single context */
	if (kbdev->js_data.softstop_always)
		return true;
#endif /* CONFIG_MALI_DEBUG */

	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
		/* Run the timer callback whenever at least one context is
		 * runnable */
		return nr_running_ctxs > 0;
	}

	/* BASE_HW_ISSUE_9435: timeouts would have to be 4x longer (due to
	 * micro-architectural design) to support OpenCL conformance tests,
	 * so only run the timer when there are:
	 * - 2 or more CL contexts
	 * - 1 or more GLES contexts
	 *
	 * NOTE: a context that has both Compute and Non-Compute jobs is
	 * treated as an OpenCL context (hence, we don't check
	 * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
	 */
	nr_compute_ctxs = kbasep_js_ctx_attr_count_on_runpool(kbdev,
					KBASEP_JS_CTX_ATTR_COMPUTE);
	nr_noncompute_ctxs = nr_running_ctxs - nr_compute_ctxs;

	return nr_compute_ctxs >= 2 || nr_noncompute_ctxs > 0;
}
/*
 * JS scheduling-tick timer callback.
 *
 * Once per scheduling period, walks every job slot and ages the atom at the
 * head of each slot (atom->ticks). Depending on how many ticks the atom has
 * accumulated it is soft-stopped, hard-stopped, or - if it still hasn't left
 * the GPU - a GPU reset is requested. Finally the timer re-arms itself if
 * backend->timer_running is still set.
 *
 * Runs in hrtimer (softirq) context; all slot inspection is done under
 * kbdev->hwaccess_lock.
 *
 * Return: always HRTIMER_NORESTART (re-arming is done explicitly via
 * hrtimer_start() above).
 */
static enum hrtimer_restart timer_callback(struct hrtimer *timer)
{
	unsigned long flags;
	struct kbase_device *kbdev;
	struct kbasep_js_device_data *js_devdata;
	struct kbase_backend_data *backend;
	int s;
	bool reset_needed = false;

	KBASE_DEBUG_ASSERT(timer != NULL);

	/* Recover the device from the embedded timer */
	backend = container_of(timer, struct kbase_backend_data,
							scheduling_timer);
	kbdev = container_of(backend, struct kbase_device, hwaccess.backend);
	js_devdata = &kbdev->js_data;

	/* Loop through the slots */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
		struct kbase_jd_atom *atom = NULL;

		if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) {
			/* Only the currently-running atom (index 0) is aged */
			atom = kbase_gpu_inspect(kbdev, s, 0);
			KBASE_DEBUG_ASSERT(atom != NULL);
		}

		if (atom != NULL) {
			/* The current version of the model doesn't support
			 * Soft-Stop */
			if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
				u32 ticks = atom->ticks++;

#ifndef CONFIG_MALI_JOB_DUMP
				u32 soft_stop_ticks, hard_stop_ticks,
								gpu_reset_ticks;

				/* CL (compute-only) atoms get their own set
				 * of timeouts */
				if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
					soft_stop_ticks =
						js_devdata->soft_stop_ticks_cl;
					hard_stop_ticks =
						js_devdata->hard_stop_ticks_cl;
					gpu_reset_ticks =
						js_devdata->gpu_reset_ticks_cl;
				} else {
					soft_stop_ticks =
						js_devdata->soft_stop_ticks;
					hard_stop_ticks =
						js_devdata->hard_stop_ticks_ss;
					gpu_reset_ticks =
						js_devdata->gpu_reset_ticks_ss;
				}

				/* If timeouts have been changed then ensure
				 * that atom tick count is not greater than the
				 * new soft_stop timeout. This ensures that
				 * atoms do not miss any of the timeouts due to
				 * races between this worker and the thread
				 * changing the timeouts. */
				if (backend->timeouts_updated &&
						ticks > soft_stop_ticks)
					ticks = atom->ticks = soft_stop_ticks;

				/* Job is Soft-Stoppable */
				if (ticks == soft_stop_ticks) {
					int disjoint_threshold =
		KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
					u32 softstop_flags = 0u;
					/* Job has been scheduled for at least
					 * js_devdata->soft_stop_ticks ticks.
					 * Soft stop the slot so we can run
					 * other jobs.
					 */
					dev_dbg(kbdev->dev, "Soft-stop");
#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
					/* nr_user_contexts_running is updated
					 * with the runpool_mutex, but we can't
					 * take that here.
					 *
					 * However, if it's about to be
					 * increased then the new context can't
					 * run any jobs until they take the
					 * hwaccess_lock, so it's OK to observe
					 * the older value.
					 *
					 * Similarly, if it's about to be
					 * decreased, the last job from another
					 * context has already finished, so it's
					 * not too bad that we observe the older
					 * value and register a disjoint event
					 * when we try soft-stopping */
					if (js_devdata->nr_user_contexts_running
							>= disjoint_threshold)
						softstop_flags |=
						JS_COMMAND_SW_CAUSES_DISJOINT;

					kbase_job_slot_softstop_swflags(kbdev,
						s, atom, softstop_flags);
#endif
				} else if (ticks == hard_stop_ticks) {
					/* Job has been scheduled for at least
					 * js_devdata->hard_stop_ticks_ss ticks.
					 * It should have been soft-stopped by
					 * now. Hard stop the slot.
					 */
#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
					int ms =
						js_devdata->scheduling_period_ns
								/ 1000000u;
					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
							(unsigned long)ticks,
							(unsigned long)ms);
					kbase_job_slot_hardstop(atom->kctx, s,
									atom);
#endif
				} else if (ticks == gpu_reset_ticks) {
					/* Job has been scheduled for at least
					 * js_devdata->gpu_reset_ticks_ss ticks.
					 * It should have left the GPU by now.
					 * Signal that the GPU needs to be
					 * reset.
					 */
					reset_needed = true;
				}
#else				/* !CONFIG_MALI_JOB_DUMP */
				/* NOTE: During CONFIG_MALI_JOB_DUMP, we use
				 * the alternate timeouts, which makes the hard-
				 * stop and GPU reset timeout much longer. We
				 * also ensure that we don't soft-stop at all.
				 */
				if (ticks == js_devdata->soft_stop_ticks) {
					/* Job has been scheduled for at least
					 * js_devdata->soft_stop_ticks. We do
					 * not soft-stop during
					 * CONFIG_MALI_JOB_DUMP, however.
					 */
					dev_dbg(kbdev->dev, "Soft-stop");
				} else if (ticks ==
					js_devdata->hard_stop_ticks_dumping) {
					/* Job has been scheduled for at least
					 * js_devdata->hard_stop_ticks_dumping
					 * ticks. Hard stop the slot.
					 */
#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
					int ms =
						js_devdata->scheduling_period_ns
								/ 1000000u;
					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
							(unsigned long)ticks,
							(unsigned long)ms);
					kbase_job_slot_hardstop(atom->kctx, s,
									atom);
#endif
				} else if (ticks ==
					js_devdata->gpu_reset_ticks_dumping) {
					/* Job has been scheduled for at least
					 * js_devdata->gpu_reset_ticks_dumping
					 * ticks. It should have left the GPU by
					 * now. Signal that the GPU needs to be
					 * reset.
					 */
					reset_needed = true;
				}
#endif				/* !CONFIG_MALI_JOB_DUMP */
			}
		}
	}
#if KBASE_GPU_RESET_EN
	if (reset_needed) {
		dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issueing GPU soft-reset to resolve.");

		if (kbase_prepare_to_reset_gpu_locked(kbdev))
			kbase_reset_gpu_locked(kbdev);
	}
#endif /* KBASE_GPU_RESET_EN */
	/* the timer is re-issued if there are contexts in the run-pool */
	if (backend->timer_running)
		hrtimer_start(&backend->scheduling_timer,
			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
			HRTIMER_MODE_REL);

	backend->timeouts_updated = false;

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return HRTIMER_NORESTART;
}
/*
 * Re-evaluate whether the JS scheduling timer should be running after the
 * number of contexts in the runpool has changed, stopping or (re)starting
 * the hrtimer accordingly.
 *
 * Caller must hold js_devdata->runpool_mutex. The hwaccess_lock is taken
 * around each flag flip purely to synchronise with the timer callback.
 */
void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
	unsigned long irq_flags;

	lockdep_assert_held(&js_devdata->runpool_mutex);

	if (!timer_callback_should_run(kbdev)) {
		/* Clear the flag under the spinlock so the timer callback
		 * observes it and stops requeueing itself */
		spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
		backend->timer_running = false;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
		/* From now on, return value of timer_callback_should_run()
		 * will also cause the timer to not requeue itself. Its return
		 * value cannot change, because it depends on variables updated
		 * with the runpool_mutex held, which the caller of this must
		 * also hold */
		hrtimer_cancel(&backend->scheduling_timer);
	}

	if (timer_callback_should_run(kbdev) && !backend->timer_running) {
		/* Set the flag under the spinlock before arming the timer */
		spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
		backend->timer_running = true;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
		hrtimer_start(&backend->scheduling_timer,
			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
			HRTIMER_MODE_REL);

		KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u,
									0u);
	}
}
int kbase_backend_timer_init(struct kbase_device *kbdev)
{
struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
backend->scheduling_timer.function = timer_callback;
/* MALI_SEC_INTEGRATION */
#ifdef CONFIG_SCHED_EHMP
backend->scheduling_timer.bounded_to_boot_cluster = true;
#endif
backend->timer_running = false;
return 0;
}
/* Tear down the JS scheduling timer at driver termination. */
void kbase_backend_timer_term(struct kbase_device *kbdev)
{
	hrtimer_cancel(&kbdev->hwaccess.backend.scheduling_timer);
}
/*
 * Suspend path: flag the timer as suspended, then let the ctx-count logic
 * stop it (timer_callback_should_run() returns false while suspended).
 */
void kbase_backend_timer_suspend(struct kbase_device *kbdev)
{
	kbdev->hwaccess.backend.suspend_timer = true;

	kbase_backend_ctx_count_changed(kbdev);
}
/*
 * Resume path: clear the suspend flag, then let the ctx-count logic decide
 * whether the timer should be restarted.
 */
void kbase_backend_timer_resume(struct kbase_device *kbdev)
{
	kbdev->hwaccess.backend.suspend_timer = false;

	kbase_backend_ctx_count_changed(kbdev);
}
/*
 * Note that the JS timeout values changed; the timer callback picks this
 * flag up on its next tick and clamps atom tick counts accordingly.
 */
void kbase_backend_timeouts_changed(struct kbase_device *kbdev)
{
	kbdev->hwaccess.backend.timeouts_updated = true;
}

View File

@ -0,0 +1,74 @@
/*
*
* (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Register-based HW access backend specific job scheduler APIs
*/
#ifndef _KBASE_JS_BACKEND_H_
#define _KBASE_JS_BACKEND_H_
/**
* kbase_backend_timer_init() - Initialise the JS scheduling timer
* @kbdev: Device pointer
*
* This function should be called at driver initialisation
*
* Return: 0 on success
*/
int kbase_backend_timer_init(struct kbase_device *kbdev);
/**
* kbase_backend_timer_term() - Terminate the JS scheduling timer
* @kbdev: Device pointer
*
* This function should be called at driver termination
*/
void kbase_backend_timer_term(struct kbase_device *kbdev);
/**
* kbase_backend_timer_suspend - Suspend is happening, stop the JS scheduling
* timer
* @kbdev: Device pointer
*
* This function should be called on suspend, after the active count has reached
* zero. This is required as the timer may have been started on job submission
* to the job scheduler, but before jobs are submitted to the GPU.
*
* Caller must hold runpool_mutex.
*/
void kbase_backend_timer_suspend(struct kbase_device *kbdev);
/**
* kbase_backend_timer_resume - Resume is happening, re-evaluate the JS
* scheduling timer
* @kbdev: Device pointer
*
 * This function should be called on resume. Note that it is not guaranteed to
 * re-start the timer, only evaluate whether it should be re-started.
*
* Caller must hold runpool_mutex.
*/
void kbase_backend_timer_resume(struct kbase_device *kbdev);
#endif /* _KBASE_JS_BACKEND_H_ */

View File

@ -0,0 +1,454 @@
/*
*
* (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
#include <linux/bitops.h>
#include <mali_kbase.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_mmu_hw.h>
#include <mali_kbase_tlstream.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <mali_kbase_as_fault_debugfs.h>
/*
 * Encode an MMU lock-region value covering @num_pages pages starting at page
 * frame @pfn: the upper bits carry the region base byte address
 * (pfn << PAGE_SHIFT) and the low bits carry the log2 region width.
 */
static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
		u32 num_pages)
{
	u64 region;

	/* can't lock a zero sized range */
	KBASE_DEBUG_ASSERT(num_pages);

	region = pfn << PAGE_SHIFT;
	/*
	 * fls returns (given the ASSERT above):
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	/* gracefully handle num_pages being zero */
	/* NOTE(review): unreachable on builds where the ASSERT above fires;
	 * presumably kept as a release-build safety net for when
	 * KBASE_DEBUG_ASSERT compiles out - confirm that assumption. */
	if (0 == num_pages) {
		region |= 11;
	} else {
		u8 region_width;

		region_width = 10 + fls(num_pages);
		if (num_pages != (1ul << (region_width - 11))) {
			/* not pow2, so must go up to the next pow2 */
			region_width += 1;
		}
		KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
		KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
		region |= region_width;
	}

	return region;
}
/*
 * Poll AS_STATUS of address space @as_nr until the MMU reports no active
 * command, bounded by KBASE_AS_INACTIVE_MAX_LOOPS iterations.
 *
 * Only the first and (if polling occurred) last register reads pass @kctx,
 * so intermediate polls are not logged.
 *
 * Return: 0 once the address space is ready, -1 if AS_ACTIVE never cleared.
 */
static int wait_ready(struct kbase_device *kbdev,
		unsigned int as_nr, struct kbase_context *kctx)
{
	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. Do not log remaining register accesses. */
	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);

	if (max_loops == 0) {
		dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
		return -1;
	}

	/* If waiting in loop was performed, log last read value. */
	if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
		kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	return 0;
}
/*
 * Issue @cmd to address space @as_nr once the MMU has finished any command
 * already in flight.
 *
 * Return: 0 when the command was written, -1 if the AS never became ready
 * (the command is then not written).
 */
static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
		struct kbase_context *kctx)
{
	int err;

	/* write AS_COMMAND when MMU is ready to accept another command */
	err = wait_ready(kbdev, as_nr, kctx);
	if (err != 0)
		return err;

	kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd, kctx);

	return 0;
}
/*
 * Sanity-check a page fault address reported while in protected mode.
 *
 * GPUs with (native) protected mode must not report fault addresses unless
 * protected debug mode exists and is enabled; if that is not the case, log
 * an error but carry on.
 */
static void validate_protected_page_fault(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	u32 dbgen_active = 0;

	/* Nothing to validate on GPUs without native protected mode */
	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
		return;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE))
		dbgen_active = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(GPU_STATUS),
				kctx) & GPU_DBGEN;

	if (!dbgen_active) {
		/* fault_addr should never be reported in protected mode.
		 * However, we just continue by printing an error message */
		dev_err(kbdev->dev, "Fault address reported in protected mode\n");
	}
}
/*
 * MMU interrupt handler: decode @irq_stat into per-address-space bus-fault
 * and page-fault bits, and for each faulting AS capture the fault address,
 * status and type into the kbase_as struct, then hand it to
 * kbase_mmu_interrupt_process().
 *
 * MMU interrupts are masked for the duration and the handled sources are
 * removed from the mask that is restored at the end. The
 * MALI_SEC_INTEGRATION additions snapshot the previous fault state so a
 * rejected queue_work (caught_bug < 0) can be diagnosed.
 */
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
{
	const int num_as = 16;
	const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
	const int pf_shift = 0;
	const unsigned long as_bit_mask = (1UL << num_as) - 1;
	unsigned long flags;
	u32 new_mask;
	u32 tmp;

	/* bus faults */
	u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
	/* page faults (note: Ignore ASes with both pf and bf) */
	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;

	/* MALI_SEC_INTEGRATION */
	u32 org_mask;
	/* previous content of as struct */
	enum kbase_mmu_fault_type fault_type;
	u32 fault_status;
	u64 fault_addr;

	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* remember current mask */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	/* MALI_SEC_INTEGRATION */
	org_mask = new_mask;
	/* mask interrupts for now */
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);

	/* Handle one faulting address space per iteration */
	while (bf_bits | pf_bits) {
		struct kbase_as *as;
		int as_no;
		struct kbase_context *kctx;
		/* MALI_SEC_INTEGRATION */
		int caught_bug;

		/*
		 * the while logic ensures we have a bit set, no need to check
		 * for not-found here
		 */
		as_no = ffs(bf_bits | pf_bits) - 1;
		as = &kbdev->as[as_no];

		/*
		 * Refcount the kctx ASAP - it shouldn't disappear anyway, since
		 * Bus/Page faults _should_ only occur whilst jobs are running,
		 * and a job causing the Bus/Page fault shouldn't complete until
		 * the MMU is updated
		 */
		kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);

		/* MALI_SEC_INTEGRATION : save previous */
		fault_addr = as->fault_addr;
		/* find faulting address (HI:LO register pair) */
		as->fault_addr = kbase_reg_read(kbdev,
						MMU_AS_REG(as_no,
							AS_FAULTADDRESS_HI),
						kctx);
		as->fault_addr <<= 32;
		as->fault_addr |= kbase_reg_read(kbdev,
						MMU_AS_REG(as_no,
							AS_FAULTADDRESS_LO),
						kctx);

		/* Mark the fault protected or not */
		as->protected_mode = kbdev->protected_mode;

		if (kbdev->protected_mode && as->fault_addr)
		{
			/* check if address reporting is allowed */
			validate_protected_page_fault(kbdev, kctx);
		}

		/* report the fault to debugfs */
		kbase_as_fault_debugfs_new(kbdev, as_no);

		/* MALI_SEC_INTEGRATION : save previous */
		fault_status = as->fault_status;
		/* record the fault status */
		as->fault_status = kbase_reg_read(kbdev,
						MMU_AS_REG(as_no,
							AS_FAULTSTATUS),
						kctx);

		/* MALI_SEC_INTEGRATION : save previous */
		if (as->fault_type != KBASE_MMU_FAULT_TYPE_UNKNOWN) {
			int new_fault = (bf_bits & (1 << as_no)) ?
				KBASE_MMU_FAULT_TYPE_BUS :
				KBASE_MMU_FAULT_TYPE_PAGE;
			dev_err(kbdev->dev,
				"New fault of type 0x%x while fault 0x%x is pending\n",
				new_fault,
				(int)as->fault_type);
		}
		fault_type = as->fault_type;

		/* find the fault type */
		as->fault_type = (bf_bits & (1 << as_no)) ?
				KBASE_MMU_FAULT_TYPE_BUS :
				KBASE_MMU_FAULT_TYPE_PAGE;

		/* AArch64 MMUs also report an extra fault word (HI:LO) */
		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
			as->fault_extra_addr = kbase_reg_read(kbdev,
					MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
					kctx);
			as->fault_extra_addr <<= 32;
			as->fault_extra_addr |= kbase_reg_read(kbdev,
					MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
					kctx);
		}

		if (kbase_as_has_bus_fault(as)) {
			/* Mark bus fault as handled.
			 * Note that a bus fault is processed first in case
			 * where both a bus fault and page fault occur.
			 */
			bf_bits &= ~(1UL << as_no);

			/* remove the queued BF (and PF) from the mask */
			new_mask &= ~(MMU_BUS_ERROR(as_no) |
					MMU_PAGE_FAULT(as_no));
		} else {
			/* Mark page fault as handled */
			pf_bits &= ~(1UL << as_no);

			/* remove the queued PF from the mask */
			new_mask &= ~MMU_PAGE_FAULT(as_no);
		}

		/* Process the interrupt for this address space */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		/* MALI_SEC_INTEGRATION */
		caught_bug = kbase_mmu_interrupt_process(kbdev, kctx, as);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		/* MALI_SEC_INTEGRATION */
		if (caught_bug < 0) {
			dev_err(kbdev->dev, "Caught bug, queue_work rejected\n");
			dev_err(kbdev->dev, "irq_mask was 0x%lx, now reduced down to 0x%lx\n",
					(unsigned long)org_mask,
					(unsigned long)new_mask);
			dev_err(kbdev->dev, "Previous fault_type = %d, new fault_type = %d\n",
					(int)fault_type,
					(int)as->fault_type);
			dev_err(kbdev->dev, "Previous fault_status = 0x%lx, new fault_status = 0x%lx\n",
					(unsigned long)fault_status,
					(unsigned long)as->fault_status);
			dev_err(kbdev->dev, "Previous fault_addr = 0x%llx, new fault_addr = 0x%llx\n",
					(unsigned long long)fault_addr,
					(unsigned long long)as->fault_addr);
			/* BUG(); */ /* we've reproduced the bug, crash and generate ramdump */
			/* NOTE(review): kbasep_js_runpool_lookup_ctx() may
			 * return NULL; this deref assumes kctx is valid in
			 * the caught_bug path - confirm. */
			kctx->need_to_force_schedule_out = true;
		}
	}

	/* reenable interrupts */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	new_mask |= tmp;
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
/*
 * kbase_mmu_hw_configure - Program an address space's MMU registers.
 * @kbdev: kbase device
 * @as:    address space to configure; uses @as->current_setup
 * @kctx:  owning context (passed through to register accessors)
 *
 * On GPUs with the AArch64 page-table format, TRANSCFG is programmed with
 * write-back PTW memory attributes (and outer-shareable PTW on ACE-coherent
 * systems). TRANSTAB and MEMATTR are then written, and an UPDATE command is
 * issued so the MMU latches the new setup.
 */
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx)
{
	struct kbase_mmu_setup *current_setup = &as->current_setup;
	u32 transcfg = 0;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
		transcfg = current_setup->transcfg & 0xFFFFFFFFUL;
		/* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
		/* Clear PTW_MEMATTR bits */
		transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
		/* Enable correct PTW_MEMATTR bits */
		transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
		if (kbdev->system_coherency == COHERENCY_ACE) {
			/* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
			/* Clear PTW_SH bits */
			transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
			/* Enable correct PTW_SH bits */
			transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
		}
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
				transcfg, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
				(current_setup->transcfg >> 32) & 0xFFFFFFFFUL,
				kctx);
	} else {
		/* LPAE format: shareability is encoded in TRANSTAB instead */
		if (kbdev->system_coherency == COHERENCY_ACE)
			current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
	}

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
			current_setup->transtab & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
			(current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
			current_setup->memattr & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
			(current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);

	/* Trace the new AS configuration before committing it */
	KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as,
			current_setup->transtab,
			current_setup->memattr,
			transcfg);

	write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
}
/*
 * kbase_mmu_hw_do_operation - Issue an MMU command over a page range.
 * @kbdev:        kbase device
 * @as:           address space to operate on
 * @kctx:         owning context (passed through to register accessors)
 * @vpfn:         first virtual page frame number of the region
 * @nr:           number of pages in the region
 * @op:           AS_COMMAND_* operation to run
 * @handling_irq: unused here; kept for interface compatibility
 *
 * For UNLOCK the command is issued directly. For all other commands the
 * region is first locked via AS_LOCKADDR + LOCK, the operation is run, and
 * completion is awaited. Must be called with kbdev->mmu_hw_mutex held.
 *
 * Return: 0 on success, non-zero if the AS did not become ready.
 */
int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
		unsigned int handling_irq)
{
	int ret;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);

	if (op == AS_COMMAND_UNLOCK) {
		/* Unlock doesn't require a lock first */
		ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
	} else {
		u64 lock_addr = lock_region(kbdev, vpfn, nr);

		/* Lock the region that needs to be updated */
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
				lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
				(lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
		write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);

		/* Run the MMU operation */
		write_cmd(kbdev, as->number, op, kctx);

		/* Wait for the flush to complete */
		ret = wait_ready(kbdev, as->number, kctx);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
			/* Issue an UNLOCK command to ensure that valid page
			   tables are re-read by the GPU after an update.
			   Note that, the FLUSH command should perform all the
			   actions necessary, however the bus logs show that if
			   multiple page faults occur within an 8 page region
			   the MMU does not always re-read the updated page
			   table entries for later faults or is only partially
			   read, it subsequently raises the page fault IRQ for
			   the same addresses, the unlock ensures that the MMU
			   cache is flushed, so updates can be re-read.  As the
			   region is now unlocked we need to issue 2 UNLOCK
			   commands in order to flush the MMU/uTLB,
			   see PRLAM-8812.
			 */
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
		}
	}

	return ret;
}
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
unsigned long flags;
u32 pf_bf_mask;
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
/*
* A reset is in-flight and we're flushing the IRQ + bottom half
* so don't update anything as it could race with the reset code.
*/
if (kbdev->irq_reset_flush)
goto unlock;
/* Clear the page (and bus fault IRQ as well in case one occurred) */
pf_bf_mask = MMU_PAGE_FAULT(as->number);
if (type == KBASE_MMU_FAULT_TYPE_BUS ||
type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
pf_bf_mask |= MMU_BUS_ERROR(as->number);
kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
unsigned long flags;
u32 irq_mask;
/* Enable the page fault IRQ (and bus fault IRQ as well in case one
* occurred) */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
/*
* A reset is in-flight and we're flushing the IRQ + bottom half
* so don't update anything as it could race with the reset code.
*/
if (kbdev->irq_reset_flush)
goto unlock;
irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
MMU_PAGE_FAULT(as->number);
if (type == KBASE_MMU_FAULT_TYPE_BUS ||
type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
irq_mask |= MMU_BUS_ERROR(as->number);
kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}

View File

@ -0,0 +1,47 @@
/*
*
* (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Interface file for the direct implementation for MMU hardware access
*
* Direct MMU hardware interface
*
* This module provides the interface(s) that are required by the direct
* register access implementation of the MMU hardware interface
*/
#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_
#define _MALI_KBASE_MMU_HW_DIRECT_H_

#include <mali_kbase_defs.h>

/**
 * kbase_mmu_interrupt - Process an MMU interrupt.
 *
 * Process the MMU interrupt that was reported by the &kbase_device.
 *
 * @kbdev:    kbase device the interrupt was raised on
 * @irq_stat: Value of the MMU_IRQ_STATUS register
 */
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);

#endif	/* _MALI_KBASE_MMU_HW_DIRECT_H_ */

View File

@ -0,0 +1,68 @@
/*
*
* (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* "Always on" power management policy
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
/* Core-mask callback: the always-on policy desires every present core. */
static u64 always_on_get_core_mask(struct kbase_device *kbdev)
{
	const u64 present_cores =
		kbdev->gpu_props.props.raw_props.shader_present;

	return present_cores;
}
/* Core-active callback: under this policy the GPU is always kept active. */
static bool always_on_get_core_active(struct kbase_device *kbdev)
{
	return true;
}
/* Policy init callback: the always-on policy keeps no state. */
static void always_on_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/* Policy termination callback: nothing to tear down. */
static void always_on_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/*
* The struct kbase_pm_policy structure for the demand power policy.
*
* This is the static structure that defines the demand power policy's callback
* and name.
*/
const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
"always_on", /* name */
always_on_init, /* init */
always_on_term, /* term */
always_on_get_core_mask, /* get_core_mask */
always_on_get_core_active, /* get_core_active */
0u, /* flags */
KBASE_PM_POLICY_ID_ALWAYS_ON, /* id */
};
KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops);

View File

@ -0,0 +1,82 @@
/*
*
* (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* "Always on" power management policy
*/
#ifndef MALI_KBASE_PM_ALWAYS_ON_H
#define MALI_KBASE_PM_ALWAYS_ON_H

/**
 * DOC:
 * The "Always on" power management policy has the following
 * characteristics:
 *
 * - When KBase indicates that the GPU will be powered up, but we don't yet
 *   know which Job Chains are to be run:
 *   All Shader Cores are powered up, regardless of whether or not they will
 *   be needed later.
 *
 * - When KBase indicates that a set of Shader Cores are needed to submit the
 *   currently queued Job Chains:
 *   All Shader Cores are kept powered, regardless of whether or not they will
 *   be needed
 *
 * - When KBase indicates that the GPU need not be powered:
 *   The Shader Cores are kept powered, regardless of whether or not they will
 *   be needed. The GPU itself is also kept powered, even though it is not
 *   needed.
 *
 * This policy is automatically overridden during system suspend: the desired
 * core state is ignored, and the cores are forced off regardless of what the
 * policy requests. After resuming from suspend, new changes to the desired
 * core state made by the policy are honored.
 *
 * Note:
 *
 * - KBase indicates the GPU will be powered up when it has a User Process that
 *   has just started to submit Job Chains.
 *
 * - KBase indicates the GPU need not be powered when all the Job Chains from
 *   User Processes have finished, and it is waiting for a User Process to
 *   submit some more Job Chains.
 */

/**
 * struct kbasep_pm_policy_always_on - Private struct for policy instance data
 * @dummy: unused dummy variable (the policy is stateless; C requires at
 *         least one member)
 *
 * This contains data that is private to the particular power policy that is
 * active.
 */
struct kbasep_pm_policy_always_on {
	int dummy;
};

/* Callback table for this policy, registered with the PM core. */
extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops;

#endif /* MALI_KBASE_PM_ALWAYS_ON_H */

View File

@ -0,0 +1,549 @@
/*
*
* (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* GPU backend implementation of base kernel power management APIs
*/
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);

/*
 * kbase_pm_runtime_init - Wire up the platform power-management callbacks.
 * @kbdev: kbase device
 *
 * Copies the callback pointers from the platform-provided
 * POWER_MANAGEMENT_CALLBACKS table into the PM backend (or NULLs them all
 * when no table is configured), then runs the platform's runtime-init
 * callback if one was supplied.
 *
 * Return: 0 on success, or the platform runtime-init callback's error code.
 */
int kbase_pm_runtime_init(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
		/* MALI_SEC_INTEGRATION */
		kbdev->pm.backend.callback_power_dvfs_on =
					callbacks->power_dvfs_on_callback;

		if (callbacks->power_runtime_init_callback)
			return callbacks->power_runtime_init_callback(kbdev);
		/* MALI_SEC_INTEGRATION */
		/*else
			return 0;*/
		/* MALI_SEC_INTEGRATION, add else, ARM's mistakes? */
		/* NOTE: without the else we fall through to the final
		 * return 0, so behaviour is unchanged either way. */
	} else {
		kbdev->pm.backend.callback_power_on = NULL;
		kbdev->pm.backend.callback_power_off = NULL;
		kbdev->pm.backend.callback_power_suspend = NULL;
		kbdev->pm.backend.callback_power_resume = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.backend.callback_power_runtime_on = NULL;
		kbdev->pm.backend.callback_power_runtime_off = NULL;
		kbdev->pm.backend.callback_power_runtime_idle = NULL;
		kbdev->pm.backend.callback_power_dvfs_on = NULL;
	}

	return 0;
}
/*
 * kbase_pm_runtime_term - Run the platform's runtime-PM teardown hook.
 * @kbdev: kbase device
 *
 * No-op when the platform did not register a runtime-term callback.
 */
void kbase_pm_runtime_term(struct kbase_device *kbdev)
{
	if (kbdev->pm.callback_power_runtime_term)
		kbdev->pm.callback_power_runtime_term(kbdev);
}
/*
 * kbase_pm_register_access_enable - Power the GPU for register access.
 * @kbdev: kbase device
 *
 * Invokes the platform power-on callback (when configured) and records the
 * GPU as powered so register reads/writes are permitted.
 */
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *conf =
		(struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (conf)
		conf->power_on_callback(kbdev);

	kbdev->pm.backend.gpu_powered = true;
}
/*
 * kbase_pm_register_access_disable - Remove power after register access.
 * @kbdev: kbase device
 *
 * Invokes the platform power-off callback (when configured) and records the
 * GPU as unpowered.
 */
void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *conf =
		(struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (conf)
		conf->power_off_callback(kbdev);

	kbdev->pm.backend.gpu_powered = false;
}
/*
 * kbase_hwaccess_pm_init - Initialise the PM backend for a device.
 * @kbdev: kbase device
 *
 * Sets up locks, waitqueues, the power-off-wait workqueue, the metrics
 * subsystem, the core-availability framework and the power policy. Uses a
 * goto-chain so partially initialised subsystems are torn down on failure.
 *
 * Return: 0 on success, -ENOMEM if the workqueue cannot be allocated,
 * a metrics-init error code, or -EINVAL if CA/policy init fails.
 */
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	/* MALI_SEC_INTEGRATION */
	kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
			WQ_HIGHPRI | WQ_UNBOUND | __WQ_ORDERED, 1);
	if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
		return -ENOMEM;

	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
			kbase_pm_gpu_poweroff_wait_wq);

	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.suspending = false;
	/* MALI_SEC_INTEGRATION */
	init_waitqueue_head(&kbdev->pm.suspending_wait);
#ifdef CONFIG_MALI_DEBUG
	/* IRQs must not be serviced before init completes (debug check) */
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
	kbdev->pm.backend.gpu_in_desired_state = true;
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
	kbdev->pm.backend.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);

	init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);

	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	if (kbase_pm_policy_init(kbdev) != 0)
		goto pm_policy_fail;

	return 0;

pm_policy_fail:
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return -EINVAL;
}
/*
 * kbase_pm_do_poweron - Turn the GPU clocks/interrupts on and update cores.
 * @kbdev:     kbase device
 * @is_resume: true when called as part of a resume from suspend
 *
 * Must be called with kbdev->pm.lock held. Does not wait for the cores to
 * reach the desired state; running atoms wait for that themselves.
 */
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */
}
/*
 * kbase_pm_gpu_poweroff_wait_wq - Deferred power-off worker.
 * @data: work item embedded in the PM backend (gpu_poweroff_wait_work)
 *
 * Waits for outstanding core power transitions, flushes the L2 when only
 * platform power-down is supported, then turns the clock off. If pending
 * page/bus faults prevent the clock-off, locks are dropped so the MMU
 * workqueues can drain, then the clock-off is retried (or the GPU powered
 * back on if a power-on became required meanwhile). Finally clears
 * poweroff_wait_in_progress and wakes waiters on poweroff_wait.
 *
 * Lock order used throughout: runpool_mutex -> pm.lock -> hwaccess_lock.
 */
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.gpu_poweroff_wait_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long flags;

#if !PLATFORM_POWER_DOWN_ONLY
	/* Wait for power transitions to complete. We do this with no locks held
	 * so that we don't deadlock with any pending workqueues */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	kbase_pm_check_transitions_sync(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
#endif /* !PLATFORM_POWER_DOWN_ONLY */

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

#if PLATFORM_POWER_DOWN_ONLY
	if (kbdev->pm.backend.gpu_powered) {
		if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2)) {
			/* If L2 cache is powered then we must flush it before
			 * we power off the GPU. Normally this would have been
			 * handled when the L2 was powered off. */
			kbase_gpu_cacheclean(kbdev);
		}
	}
#endif /* PLATFORM_POWER_DOWN_ONLY */

	if (!backend->poweron_required) {
#if !PLATFORM_POWER_DOWN_ONLY
		unsigned long flags;

		/* Sanity check: nothing should still be available */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		WARN_ON(kbdev->l2_available_bitmap ||
				kbdev->shader_available_bitmap ||
				kbdev->tiler_available_bitmap);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#endif /* !PLATFORM_POWER_DOWN_ONLY */

		/* Consume any change-state events */
		kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		/* Disable interrupts and turn the clock off */
		if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
			/*
			 * Page/bus faults are pending, must drop locks to
			 * process.  Interrupts are disabled so no more faults
			 * should be generated at this point.
			 */
			mutex_unlock(&kbdev->pm.lock);
			mutex_unlock(&js_devdata->runpool_mutex);
			kbase_flush_mmu_wqs(kbdev);
			mutex_lock(&js_devdata->runpool_mutex);
			mutex_lock(&kbdev->pm.lock);

			/* Turn off clock now that fault have been handled. We
			 * dropped locks so poweron_required may have changed -
			 * power back on if this is the case.*/
			if (backend->poweron_required)
				kbase_pm_clock_on(kbdev, false);
			else
				WARN_ON(!kbase_pm_clock_off(kbdev,
						backend->poweroff_is_suspend));
		}
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	backend->poweroff_wait_in_progress = false;
	if (backend->poweron_required) {
		backend->poweron_required = false;
		kbase_pm_update_cores_state_nolock(kbdev);
		kbase_backend_slot_update(kbdev);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	wake_up(&kbdev->pm.backend.poweroff_wait);
}
/* MALI_SEC_INTEGRATION */
/* Remove using 'power off wait wq' @ bifrost ddk */
/*
 * kbase_pm_do_poweroff - Request all cores (and the GPU clock) off.
 * @kbdev:      kbase device
 * @is_suspend: true when called on the suspend path
 *
 * Must be called with kbdev->pm.lock held. Zeroes the desired/available
 * core bitmaps under hwaccess_lock, then turns the clock off synchronously.
 *
 * NOTE(review): the #ifdef MALI_SEC_INTEGRATION sections below are only
 * compiled in if that symbol is defined as a macro; elsewhere in this file
 * "MALI_SEC_INTEGRATION" appears only in comments, so the deferred-workqueue
 * path here is presumably compiled out — confirm against the build config.
 *
 * Return: the result of kbase_pm_clock_off() (non-zero on success,
 * 0 when pending page/bus faults prevented the clock-off).
 */
int kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (!kbdev->pm.backend.poweroff_wait_in_progress) {
		/* Force all cores off */
		kbdev->pm.backend.desired_shader_state = 0;
		kbdev->pm.backend.desired_tiler_state = 0;

		/* Force all cores to be unavailable, in the situation where
		 * transitions are in progress for some cores but not others,
		 * and kbase_pm_check_transitions_nolock can not immediately
		 * power off the cores */
		kbdev->shader_available_bitmap = 0;
		kbdev->tiler_available_bitmap = 0;
		kbdev->l2_available_bitmap = 0;

#ifdef MALI_SEC_INTEGRATION
		kbdev->pm.backend.poweroff_wait_in_progress = true;
		kbdev->pm.backend.poweroff_is_suspend = is_suspend;
#endif

		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

#ifdef MALI_SEC_INTEGRATION
		/*Kick off wq here. Callers will have to wait*/
		queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
				&kbdev->pm.backend.gpu_poweroff_wait_work);
#endif
	} else {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}

	/* MALI_SEC_INTEGRATION */
	return kbase_pm_clock_off(kbdev, is_suspend);
}
/*
 * is_poweroff_in_progress - Waitqueue predicate for poweroff completion.
 * @kbdev: kbase device
 *
 * NOTE: despite the name, this returns true when a poweroff is NOT in
 * progress (i.e. the condition kbase_pm_wait_for_poweroff_complete() waits
 * for). Reads poweroff_wait_in_progress under hwaccess_lock.
 */
static bool is_poweroff_in_progress(struct kbase_device *kbdev)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	ret = (kbdev->pm.backend.poweroff_wait_in_progress == false);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return ret;
}
/*
 * kbase_pm_wait_for_poweroff_complete - Block until the deferred poweroff
 * worker has finished (poweroff_wait_in_progress cleared). Killable wait.
 * @kbdev: kbase device
 */
void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
{
	wait_event_killable(kbdev->pm.backend.poweroff_wait,
			is_poweroff_in_progress(kbdev));
}
/*
 * kbase_hwaccess_pm_powerup - Bring the GPU up at driver initialisation.
 * @kbdev: kbase device
 * @flags: flags forwarded to kbase_pm_init_hw()
 *
 * Initialises the hardware, seeds the debug core masks from the present
 * shader cores, enables interrupts once the policy is ready, powers the GPU
 * on, and finally drops the fake active reference so the policy may idle it.
 *
 * Return: 0 on success, or the error from kbase_pm_init_hw().
 */
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them. */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		mutex_unlock(&kbdev->pm.lock);
		mutex_unlock(&js_devdata->runpool_mutex);
		return ret;
	}

	kbasep_pm_init_core_use_bitmaps(kbdev);

	/* All three per-slot debug masks and the combined mask start as the
	 * full set of present shader cores (optionally restricted by the
	 * core-mask-selection info). */
	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
#ifdef CONFIG_MALI_GPU_CORE_MASK_SELECTION
			kbdev->gpu_props.props.raw_props.shader_present&kbdev->pm.debug_core_mask_info;
#else
			kbdev->gpu_props.props.raw_props.shader_present;
#endif

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);

	/* We are ready to receive IRQ's now as power policy is set up, so
	 * enable them now. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
	kbdev->pm.backend.driver_ready_for_irqs = true;
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, false);
	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return 0;
}
/*
 * kbase_hwaccess_pm_halt - Synchronously power the GPU off (driver halt).
 * @kbdev: kbase device
 *
 * Cancels any deferred poweroff and forces the cores off. If pending
 * page/bus faults block the clock-off, drops pm.lock to let the MMU
 * workqueues drain, then retries (the retry is expected to succeed since
 * interrupts are disabled, hence the WARN_ON).
 */
void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, false)) {
		/* MALI_SEC_INTEGRATION */
		/* Remove using 'power off wait wq' @ bifrost ddk */
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}
	mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
/*
 * kbase_hwaccess_pm_term - Tear down the PM backend.
 * @kbdev: kbase device
 *
 * Must be called with no active PM references and no outstanding cycle
 * counter requests. Terminates the policy, the core-availability framework
 * and the metrics subsystem, then destroys the power-off-wait workqueue.
 */
void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	/* Free any resources the policy allocated */
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);

	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
}
/*
 * kbase_pm_power_changed - React to a power-transition interrupt.
 * @kbdev: kbase device
 *
 * Re-evaluates core transitions under hwaccess_lock; when the needed cores
 * have become available, logs the state change and lets the job-slot
 * backend submit any waiting work.
 */
void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	bool cores_are_available;
	unsigned long flags;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

	if (cores_are_available) {
		/* Log timelining information that a change in state has
		 * completed */
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		kbase_backend_slot_update(kbdev);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
/*
 * kbase_pm_set_debug_core_mask - Set the per-job-slot debug core masks.
 * @kbdev:             kbase device
 * @new_core_mask_js0: debug core mask for job slot 0
 * @new_core_mask_js1: debug core mask for job slot 1
 * @new_core_mask_js2: debug core mask for job slot 2
 *
 * Records each per-slot mask plus their union, then re-evaluates the
 * desired core state so the change takes effect.
 */
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	kbdev->pm.debug_core_mask_all =
		new_core_mask_js0 | new_core_mask_js1 | new_core_mask_js2;
	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;

	kbase_pm_update_cores_state_nolock(kbdev);
}
/* GPU became active: let the policy re-evaluate the desired power state. */
void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}
/* GPU became idle: let the policy re-evaluate the desired power state. */
void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}
/*
 * kbase_hwaccess_pm_suspend - Force the GPU off for system suspend.
 * @kbdev: kbase device
 *
 * Forces all cores off regardless of policy (the caller guarantees the PM
 * active count has reached zero). If pending page/bus faults prevent the
 * clock-off, drops the locks so MMU workqueues can drain, then retries.
 * Also suspends the job-scheduler backend timer.
 */
void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely) */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, true)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		/* MALI_SEC_INTEGRATION */
		mutex_unlock(&js_devdata->runpool_mutex);
		kbase_flush_mmu_wqs(kbdev);
		/* MALI_SEC_INTEGRATION */
		mutex_lock(&js_devdata->runpool_mutex);
		mutex_lock(&kbdev->pm.lock);
		/* NOTE(review): the retry passes is_suspend=false although
		 * this is the suspend path (the first attempt passes true) —
		 * looks copied from kbase_hwaccess_pm_halt; confirm whether
		 * the suspend flag matters for kbase_pm_do_poweroff here. */
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}

	kbase_backend_timer_suspend(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* MALI_SEC_INTEGRATION */
	/* Remove using 'power off wait wq' @ bifrost ddk */
	/* kbase_pm_wait_for_poweroff_complete(kbdev); */
}
/*
 * kbase_hwaccess_pm_resume - Bring the GPU back after system suspend.
 * @kbdev: kbase device
 *
 * Clears the suspending flag, powers the GPU on (is_resume = true), wakes
 * anyone waiting on suspending_wait, and resumes the scheduler timer.
 */
void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	kbdev->pm.suspending = false;
	kbase_pm_do_poweron(kbdev, true);

	/* MALI_SEC_INTEGRATION */
	wake_up(&kbdev->pm.suspending_wait);

	kbase_backend_timer_resume(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}

View File

@ -0,0 +1,187 @@
/*
*
* (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Base kernel core availability APIs
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/*
 * Table of selectable core-availability policies; entry 0 is the default
 * chosen by kbase_pm_ca_init().
 */
static const struct kbase_pm_ca_policy *const policy_list[] = {
	&kbase_pm_ca_fixed_policy_ops,
#ifdef CONFIG_MALI_DEVFREQ
	&kbase_pm_ca_devfreq_policy_ops,
#endif
#if !MALI_CUSTOMER_RELEASE
	&kbase_pm_ca_random_policy_ops
#endif
};

/**
 * POLICY_COUNT - The number of policies available in the system.
 *
 * This is derived from the number of functions listed in policy_list.
 */
#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
int kbase_pm_ca_init(struct kbase_device *kbdev)
{
KBASE_DEBUG_ASSERT(kbdev != NULL);
kbdev->pm.backend.ca_current_policy = policy_list[0];
kbdev->pm.backend.ca_current_policy->init(kbdev);
return 0;
}
/* Terminate the core-availability framework via the active policy. */
void kbase_pm_ca_term(struct kbase_device *kbdev)
{
	kbdev->pm.backend.ca_current_policy->term(kbdev);
}
int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list)
{
if (!list)
return POLICY_COUNT;
*list = policy_list;
return POLICY_COUNT;
}
KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies);
/*
 * kbase_pm_ca_get_policy - Return the currently active CA policy.
 * @kbdev: kbase device
 *
 * May be NULL transiently while kbase_pm_ca_set_policy() swaps policies.
 */
const struct kbase_pm_ca_policy
*kbase_pm_ca_get_policy(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	return kbdev->pm.backend.ca_current_policy;
}

KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy);
/*
 * kbase_pm_ca_set_policy - Swap the active core-availability policy.
 * @kbdev:      kbase device
 * @new_policy: policy to switch to (must be non-NULL)
 *
 * Holds a fake PM-active reference for the duration so the GPU cannot power
 * down mid-change. ca_current_policy is set to NULL (under hwaccess_lock)
 * while the old policy is terminated and the new one initialised, so IRQ
 * handlers see "no policy" rather than a half-changed one; any core state
 * updates attempted during that window are re-run afterwards.
 */
void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
				const struct kbase_pm_ca_policy *new_policy)
{
	const struct kbase_pm_ca_policy *old_policy;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u,
								new_policy->id);

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kbdev);

	mutex_lock(&kbdev->pm.lock);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	old_policy = kbdev->pm.backend.ca_current_policy;
	kbdev->pm.backend.ca_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (old_policy->term)
		old_policy->term(kbdev);

	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.ca_current_policy = new_policy;

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here. */
	kbase_pm_update_cores_state_nolock(kbdev);

	kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
					kbdev->shader_ready_bitmap,
					kbdev->shader_transitioning_bitmap);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	mutex_unlock(&kbdev->pm.lock);

	/* Now the policy change is finished, we release our fake context active
	 * reference */
	kbase_pm_context_idle(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy);
u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
/* All cores must be enabled when instrumentation is in use */
if (kbdev->pm.backend.instr_enabled)
return kbdev->gpu_props.props.raw_props.shader_present &
kbdev->pm.debug_core_mask_all;
if (kbdev->pm.backend.ca_current_policy == NULL)
return kbdev->gpu_props.props.raw_props.shader_present &
kbdev->pm.debug_core_mask_all;
return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) &
kbdev->pm.debug_core_mask_all;
}
KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);
/*
 * kbase_pm_ca_update_core_status - Forward core status to the CA policy.
 * @kbdev:                kbase device (hwaccess_lock must be held)
 * @cores_ready:          bit mask of cores ready for job submission
 * @cores_transitioning:  bit mask of cores currently transitioning
 *
 * Dropped silently while a policy change is in flight (policy is NULL).
 */
void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
						u64 cores_transitioning)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->pm.backend.ca_current_policy != NULL)
		kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
							cores_ready,
							cores_transitioning);
}
/*
 * kbase_pm_ca_instr_enable - Flag instrumentation as active.
 * @kbdev: kbase device
 *
 * While set, kbase_pm_ca_get_core_mask() reports every present core.
 * Takes hwaccess_lock itself and re-evaluates the core state.
 */
void kbase_pm_ca_instr_enable(struct kbase_device *kbdev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
	kbdev->pm.backend.instr_enabled = true;

	kbase_pm_update_cores_state_nolock(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
}
/*
 * kbase_pm_ca_instr_disable - Clear the instrumentation flag.
 * @kbdev: kbase device (caller must hold hwaccess_lock, unlike the
 *         enable path which takes it internally)
 */
void kbase_pm_ca_instr_disable(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	kbdev->pm.backend.instr_enabled = false;

	kbase_pm_update_cores_state_nolock(kbdev);
}

View File

@ -0,0 +1,97 @@
/*
*
* (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Base kernel core availability APIs
*/
#ifndef _KBASE_PM_CA_H_
#define _KBASE_PM_CA_H_
/**
* kbase_pm_ca_init - Initialize core availability framework
*
* Must be called before calling any other core availability function
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Return: 0 if the core availability framework was successfully initialized,
* -errno otherwise
*/
int kbase_pm_ca_init(struct kbase_device *kbdev);
/**
* kbase_pm_ca_term - Terminate core availability framework
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_ca_term(struct kbase_device *kbdev);
/**
* kbase_pm_ca_get_core_mask - Get currently available shaders core mask
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Returns a mask of the currently available shader cores.
* Calls into the core availability policy
*
* Return: The bit mask of available cores
*/
u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev);
/**
* kbase_pm_ca_update_core_status - Update core status
*
* @kbdev: The kbase device structure for the device (must be
* a valid pointer)
* @cores_ready: The bit mask of cores ready for job submission
* @cores_transitioning: The bit mask of cores that are transitioning power
* state
*
* Update core availability policy with current core power status
*
* Calls into the core availability policy
*/
void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
u64 cores_transitioning);
/**
* kbase_pm_ca_instr_enable - Enable override for instrumentation
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* This overrides the output of the core availability policy, ensuring that all
* cores are available
*/
void kbase_pm_ca_instr_enable(struct kbase_device *kbdev);
/**
* kbase_pm_ca_instr_disable - Disable override for instrumentation
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* This disables any previously enabled override, and resumes normal policy
* functionality
*/
void kbase_pm_ca_instr_disable(struct kbase_device *kbdev);
#endif /* _KBASE_PM_CA_H_ */

View File

@ -0,0 +1,134 @@
/*
*
* (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* A core availability policy implementing core mask selection from devfreq OPPs
*
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <linux/version.h>
/* Install a new desired shader core mask for the devfreq CA policy.
 * Takes hwaccess_lock itself, so it must be called without that lock held. */
void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
{
	struct kbasep_pm_ca_policy_devfreq *data =
			&kbdev->pm.backend.ca_policy_data.devfreq;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	data->cores_desired = core_mask;

	/* Disable any cores that are now unwanted */
	data->cores_enabled &= data->cores_desired;

	/* Assume a transition is needed; cleared immediately below if the new
	 * mask can take effect without waiting for cores to power off. */
	kbdev->pm.backend.ca_in_transition = true;

	/* If there are no cores to be powered off then power on desired cores
	 */
	if (!(data->cores_used & ~data->cores_desired)) {
		data->cores_enabled = data->cores_desired;
		kbdev->pm.backend.ca_in_transition = false;
	}

	kbase_pm_update_cores_state_nolock(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* NOTE(review): data->cores_desired/cores_enabled are read here after
	 * the lock is dropped, so the debug message may race with a concurrent
	 * update - confirm this is acceptable for a dev_dbg. */
	dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX %llX\n",
			data->cores_desired, data->cores_enabled);
}
static void devfreq_init(struct kbase_device *kbdev)
{
struct kbasep_pm_ca_policy_devfreq *data =
&kbdev->pm.backend.ca_policy_data.devfreq;
if (kbdev->current_core_mask) {
data->cores_enabled = kbdev->current_core_mask;
data->cores_desired = kbdev->current_core_mask;
} else {
data->cores_enabled =
kbdev->gpu_props.props.raw_props.shader_present;
data->cores_desired =
kbdev->gpu_props.props.raw_props.shader_present;
}
data->cores_used = 0;
kbdev->pm.backend.ca_in_transition = false;
}
/* Policy term callback: intentionally empty - the devfreq CA policy keeps no
 * state that needs releasing when the policy is unselected. */
static void devfreq_term(struct kbase_device *kbdev)
{
}
static u64 devfreq_get_core_mask(struct kbase_device *kbdev)
{
return kbdev->pm.backend.ca_policy_data.devfreq.cores_enabled;
}
/* Policy update_core_status callback: track which cores are powered or
 * transitioning, and complete a pending mask change once every unwanted core
 * has powered off. Caller holds hwaccess_lock. */
static void devfreq_update_core_status(struct kbase_device *kbdev,
					u64 cores_ready,
					u64 cores_transitioning)
{
	struct kbasep_pm_ca_policy_devfreq *data =
			&kbdev->pm.backend.ca_policy_data.devfreq;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	data->cores_used = cores_ready | cores_transitioning;

	/* If in desired state then clear transition flag */
	if (data->cores_enabled == data->cores_desired)
		kbdev->pm.backend.ca_in_transition = false;

	/* If all undesired cores are now off then power on desired cores.
	 * The direct comparison against cores_enabled limits potential
	 * recursion to one level: on re-entry (presumably via
	 * kbase_pm_update_cores_state_nolock() - confirm) the masks are
	 * already equal, so this branch is not taken again. */
	if (!(data->cores_used & ~data->cores_desired) &&
			data->cores_enabled != data->cores_desired) {
		data->cores_enabled = data->cores_desired;

		kbase_pm_update_cores_state_nolock(kbdev);

		kbdev->pm.backend.ca_in_transition = false;
	}
}
/*
* The struct kbase_pm_ca_policy structure for the devfreq core availability
* policy.
*
* This is the static structure that defines the devfreq core availability power
* policy's callback and name.
*/
const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops = {
"devfreq", /* name */
devfreq_init, /* init */
devfreq_term, /* term */
devfreq_get_core_mask, /* get_core_mask */
devfreq_update_core_status, /* update_core_status */
0u, /* flags */
KBASE_PM_CA_POLICY_ID_DEVFREQ, /* id */
};

View File

@ -0,0 +1,60 @@
/*
*
* (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* A core availability policy for use with devfreq, where core masks are
* associated with OPPs.
*/
#ifndef MALI_KBASE_PM_CA_DEVFREQ_H
#define MALI_KBASE_PM_CA_DEVFREQ_H
/**
* struct kbasep_pm_ca_policy_devfreq - Private structure for devfreq ca policy
*
* This contains data that is private to the devfreq core availability
* policy.
*
* @cores_desired: Cores that the policy wants to be available
* @cores_enabled: Cores that the policy is currently returning as available
* @cores_used: Cores currently powered or transitioning
*/
struct kbasep_pm_ca_policy_devfreq {
	u64 cores_desired;	/* cores the policy wants to be available */
	u64 cores_enabled;	/* cores currently reported as available */
	u64 cores_used;		/* cores currently powered or transitioning */
};
extern const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops;
/**
* kbase_devfreq_set_core_mask - Set core mask for policy to use
* @kbdev: Device pointer
* @core_mask: New core mask
*
* The new core mask will have immediate effect if the GPU is powered, or will
* take effect when it is next powered on.
*/
void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
#endif /* MALI_KBASE_PM_CA_DEVFREQ_H */

View File

@ -0,0 +1,70 @@
/*
*
* (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* A power policy implementing fixed core availability
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
static void fixed_init(struct kbase_device *kbdev)
{
kbdev->pm.backend.ca_in_transition = false;
}
/* Policy term callback: no policy-private state to release. */
static void fixed_term(struct kbase_device *kbdev)
{
	(void)kbdev;
}
/* Policy get_core_mask callback: every physically present shader core is
 * always reported as available. */
static u64 fixed_get_core_mask(struct kbase_device *kbdev)
{
	u64 present = kbdev->gpu_props.props.raw_props.shader_present;

	return present;
}
/* Policy update_core_status callback: core power status is irrelevant to a
 * fixed availability mask, so this is a no-op. */
static void fixed_update_core_status(struct kbase_device *kbdev,
					u64 cores_ready,
					u64 cores_transitioning)
{
	(void)kbdev;
	(void)cores_ready;
	(void)cores_transitioning;
}
/*
* The struct kbase_pm_policy structure for the fixed power policy.
*
* This is the static structure that defines the fixed power policy's callback
* and name.
*/
const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = {
"fixed", /* name */
fixed_init, /* init */
fixed_term, /* term */
fixed_get_core_mask, /* get_core_mask */
fixed_update_core_status, /* update_core_status */
0u, /* flags */
KBASE_PM_CA_POLICY_ID_FIXED, /* id */
};
KBASE_EXPORT_TEST_API(kbase_pm_ca_fixed_policy_ops);

View File

@ -0,0 +1,45 @@
/*
*
* (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* A power policy implementing fixed core availability
*/
#ifndef MALI_KBASE_PM_CA_FIXED_H
#define MALI_KBASE_PM_CA_FIXED_H
/**
* struct kbasep_pm_ca_policy_fixed - Private structure for policy instance data
*
* @dummy: Dummy member - no state is needed
*
* This contains data that is private to the particular power policy that is
* active.
*/
struct kbasep_pm_ca_policy_fixed {
	int dummy;	/* unused - present only because C requires a member */
};
extern const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops;
#endif /* MALI_KBASE_PM_CA_FIXED_H */

View File

@ -0,0 +1,75 @@
/*
*
* (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* "Coarse Demand" power management policy
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
/* Policy get_core_mask callback: with no active contexts no cores are
 * wanted; otherwise every present shader core is wanted. */
static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev)
{
	return kbdev->pm.active_count == 0 ?
			0 : kbdev->gpu_props.props.raw_props.shader_present;
}
/* Policy get_core_active callback: the GPU should stay powered while any
 * context is active or any shader/tiler resource is still needed or in use. */
static bool coarse_demand_get_core_active(struct kbase_device *kbdev)
{
	if (kbdev->pm.active_count != 0)
		return true;
	if (kbdev->shader_needed_bitmap || kbdev->shader_inuse_bitmap)
		return true;
	if (kbdev->tiler_needed_cnt || kbdev->tiler_inuse_cnt)
		return true;

	return false;
}
/* Policy init callback: the coarse demand policy has no private state. */
static void coarse_demand_init(struct kbase_device *kbdev)
{
	(void)kbdev;
}
/* Policy term callback: nothing to tear down. */
static void coarse_demand_term(struct kbase_device *kbdev)
{
	(void)kbdev;
}
/* The struct kbase_pm_policy structure for the demand power policy.
*
* This is the static structure that defines the demand power policy's callback
* and name.
*/
const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
"coarse_demand", /* name */
coarse_demand_init, /* init */
coarse_demand_term, /* term */
coarse_demand_get_core_mask, /* get_core_mask */
coarse_demand_get_core_active, /* get_core_active */
0u, /* flags */
KBASE_PM_POLICY_ID_COARSE_DEMAND, /* id */
};
KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops);

View File

@ -0,0 +1,69 @@
/*
*
* (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* "Coarse Demand" power management policy
*/
#ifndef MALI_KBASE_PM_COARSE_DEMAND_H
#define MALI_KBASE_PM_COARSE_DEMAND_H
/**
* DOC:
* The "Coarse" demand power management policy has the following
* characteristics:
* - When KBase indicates that the GPU will be powered up, but we don't yet
* know which Job Chains are to be run:
* - All Shader Cores are powered up, regardless of whether or not they will
* be needed later.
* - When KBase indicates that a set of Shader Cores are needed to submit the
* currently queued Job Chains:
* - All Shader Cores are kept powered, regardless of whether or not they will
* be needed
* - When KBase indicates that the GPU need not be powered:
* - The Shader Cores are powered off, and the GPU itself is powered off too.
*
* @note:
* - KBase indicates the GPU will be powered up when it has a User Process that
* has just started to submit Job Chains.
* - KBase indicates the GPU need not be powered when all the Job Chains from
* User Processes have finished, and it is waiting for a User Process to
* submit some more Job Chains.
*/
/**
* struct kbasep_pm_policy_coarse_demand - Private structure for coarse demand
* policy
*
* This contains data that is private to the coarse demand power policy.
*
* @dummy: Dummy member - no state needed
*/
struct kbasep_pm_policy_coarse_demand {
	int dummy;	/* unused - present only because C requires a member */
};
extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops;
#endif /* MALI_KBASE_PM_COARSE_DEMAND_H */

View File

@ -0,0 +1,533 @@
/*
*
* (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Backend-specific Power Manager definitions
*/
#ifndef _KBASE_PM_HWACCESS_DEFS_H_
#define _KBASE_PM_HWACCESS_DEFS_H_
#include "mali_kbase_pm_ca_fixed.h"
#include "mali_kbase_pm_ca_devfreq.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_pm_ca_random.h"
#endif
#include "mali_kbase_pm_always_on.h"
#include "mali_kbase_pm_coarse_demand.h"
#include "mali_kbase_pm_demand.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_pm_demand_always_powered.h"
#include "mali_kbase_pm_fast_start.h"
#endif
/* Forward definition - see mali_kbase.h */
struct kbase_device;
struct kbase_jd_atom;
/**
* enum kbase_pm_core_type - The types of core in a GPU.
*
* These enumerated values are used in calls to
* - kbase_pm_get_present_cores()
* - kbase_pm_get_active_cores()
* - kbase_pm_get_trans_cores()
* - kbase_pm_get_ready_cores().
*
* They specify which type of core should be acted on. These values are set in
* a manner that allows core_type_to_reg() function to be simpler and more
* efficient.
*
* @KBASE_PM_CORE_L2: The L2 cache
* @KBASE_PM_CORE_SHADER: Shader cores
* @KBASE_PM_CORE_TILER: Tiler cores
* @KBASE_PM_CORE_STACK: Core stacks
*/
enum kbase_pm_core_type {
	/* Each value is the corresponding *_PRESENT_LO register constant, so
	 * core_type_to_reg() can derive register offsets directly. */
	KBASE_PM_CORE_L2 = L2_PRESENT_LO,		/* The L2 cache */
	KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO,	/* Shader cores */
	KBASE_PM_CORE_TILER = TILER_PRESENT_LO,		/* Tiler cores */
	KBASE_PM_CORE_STACK = STACK_PRESENT_LO		/* Core stacks */
};
/**
* struct kbasep_pm_metrics_data - Metrics data collected for use by the power
* management framework.
*
* @time_period_start: time at which busy/idle measurements started
* @time_busy: number of ns the GPU was busy executing jobs since the
* @time_period_start timestamp.
* @time_idle: number of ns since time_period_start the GPU was not executing
* jobs since the @time_period_start timestamp.
* @prev_busy: busy time in ns of previous time period.
* Updated when metrics are reset.
* @prev_idle: idle time in ns of previous time period
* Updated when metrics are reset.
* @gpu_active: true when the GPU is executing jobs. false when
* not. Updated when the job scheduler informs us a job in submitted
* or removed from a GPU slot.
* @busy_cl: number of ns the GPU was busy executing CL jobs. Note that
* if two CL jobs were active for 400ns, this value would be updated
* with 800.
* @busy_gl: number of ns the GPU was busy executing GL jobs. Note that
* if two GL jobs were active for 400ns, this value would be updated
* with 800.
* @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device.
* @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As
* GL jobs never run on slot 2 this slot is not recorded.
* @lock: spinlock protecting the kbasep_pm_metrics_data structure
* @timer: timer to regularly make DVFS decisions based on the power
* management metrics.
* @timer_active: boolean indicating @timer is running
* @platform_data: pointer to data controlled by platform specific code
* @kbdev: pointer to kbase device for which metrics are collected
*
*/
struct kbasep_pm_metrics_data {
	ktime_t time_period_start;
	u32 time_busy;
	u32 time_idle;
	u32 prev_busy;
	u32 prev_idle;
	bool gpu_active;
	u32 busy_cl[2];
	u32 busy_gl;
	u32 active_cl_ctx[2];
	u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */
	spinlock_t lock;

	/* MALI_SEC_INTEGRATION: vendor change - timer fields compiled in
	 * unconditionally instead of under CONFIG_MALI_MIDGARD_DVFS */
	/* #ifdef CONFIG_MALI_MIDGARD_DVFS */
	struct hrtimer timer;
	bool timer_active;
	/* MALI_SEC_INTEGRATION */
	struct delayed_work work;
	/* #endif */

	void *platform_data;
	struct kbase_device *kbdev;

#ifdef CONFIG_MALI_SEC_CL_BOOST
	/* NOTE(review): vendor (Samsung CL boost) fields - presumably
	 * per-job-type time accumulators; confirm units and writers. */
	atomic_t time_compute_jobs, time_vertex_jobs, time_fragment_jobs;
	bool is_full_compute_util; /* Only compute utilisation is 100% */
#endif
};
/**
 * union kbase_pm_policy_data - Private data for the active power policy.
 *
 * Only the member that corresponds to the currently selected power policy is
 * meaningful at any time.
 */
union kbase_pm_policy_data {
	struct kbasep_pm_policy_always_on always_on;
	struct kbasep_pm_policy_coarse_demand coarse_demand;
	struct kbasep_pm_policy_demand demand;
#if !MALI_CUSTOMER_RELEASE
	struct kbasep_pm_policy_demand_always_powered demand_always_powered;
	struct kbasep_pm_policy_fast_start fast_start;
#endif
};
/**
 * union kbase_pm_ca_policy_data - Private data for the active core
 *                                 availability policy.
 *
 * Only the member that corresponds to the currently selected CA policy is
 * meaningful at any time.
 */
union kbase_pm_ca_policy_data {
	struct kbasep_pm_ca_policy_fixed fixed;
	struct kbasep_pm_ca_policy_devfreq devfreq;
#if !MALI_CUSTOMER_RELEASE
	struct kbasep_pm_ca_policy_random random;
#endif
};
/**
* struct kbase_pm_backend_data - Data stored per device for power management.
*
* This structure contains data for the power management framework. There is one
* instance of this structure per device in the system.
*
* @ca_current_policy: The policy that is currently actively controlling core
* availability.
* @pm_current_policy: The policy that is currently actively controlling the
* power state.
* @ca_policy_data: Private data for current CA policy
* @pm_policy_data: Private data for current PM policy
* @ca_in_transition: Flag indicating when core availability policy is
* transitioning cores. The core availability policy must
* set this when a change in core availability is occurring.
* power_change_lock must be held when accessing this.
* @reset_done: Flag when a reset is complete
* @reset_done_wait: Wait queue to wait for changes to @reset_done
* @l2_powered_wait: Wait queue for whether the l2 cache has been powered as
* requested
* @l2_powered: State indicating whether all the l2 caches are powered.
* Non-zero indicates they're *all* powered
* Zero indicates that some (or all) are not powered
* @gpu_cycle_counter_requests: The reference count of active gpu cycle counter
* users
* @gpu_cycle_counter_requests_lock: Lock to protect @gpu_cycle_counter_requests
* @desired_shader_state: A bit mask identifying the shader cores that the
* power policy would like to be on. The current state
* of the cores may be different, but there should be
* transitions in progress that will eventually achieve
* this state (assuming that the policy doesn't change
* its mind in the mean time).
* @powering_on_shader_state: A bit mask indicating which shader cores are
* currently in a power-on transition
* @desired_tiler_state: A bit mask identifying the tiler cores that the power
* policy would like to be on. See @desired_shader_state
* @powering_on_tiler_state: A bit mask indicating which tiler core are
* currently in a power-on transition
* @powering_on_l2_state: A bit mask indicating which l2-caches are currently
* in a power-on transition
* @powering_on_stack_state: A bit mask indicating which core stacks are
* currently in a power-on transition
* @gpu_in_desired_state: This flag is set if the GPU is powered as requested
* by the desired_xxx_state variables
* @gpu_in_desired_state_wait: Wait queue set when @gpu_in_desired_state != 0
* @gpu_powered: Set to true when the GPU is powered and register
* accesses are possible, false otherwise
* @instr_enabled: Set to true when instrumentation is enabled,
* false otherwise
* @cg1_disabled: Set if the policy wants to keep the second core group
* powered off
* @driver_ready_for_irqs: Debug state indicating whether sufficient
* initialization of the driver has occurred to handle
* IRQs
* @gpu_powered_lock: Spinlock that must be held when writing @gpu_powered or
* accessing @driver_ready_for_irqs
* @metrics: Structure to hold metrics for the GPU
* @gpu_poweroff_pending: number of poweroff timer ticks until the GPU is
* powered off
* @shader_poweroff_pending_time: number of poweroff timer ticks until shaders
* and/or timers are powered off
* @gpu_poweroff_timer: Timer for powering off GPU
* @gpu_poweroff_wq: Workqueue to power off GPU on when timer fires
* @gpu_poweroff_work: Workitem used on @gpu_poweroff_wq
* @shader_poweroff_pending: Bit mask of shaders to be powered off on next
* timer callback
* @tiler_poweroff_pending: Bit mask of tilers to be powered off on next timer
* callback
* @poweroff_timer_needed: true if the poweroff timer is currently required,
* false otherwise
* @poweroff_timer_running: true if the poweroff timer is currently running,
* false otherwise
* power_change_lock should be held when accessing,
* unless there is no way the timer can be running (eg
* hrtimer_cancel() was called immediately before)
* @poweroff_wait_in_progress: true if a wait for GPU power off is in progress.
* hwaccess_lock must be held when accessing
* @poweron_required: true if a GPU power on is required. Should only be set
* when poweroff_wait_in_progress is true, and therefore the
* GPU can not immediately be powered on. pm.lock must be
* held when accessing
* @poweroff_is_suspend: true if the GPU is being powered off due to a suspend
* request. pm.lock must be held when accessing
* @gpu_poweroff_wait_wq: workqueue for waiting for GPU to power off
* @gpu_poweroff_wait_work: work item for use with @gpu_poweroff_wait_wq
* @poweroff_wait: waitqueue for waiting for @gpu_poweroff_wait_work to complete
* @callback_power_on: Callback when the GPU needs to be turned on. See
* &struct kbase_pm_callback_conf
* @callback_power_off: Callback when the GPU may be turned off. See
* &struct kbase_pm_callback_conf
* @callback_power_suspend: Callback when a suspend occurs and the GPU needs to
* be turned off. See &struct kbase_pm_callback_conf
* @callback_power_resume: Callback when a resume occurs and the GPU needs to
* be turned on. See &struct kbase_pm_callback_conf
* @callback_power_runtime_on: Callback when the GPU needs to be turned on. See
* &struct kbase_pm_callback_conf
* @callback_power_runtime_off: Callback when the GPU may be turned off. See
* &struct kbase_pm_callback_conf
* @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
* &struct kbase_pm_callback_conf
*
* Note:
* During an IRQ, @ca_current_policy or @pm_current_policy can be NULL when the
* policy is being changed with kbase_pm_ca_set_policy() or
* kbase_pm_set_policy(). The change is protected under
* kbase_device.pm.power_change_lock. Direct access to this
* from IRQ context must therefore check for NULL. If NULL, then
* kbase_pm_ca_set_policy() or kbase_pm_set_policy() will re-issue the policy
* functions that would have been done under IRQ.
*/
/* Fields are documented in the kernel-doc comment block above. */
struct kbase_pm_backend_data {
	const struct kbase_pm_ca_policy *ca_current_policy;
	const struct kbase_pm_policy *pm_current_policy;
	union kbase_pm_ca_policy_data ca_policy_data;
	union kbase_pm_policy_data pm_policy_data;
	bool ca_in_transition;
	bool reset_done;
	wait_queue_head_t reset_done_wait;
	wait_queue_head_t l2_powered_wait;
	int l2_powered;
	int gpu_cycle_counter_requests;
	spinlock_t gpu_cycle_counter_requests_lock;
	u64 desired_shader_state;
	u64 powering_on_shader_state;
	u64 desired_tiler_state;
	u64 powering_on_tiler_state;
	u64 powering_on_l2_state;
#ifdef CONFIG_MALI_CORESTACK
	u64 powering_on_stack_state;
#endif /* CONFIG_MALI_CORESTACK */
	bool gpu_in_desired_state;
	wait_queue_head_t gpu_in_desired_state_wait;
	bool gpu_powered;
	bool instr_enabled;
	bool cg1_disabled;
#ifdef CONFIG_MALI_DEBUG
	bool driver_ready_for_irqs;
#endif /* CONFIG_MALI_DEBUG */
	spinlock_t gpu_powered_lock;
	struct kbasep_pm_metrics_data metrics;
	int gpu_poweroff_pending;
	int shader_poweroff_pending_time;
	struct hrtimer gpu_poweroff_timer;
	struct workqueue_struct *gpu_poweroff_wq;
	struct work_struct gpu_poweroff_work;
	u64 shader_poweroff_pending;
	u64 tiler_poweroff_pending;
	bool poweroff_timer_needed;
	bool poweroff_timer_running;
	bool poweroff_wait_in_progress;
	bool poweron_required;
	bool poweroff_is_suspend;
	struct workqueue_struct *gpu_poweroff_wait_wq;
	struct work_struct gpu_poweroff_wait_work;
	wait_queue_head_t poweroff_wait;
	int (*callback_power_on)(struct kbase_device *kbdev);
	void (*callback_power_off)(struct kbase_device *kbdev);
	void (*callback_power_suspend)(struct kbase_device *kbdev);
	void (*callback_power_resume)(struct kbase_device *kbdev);
	int (*callback_power_runtime_on)(struct kbase_device *kbdev);
	void (*callback_power_runtime_off)(struct kbase_device *kbdev);
	int (*callback_power_runtime_idle)(struct kbase_device *kbdev);
	/* MALI_SEC_INTEGRATION: vendor hook not covered by the kernel-doc
	 * above - NOTE(review): presumably a DVFS power-on callback; confirm
	 * against the platform integration code. */
	int (*callback_power_dvfs_on)(struct kbase_device *kbdev);
};
/* List of power policy IDs. Values start at 1 and are used purely to
 * identify a policy for debugging; they are not indices into the list
 * returned by kbase_pm_list_policies(). */
enum kbase_pm_policy_id {
	KBASE_PM_POLICY_ID_DEMAND = 1,
	KBASE_PM_POLICY_ID_ALWAYS_ON,
	KBASE_PM_POLICY_ID_COARSE_DEMAND,
#if !MALI_CUSTOMER_RELEASE
	/* Test-only policies, excluded from customer releases */
	KBASE_PM_POLICY_ID_DEMAND_ALWAYS_POWERED,
	KBASE_PM_POLICY_ID_FAST_START
#endif
};
typedef u32 kbase_pm_policy_flags;
/**
* struct kbase_pm_policy - Power policy structure.
*
* Each power policy exposes a (static) instance of this structure which
* contains function pointers to the policy's methods.
*
* @name: The name of this policy
* @init: Function called when the policy is selected
* @term: Function called when the policy is unselected
* @get_core_mask: Function called to get the current shader core mask
* @get_core_active: Function called to get the current overall GPU power
* state
* @flags: Field indicating flags for this policy
* @id: Field indicating an ID for this policy. This is not
* necessarily the same as its index in the list returned
* by kbase_pm_list_policies().
* It is used purely for debugging.
*/
struct kbase_pm_policy {
	/* The name of this policy */
	char *name;

	/**
	 * Function called when the policy is selected
	 *
	 * This should initialize the kbdev->pm.pm_policy_data structure. It
	 * should not attempt to make any changes to hardware state.
	 *
	 * It is undefined what state the cores are in when the function is
	 * called.
	 *
	 * @kbdev: The kbase device structure for the device (must be a
	 *         valid pointer)
	 */
	void (*init)(struct kbase_device *kbdev);

	/**
	 * Function called when the policy is unselected.
	 *
	 * @kbdev: The kbase device structure for the device (must be a
	 *         valid pointer)
	 */
	void (*term)(struct kbase_device *kbdev);

	/**
	 * Function called to get the current shader core mask
	 *
	 * The returned mask should meet or exceed (kbdev->shader_needed_bitmap
	 * | kbdev->shader_inuse_bitmap).
	 *
	 * @kbdev: The kbase device structure for the device (must be a
	 *         valid pointer)
	 *
	 * Return: The mask of shader cores to be powered
	 */
	u64 (*get_core_mask)(struct kbase_device *kbdev);

	/**
	 * Function called to get the current overall GPU power state
	 *
	 * This function should consider the state of kbdev->pm.active_count. If
	 * this count is greater than 0 then there is at least one active
	 * context on the device and the GPU should be powered. If it is equal
	 * to 0 then there are no active contexts and the GPU could be powered
	 * off if desired.
	 *
	 * @kbdev: The kbase device structure for the device (must be a
	 *         valid pointer)
	 *
	 * Return: true if the GPU should be powered, false otherwise
	 */
	bool (*get_core_active)(struct kbase_device *kbdev);

	/* Flags for this policy */
	kbase_pm_policy_flags flags;
	/* Policy ID, used purely for debugging */
	enum kbase_pm_policy_id id;
};
/* List of core availability policy IDs. Values start at 1 and are used
 * purely for debugging. */
enum kbase_pm_ca_policy_id {
	KBASE_PM_CA_POLICY_ID_FIXED = 1,
	KBASE_PM_CA_POLICY_ID_DEVFREQ,
	KBASE_PM_CA_POLICY_ID_RANDOM
};
typedef u32 kbase_pm_ca_policy_flags;
/**
* Maximum length of a CA policy names
*/
#define KBASE_PM_CA_MAX_POLICY_NAME_LEN 15
/**
 * struct kbase_pm_ca_policy - Core availability policy structure.
 *
 * Each core availability policy exposes a (static) instance of this structure
 * which contains function pointers to the policy's methods.
 *
 * @name: The name of this policy
 * @init: Function called when the policy is selected
 * @term: Function called when the policy is unselected
 * @get_core_mask: Function called to get the current shader core
 * availability mask
 * @update_core_status: Function called to update the current core status
 * @flags: Field indicating flags for this policy
 * @id: Field indicating an ID for this policy. This is not
 * necessarily the same as its index in the list returned
 * by kbase_pm_list_policies().
 * It is used purely for debugging.
 */
struct kbase_pm_ca_policy {
	char name[KBASE_PM_CA_MAX_POLICY_NAME_LEN + 1];
	/**
	 * Function called when the policy is selected
	 *
	 * This should initialize the kbdev->pm.ca_policy_data structure. It
	 * should not attempt to make any changes to hardware state.
	 *
	 * It is undefined what state the cores are in when the function is
	 * called.
	 *
	 * @kbdev: The kbase device structure for the device (must be a
	 *         valid pointer)
	 */
	void (*init)(struct kbase_device *kbdev);
	/**
	 * Function called when the policy is unselected.
	 *
	 * @kbdev: The kbase device structure for the device (must be a
	 *         valid pointer)
	 */
	void (*term)(struct kbase_device *kbdev);
	/**
	 * Function called to get the current shader core availability mask
	 *
	 * When a change in core availability is occurring, the policy must set
	 * kbdev->pm.ca_in_transition to true. This is to indicate that
	 * reporting changes in power state cannot be optimized out, even if
	 * kbdev->pm.desired_shader_state remains unchanged. This must be done
	 * by any functions internal to the Core Availability Policy that change
	 * the return value of kbase_pm_ca_policy::get_core_mask.
	 *
	 * @kbdev: The kbase device structure for the device (must be a
	 *         valid pointer)
	 *
	 * Return: The current core availability mask
	 */
	u64 (*get_core_mask)(struct kbase_device *kbdev);
	/**
	 * Function called to update the current core status
	 *
	 * If none of the cores in core group 0 are ready or transitioning, then
	 * the policy must ensure that the next call to get_core_mask does not
	 * return 0 for all cores in core group 0. It is an error to disable
	 * core group 0 through the core availability policy.
	 *
	 * When a change in core availability has finished, the policy must set
	 * kbdev->pm.ca_in_transition to false. This is to indicate that
	 * changes in power state can once again be optimized out when
	 * kbdev->pm.desired_shader_state is unchanged.
	 *
	 * @kbdev: The kbase device structure for the device
	 * (must be a valid pointer)
	 * @cores_ready: The mask of cores currently powered and
	 * ready to run jobs
	 * @cores_transitioning: The mask of cores currently transitioning
	 * power state
	 */
	void (*update_core_status)(struct kbase_device *kbdev, u64 cores_ready,
			u64 cores_transitioning);
	/** Field indicating flags for this policy */
	kbase_pm_ca_policy_flags flags;
	/**
	 * Field indicating an ID for this policy. This is not necessarily the
	 * same as its index in the list returned by kbase_pm_list_policies().
	 * It is used purely for debugging.
	 */
	enum kbase_pm_ca_policy_id id;
};
#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */

View File

@ -0,0 +1,78 @@
/*
*
* (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* A simple demand based power management policy
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
/*
 * Return the mask of shader cores the demand policy wants powered.
 *
 * While at least one context is active this is every core that is needed
 * or already in use; with no active contexts, no cores at all.
 */
static u64 demand_get_core_mask(struct kbase_device *kbdev)
{
	if (kbdev->pm.active_count == 0)
		return 0;

	return kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap;
}
/*
 * Decide whether the GPU as a whole should stay powered.
 *
 * The GPU is kept active while any context is active, any shader core is
 * needed or in use, or the tiler is needed or in use.
 */
static bool demand_get_core_active(struct kbase_device *kbdev)
{
	bool shaders_wanted = (kbdev->shader_needed_bitmap |
			       kbdev->shader_inuse_bitmap) != 0;
	bool tiler_wanted = kbdev->tiler_needed_cnt != 0 ||
			    kbdev->tiler_inuse_cnt != 0;

	return kbdev->pm.active_count != 0 || shaders_wanted || tiler_wanted;
}
/* Policy selected: the demand policy keeps no per-device state. */
static void demand_init(struct kbase_device *kbdev)
{
	(void)kbdev;	/* unused */
}
/* Policy unselected: nothing to tear down for the demand policy. */
static void demand_term(struct kbase_device *kbdev)
{
	(void)kbdev;	/* unused */
}
/*
* The struct kbase_pm_policy structure for the demand power policy.
*
* This is the static structure that defines the demand power policy's callback
* and name.
*/
const struct kbase_pm_policy kbase_pm_demand_policy_ops = {
"demand", /* name */
demand_init, /* init */
demand_term, /* term */
demand_get_core_mask, /* get_core_mask */
demand_get_core_active, /* get_core_active */
0u, /* flags */
KBASE_PM_POLICY_ID_DEMAND, /* id */
};
KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops);

View File

@ -0,0 +1,69 @@
/*
*
* (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* A simple demand based power management policy
*/
#ifndef MALI_KBASE_PM_DEMAND_H
#define MALI_KBASE_PM_DEMAND_H
/**
* DOC: Demand power management policy
*
* The demand power management policy has the following characteristics:
* - When KBase indicates that the GPU will be powered up, but we don't yet
* know which Job Chains are to be run:
* - The Shader Cores are not powered up
*
* - When KBase indicates that a set of Shader Cores are needed to submit the
* currently queued Job Chains:
* - Only those Shader Cores are powered up
*
* - When KBase indicates that the GPU need not be powered:
* - The Shader Cores are powered off, and the GPU itself is powered off too.
*
* Note:
* - KBase indicates the GPU will be powered up when it has a User Process that
* has just started to submit Job Chains.
*
* - KBase indicates the GPU need not be powered when all the Job Chains from
* User Processes have finished, and it is waiting for a User Process to
* submit some more Job Chains.
*/
/**
* struct kbasep_pm_policy_demand - Private structure for policy instance data
*
* @dummy: No state is needed, a dummy variable
*
* This contains data that is private to the demand power policy.
*/
struct kbasep_pm_policy_demand {
	int dummy;	/* unused -- placeholder so the struct is never empty */
};

/* Callback/name table implementing the demand policy; the definition lives
 * in the corresponding .c file. */
extern const struct kbase_pm_policy kbase_pm_demand_policy_ops;
#endif /* MALI_KBASE_PM_DEMAND_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,569 @@
/*
*
* (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Power management API definitions used internally by GPU backend
*/
#ifndef _KBASE_BACKEND_PM_INTERNAL_H_
#define _KBASE_BACKEND_PM_INTERNAL_H_
#include <mali_kbase_hwaccess_pm.h>
#include "mali_kbase_pm_ca.h"
#include "mali_kbase_pm_policy.h"
/**
* kbase_pm_dev_idle - The GPU is idle.
*
* The OS may choose to turn off idle devices
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_dev_idle(struct kbase_device *kbdev);
/**
* kbase_pm_dev_activate - The GPU is active.
*
* The OS should avoid opportunistically turning off the GPU while it is active
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_dev_activate(struct kbase_device *kbdev);
/**
* kbase_pm_get_present_cores - Get details of the cores that are present in
* the device.
*
* This function can be called by the active power policy to return a bitmask of
* the cores (of a specified type) present in the GPU device and also a count of
* the number of cores.
*
* @kbdev: The kbase device structure for the device (must be a valid
* pointer)
* @type: The type of core (see the enum kbase_pm_core_type enumeration)
*
* Return: The bit mask of cores present
*/
u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
enum kbase_pm_core_type type);
/**
* kbase_pm_get_active_cores - Get details of the cores that are currently
* active in the device.
*
* This function can be called by the active power policy to return a bitmask of
* the cores (of a specified type) that are actively processing work (i.e.
* turned on *and* busy).
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
* @type: The type of core (see the enum kbase_pm_core_type enumeration)
*
* Return: The bit mask of active cores
*/
u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
enum kbase_pm_core_type type);
/**
* kbase_pm_get_trans_cores - Get details of the cores that are currently
* transitioning between power states.
*
* This function can be called by the active power policy to return a bitmask of
* the cores (of a specified type) that are currently transitioning between
* power states.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
* @type: The type of core (see the enum kbase_pm_core_type enumeration)
*
* Return: The bit mask of transitioning cores
*/
u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
enum kbase_pm_core_type type);
/**
* kbase_pm_get_ready_cores - Get details of the cores that are currently
* powered and ready for jobs.
*
* This function can be called by the active power policy to return a bitmask of
* the cores (of a specified type) that are powered and ready for jobs (they may
* or may not be currently executing jobs).
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
* @type: The type of core (see the enum kbase_pm_core_type enumeration)
*
* Return: The bit mask of ready cores
*/
u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
enum kbase_pm_core_type type);
/**
* kbase_pm_clock_on - Turn the clock for the device on, and enable device
* interrupts.
*
* This function can be used by a power policy to turn the clock for the GPU on.
* It should be modified during integration to perform the necessary actions to
* ensure that the GPU is fully powered and clocked.
*
* @kbdev: The kbase device structure for the device (must be a valid
* pointer)
* @is_resume: true if clock on due to resume after suspend, false otherwise
*/
void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);
/**
* kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
* device off.
*
* This function can be used by a power policy to turn the clock for the GPU
* off. It should be modified during integration to perform the necessary
* actions to turn the clock off (if this is possible in the integration).
*
* @kbdev: The kbase device structure for the device (must be a valid
* pointer)
* @is_suspend: true if clock off due to suspend, false otherwise
*
* Return: true if clock was turned off, or
* false if clock can not be turned off due to pending page/bus fault
* workers. Caller must flush MMU workqueues and retry
*/
bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend);
/**
* kbase_pm_enable_interrupts - Enable interrupts on the device.
*
* Interrupts are also enabled after a call to kbase_pm_clock_on().
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
/**
* kbase_pm_disable_interrupts - Disable interrupts on the device.
*
* This prevents delivery of Power Management interrupts to the CPU so that
* kbase_pm_check_transitions_nolock() will not be called from the IRQ handler
* until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
*
* Interrupts are also disabled after a call to kbase_pm_clock_off().
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
/**
* kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
* that does not take the hwaccess_lock
*
* Caller must hold the hwaccess_lock.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);
/**
* kbase_pm_init_hw - Initialize the hardware.
* @kbdev: The kbase device structure for the device (must be a valid pointer)
* @flags: Flags specifying the type of PM init
*
* This function checks the GPU ID register to ensure that the GPU is supported
* by the driver and performs a reset on the device so that it is in a known
* state before the device is used.
*
* Return: 0 if the device is supported and successfully reset.
*/
int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);
/**
* kbase_pm_reset_done - The GPU has been reset successfully.
*
* This function must be called by the GPU interrupt handler when the
* RESET_COMPLETED bit is set. It signals to the power management initialization
* code that the GPU has been successfully reset.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_reset_done(struct kbase_device *kbdev);
/**
* kbase_pm_check_transitions_nolock - Check if there are any power transitions
* to make, and if so start them.
*
* This function will check the desired_xx_state members of
* struct kbase_pm_device_data and the actual status of the hardware to see if
* any power transitions can be made at this time to make the hardware state
* closer to the state desired by the power policy.
*
* The return value can be used to check whether all the desired cores are
* available, and so whether it's worth submitting a job (e.g. from a Power
* Management IRQ).
*
* Note that this still returns true when desired_xx_state has no
* cores. That is: of the no cores desired, none were *un*available. In
* this case, the caller may still need to try submitting jobs. This is because
* the Core Availability Policy might have taken us to an intermediate state
* where no cores are powered, before powering on more cores (e.g. for core
* rotation)
*
* The caller must hold kbase_device.pm.power_change_lock
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Return: non-zero when all desired cores are available. That is,
* it's worthwhile for the caller to submit a job.
* false otherwise
*/
bool kbase_pm_check_transitions_nolock(struct kbase_device *kbdev);
/**
* kbase_pm_check_transitions_sync - Synchronous and locking variant of
* kbase_pm_check_transitions_nolock()
*
* On returning, the desired state at the time of the call will have been met.
*
* There is nothing to stop the core being switched off by calls to
* kbase_pm_release_cores() or kbase_pm_unrequest_cores(). Therefore, the
* caller must have already made a call to
* kbase_pm_request_cores()/kbase_pm_request_cores_sync() previously.
*
* The usual use-case for this is to ensure cores are 'READY' after performing
* a GPU Reset.
*
* Unlike kbase_pm_check_transitions_nolock(), the caller must not hold
* kbase_device.pm.power_change_lock, because this function will take that
* lock itself.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_check_transitions_sync(struct kbase_device *kbdev);
/**
* kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
* where the caller must hold
* kbase_device.pm.power_change_lock
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);
/**
* kbase_pm_update_cores_state - Update the desired state of shader cores from
* the Power Policy, and begin any power
* transitions.
*
* This function will update the desired_xx_state members of
* struct kbase_pm_device_data by calling into the current Power Policy. It will
 * then begin power transitions to make the hardware achieve the desired shader
* core state.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_update_cores_state(struct kbase_device *kbdev);
/**
* kbase_pm_cancel_deferred_poweroff - Cancel any pending requests to power off
* the GPU and/or shader cores.
*
* This should be called by any functions which directly power off the GPU.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev);
/**
* kbasep_pm_init_core_use_bitmaps - Initialise data tracking the required
* and used cores.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbasep_pm_init_core_use_bitmaps(struct kbase_device *kbdev);
/**
* kbasep_pm_metrics_init - Initialize the metrics gathering framework.
*
* This must be called before other metric gathering APIs are called.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Return: 0 on success, error code on error
*/
int kbasep_pm_metrics_init(struct kbase_device *kbdev);
/**
* kbasep_pm_metrics_term - Terminate the metrics gathering framework.
*
* This must be called when metric gathering is no longer required. It is an
* error to call any metrics gathering function (other than
* kbasep_pm_metrics_init()) after calling this function.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbasep_pm_metrics_term(struct kbase_device *kbdev);
/**
* kbase_pm_report_vsync - Function to be called by the frame buffer driver to
* update the vsync metric.
*
* This function should be called by the frame buffer driver to update whether
* the system is hitting the vsync target or not. buffer_updated should be true
* if the vsync corresponded with a new frame being displayed, otherwise it
* should be false. This function does not need to be called every vsync, but
* only when the value of @buffer_updated differs from a previous call.
*
* @kbdev: The kbase device structure for the device (must be a
* valid pointer)
* @buffer_updated: True if the buffer has been updated on this VSync,
* false otherwise
*/
void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
/**
* kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
* the clock speed of the GPU.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* This function should be called regularly by the DVFS system to check whether
* the clock speed of the GPU needs updating.
*/
void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
/**
* kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
* needed
*
* If the caller is the first caller then the GPU cycle counters will be enabled
* along with the l2 cache
*
* The GPU must be powered when calling this function (i.e.
* kbase_pm_context_active() must have been called).
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
/**
* kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
* needed (l2 cache already on)
*
* This is a version of the above function
* (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the
* l2 cache is known to be on and assured to be on until the subsequent call of
* kbase_pm_release_gpu_cycle_counter() such as when a job is submitted. It does
* not sleep and can be called from atomic functions.
*
* The GPU must be powered when calling this function (i.e.
* kbase_pm_context_active() must have been called) and the l2 cache must be
* powered on.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);
/**
* kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
* longer in use
*
* If the caller is the last caller then the GPU cycle counters will be
* disabled. A request must have been made before a call to this.
*
* Caller must not hold the hwaccess_lock, as it will be taken in this function.
* If the caller is already holding this lock then
* kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
/**
* kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
* that does not take hwaccess_lock
*
* Caller must hold the hwaccess_lock.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
/**
* kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to
* complete
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev);
/**
* kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device
*
* Setup the power management callbacks and initialize/enable the runtime-pm
* for the Mali GPU platform device, using the callback function. This must be
* called before the kbase_pm_register_access_enable() function.
*
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: 0 on success, or an error code on failure
 */
int kbase_pm_runtime_init(struct kbase_device *kbdev);
/**
* kbase_pm_runtime_term - Disable runtime-pm for Mali GPU platform device
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_runtime_term(struct kbase_device *kbdev);
/**
* kbase_pm_register_access_enable - Enable access to GPU registers
*
* Enables access to the GPU registers before power management has powered up
* the GPU with kbase_pm_powerup().
*
* This results in the power management callbacks provided in the driver
* configuration to get called to turn on power and/or clocks to the GPU. See
* kbase_pm_callback_conf.
*
* This should only be used before power management is powered up with
* kbase_pm_powerup()
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_register_access_enable(struct kbase_device *kbdev);
/**
* kbase_pm_register_access_disable - Disable early register access
*
* Disables access to the GPU registers enabled earlier by a call to
* kbase_pm_register_access_enable().
*
* This results in the power management callbacks provided in the driver
* configuration to get called to turn off power and/or clocks to the GPU. See
* kbase_pm_callback_conf
*
* This should only be used before power management is powered up with
* kbase_pm_powerup()
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_register_access_disable(struct kbase_device *kbdev);
/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
* function */
/**
* kbase_pm_metrics_is_active - Check if the power management metrics
* collection is active.
*
* Note that this returns if the power management metrics collection was
* active at the time of calling, it is possible that after the call the metrics
* collection enable may have changed state.
*
* The caller must handle the consequence that the state may have changed.
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
* Return: true if metrics collection was active else false.
*/
bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);
/**
* kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
*
* @kbdev: The kbase device structure for the device (must be a valid
* pointer)
* @is_resume: true if power on due to resume after suspend,
* false otherwise
*/
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);
/**
* kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
* requested.
*
* @kbdev: The kbase device structure for the device (must be a valid
* pointer)
 * @is_suspend: true if power off due to suspend,
 * false otherwise
 *
 * Return: declared int (changed from void by the MALI_SEC integration noted
 * below); the return-value semantics are not visible here -- verify against
 * the implementation before relying on it.
 */
/* MALI_SEC_INTEGRATION */
int kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
unsigned long *total, unsigned long *busy);
void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev);
#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */
#ifdef CONFIG_MALI_MIDGARD_DVFS
/**
* kbase_platform_dvfs_event - Report utilisation to DVFS code
*
* Function provided by platform specific code when DVFS is enabled to allow
* the power management metrics system to report utilisation.
*
* @kbdev: The kbase device structure for the device (must be a
* valid pointer)
* @utilisation: The current calculated utilisation by the metrics system.
* @util_gl_share: The current calculated gl share of utilisation.
* @util_cl_share: The current calculated cl share of utilisation per core
* group.
* Return: Returns 0 on failure and non zero on success.
*/
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
u32 util_gl_share, u32 util_cl_share[2]);
#endif
void kbase_pm_power_changed(struct kbase_device *kbdev);
/**
* kbase_pm_metrics_update - Inform the metrics system that an atom is either
* about to be run or has just completed.
* @kbdev: The kbase device structure for the device (must be a valid pointer)
* @now: Pointer to the timestamp of the change, or NULL to use current time
*
* Caller must hold hwaccess_lock
*/
void kbase_pm_metrics_update(struct kbase_device *kbdev,
ktime_t *now);
/**
* kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
* If the GPU does not have coherency this is a no-op
* @kbdev: Device pointer
*
* This function should be called after L2 power up.
*/
void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
/**
* kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
* If the GPU does not have coherency this is a no-op
* @kbdev: Device pointer
*
* This function should be called before L2 power off.
*/
void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */

View File

@ -0,0 +1,427 @@
/*
*
* (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Metrics for power management
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_jm_rb.h>
/* When VSync is being hit aim for utilisation between 70-90% */
#define KBASE_PM_VSYNC_MIN_UTILISATION 70
#define KBASE_PM_VSYNC_MAX_UTILISATION 90
/* Otherwise aim for 10-40% */
#define KBASE_PM_NO_VSYNC_MIN_UTILISATION 10
#define KBASE_PM_NO_VSYNC_MAX_UTILISATION 40
/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
* This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
* under 11s. Exceeding this will cause overflow */
#define KBASE_PM_TIME_SHIFT 8
/* Maximum time between sampling of utilization data, without resetting the
 * counters. Compared against time_busy + time_idle, which are accumulated in
 * units of (1 << KBASE_PM_TIME_SHIFT) ns, so the original "ns = 100ms" note
 * did not match (100000 * 256 ns is ~25.6ms). TODO confirm intended period. */
#define MALI_UTILIZATION_MAX_PERIOD 100000 /* in units of (1 << KBASE_PM_TIME_SHIFT) ns */
#ifdef CONFIG_MALI_MIDGARD_DVFS
/*
 * hrtimer handler that drives periodic DVFS evaluation.
 *
 * Invokes kbase_pm_get_dvfs_action() for the owning device, then re-arms the
 * timer for another dvfs_period while metrics collection is still active.
 * timer_active is read under metrics->lock so the re-arm cannot race with
 * kbasep_pm_metrics_term() clearing the flag. Always returns
 * HRTIMER_NORESTART: the restart is done explicitly via hrtimer_start()
 * rather than by the hrtimer core.
 */
static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
{
	unsigned long flags;
	struct kbasep_pm_metrics_data *metrics;

	KBASE_DEBUG_ASSERT(timer != NULL);

	/* Recover the metrics block that embeds this timer. */
	metrics = container_of(timer, struct kbasep_pm_metrics_data, timer);
	kbase_pm_get_dvfs_action(metrics->kbdev);

	spin_lock_irqsave(&metrics->lock, flags);
	/* Do not re-arm if kbasep_pm_metrics_term() is tearing us down. */
	if (metrics->timer_active)
		hrtimer_start(timer,
			HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period),
			HRTIMER_MODE_REL);
	spin_unlock_irqrestore(&metrics->lock, flags);

	return HRTIMER_NORESTART;
}
#endif /* CONFIG_MALI_MIDGARD_DVFS */
/*
 * Initialise PM metrics gathering: zero all busy/idle accounting, open the
 * first sampling window at the current time and start the DVFS machinery.
 *
 * MALI_SEC_INTEGRATION: if the vendor callbacks provide pm_metrics_init it
 * replaces the built-in DVFS hrtimer; cl_boost_init, when present, is always
 * invoked afterwards.
 *
 * Return: always 0 (this implementation has no failure path).
 */
int kbasep_pm_metrics_init(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	kbdev->pm.backend.metrics.kbdev = kbdev;

	/* Start of the first accumulation window. */
	kbdev->pm.backend.metrics.time_period_start = ktime_get();
	kbdev->pm.backend.metrics.time_busy = 0;
	kbdev->pm.backend.metrics.time_idle = 0;
	kbdev->pm.backend.metrics.prev_busy = 0;
	kbdev->pm.backend.metrics.prev_idle = 0;
	kbdev->pm.backend.metrics.gpu_active = false;
	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
	kbdev->pm.backend.metrics.busy_cl[0] = 0;
	kbdev->pm.backend.metrics.busy_cl[1] = 0;
	kbdev->pm.backend.metrics.busy_gl = 0;
	spin_lock_init(&kbdev->pm.backend.metrics.lock);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->pm_metrics_init)
		kbdev->vendor_callbacks->pm_metrics_init(kbdev);
	else {
#ifdef CONFIG_MALI_MIDGARD_DVFS
		/* No vendor override: use the built-in DVFS sampling timer. */
		kbdev->pm.backend.metrics.timer_active = true;
		hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
							HRTIMER_MODE_REL);
		kbdev->pm.backend.metrics.timer.function = dvfs_callback;
		hrtimer_start(&kbdev->pm.backend.metrics.timer,
				HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
				HRTIMER_MODE_REL);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	}

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->cl_boost_init)
		kbdev->vendor_callbacks->cl_boost_init(kbdev);

	return 0;
}
KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
/*
 * Stop metrics gathering.
 *
 * timer_active is cleared under metrics.lock first, so that a concurrently
 * running dvfs_callback() observes it as false and does not re-arm the
 * timer; hrtimer_cancel() then waits for any callback already in flight.
 * The vendor pm_metrics_term callback, if any, runs last
 * (MALI_SEC_INTEGRATION).
 */
void kbasep_pm_metrics_term(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_MIDGARD_DVFS
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
	kbdev->pm.backend.metrics.timer_active = false;
	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
	hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
#endif /* CONFIG_MALI_MIDGARD_DVFS */

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->pm_metrics_term)
		kbdev->vendor_callbacks->pm_metrics_term(kbdev);
}
KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
 * function
 *
 * Fold the time elapsed since the start of the current sampling window into
 * either time_busy or time_idle (units of (1 << KBASE_PM_TIME_SHIFT) ns),
 * attribute busy time to the currently active CL/GL contexts, and restart
 * the window at 'now'.
 */
static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
								ktime_t now)
{
	ktime_t diff;

	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);

	diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start);
	/* 'now' can predate the window start (out-of-order timestamps);
	 * leave the counters untouched in that case. */
	if (ktime_to_ns(diff) < 0)
		return;

	if (kbdev->pm.backend.metrics.gpu_active) {
		u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);

		kbdev->pm.backend.metrics.time_busy += ns_time;
		if (kbdev->pm.backend.metrics.active_cl_ctx[0])
			kbdev->pm.backend.metrics.busy_cl[0] += ns_time;
		if (kbdev->pm.backend.metrics.active_cl_ctx[1])
			kbdev->pm.backend.metrics.busy_cl[1] += ns_time;
		/* Both GL slots accumulate into the single busy_gl counter. */
		if (kbdev->pm.backend.metrics.active_gl_ctx[0])
			kbdev->pm.backend.metrics.busy_gl += ns_time;
		if (kbdev->pm.backend.metrics.active_gl_ctx[1])
			kbdev->pm.backend.metrics.busy_gl += ns_time;
	} else {
		kbdev->pm.backend.metrics.time_idle += (u32) (ktime_to_ns(diff)
							>> KBASE_PM_TIME_SHIFT);
	}

	/* The next window starts where this one ended. */
	kbdev->pm.backend.metrics.time_period_start = now;
}
#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this
* function.
*/
/**
 * kbase_pm_reset_dvfs_utilisation_unlocked - Reset utilisation counters
 * @kbdev: Device pointer
 * @now:   Timestamp to use as the new period start
 *
 * Saves the current idle/busy totals into prev_idle/prev_busy (used by
 * kbase_pm_get_dvfs_utilisation() to extend short samples), then zeroes all
 * current counters.  Caller must hold kbdev->pm.backend.metrics.lock.
 */
static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev,
		ktime_t now)
{
	/* Store previous value */
	kbdev->pm.backend.metrics.prev_idle =
			kbdev->pm.backend.metrics.time_idle;
	kbdev->pm.backend.metrics.prev_busy =
			kbdev->pm.backend.metrics.time_busy;

	/* Reset current values */
	kbdev->pm.backend.metrics.time_period_start = now;
	kbdev->pm.backend.metrics.time_idle = 0;
	kbdev->pm.backend.metrics.time_busy = 0;
	kbdev->pm.backend.metrics.busy_cl[0] = 0;
	kbdev->pm.backend.metrics.busy_cl[1] = 0;
	kbdev->pm.backend.metrics.busy_gl = 0;
}
void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev)
{
unsigned long flags;
spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get());
spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
}
/**
 * kbase_pm_get_dvfs_utilisation - Sample total and busy time for devfreq
 * @kbdev:     Device pointer
 * @total_out: Out: total (busy + idle) accumulated time
 * @busy_out:  Out: busy accumulated time
 *
 * Brings the counters up to date, then either resets them (sample window
 * reached MALI_UTILIZATION_MAX_PERIOD) or, when the current window is
 * shorter than half the maximum, extends the sample with the previous
 * window's totals to smooth the result.
 */
void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
		unsigned long *total_out, unsigned long *busy_out)
{
	ktime_t now = ktime_get();
	unsigned long flags, busy, total;

	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
	kbase_pm_get_dvfs_utilisation_calc(kbdev, now);

	busy = kbdev->pm.backend.metrics.time_busy;
	total = busy + kbdev->pm.backend.metrics.time_idle;

	/* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default
	 * 100ms) */
	if (total >= MALI_UTILIZATION_MAX_PERIOD) {
		kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
	} else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
		/* Window too short for a stable reading: fold in the
		 * previous window saved by the reset helper */
		total += kbdev->pm.backend.metrics.prev_idle +
				kbdev->pm.backend.metrics.prev_busy;
		busy += kbdev->pm.backend.metrics.prev_busy;
	}

	*total_out = total;
	*busy_out = busy;
	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
}
#endif
#ifdef CONFIG_MALI_MIDGARD_DVFS
/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this
 * function.
 */
/**
 * kbase_pm_get_dvfs_utilisation_old - Compute utilisation percentages
 * @kbdev:         Device pointer
 * @util_gl_share: Out (optional): GL share of busy time, in percent
 * @util_cl_share: Out (optional): per-device CL shares, in percent
 * @now:           Timestamp to accumulate up to
 *
 * Return: overall utilisation in percent, or -1 if no time has been
 * accumulated (callers treat -1 as "no data, take no DVFS action").
 * The GL/CL shares are likewise set to -1 when no busy time was recorded.
 */
int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev,
		int *util_gl_share,
		int util_cl_share[2],
		ktime_t now)
{
	int utilisation;
	int busy;

	kbase_pm_get_dvfs_utilisation_calc(kbdev, now);

	if (kbdev->pm.backend.metrics.time_idle +
			kbdev->pm.backend.metrics.time_busy == 0) {
		/* No data - so we return NOP */
		utilisation = -1;
		if (util_gl_share)
			*util_gl_share = -1;
		if (util_cl_share) {
			util_cl_share[0] = -1;
			util_cl_share[1] = -1;
		}
		goto out;
	}

	utilisation = (100 * kbdev->pm.backend.metrics.time_busy) /
			(kbdev->pm.backend.metrics.time_idle +
			kbdev->pm.backend.metrics.time_busy);

	busy = kbdev->pm.backend.metrics.busy_gl +
		kbdev->pm.backend.metrics.busy_cl[0] +
		kbdev->pm.backend.metrics.busy_cl[1];

	if (busy != 0) {
		if (util_gl_share)
			*util_gl_share =
				(100 * kbdev->pm.backend.metrics.busy_gl) /
				busy;
		if (util_cl_share) {
			util_cl_share[0] =
				(100 * kbdev->pm.backend.metrics.busy_cl[0]) /
				busy;
			util_cl_share[1] =
				(100 * kbdev->pm.backend.metrics.busy_cl[1]) /
				busy;
		}
	} else {
		/* Busy but with no per-type attribution recorded */
		if (util_gl_share)
			*util_gl_share = -1;
		if (util_cl_share) {
			util_cl_share[0] = -1;
			util_cl_share[1] = -1;
		}
	}

out:
	return utilisation;
}
/**
 * kbase_pm_get_dvfs_action - Sample utilisation and drive a DVFS decision
 * @kbdev: Device pointer (must not be NULL)
 *
 * Computes current utilisation figures, clamps any "no data" (-1) results
 * to zero, forwards them to the platform DVFS handler and restarts the
 * accounting window.
 *
 * Cleanups vs the previous revision (no behavior change): this function is
 * already compiled only under CONFIG_MALI_MIDGARD_DVFS, so the nested
 * #ifdef around kbase_platform_dvfs_event() was redundant and has been
 * removed, along with a `goto out` that jumped to the very next statement.
 */
void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
{
	unsigned long flags;
	int utilisation, util_gl_share;
	int util_cl_share[2];
	ktime_t now;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);

	now = ktime_get();

	utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share,
			util_cl_share, now);

	/* -1 means "no data": report zero utilisation rather than a
	 * negative value to the platform handler */
	if (utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 ||
			util_cl_share[1] < 0) {
		utilisation = 0;
		util_gl_share = 0;
		util_cl_share[0] = 0;
		util_cl_share[1] = 0;
	}

	kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share,
			util_cl_share);

	kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);

	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
}
/**
 * kbase_pm_metrics_is_active - Report whether the metrics timer is active
 * @kbdev: Device pointer (must not be NULL)
 *
 * Return: the timer_active flag, read under the metrics spinlock.
 */
bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
{
	unsigned long irq_flags;
	bool timer_on;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, irq_flags);
	timer_on = kbdev->pm.backend.metrics.timer_active;
	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, irq_flags);

	return timer_on;
}
KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
/**
 * kbase_pm_metrics_active_calc - Update PM active counts based on currently
 *                                running atoms
 * @kbdev: Device pointer
 *
 * Inspects the first two ringbuffer entries of every job slot and marks the
 * corresponding CL/GL context-active flags (and, unless
 * CONFIG_MALI_SEC_UTILIZATION is set, the overall gpu_active flag) for any
 * atom in the SUBMITTED state.
 *
 * The caller must hold kbdev->pm.backend.metrics.lock
 */
static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
{
	int js;

	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);

	/* Clear all flags before re-deriving them from the slot state */
	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
#ifdef CONFIG_MALI_SEC_UTILIZATION
	/* Setting gpu_active here may show real GPU utilization but it can't
	 * make full utilization (100%) */
#else
	kbdev->pm.backend.metrics.gpu_active = false;
#endif

	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);

		/* Head atom may have just completed, so if it isn't running
		 * then try the next atom */
		if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED)
			katom = kbase_gpu_inspect(kbdev, js, 1);

		if (katom && katom->gpu_rb_state ==
				KBASE_ATOM_GPU_RB_SUBMITTED) {
			if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
				/* Compute (CL) atom: attribute to its device
				 * if it asked for a specific coherent group */
				int device_nr = (katom->core_req &
					BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)
						? katom->device_nr : 0;
				if (!WARN_ON(device_nr >= 2))
					kbdev->pm.backend.metrics.
						active_cl_ctx[device_nr] = 1;
			} else {
				/* Slot 2 should not be running non-compute
				 * atoms */
				if (!WARN_ON(js >= 2))
					kbdev->pm.backend.metrics.
						active_gl_ctx[js] = 1;
			}
#ifdef CONFIG_MALI_SEC_UTILIZATION
			/* Setting gpu_active here may show real GPU
			 * utilization but it can't make full
			 * utilization (100%) */
#else
			kbdev->pm.backend.metrics.gpu_active = true;
#endif
		}
	}
}
/**
 * kbase_pm_metrics_update - Refresh PM metrics state
 * @kbdev:     Device pointer
 * @timestamp: Time the slot change happened, or NULL to use "now"
 *
 * Called when a job is submitted to or removed from a GPU slot.  Closes
 * the current accounting interval at @timestamp, then recomputes which
 * CL/GL contexts are active.  Caller must hold hwaccess_lock.
 */
void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp)
{
	unsigned long flags;
	ktime_t now;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);

	if (!timestamp) {
		now = ktime_get();
		timestamp = &now;
	}

	/* Track how long CL and/or GL jobs have been busy for */
	kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp);

	kbase_pm_metrics_active_calc(kbdev);
	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
}

View File

@ -0,0 +1,984 @@
/*
*
* (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Power policy API implementations
*/
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_config_defaults.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/* Table of the power policies available on this build.  The first entry is
 * used as the default policy by kbase_pm_policy_init().  Composition varies
 * with the build: the no-MALI (model) build and PLATFORM_POWER_DOWN_ONLY
 * builds exclude some demand-based policies, and the non-customer-release
 * builds add extra test policies. */
static const struct kbase_pm_policy *const policy_list[] = {
#ifdef CONFIG_MALI_NO_MALI
	&kbase_pm_always_on_policy_ops,
	&kbase_pm_demand_policy_ops,
	&kbase_pm_coarse_demand_policy_ops,
#if !MALI_CUSTOMER_RELEASE
	&kbase_pm_demand_always_powered_policy_ops,
	&kbase_pm_fast_start_policy_ops,
#endif
#else				/* CONFIG_MALI_NO_MALI */
#if !PLATFORM_POWER_DOWN_ONLY
	&kbase_pm_demand_policy_ops,
#endif /* !PLATFORM_POWER_DOWN_ONLY */
	&kbase_pm_coarse_demand_policy_ops,
	&kbase_pm_always_on_policy_ops,
#if !MALI_CUSTOMER_RELEASE
#if !PLATFORM_POWER_DOWN_ONLY
	&kbase_pm_demand_always_powered_policy_ops,
	&kbase_pm_fast_start_policy_ops,
#endif /* !PLATFORM_POWER_DOWN_ONLY */
#endif
#endif /* CONFIG_MALI_NO_MALI */
};
/* The number of policies available in the system.
 * Derived directly from the number of entries in policy_list above.
 */
#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))

/* Function IDs for looking up Timeline Trace codes in
 * kbase_pm_change_state_trace_code */
enum kbase_pm_func_id {
	KBASE_PM_FUNC_ID_REQUEST_CORES_START,
	KBASE_PM_FUNC_ID_REQUEST_CORES_END,
	KBASE_PM_FUNC_ID_RELEASE_CORES_START,
	KBASE_PM_FUNC_ID_RELEASE_CORES_END,
	/* Note: kbase_pm_unrequest_cores() is on the slow path, and we neither
	 * expect to hit it nor tend to hit it very much anyway. We can detect
	 * whether we need more instrumentation by a difference between
	 * PM_CHECKTRANS events and PM_SEND/HANDLE_EVENT. */

	/* Must be the last */
	KBASE_PM_FUNC_ID_COUNT
};

/* State changes during request/unrequest/release-ing cores.
 * Used as a bitmask: shader and tiler bits may be combined. */
enum {
	KBASE_PM_CHANGE_STATE_SHADER = (1u << 0),
	KBASE_PM_CHANGE_STATE_TILER = (1u << 1),

	/* These two must be last */
	KBASE_PM_CHANGE_STATE_MASK = (KBASE_PM_CHANGE_STATE_TILER |
						KBASE_PM_CHANGE_STATE_SHADER),
	KBASE_PM_CHANGE_STATE_COUNT = KBASE_PM_CHANGE_STATE_MASK + 1
};

/* Bitmask of KBASE_PM_CHANGE_STATE_* values */
typedef u32 kbase_pm_change_state;
#ifdef CONFIG_MALI_TRACE_TIMELINE
/* Timeline Trace code lookups for each function, indexed by
 * [function id][shader/tiler state-change bitmask].  Entries for a
 * zero state change are 0 (no trace emitted). */
static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT]
					[KBASE_PM_CHANGE_STATE_COUNT] = {
	/* kbase_pm_request_cores */
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
			KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START,

	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][0] = 0,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
			KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END,

	/* kbase_pm_release_cores */
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][0] = 0,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
			KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START,

	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][0] = 0,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
			KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END
};

/**
 * kbase_timeline_pm_cores_func - Emit the timeline trace code for a core
 *                                state change
 * @kbdev:   Device pointer
 * @func_id: Which request/release function (and phase) is tracing
 * @state:   Non-zero shader/tiler change bitmask
 */
static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
		enum kbase_pm_func_id func_id,
		kbase_pm_change_state state)
{
	int trace_code;

	KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT);
	KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) ==
									state);

	trace_code = kbase_pm_change_state_trace_code[func_id][state];
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code);
}

#else /* CONFIG_MALI_TRACE_TIMELINE */
/* Timeline tracing disabled: no-op stub */
static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
		enum kbase_pm_func_id func_id, kbase_pm_change_state state)
{
}

#endif /* CONFIG_MALI_TRACE_TIMELINE */
/**
 * kbasep_pm_do_poweroff_cores - Process a poweroff request and power down any
 *                               requested shader cores
 * @kbdev: Device pointer
 *
 * Clears the pending shader/tiler poweroff bits out of the desired core
 * state and, if the desired state actually changed (or a core-availability
 * transition is in progress), kicks the state machine to transition the
 * hardware.  Caller must hold hwaccess_lock.
 */
static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
{
	u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
	u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	kbdev->pm.backend.desired_shader_state &=
			~kbdev->pm.backend.shader_poweroff_pending;
	kbdev->pm.backend.desired_tiler_state &=
			~kbdev->pm.backend.tiler_poweroff_pending;

	kbdev->pm.backend.shader_poweroff_pending = 0;
	kbdev->pm.backend.tiler_poweroff_pending = 0;

	if (prev_shader_state != kbdev->pm.backend.desired_shader_state ||
			prev_tiler_state !=
				kbdev->pm.backend.desired_tiler_state ||
			kbdev->pm.backend.ca_in_transition) {
		bool cores_are_available;

		KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
		KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);

		/* Don't need 'cores_are_available',
		 * because we don't return anything */
		CSTD_UNUSED(cores_are_available);
	}
}
/**
 * kbasep_pm_do_gpu_poweroff_callback - Periodic poweroff tick
 * @timer: The gpu_poweroff_timer embedded in the backend data
 *
 * Queues the GPU poweroff work when a GPU poweroff is pending, and counts
 * down the shader poweroff delay, powering down cores when it reaches zero.
 * Restarts itself while poweroff_timer_needed remains set.
 */
static enum hrtimer_restart
kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
{
	struct kbase_device *kbdev;
	unsigned long flags;

	kbdev = container_of(timer, struct kbase_device,
						pm.backend.gpu_poweroff_timer);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	/* It is safe for this call to do nothing if the work item is already
	 * queued. The worker function will read the most up-to-date state of
	 * kbdev->pm.backend.gpu_poweroff_pending under lock.
	 *
	 * If a state change occurs while the worker function is processing,
	 * this call will succeed as a work item can be requeued once it has
	 * started processing.
	 */
	if (kbdev->pm.backend.gpu_poweroff_pending)
		queue_work(kbdev->pm.backend.gpu_poweroff_wq,
					&kbdev->pm.backend.gpu_poweroff_work);

	if (kbdev->pm.backend.shader_poweroff_pending ||
			kbdev->pm.backend.tiler_poweroff_pending) {
		kbdev->pm.backend.shader_poweroff_pending_time--;

		KBASE_DEBUG_ASSERT(
				kbdev->pm.backend.shader_poweroff_pending_time
									>= 0);

		if (!kbdev->pm.backend.shader_poweroff_pending_time)
			kbasep_pm_do_poweroff_cores(kbdev);
	}

	if (kbdev->pm.backend.poweroff_timer_needed) {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);

		return HRTIMER_RESTART;
	}

	kbdev->pm.backend.poweroff_timer_running = false;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return HRTIMER_NORESTART;
}
/**
 * kbasep_pm_do_gpu_poweroff_wq - Workqueue handler that powers off the GPU
 * @data: The gpu_poweroff_work embedded in the backend data
 *
 * Decrements the pending-poweroff tick count; once it reaches zero and the
 * policy no longer wants the GPU active, stops the poweroff timer and powers
 * the GPU down.  Runs with pm.lock held around the whole sequence.
 */
static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
{
	unsigned long flags;
	struct kbase_device *kbdev;
	bool do_poweroff = false;

	kbdev = container_of(data, struct kbase_device,
						pm.backend.gpu_poweroff_work);

	mutex_lock(&kbdev->pm.lock);

	if (kbdev->pm.backend.gpu_poweroff_pending == 0) {
		mutex_unlock(&kbdev->pm.lock);
		return;
	}

	kbdev->pm.backend.gpu_poweroff_pending--;

	if (kbdev->pm.backend.gpu_poweroff_pending > 0) {
		mutex_unlock(&kbdev->pm.lock);
		return;
	}

	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	/* Only power off the GPU if a request is still pending */
	if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev))
		do_poweroff = true;

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (do_poweroff) {
		kbdev->pm.backend.poweroff_timer_needed = false;
		hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
		kbdev->pm.backend.poweroff_timer_running = false;

		/* Power off the GPU */
		kbase_pm_do_poweroff(kbdev, false);
	}

	mutex_unlock(&kbdev->pm.lock);
}
/**
 * kbase_pm_policy_init - Initialise power policy handling
 * @kbdev: Device pointer
 *
 * Creates the poweroff workqueue, sets up the poweroff hrtimer, installs
 * policy_list[0] as the initial policy and loads the default poweroff
 * tick configuration.
 *
 * Return: 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */
int kbase_pm_policy_init(struct kbase_device *kbdev)
{
	struct workqueue_struct *wq;

	/* MALI_SEC_INTEGRATION */
	/* alloc_workqueue option is changed to ordered */
	wq = alloc_workqueue("kbase_pm_do_poweroff",
			WQ_HIGHPRI | WQ_UNBOUND | __WQ_ORDERED, 1);
	if (!wq)
		return -ENOMEM;

	kbdev->pm.backend.gpu_poweroff_wq = wq;
	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_work,
			kbasep_pm_do_gpu_poweroff_wq);
	hrtimer_init(&kbdev->pm.backend.gpu_poweroff_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->pm.backend.gpu_poweroff_timer.function =
			kbasep_pm_do_gpu_poweroff_callback;
	/* The first policy in the table is the default */
	kbdev->pm.backend.pm_current_policy = policy_list[0];
	kbdev->pm.backend.pm_current_policy->init(kbdev);
	kbdev->pm.gpu_poweroff_time =
			HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
	kbdev->pm.poweroff_shader_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
	kbdev->pm.poweroff_gpu_ticks = DEFAULT_PM_POWEROFF_TICK_GPU;

	return 0;
}
/**
 * kbase_pm_policy_term - Tear down power policy handling
 * @kbdev: Device pointer
 *
 * Terminates the current policy and destroys the poweroff workqueue
 * created by kbase_pm_policy_init().
 */
void kbase_pm_policy_term(struct kbase_device *kbdev)
{
	kbdev->pm.backend.pm_current_policy->term(kbdev);
	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wq);
}
/**
 * kbase_pm_cancel_deferred_poweroff - Cancel any pending deferred poweroff
 * @kbdev: Device pointer
 *
 * Stops the poweroff timer and zeroes all pending poweroff state so a
 * queued/raced worker or timer tick becomes a no-op.  Caller must hold
 * pm.lock.
 */
void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	kbdev->pm.backend.poweroff_timer_needed = false;
	hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.poweroff_timer_running = false;

	/* If wq is already running but is held off by pm.lock, make sure it
	 * has no effect */
	kbdev->pm.backend.gpu_poweroff_pending = 0;

	kbdev->pm.backend.shader_poweroff_pending = 0;
	kbdev->pm.backend.tiler_poweroff_pending = 0;
	kbdev->pm.backend.shader_poweroff_pending_time = 0;

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
/**
 * kbase_pm_update_active - React to the policy's core_active decision
 * @kbdev: Device pointer
 *
 * Asks the current policy whether the GPU should be active.  If so, cancels
 * any pending poweroff, (re)starts the poweroff timer when needed, and
 * powers the GPU on (deferred if a poweroff is still in progress).  If not,
 * either schedules a tick-delayed poweroff or powers the GPU off
 * immediately, depending on poweroff_gpu_ticks.  Caller must hold pm.lock;
 * hwaccess_lock is taken internally and released before any poweron/poweroff
 * call.
 */
void kbase_pm_update_active(struct kbase_device *kbdev)
{
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;
	bool active;

	lockdep_assert_held(&pm->lock);

	/* pm_current_policy will never be NULL while pm.lock is held */
	KBASE_DEBUG_ASSERT(backend->pm_current_policy);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	active = backend->pm_current_policy->get_core_active(kbdev);

	if (active) {
		if (backend->gpu_poweroff_pending) {
			/* Cancel any pending power off request */
			backend->gpu_poweroff_pending = 0;

			/* If a request was pending then the GPU was still
			 * powered, so no need to continue */
			if (!kbdev->poweroff_pending) {
				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
						flags);
				return;
			}
		}

		if (!backend->poweroff_timer_running && !backend->gpu_powered &&
				(pm->poweroff_gpu_ticks ||
				pm->poweroff_shader_ticks)) {
			backend->poweroff_timer_needed = true;
			backend->poweroff_timer_running = true;
			hrtimer_start(&backend->gpu_poweroff_timer,
					pm->gpu_poweroff_time,
					HRTIMER_MODE_REL);
		}

		/* Power on the GPU and any cores requested by the policy */
		if (pm->backend.poweroff_wait_in_progress) {
			/* Poweroff still completing: defer the poweron */
			pm->backend.poweron_required = true;
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		} else {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
			kbase_pm_do_poweron(kbdev, false);
		}
	} else {
		/* It is an error for the power policy to power off the GPU
		 * when there are contexts active */
		KBASE_DEBUG_ASSERT(pm->active_count == 0);

		if (backend->shader_poweroff_pending ||
				backend->tiler_poweroff_pending) {
			backend->shader_poweroff_pending = 0;
			backend->tiler_poweroff_pending = 0;
			backend->shader_poweroff_pending_time = 0;
		}

		/* Request power off */
		if (pm->backend.gpu_powered) {
			if (pm->poweroff_gpu_ticks) {
				backend->gpu_poweroff_pending =
						pm->poweroff_gpu_ticks;
				backend->poweroff_timer_needed = true;
				if (!backend->poweroff_timer_running) {
					/* Start timer if not running (eg if
					 * power policy has been changed from
					 * always_on to something else). This
					 * will ensure the GPU is actually
					 * powered off */
					backend->poweroff_timer_running
							= true;
					hrtimer_start(
						&backend->gpu_poweroff_timer,
						pm->gpu_poweroff_time,
						HRTIMER_MODE_REL);
				}
				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
						flags);
			} else {
				/* MALI_SEC_INTEGRATION */
				if (pm->backend.poweroff_wait_in_progress && pm->backend.poweron_required) {
					pm->backend.poweron_required = false;
				}
				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
						flags);

				/* Power off the GPU immediately */
				kbase_pm_do_poweroff(kbdev, false);
			}
		} else {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		}
	}
}
/**
 * kbase_pm_update_cores_state_nolock - Recompute the desired shader/tiler
 *                                      state and act on it
 * @kbdev: Device pointer
 *
 * Derives the new desired shader/tiler bitmaps from the current policy and
 * core-availability mask (forcing everything off during a protected-mode
 * transition with no cores needed).  Cores being powered ON transition
 * immediately; cores being powered OFF are either deferred via the poweroff
 * tick counter or powered down at the end of the function.  Caller must
 * hold hwaccess_lock.  Returns early when no policy is installed or a
 * poweroff is already in progress.
 */
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
{
	u64 desired_bitmap;
	u64 desired_tiler_bitmap;
	bool cores_are_available;
	bool do_poweroff = false;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->pm.backend.pm_current_policy == NULL)
		return;
	if (kbdev->pm.backend.poweroff_wait_in_progress)
		return;

	if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap &&
			!kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt
			&& !kbdev->tiler_inuse_cnt) {
		/* We are trying to change in/out of protected mode - force all
		 * cores off so that the L2 powers down */
		desired_bitmap = 0;
		desired_tiler_bitmap = 0;
	} else {
		desired_bitmap =
		kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
		desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);

		if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
			desired_tiler_bitmap = 1;
		else
			desired_tiler_bitmap = 0;

		if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
			/* Unless XAFFINITY is supported, enable core 0 if tiler
			 * required, regardless of core availability */
			if (kbdev->tiler_needed_cnt > 0 ||
					kbdev->tiler_inuse_cnt > 0)
				desired_bitmap |= 1;
		}
	}

	if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u,
							(u32)desired_bitmap);

	/* Are any cores being powered on? */
	if (~kbdev->pm.backend.desired_shader_state & desired_bitmap ||
	    ~kbdev->pm.backend.desired_tiler_state & desired_tiler_bitmap ||
	    kbdev->pm.backend.ca_in_transition) {
		/* Check if we are powering off any cores before updating shader
		 * state */
		if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
				kbdev->pm.backend.desired_tiler_state &
				~desired_tiler_bitmap) {
			/* Start timer to power off cores */
			kbdev->pm.backend.shader_poweroff_pending |=
				(kbdev->pm.backend.desired_shader_state &
							~desired_bitmap);
			kbdev->pm.backend.tiler_poweroff_pending |=
				(kbdev->pm.backend.desired_tiler_state &
							~desired_tiler_bitmap);

			if (kbdev->pm.poweroff_shader_ticks &&
					!kbdev->protected_mode_transition)
				kbdev->pm.backend.shader_poweroff_pending_time =
						kbdev->pm.poweroff_shader_ticks;
			else
				do_poweroff = true;
		}

		kbdev->pm.backend.desired_shader_state = desired_bitmap;
		kbdev->pm.backend.desired_tiler_state = desired_tiler_bitmap;

		/* If any cores are being powered on, transition immediately */
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	} else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
				kbdev->pm.backend.desired_tiler_state &
				~desired_tiler_bitmap) {
		/* Start timer to power off cores */
		kbdev->pm.backend.shader_poweroff_pending |=
				(kbdev->pm.backend.desired_shader_state &
							~desired_bitmap);
		kbdev->pm.backend.tiler_poweroff_pending |=
				(kbdev->pm.backend.desired_tiler_state &
							~desired_tiler_bitmap);

		if (kbdev->pm.poweroff_shader_ticks &&
				!kbdev->protected_mode_transition)
			kbdev->pm.backend.shader_poweroff_pending_time =
					kbdev->pm.poweroff_shader_ticks;
		else
			kbasep_pm_do_poweroff_cores(kbdev);
	} else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 &&
			desired_tiler_bitmap != 0 &&
			kbdev->pm.backend.poweroff_timer_needed) {
		/* If power policy is keeping cores on despite there being no
		 * active contexts then disable poweroff timer as it isn't
		 * required.
		 * Only reset poweroff_timer_needed if we're not in the middle
		 * of the power off callback */
		kbdev->pm.backend.poweroff_timer_needed = false;
	}

	/* Ensure timer does not power off wanted cores and make sure to power
	 * off unwanted cores */
	if (kbdev->pm.backend.shader_poweroff_pending ||
			kbdev->pm.backend.tiler_poweroff_pending) {
		kbdev->pm.backend.shader_poweroff_pending &=
				~(kbdev->pm.backend.desired_shader_state &
								desired_bitmap);
		kbdev->pm.backend.tiler_poweroff_pending &=
				~(kbdev->pm.backend.desired_tiler_state &
						desired_tiler_bitmap);

		if (!kbdev->pm.backend.shader_poweroff_pending &&
				!kbdev->pm.backend.tiler_poweroff_pending)
			kbdev->pm.backend.shader_poweroff_pending_time = 0;
	}

	/* Shader poweroff is deferred to the end of the function, to eliminate
	 * issues caused by the core availability policy recursing into this
	 * function */
	if (do_poweroff)
		kbasep_pm_do_poweroff_cores(kbdev);

	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);
}
/**
 * kbase_pm_update_cores_state - Locked wrapper for the cores-state update
 * @kbdev: Device pointer
 *
 * Takes hwaccess_lock around kbase_pm_update_cores_state_nolock().
 */
void kbase_pm_update_cores_state(struct kbase_device *kbdev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);

	kbase_pm_update_cores_state_nolock(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
}
/**
 * kbase_pm_list_policies - Enumerate the available power policies
 * @list: Optional out-pointer; when non-NULL it is set to the static
 *        policy table
 *
 * Return: the number of entries in the policy table.
 */
int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
{
	if (list)
		*list = policy_list;

	return POLICY_COUNT;
}

KBASE_EXPORT_TEST_API(kbase_pm_list_policies);
/**
 * kbase_pm_get_policy - Get the currently installed power policy
 * @kbdev: Device pointer (must not be NULL)
 *
 * Return: the current policy (may be NULL transiently during a policy
 * change performed by kbase_pm_set_policy()).
 */
const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	return kbdev->pm.backend.pm_current_policy;
}

KBASE_EXPORT_TEST_API(kbase_pm_get_policy);
/**
 * kbase_pm_set_policy - Install a new power policy
 * @kbdev:      Device pointer (must not be NULL)
 * @new_policy: Policy to install (must not be NULL)
 *
 * Holds a fake active reference for the duration so the GPU is treated as
 * in use, sets pm_current_policy to NULL while terminating the old policy
 * and initialising the new one (IRQ handlers check for NULL), then re-runs
 * the active/cores-state updates to apply any state changes that were
 * deferred during the swap.
 */
void kbase_pm_set_policy(struct kbase_device *kbdev,
				const struct kbase_pm_policy *new_policy)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	const struct kbase_pm_policy *old_policy;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kbdev);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	old_policy = kbdev->pm.backend.pm_current_policy;
	kbdev->pm.backend.pm_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
								old_policy->id);
	if (old_policy->term)
		old_policy->term(kbdev);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u,
								new_policy->id);
	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.pm_current_policy = new_policy;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here. */
	kbase_pm_update_active(kbdev);
	kbase_pm_update_cores_state(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Now the policy change is finished, we release our fake context active
	 * reference */
	kbase_pm_context_idle(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_set_policy);
/* Check whether a state change has finished, and trace it as completed.
 * "Finished" means every core in the desired shader and tiler bitmaps is
 * now reported available. */
static void
kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev)
{
	if ((kbdev->shader_available_bitmap &
					kbdev->pm.backend.desired_shader_state)
				== kbdev->pm.backend.desired_shader_state &&
		(kbdev->tiler_available_bitmap &
					kbdev->pm.backend.desired_tiler_state)
				== kbdev->pm.backend.desired_tiler_state)
		kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
}
/**
 * kbase_pm_request_cores - Mark shader cores (and optionally the tiler) as
 *                          needed
 * @kbdev:          Device pointer (must not be NULL)
 * @tiler_required: True if the tiler is also needed
 * @shader_cores:   Bitmask of shader cores to request
 *
 * Increments the per-core needed counts; when any count goes 0 -> 1 the
 * needed bitmaps change and the cores-state update is run (bracketed by
 * timeline traces).  Caller must hold hwaccess_lock.
 */
void kbase_pm_request_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	u64 cores;

	kbase_pm_change_state change_gpu_state = 0u;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	lockdep_assert_held(&kbdev->hwaccess_lock);

	cores = shader_cores;
	while (cores) {
		int bitnum = fls64(cores) - 1;
		u64 bit = 1ULL << bitnum;

		/* It should be almost impossible for this to overflow. It would
		 * require 2^32 atoms to request a particular core, which would
		 * require 2^24 contexts to submit. This would require an amount
		 * of memory that is impossible on a 32-bit system and extremely
		 * unlikely on a 64-bit system. */
		int cnt = ++kbdev->shader_needed_cnt[bitnum];

		if (1 == cnt) {
			kbdev->shader_needed_bitmap |= bit;
			change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
		}

		cores &= ~bit;
	}

	if (tiler_required) {
		int cnt = ++kbdev->tiler_needed_cnt;

		if (1 == cnt)
			change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;

		KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0);
	}

	if (change_gpu_state) {
		KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_SHADER_NEEDED, NULL,
				NULL, 0u, (u32) kbdev->shader_needed_bitmap);

		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_REQUEST_CORES_START,
							change_gpu_state);
		kbase_pm_update_cores_state_nolock(kbdev);
		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_REQUEST_CORES_END,
							change_gpu_state);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
/**
 * kbase_pm_unrequest_cores - Undo a kbase_pm_request_cores() call
 * @kbdev:          Device pointer (must not be NULL)
 * @tiler_required: True if the matching request included the tiler
 * @shader_cores:   Bitmask of shader cores to unrequest
 *
 * Decrements the per-core needed counts; when any count reaches zero the
 * needed bitmaps change and the cores-state update is run.  Caller must
 * hold hwaccess_lock.
 */
void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	kbase_pm_change_state change_gpu_state = 0u;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	lockdep_assert_held(&kbdev->hwaccess_lock);

	while (shader_cores) {
		int bitnum = fls64(shader_cores) - 1;
		u64 bit = 1ULL << bitnum;
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);

		cnt = --kbdev->shader_needed_cnt[bitnum];

		if (0 == cnt) {
			kbdev->shader_needed_bitmap &= ~bit;

			change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
		}

		shader_cores &= ~bit;
	}

	if (tiler_required) {
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);

		cnt = --kbdev->tiler_needed_cnt;

		if (0 == cnt)
			change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
	}

	if (change_gpu_state) {
		KBASE_TRACE_ADD(kbdev, PM_UNREQUEST_CHANGE_SHADER_NEEDED, NULL,
				NULL, 0u, (u32) kbdev->shader_needed_bitmap);

		kbase_pm_update_cores_state_nolock(kbdev);

		/* Trace that any state change effectively completes immediately
		 * - no-one will wait on the state change */
		kbase_pm_trace_check_and_finish_state_change(kbdev);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores);
/**
 * kbase_pm_register_inuse_cores - Move requested cores from "needed" to
 *                                 "in use"
 * @kbdev:          Device pointer
 * @tiler_required: True if the tiler is also needed
 * @shader_cores:   Bitmask of shader cores to mark in use
 *
 * Return: KBASE_CORES_READY when the cores are available and have been
 * marked in use; KBASE_CORES_NOT_READY when they are still transitioning
 * (or a poweroff/policy change is in flight); KBASE_NEW_AFFINITY when PM is
 * not even attempting to power the requested cores, so the caller should
 * pick a new job affinity.  Caller must hold hwaccess_lock.
 */
enum kbase_pm_cores_ready
kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	u64 prev_shader_needed;	/* Just for tracing */
	u64 prev_shader_inuse;	/* Just for tracing */

	lockdep_assert_held(&kbdev->hwaccess_lock);

	prev_shader_needed = kbdev->shader_needed_bitmap;
	prev_shader_inuse = kbdev->shader_inuse_bitmap;

	/* If desired_shader_state does not contain the requested cores, then
	 * power management is not attempting to powering those cores (most
	 * likely due to core availability policy) and a new job affinity must
	 * be chosen */
	if ((kbdev->pm.backend.desired_shader_state & shader_cores) !=
							shader_cores) {
		return (kbdev->pm.backend.poweroff_wait_in_progress ||
				kbdev->pm.backend.pm_current_policy == NULL) ?
				KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY;
	}

	if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores ||
	    (tiler_required && !kbdev->tiler_available_bitmap)) {
		/* Trace ongoing core transition */
		kbase_timeline_pm_l2_transition_start(kbdev);
		return KBASE_CORES_NOT_READY;
	}

	/* If we started to trace a state change, then trace it has being
	 * finished by now, at the very latest */
	kbase_pm_trace_check_and_finish_state_change(kbdev);
	/* Trace core transition done */
	kbase_timeline_pm_l2_transition_done(kbdev);

	while (shader_cores) {
		int bitnum = fls64(shader_cores) - 1;
		u64 bit = 1ULL << bitnum;
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);

		cnt = --kbdev->shader_needed_cnt[bitnum];

		if (0 == cnt)
			kbdev->shader_needed_bitmap &= ~bit;

		/* shader_inuse_cnt should not overflow because there can only
		 * be a very limited number of jobs on the h/w at one time */

		kbdev->shader_inuse_cnt[bitnum]++;
		kbdev->shader_inuse_bitmap |= bit;

		shader_cores &= ~bit;
	}

	if (tiler_required) {
		KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);

		--kbdev->tiler_needed_cnt;

		kbdev->tiler_inuse_cnt++;

		KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt != 0);
	}

	if (prev_shader_needed != kbdev->shader_needed_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_NEEDED, NULL,
				NULL, 0u, (u32) kbdev->shader_needed_bitmap);

	if (prev_shader_inuse != kbdev->shader_inuse_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL,
				NULL, 0u, (u32) kbdev->shader_inuse_bitmap);

	return KBASE_CORES_READY;
}

KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores);
/**
 * kbase_pm_release_cores - Release cores previously marked in use
 * @kbdev:          Device pointer (must not be NULL)
 * @tiler_required: True if the matching register included the tiler
 * @shader_cores:   Bitmask of shader cores to release
 *
 * Decrements the per-core in-use counts; when any count reaches zero the
 * in-use bitmaps change and the cores-state update is run (bracketed by
 * timeline traces).  Caller must hold hwaccess_lock.
 */
void kbase_pm_release_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	kbase_pm_change_state change_gpu_state = 0u;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	lockdep_assert_held(&kbdev->hwaccess_lock);

	while (shader_cores) {
		int bitnum = fls64(shader_cores) - 1;
		u64 bit = 1ULL << bitnum;
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->shader_inuse_cnt[bitnum] > 0);

		cnt = --kbdev->shader_inuse_cnt[bitnum];

		if (0 == cnt) {
			kbdev->shader_inuse_bitmap &= ~bit;
			change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
		}

		shader_cores &= ~bit;
	}

	if (tiler_required) {
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt > 0);

		cnt = --kbdev->tiler_inuse_cnt;

		if (0 == cnt)
			change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
	}

	if (change_gpu_state) {
		KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_INUSE, NULL,
				NULL, 0u, (u32) kbdev->shader_inuse_bitmap);

		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_RELEASE_CORES_START,
							change_gpu_state);
		kbase_pm_update_cores_state_nolock(kbdev);
		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_RELEASE_CORES_END,
							change_gpu_state);

		/* Trace that any state change completed immediately */
		kbase_pm_trace_check_and_finish_state_change(kbdev);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_release_cores);
/*
 * Synchronous variant of kbase_pm_request_cores(): wait for any pending
 * power-off to finish, register the request under the hwaccess lock, then
 * block until the resulting power transitions have completed.
 */
void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
				bool tiler_required,
				u64 shader_cores)
{
	unsigned long irq_flags;

	kbase_pm_wait_for_poweroff_complete(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
	kbase_pm_request_cores(kbdev, tiler_required, shader_cores);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);

	kbase_pm_check_transitions_sync(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync);
/*
 * Take a reference on the l2 caches and block until they are powered.
 * The reference is dropped by kbase_pm_release_l2_caches().
 */
void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
{
	unsigned long flags;
	u32 prior_l2_users_count;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	/* Post-increment: prior count of zero means we are the first user and
	 * must kick off the power-up ourselves. */
	prior_l2_users_count = kbdev->l2_users_count++;

	/* Catch reference-count wrap-around. */
	KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0);

	/* if the GPU is reset while the l2 is on, l2 will be off but
	 * prior_l2_users_count will be > 0. l2_available_bitmap will have been
	 * set to 0 though by kbase_pm_init_hw */
	if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
		kbase_pm_check_transitions_nolock(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* Sleep (outside the spinlock) until the backend reports l2 power. */
	wait_event(kbdev->pm.backend.l2_powered_wait,
					kbdev->pm.backend.l2_powered == 1);

	/* Trace that any state change completed immediately */
	kbase_pm_trace_check_and_finish_state_change(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches);
/*
 * Take an extra l2 reference without triggering a power-up: the caller
 * guarantees the l2 is already on. hwaccess_lock must be held.
 */
void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	++kbdev->l2_users_count;
}

KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on);
/*
 * Drop one l2 reference taken via kbase_pm_request_l2_caches*(). When the
 * last user goes away, let the state machine power the caches down.
 * hwaccess_lock must be held.
 */
void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0);

	if (--kbdev->l2_users_count == 0) {
		kbase_pm_check_transitions_nolock(kbdev);

		/* Trace that any state change completed immediately */
		kbase_pm_trace_check_and_finish_state_change(kbdev);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches);

View File

@ -0,0 +1,232 @@
/*
*
* (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
/*
* Power policy API definitions
*/
#ifndef _KBASE_PM_POLICY_H_
#define _KBASE_PM_POLICY_H_
/**
* kbase_pm_policy_init - Initialize power policy framework
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Must be called before calling any other policy function
*
* Return: 0 if the power policy framework was successfully
* initialized, -errno otherwise.
*/
int kbase_pm_policy_init(struct kbase_device *kbdev);
/**
* kbase_pm_policy_term - Terminate power policy framework
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_policy_term(struct kbase_device *kbdev);
/**
* kbase_pm_update_active - Update the active power state of the GPU
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Calls into the current power policy
*/
void kbase_pm_update_active(struct kbase_device *kbdev);
/**
* kbase_pm_update_cores - Update the desired core state of the GPU
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Calls into the current power policy
*/
void kbase_pm_update_cores(struct kbase_device *kbdev);
/*
 * Result of kbase_pm_register_inuse_cores(): readiness of the requested
 * core set.
 */
enum kbase_pm_cores_ready {
	KBASE_CORES_NOT_READY = 0,	/* Cores still transitioning; retry later */
	KBASE_NEW_AFFINITY = 1,		/* Requested affinity not allowed; choose another */
	KBASE_CORES_READY = 2		/* Requested cores are available */
};
/**
* kbase_pm_request_cores_sync - Synchronous variant of kbase_pm_request_cores()
*
* @kbdev: The kbase device structure for the device
* @tiler_required: true if the tiler is required, false otherwise
* @shader_cores: A bitmask of shader cores which are necessary for the job
*
* When this function returns, the @shader_cores will be in the READY state.
*
 * This is a safe variant of kbase_pm_check_transitions_sync(): it handles the
* work of ensuring the requested cores will remain powered until a matching
* call to kbase_pm_unrequest_cores()/kbase_pm_release_cores() (as appropriate)
* is made.
*/
void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores);
/**
* kbase_pm_request_cores - Mark one or more cores as being required
* for jobs to be submitted
*
* @kbdev: The kbase device structure for the device
* @tiler_required: true if the tiler is required, false otherwise
* @shader_cores: A bitmask of shader cores which are necessary for the job
*
* This function is called by the job scheduler to mark one or more cores as
* being required to submit jobs that are ready to run.
*
* The cores requested are reference counted and a subsequent call to
* kbase_pm_register_inuse_cores() or kbase_pm_unrequest_cores() should be
* made to dereference the cores as being 'needed'.
*
* The active power policy will meet or exceed the requirements of the
* requested cores in the system. Any core transitions needed will be begun
* immediately, but they might not complete/the cores might not be available
* until a Power Management IRQ.
*
* Return: 0 if the cores were successfully requested, or -errno otherwise.
*/
void kbase_pm_request_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores);
/**
* kbase_pm_unrequest_cores - Unmark one or more cores as being required for
* jobs to be submitted.
*
* @kbdev: The kbase device structure for the device
* @tiler_required: true if the tiler is required, false otherwise
* @shader_cores: A bitmask of shader cores (as given to
* kbase_pm_request_cores() )
*
* This function undoes the effect of kbase_pm_request_cores(). It should be
* used when a job is not going to be submitted to the hardware (e.g. the job is
* cancelled before it is enqueued).
*
* The active power policy will meet or exceed the requirements of the
* requested cores in the system. Any core transitions needed will be begun
* immediately, but they might not complete until a Power Management IRQ.
*
* The policy may use this as an indication that it can power down cores.
*/
void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores);
/**
* kbase_pm_register_inuse_cores - Register a set of cores as in use by a job
*
* @kbdev: The kbase device structure for the device
* @tiler_required: true if the tiler is required, false otherwise
* @shader_cores: A bitmask of shader cores (as given to
* kbase_pm_request_cores() )
*
* This function should be called after kbase_pm_request_cores() when the job
* is about to be submitted to the hardware. It will check that the necessary
* cores are available and if so update the 'needed' and 'inuse' bitmasks to
* reflect that the job is now committed to being run.
*
* If the necessary cores are not currently available then the function will
* return %KBASE_CORES_NOT_READY and have no effect.
*
* Return: %KBASE_CORES_NOT_READY if the cores are not immediately ready,
*
* %KBASE_NEW_AFFINITY if the affinity requested is not allowed,
*
* %KBASE_CORES_READY if the cores requested are already available
*/
enum kbase_pm_cores_ready kbase_pm_register_inuse_cores(
struct kbase_device *kbdev,
bool tiler_required,
u64 shader_cores);
/**
* kbase_pm_release_cores - Release cores after a job has run
*
* @kbdev: The kbase device structure for the device
* @tiler_required: true if the tiler is required, false otherwise
* @shader_cores: A bitmask of shader cores (as given to
* kbase_pm_register_inuse_cores() )
*
* This function should be called when a job has finished running on the
* hardware. A call to kbase_pm_register_inuse_cores() must have previously
* occurred. The reference counts of the specified cores will be decremented
* which may cause the bitmask of 'inuse' cores to be reduced. The power policy
* may then turn off any cores which are no longer 'inuse'.
*/
void kbase_pm_release_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores);
/**
* kbase_pm_request_l2_caches - Request l2 caches
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Request the use of l2 caches for all core groups, power up, wait and prevent
* the power manager from powering down the l2 caches.
*
* This tells the power management that the caches should be powered up, and
* they should remain powered, irrespective of the usage of shader cores. This
* does not return until the l2 caches are powered up.
*
* The caller must call kbase_pm_release_l2_caches() when they are finished
* to allow normal power management of the l2 caches to resume.
*
* This should only be used when power management is active.
*/
void kbase_pm_request_l2_caches(struct kbase_device *kbdev);
/**
* kbase_pm_request_l2_caches_l2_is_on - Request l2 caches but don't power on
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Increment the count of l2 users but do not attempt to power on the l2
*
* It is the callers responsibility to ensure that the l2 is already powered up
* and to eventually call kbase_pm_release_l2_caches()
*/
void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev);
/**
 * kbase_pm_release_l2_caches - Release l2 caches
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
* Release the use of l2 caches for all core groups and allow the power manager
* to power them down when necessary.
*
* This tells the power management that the caches can be powered down if
* necessary, with respect to the usage of shader cores.
*
* The caller must have called kbase_pm_request_l2_caches() prior to a call
* to this.
*
* This should only be used when power management is active.
*/
void kbase_pm_release_l2_caches(struct kbase_device *kbdev);
#endif /* _KBASE_PM_POLICY_H_ */

View File

@ -0,0 +1,108 @@
/*
*
* (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* SPDX-License-Identifier: GPL-2.0
*
*/
#include <mali_kbase.h>
#include <mali_kbase_hwaccess_time.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/*
 * kbase_backend_get_gpu_time - Sample the GPU cycle counter, GPU timestamp
 * and the CPU raw-monotonic clock together.
 * @kbdev:         Device pointer
 * @cycle_counter: Output: 64-bit GPU cycle count
 * @system_time:   Output: 64-bit GPU timestamp
 * @ts:            Output: CPU time recorded alongside the GPU samples
 *
 * A cycle-counter request is held across the register reads so the counter
 * is running while sampled. Each 64-bit value is assembled from two 32-bit
 * registers using a hi/lo/hi read sequence that retries if the high word
 * changed mid-read (i.e. the low word wrapped between reads).
 */
void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
				u64 *system_time, struct timespec *ts)
{
	u32 hi1, hi2;

	kbase_pm_request_gpu_cycle_counter(kbdev);

	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled
	 * correctly */
	do {
		hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
									NULL);
		*cycle_counter = kbase_reg_read(kbdev,
					GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
		hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
									NULL);
		/* Safe to combine: the loop only exits when hi1 == hi2 */
		*cycle_counter |= (((u64) hi1) << 32);
	} while (hi1 != hi2);

	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled
	 * correctly */
	do {
		hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
									NULL);
		*system_time = kbase_reg_read(kbdev,
					GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
		hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
									NULL);
		*system_time |= (((u64) hi1) << 32);
	} while (hi1 != hi2);

	/* Record the CPU's idea of current time */
	getrawmonotonic(ts);

	kbase_pm_release_gpu_cycle_counter(kbdev);
}
/**
 * kbase_wait_write_flush - Wait for GPU write flush
 * @kctx: Context pointer
 *
 * Busy-wait for roughly 1000 GPU clock cycles, which is known to give the
 * GPU enough time to flush its write buffer.
 *
 * Only in use for BASE_HW_ISSUE_6367
 *
 * Note : If GPU resets occur then the counters are reset to zero, the delay
 * may not be as expected.
 */
#ifndef CONFIG_MALI_NO_MALI
void kbase_wait_write_flush(struct kbase_context *kctx)
{
	u32 start = 0;

	/*
	 * The caller must be holding onto the kctx or the call is from
	 * userspace.
	 */
	kbase_pm_context_active(kctx->kbdev);
	kbase_pm_request_gpu_cycle_counter(kctx->kbdev);

	for (;;) {
		u32 now = kbase_reg_read(kctx->kbdev,
					GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);

		/* The first (non-zero) sample becomes the reference point. */
		if (start == 0) {
			start = now;
			continue;
		}

		/* Unsigned subtraction copes with counter wrap-around. */
		if ((now - start) > 1000)
			break;
	}

	kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
	kbase_pm_context_idle(kctx->kbdev);
}
#endif /* CONFIG_MALI_NO_MALI */

Some files were not shown because too many files have changed in this diff Show More